code stringlengths 2.5k 150k | kind stringclasses 1 value |
|---|---|
```
import json
import os
import random
import re
from itertools import product
import numpy as np
import pandas as pd
from more_itertools import distinct_combinations
from plotnine import *
from sklearn import feature_extraction, metrics
ROOT_PATH = os.path.dirname(os.path.abspath(os.getcwd()))
def inspect_df(df: pd.DataFrame, n: int = 5) -> pd.DataFrame:
    """Print a DataFrame's shape, then return its first `n` rows."""
    dims = df.shape
    print(f'shape: {dims}')
    return df.head(n)
```
# Table of Contents
- [Exploratory Data Analysis](#Exploratory-Data-Analysis)
- [A Baseline Model: random classifier](#A-Baseline-Model:-random-classifier)
- [A Better Baseline Model: \<page title\> similarity](#A-Better-Baseline-Model:-<page-title>-similarity)
- [Feature Extraction](#Feature-Extraction)
# Exploratory Data Analysis
```
def json_loader(dirpath: str) -> list:
    """Discover all .json files under `dirpath` and gather their data.

    Expects a two-level layout: `dirpath/<site>/<spec>.json`.

    Args:
        dirpath: directory containing one sub-directory per source site.

    Returns:
        A list of spec dicts. Each dict gains an 'id' key formatted as
        '<site>//<filename without extension>' to match the identifiers
        used in the labelled dataset.
    """
    data = []
    for subdir in os.listdir(dirpath):
        temp = os.path.join(dirpath, subdir)
        for datafile in os.listdir(temp):
            # Skip anything that is not a JSON spec file.
            if not datafile.endswith('.json'):
                continue
            with open(os.path.join(temp, datafile), 'r') as f:
                spec = json.load(f)  # parse straight from the file handle
            # keep global identifier, format it as in the labelled dataset
            spec['id'] = subdir + '//' + os.path.splitext(datafile)[0]
            data.append(spec)
    return data
# Load every camera spec JSON into a single DataFrame.
data = json_loader(dirpath=os.path.join(ROOT_PATH, 'data/2013_camera_specs'))
specs = pd.DataFrame(data)
inspect_df(specs)
# Use the '<site>//<file>' identifier as the index, matching the labels file.
specs.set_index('id', inplace=True)
# Labelled pairs: left_spec_id, right_spec_id and a binary 'label' column.
labels = pd.read_csv(os.path.join(ROOT_PATH, 'data/sigmod_medium_labelled_dataset.csv'))
inspect_df(labels)
# Class balance of the labelled pairs (label == 1 means "same product").
matched_products = labels['label'] == 1
matched_products.value_counts()
ggplot() + \
    geom_bar(mapping=aes(x=matched_products), colour='white') + \
    labs(title='same products ?', x='') + \
    coord_flip()
# Per-attribute summary; 'count' is the number of non-null values.
specs_info = specs.describe()
specs_info = specs_info.transpose()
inspect_df(specs_info)
# Support = fraction of products that actually carry the attribute.
specs_info['support'] = specs_info['count'] / len(specs.index)
specs_info = specs_info.sort_values(by='support', ascending=False)
specs_info.head(10)
top10 = list(specs_info.head(10).index)
```
These are the 10 camera specs (attributes) with the highest support.
```
# Peek at the 10 best-supported attributes across all products.
specs[top10]
```
# A Baseline Model: random classifier
```
inspect_df(labels)
def random_classifier(*args):
    """A random classifier: ignores its input entirely.

    Returns:
        bool: True or False (i.e. whether the two products are the same),
        each with probability 0.5.
    """
    return random.random() > 0.5
# One prediction per labelled pair.
predictions = labels.apply(random_classifier, axis=1)
# NOTE: sklearn metrics expect (y_true, y_pred) -- ground truth first.
# The original call passed predictions first, which silently swapped the
# meaning of precision and recall.
metrics.accuracy_score(labels['label'], predictions)
metrics.precision_score(labels['label'], predictions)
metrics.recall_score(labels['label'], predictions)
metrics.f1_score(labels['label'], predictions)
```
This is a good indication of the model performance: **f1 = 0.1337**
```
# Rows = ground truth, columns = predictions (sklearn convention: y_true first).
metrics.confusion_matrix(labels['label'], predictions)
```
This is the initial, baseline performance. Our model should easily outperform this random classifier.
# A Better Baseline Model: \<page title\> similarity
```
# Distribution of page-title length, in characters.
ggplot() + \
    geom_histogram(mapping=aes(x=specs['<page title>'].map(len)), colour='white', bins=30) + \
    xlab('<page title>: no. of characters ')
# Distribution of page-title length, in words.
ggplot() + \
    geom_histogram(mapping=aes(x=specs['<page title>'].map(lambda title: len(title.split()))), colour='white', bins=30) + \
    xlab('<page title>: no. of words')
```
We will use a BoW model + a text similarity algorithm + a suitable threshold in order to assert whether two cameras are the same.
```
def get_corpus(data: pd.DataFrame) -> np.ndarray:
    """Return the raw '<page title>' strings as a NumPy array."""
    titles = data['<page title>']
    return titles.values
# Fit a bag-of-words vocabulary on all page titles.
vectorizer = feature_extraction.text.CountVectorizer()
vectorizer.fit(get_corpus(specs))
def create_dataset(data: pd.DataFrame, labels: pd.DataFrame, features: list):
    """Join the selected `features` of both specs onto each labelled pair.

    `data` is expected to be indexed by spec 'id'. The result is indexed by
    ('left_spec_id', 'right_spec_id'), carries one '_left'/'_right' column
    per feature, and keeps a single 'label' column.
    """
    selected = data[features]
    halves = {
        side: pd.merge(labels, selected, how='inner',
                       left_on=f'{side}_spec_id', right_on='id')
        for side in ('left', 'right')
    }
    dataset = pd.merge(halves['left'], halves['right'], how='inner',
                       on=('left_spec_id', 'right_spec_id'),
                       suffixes=('_left', '_right'))
    # The two label copies are identical after the join; keep a single one.
    dataset['label'] = dataset['label_left']
    dataset = dataset.drop(['label_left', 'label_right'], axis=1)
    return dataset.set_index(['left_spec_id', 'right_spec_id'])
# NOTE(review): top10[0] passes a single column name (a string), so
# data[features] inside create_dataset is a Series rather than a DataFrame --
# presumably intended, since only '<page title>' is needed here; confirm.
X = create_dataset(data=specs, labels=labels, features=top10[0])
inspect_df(X)
def pagetitle_similarity(title1: str, title2: str) -> float:
    """Cosine similarity between the BoW vectors of two page titles.

    Relies on the module-level `vectorizer` fitted on the full corpus.
    """
    left_vec, right_vec = (vectorizer.transform([t]) for t in (title1, title2))
    sim = metrics.pairwise.cosine_similarity(left_vec, right_vec)
    return sim.take(0)
# Score every labelled pair by the similarity of its two page titles.
# (.iloc avoids deprecated positional integer indexing on a Series row.)
X['similarity'] = X[['<page title>_left', '<page title>_right']].apply(
    lambda row: pagetitle_similarity(row.iloc[0], row.iloc[1]), axis=1)
# Mean similarity of matching vs. non-matching pairs, as a sanity check.
X[X['label'] == 1]['similarity'].mean()
X[X['label'] == 0]['similarity'].mean()
# Threshold the similarity score at 0.5 to get hard predictions.
X['predictions'] = X['similarity'].map(lambda score: score > 0.5)
# NOTE: sklearn metrics expect (y_true, y_pred) -- ground truth first.
metrics.accuracy_score(X['label'], X['predictions'])
metrics.precision_score(X['label'], X['predictions'])
metrics.recall_score(X['label'], X['predictions'])
metrics.f1_score(X['label'], X['predictions'])
```
With this model we improved: **f1 = 0.3843**
```
# Rows = ground truth, columns = predictions (sklearn convention: y_true first).
metrics.confusion_matrix(X['label'], X['predictions'])
```
We can definitely improve over this by a better choice of text embeddings or similarity algorithm.
But, all things considered, any approach that relies on the notion of similarity between page titles could not drastically improve the 0.38 F1 score.
It is time to proceed with an ML approach.
# Feature Extraction
```
# Cap the candidate space: all unordered pairs among the first 1000 products.
MAX_PRODUCTS = 1000
camera_pairs = list(distinct_combinations(specs.index[:MAX_PRODUCTS], 2))
inspect_df(specs[top10])
```
### brand
```
# Diagnostic: print any non-string 'brand' values (e.g. lists).
for brand in specs[specs['brand'].notna()]['brand'].tolist():
    if not isinstance(brand, str):
        print(brand)
def get_brand(value):
    """Normalise a raw 'brand' entry to a single string.

    Raw values are either a string, a list of strings (keep the longest
    entry, first on ties), or NaN/other junk (mapped to None).
    """
    if isinstance(value, str):
        return value
    try:
        # Longest entry wins; `max` keeps the first one on ties, matching
        # a stable descending sort. (The old KeyError handler was dead code.)
        return max(value, key=len)
    except TypeError:
        # NaN (a float) and anything else non-iterable ends up here.
        return None
# Normalise the column in place, then eyeball the most common brands.
specs['brand'] = specs['brand'].map(get_brand)
specs['brand'].value_counts()[:40]
```
### model
```
# Diagnostic: print any non-string 'model' values (e.g. lists).
for model in specs[specs['model'].notna()]['model'].tolist():
    if not isinstance(model, str):
        print(model)
def get_model(value):
    """Normalise a raw 'model' entry to a single string.

    Raw values are either a string, a list of strings (keep the longest
    entry, first on ties), or NaN/other junk (mapped to None).
    """
    if isinstance(value, str):
        return value
    try:
        # Longest entry wins; `max` keeps the first one on ties, matching
        # a stable descending sort. (The old KeyError handler was dead code.)
        return max(value, key=len)
    except TypeError:
        # NaN (a float) and anything else non-iterable ends up here.
        return None
# Normalise the column in place.
specs['model'] = specs['model'].map(get_model)
```
### megapixels
```
# Diagnostic: print any non-string 'megapixels' values (e.g. lists).
for mp in specs[specs['megapixels'].notna()]['megapixels'].tolist():
    if not isinstance(mp, str):
        print(mp)
def extract_number(value: str) -> float:
    """Pull the first megapixel-looking number out of a free-text string.

    Matches up to two integer digits with an optional decimal part.
    Returns None when no number is present.
    """
    # The original pattern r'\d{0,2}(.\d)?' allowed an empty match and used
    # an unescaped dot, so any string with a leading non-digit matched ''
    # and fell through to None via float('') raising ValueError.
    match = re.search(r'\d{1,2}(\.\d+)?', value)
    try:
        return float(match.group(0)) if match else None
    except ValueError:
        return None

def get_megapixels(value) -> float:
    """Normalise a raw 'megapixels' entry to a float (or None).

    Raw values are either a string, a list of strings (parse the longest
    entry, first on ties), or NaN/other junk (mapped to None).
    """
    if isinstance(value, str):
        return extract_number(value)
    try:
        return extract_number(max(value, key=len))
    except TypeError:
        # NaN (a float) and anything else non-iterable ends up here.
        return None
# Normalise to floats, then force a numeric dtype for the whole column.
specs['megapixels'] = specs['megapixels'].map(get_megapixels)
specs['megapixels'] = pd.to_numeric(specs['megapixels'])
specs['megapixels'].value_counts()
```
### type
```
# Diagnostic: print any non-string 'type' values (e.g. lists).
for ctype in specs[specs['type'].notna()]['type'].tolist():
    if not isinstance(ctype, str):
        print(ctype)
def get_type(value):
    """Normalise a raw 'type' entry to a single string.

    Raw values are either a string, a list of strings (keep the longest
    entry, first on ties), or NaN/other junk (mapped to None).
    """
    if isinstance(value, str):
        return value
    try:
        # Longest entry wins; `max` keeps the first one on ties, matching
        # a stable descending sort. (The old KeyError handler was dead code.)
        return max(value, key=len)
    except TypeError:
        # NaN (a float) and anything else non-iterable ends up here.
        return None
# Normalise the column in place, then eyeball the most common camera types.
specs['type'] = specs['type'].map(get_type)
specs['type'].value_counts()[0:40]
```
| github_jupyter |
```
# default_exp checker
```
# Dependency Checker
> A pragmatic way to talk with pypi and find out what dependencies are out of date
```
#hide
from nbverbose.showdoc import *
```
## Dependency Traversing
Sometimes, we may want to check the current installed versions of a project's basic dependencies, and further check if those dependencies are out of date. `dependency_checker` is designed around this concept, utilizing the `pipdeptree` library.
```
#export
import json, ast, pipdeptree, sys, subprocess
#export
def get_installed_dependencies(
    package_name:str, # The name of a python package
    depth_limit:int=1, # How deep to follow nested dependencies
    include_self:bool=False, # Whether to include the original library in the results
) -> dict: # A dictionary of {package:version}
    "Recursively grabs dependencies of python package"
    pkgs = pipdeptree.get_installed_distributions(local_only=False, user_only=False)
    tree = pipdeptree.PackageDAG.from_pkgs(pkgs)
    # Restrict the DAG to the requested package before walking it.
    tree = tree.filter([package_name], None)
    def _get_deps(j, dep_dict=None, curr_depth=0):
        # The accumulator defaults to None rather than `{}`: a mutable
        # default would be shared across calls and leak results between
        # invocations.
        if dep_dict is None: dep_dict = {}
        if curr_depth > depth_limit: return dep_dict
        if isinstance(j, list):
            for a in j:
                _get_deps(a, dep_dict, curr_depth)
        elif isinstance(j, dict):
            if 'package_name' in j.keys():
                # First occurrence of a package wins.
                if j['package_name'] not in dep_dict.keys():
                    dep_dict[j['package_name']] = j['installed_version']
            if 'dependencies' in j.keys():
                curr_depth += 1
                return _get_deps(j['dependencies'], dep_dict, curr_depth)
        return dep_dict
    # `render_json_tree` emits JSON, so parse it with the JSON parser:
    # ast.literal_eval would choke on JSON literals such as true/false/null.
    deps = _get_deps(json.loads(pipdeptree.render_json_tree(tree, 4)), {})
    if not include_self: deps.pop(package_name, None)
    return deps
```
This function operates by traversing a DAG and grabbing dependencies of projects found from it. Generally a depth of 1 is recommended, below is a quick guide to what will be returned at each depth.
**0**: A depth of zero will return an empty dictionary unless `include_self` is `True`. If so, it will include only the library name:
```
# Depth 0: nothing but (optionally) the package itself.
deps = get_installed_dependencies('pipdeptree', depth_limit=0)
assert deps == {}
deps = get_installed_dependencies('pipdeptree', depth_limit=0, include_self=True)
# NOTE(review): pinned to the pipdeptree version installed at authoring time.
assert deps == {'pipdeptree':'2.1.0'}
```
**1**: A depth of one will return the project and its main dependencies (if `include_self` is `True`), such as those stated in the `requirements.txt` as well as packages such as `pip`
```
# Depth 1: the package plus its direct dependencies (here just `pip`).
deps = get_installed_dependencies('pipdeptree', depth_limit=1, include_self=True)
assert len(deps.keys()) == 2
assert all(package in deps.keys() for package in ('pipdeptree', 'pip'))
# Without include_self only the direct dependency remains.
deps = get_installed_dependencies('pipdeptree', depth_limit=1, include_self=False)
assert len(deps.keys()) == 1
assert 'pip' in deps.keys()
```
**2+**: A depth of two or greater will return the dependencies for each of the dependencies above that layer. These allow for more fine-grained requirements
## Checking for New Versions
Given these dependencies, we can also then check for a new version to see if an upgrade is available. This is what the `is_latest_version` function is designed for:
```
#export
def is_latest_version(
    package_name:str, # The name of a pip python package
    current_version:str, # The installed version of a package, such as "1.2.3"
) -> bool: # Whether the versions are the same
    "Compares the current version with the latest version, and returns if they are the same"
    # Asking pip to install a bogus version makes it report every available
    # version in its error message: `(from versions: 0.1, ..., X.Y.Z)`.
    result = subprocess.run(
        [sys.executable, '-m', 'pip', 'install', '{}==random'.format(package_name)],
        capture_output=True, text=True)
    # pip writes that message to its real streams; parse stdout+stderr rather
    # than the repr() of the CompletedProcess object (the original approach).
    output = result.stdout + result.stderr
    marker = output.find('(from versions:')
    if marker == -1:
        # No version listing found (e.g. unknown package) -- cannot be latest.
        return False
    versions = output[marker + len('(from versions:'):]
    versions = versions[:versions.find(')')]
    # The listing is ordered oldest-to-newest; the last entry is the latest.
    latest_version = versions.replace(' ', '').split(',')[-1]
    return latest_version == current_version
# 2.0.9 is one release behind the latest at the time of writing, so False.
using_latest_version = is_latest_version('pipdeptree', '2.0.9')
assert using_latest_version == False
```
Here we tested if `pipdeptree` is the latest version. The version we specified is one less than that of the latest release at the time of development. We got `False`, meaning a newer version is available.
| github_jupyter |
##### Copyright 2019 The TensorFlow Authors.
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# Distributed Training with DTensors
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/tutorials/distribute/dtensor_ml_tutorial"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/tutorials/distribute/dtensor_ml_tutorial.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/tutorials/distribute/dtensor_ml_tutorial.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/docs/site/en/tutorials/distribute/dtensor_ml_tutorial.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
</td>
</table>
## Overview
DTensor provides a way for you to distribute the training of your model across devices to improve efficiency, reliability and scalability. For more details on DTensor concepts, see [The DTensor Programming Guide](https://www.tensorflow.org/guide/dtensor_overview).
In this tutorial, you will train a Sentiment Analysis model with DTensor. Three distributed training schemes are demonstrated with this example:
- Data Parallel training, where the training samples are sharded (partitioned) to devices.
- Model Parallel training, where the model variables are sharded to devices.
- Spatial Parallel training, where the features of input data are sharded to devices. (Also known as [Spatial Partitioning](https://cloud.google.com/blog/products/ai-machine-learning/train-ml-models-on-large-images-and-3d-volumes-with-spatial-partitioning-on-cloud-tpus))
The training portion of this tutorial is inspired [A Kaggle guide on Sentiment Analysis](https://www.kaggle.com/code/anasofiauzsoy/yelp-review-sentiment-analysis-tensorflow-tfds/notebook) notebook. To learn about the complete training and evaluation workflow (without DTensor), refer to that notebook.
This tutorial will walk through the following steps:
- First start with some data cleaning to obtain a `tf.data.Dataset` of tokenized sentences and their polarity.
- Next build an MLP model with custom Dense and BatchNorm layers. Use a `tf.Module` to track the inference variables. The model constructor takes additional `Layout` arguments to control the sharding of variables.
- For training, you will first use data parallel training together with `tf.experimental.dtensor`'s checkpoint feature. Then continue with Model Parallel Training and Spatial Parallel Training.
- The final section briefly describes the interaction between `tf.saved_model` and `tf.experimental.dtensor` as of TensorFlow 2.9.
## Setup
DTensor is part of TensorFlow 2.9.0 release.
```
!pip install --quiet --upgrade --pre tensorflow tensorflow-datasets
```
Next, import `tensorflow` and `tensorflow.experimental.dtensor`. Then configure TensorFlow to use 8 virtual CPUs.
Even though this example uses CPUs, DTensor works the same way on CPU, GPU or TPU devices.
```
import tempfile
import numpy as np
import tensorflow_datasets as tfds
import tensorflow as tf
from tensorflow.experimental import dtensor
print('TensorFlow version:', tf.__version__)
def configure_virtual_cpus(ncpu):
    """Split the first physical CPU into `ncpu` logical (virtual) devices."""
    first_cpu = tf.config.list_physical_devices('CPU')[0]
    logical_devices = [tf.config.LogicalDeviceConfiguration() for _ in range(ncpu)]
    tf.config.set_logical_device_configuration(first_cpu, logical_devices)
# Create 8 virtual CPU devices so DTensor has a mesh to distribute over.
configure_virtual_cpus(8)
DEVICES = [f'CPU:{i}' for i in range(8)]
tf.config.list_logical_devices('CPU')
```
## Download the dataset
Download the IMDB reviews data set to train the sentiment analysis model.
```
# IMDB reviews, pre-batched to 64 examples per dataset element.
train_data = tfds.load('imdb_reviews', split='train', shuffle_files=True, batch_size=64)
train_data
```
## Prepare the data
First tokenize the text. Here use an extension of one-hot encoding, the `'tf_idf'` mode of `tf.keras.layers.TextVectorization`.
- For the sake of speed, limit the number of tokens to 1200.
- To keep the `tf.Module` simple, run `TextVectorization` as a preprocessing step before the training.
The final result of the data cleaning section is a `Dataset` with the tokenized text as `x` and label as `y`.
**Note**: Running `TextVectorization` as a preprocessing step is **neither a usual practice nor a recommended one** as doing so assumes the training data fits into the client memory, which is not always the case.
```
# TF-IDF bag-of-words with a 1200-token vocabulary, adapted on the corpus.
text_vectorization = tf.keras.layers.TextVectorization(output_mode='tf_idf', max_tokens=1200, output_sequence_length=None)
text_vectorization.adapt(data=train_data.map(lambda x: x['text']))
def vectorize(features):
    """Map a raw example to a (tf-idf vector, label) pair."""
    return text_vectorization(features['text']), features['label']
train_data_vec = train_data.map(vectorize)
train_data_vec
```
## Build a neural network with DTensor
Now build a Multi-Layer Perceptron (MLP) network with `DTensor`. The network will use fully connected Dense and BatchNorm layers.
`DTensor` expands TensorFlow through single-program multi-data (SPMD) expansion of regular TensorFlow Ops according to the `dtensor.Layout` attributes of their input `Tensor` and variables.
Variables of `DTensor` aware layers are `dtensor.DVariable`, and the constructors of `DTensor` aware layer objects take additional `Layout` inputs in addition to the usual layer parameters.
Note: As of TensorFlow 2.9, Keras layers such as `tf.keras.layer.Dense`, and `tf.keras.layer.BatchNormalization` accepts `dtensor.Layout` arguments. Refer to the [DTensor Keras Integration Tutorial](/tutorials/distribute/dtensor_keras_tutorial) for more information using Keras with DTensor.
### Dense Layer
The following custom Dense layer defines 2 layer variables: $W_{ij}$ is the variable for weights, and $b_i$ is the variable for the biases.
$$
y_j = \sigma(\sum_i x_i W_{ij} + b_j)
$$
### Layout deduction
This result comes from the following observations:
- The preferred DTensor sharding for operands to a matrix dot product $t_j = \sum_i x_i W_{ij}$ is to shard $\mathbf{W}$ and $\mathbf{x}$ the same way along the $i$-axis.
- The preferred DTensor sharding for operands to a matrix sum $t_j + b_j$, is to shard $\mathbf{t}$ and $\mathbf{b}$ the same way along the $j$-axis.
```
class Dense(tf.Module):
    """A DTensor-aware fully connected layer: y = activation(x @ W + b).

    `weight_layout` controls how W is sharded across the mesh; the bias
    reuses the layout of W's last axis (see `delete([0])` below) so the
    `x @ W + b` sum is sharded consistently with the matmul output.
    """
    def __init__(self, input_size, output_size,
                 init_seed, weight_layout, activation=None):
        super().__init__()
        # Stateless RNG: initialization is deterministic given `init_seed`,
        # independent of variable-creation order.
        random_normal_initializer = tf.function(tf.random.stateless_normal)
        # `call_with_layout` runs the initializer and lays the result out as
        # a DTensor before wrapping it in a DVariable.
        self.weight = dtensor.DVariable(
            dtensor.call_with_layout(
                random_normal_initializer, weight_layout,
                shape=[input_size, output_size],
                seed=init_seed
            ))
        if activation is None:
            activation = lambda x: x  # identity when no activation is given
        self.activation = activation
        # bias is sharded the same way as the last axis of weight.
        bias_layout = weight_layout.delete([0])
        self.bias = dtensor.DVariable(
            dtensor.call_with_layout(tf.zeros, bias_layout, [output_size]))
    def __call__(self, x):
        y = tf.matmul(x, self.weight) + self.bias
        y = self.activation(y)
        return y
```
### BatchNorm
A batch normalization layer helps avoid collapsing modes while training. In this case, adding batch normalization layers helps model training avoid producing a model that only produces zeros.
The constructor of the custom `BatchNorm` layer below does not take a `Layout` argument. This is because `BatchNorm` has no layer variables. This still works with DTensor because 'x', the only input to the layer, is already a DTensor that represents the global batch.
Note: With DTensor, the input Tensor 'x' always represents the global batch. Therefore `tf.nn.batch_normalization` is applied to the global batch. This differs from training with `tf.distribute.MirroredStrategy`, where Tensor 'x' only represents the per-replica shard of the batch (the local batch).
```
class BatchNorm(tf.Module):
    """Minimal batch normalization over axis 0 (the batch axis).

    Stateless: no trainable scale/offset and no moving statistics, which is
    why this layer needs no `Layout` argument.
    """
    def __init__(self):
        super().__init__()
    def __call__(self, x, training=True):
        if not training:
            # This branch is not used in the Tutorial.
            pass
        batch_mean, batch_var = tf.nn.moments(x, axes=[0])
        normalized = tf.nn.batch_normalization(x, batch_mean, batch_var, 0.0, 1.0, 1e-5)
        return normalized
```
A full featured batch normalization layer (such as `tf.keras.layers.BatchNormalization`) will need Layout arguments for its variables.
```
def make_keras_bn(bn_layout):
    """Build a Keras BatchNormalization layer whose variables (gamma, beta,
    moving mean/variance) all use `bn_layout`."""
    layout_kwargs = {
        name: bn_layout
        for name in ('gamma_layout', 'beta_layout',
                     'moving_mean_layout', 'moving_variance_layout')
    }
    return tf.keras.layers.BatchNormalization(fused=False, **layout_kwargs)
```
### Putting Layers Together
Next, build a Multi-layer perceptron (MLP) network with the building blocks above. The diagram below shows the axis relationships between the input `x` and the weight matrices for the two `Dense` layers without any DTensor sharding or replication applied.
<img src="https://www.tensorflow.org/images/dtensor/no_dtensor.png" alt="The input and weight matrices for a non distributed model." class="no-filter">
The output of the first `Dense` layer is passed into the input of the second `Dense` layer (after the `BatchNorm`). Therefore, the preferred DTensor sharding for the output of first `Dense` layer ($\mathbf{W_1}$) and the input of second `Dense` layer ($\mathbf{W_2}$) is to shard $\mathbf{W_1}$ and $\mathbf{W_2}$ the same way along the common axis $\hat{j}$,
$$
\mathsf{Layout}[{W_{1,ij}}; i, j] = \left[\hat{i}, \hat{j}\right] \\
\mathsf{Layout}[{W_{2,jk}}; j, k] = \left[\hat{j}, \hat{k} \right]
$$
Even though the layout deduction shows that the 2 layouts are not independent, for the sake of simplicity of the model interface, `MLP` will take 2 `Layout` arguments, one per Dense layer.
```
from typing import Tuple
class MLP(tf.Module):
    """A 2-layer perceptron: Dense(1200->48) -> BatchNorm -> Dense(48->2).

    `dense_layouts` supplies one weight `Layout` per Dense layer.
    """
    def __init__(self, dense_layouts: Tuple[dtensor.Layout, dtensor.Layout]):
        super().__init__()
        first_layout, second_layout = dense_layouts
        self.dense1 = Dense(1200, 48, (1, 2), first_layout, activation=tf.nn.relu)
        self.bn = BatchNorm()
        self.dense2 = Dense(48, 2, (3, 4), second_layout)
    def __call__(self, x):
        hidden = self.bn(self.dense1(x))
        return self.dense2(hidden)
```
The trade-off between correctness in layout deduction constraints and simplicity of API is a common design point of APIs that uses DTensor.
It is also possible to capture the dependency between `Layout`'s with a different API. For example, the `MLPStricter` class creates the `Layout` objects in the constructor.
```
class MLPStricter(tf.Module):
    """Like `MLP`, but constructs the two `Layout`s itself from mesh
    dimension names, so the shared inner mesh dimension cannot be
    mis-specified by the caller."""
    def __init__(self, mesh, input_mesh_dim, inner_mesh_dim1, output_mesh_dim):
        super().__init__()
        layout1 = dtensor.Layout([input_mesh_dim, inner_mesh_dim1], mesh)
        layout2 = dtensor.Layout([inner_mesh_dim1, output_mesh_dim], mesh)
        self.dense1 = Dense(1200, 48, (1, 2), layout1, activation=tf.nn.relu)
        self.bn = BatchNorm()
        self.dense2 = Dense(48, 2, (3, 4), layout2)
    def __call__(self, x):
        hidden = self.bn(self.dense1(x))
        return self.dense2(hidden)
```
To make sure the model runs, probe your model with fully replicated layouts and a fully replicated batch of `'x'` input.
```
# Sanity check: fully replicated layouts on an 8-device 1-d mesh.
WORLD = dtensor.create_mesh([("world", 8)], devices=DEVICES)
model = MLP([dtensor.Layout.replicated(WORLD, rank=2),
             dtensor.Layout.replicated(WORLD, rank=2)])
sample_x, sample_y = train_data_vec.take(1).get_single_element()
# Replicate one batch to every device and run a forward pass.
sample_x = dtensor.copy_to_mesh(sample_x, dtensor.Layout.replicated(WORLD, rank=2))
print(model(sample_x))
```
## Moving data to the device
Usually, `tf.data` iterators (and other data fetching methods) yield tensor objects backed by the local host device memory. This data must be transferred to the accelerator device memory that backs DTensor's component tensors.
`dtensor.copy_to_mesh` is unsuitable for this situation because it replicates input tensors to all devices due to DTensor's global perspective. So in this tutorial, you will use a helper function `repack_local_tensor`, to facilitate the transfer of data. This helper function uses `dtensor.pack` to send (and only send) the shard of the global batch that is intended for a replica to the device backing the replica.
This simplified function assumes a single client. Determining the correct way to split the local tensor, and the mapping between the pieces of the split and the local devices, can be laborious in a multi-client application.
Additional DTensor API to simplify `tf.data` integration is planned, supporting both single-client and multi-client applications. Please stay tuned.
```
def repack_local_tensor(x, layout):
    """Repacks a local Tensor-like to a DTensor with layout.

    This function assumes a single-client application.

    Args:
        x: a host-local tensor (or convertible) holding the global batch.
        layout: the target dtensor.Layout; its sharded dimensions determine
            how `x` is split across the mesh's local devices.

    Returns:
        A DTensor assembled with `dtensor.pack` from the per-device shards.
    """
    x = tf.convert_to_tensor(x)
    sharded_dims = []
    # For every sharded dimension, use tf.split to split along that dimension.
    # The result is a nested list of split-tensors in queue[0].
    queue = [x]
    for axis, dim in enumerate(layout.sharding_specs):
        if dim == dtensor.UNSHARDED:
            continue
        num_splits = layout.shape[axis]
        queue = tf.nest.map_structure(lambda x: tf.split(x, num_splits, axis=axis), queue)
        sharded_dims.append(dim)
    # Now we can build the list of component tensors by looking up the location in
    # the nested list of split-tensors created in queue[0].
    components = []
    for locations in layout.mesh.local_device_locations():
        t = queue[0]
        # Each device's mesh coordinates index into the nested split lists.
        for dim in sharded_dims:
            split_index = locations[dim]  # Only valid on single-client mesh.
            t = t[split_index]
        components.append(t)
    return dtensor.pack(components, layout)
```
## Data parallel training
In this section, you will train your MLP model with data parallel training. The following sections will demonstrate model parallel training and spatial parallel training.
Data parallel training is a commonly used scheme for distributed machine learning:
- Model variables are replicated on N devices each.
- A global batch is split into N per-replica batches.
- Each per-replica batch is trained on the replica device.
- The gradient is all-reduced across replicas before the weight update is performed collectively on all replicas.
Data parallel training provides nearly linear speedup regarding the number of devices.
### Creating a data parallel mesh
A typical data parallelism training loop uses a DTensor `Mesh` that consists of a single `batch` dimension, where each device becomes a replica that receives a shard from the global batch.
<img src="https://www.tensorflow.org/images/dtensor/dtensor_data_para.png" alt="Data parallel mesh" class="no-filter">
The replicated model runs on the replica, therefore the model variables are fully replicated (unsharded).
```
# 1-d 'batch' mesh: pure data parallelism; model variables stay unsharded.
mesh = dtensor.create_mesh([("batch", 8)], devices=DEVICES)
model = MLP([dtensor.Layout([dtensor.UNSHARDED, dtensor.UNSHARDED], mesh),
             dtensor.Layout([dtensor.UNSHARDED, dtensor.UNSHARDED], mesh),])
```
### Packing training data to DTensors
The training data batch should be packed into DTensors sharded along the `'batch'`(first) axis, such that DTensor will evenly distribute the training data to the `'batch'` mesh dimension.
**Note**: In DTensor, the `batch size` always refers to the global batch size. The batch size should be chosen such that it can be divided evenly by the size of the `batch` mesh dimension.
```
def repack_batch(x, y, mesh):
    """Shard a (features, labels) batch along the mesh's 'batch' dimension."""
    x_layout = dtensor.Layout(['batch', dtensor.UNSHARDED], mesh)
    y_layout = dtensor.Layout(['batch'], mesh)
    return repack_local_tensor(x, layout=x_layout), repack_local_tensor(y, layout=y_layout)
# Shard one sample batch and inspect the result.
sample_x, sample_y = train_data_vec.take(1).get_single_element()
sample_x, sample_y = repack_batch(sample_x, sample_y, mesh)
print('x', sample_x[:, 0])
print('y', sample_y)
```
### Training step
This example uses a Stochastic Gradient Descent optimizer with the Custom Training Loop (CTL). Consult the [Custom Training Loop guide](https://www.tensorflow.org/guide/keras/writing_a_training_loop_from_scratch) and [Walk through](https://www.tensorflow.org/tutorials/customization/custom_training_walkthrough) for more information on those topics.
The `train_step` is encapsulated as a `tf.function` to indicate this body is to be traced as a TensorFlow Graph. The body of `train_step` consists of a forward inference pass, a backward gradient pass, and the variable update.
Note that the body of `train_step` does not contain any special DTensor annotations. Instead, `train_step` only contains high-level TensorFlow operations that process the input `x` and `y` from the global view of the input batch and the model. All of the DTensor annotations (`Mesh`, `Layout`) are factored out of the train step.
```
# Refer to the CTL (custom training loop guide)
@tf.function
def train_step(model, x, y, learning_rate=tf.constant(1e-4)):
    """One SGD step: forward pass, gradients, in-place parameter update.

    Returns a dict with the per-sample loss and the batch accuracy. Note
    there are no DTensor annotations here: sharding is carried entirely by
    the layouts of `x`, `y` and the model variables.
    """
    with tf.GradientTape() as tape:
        logits = model(x)
        # tf.reduce_sum sums the batch sharded per-example loss to a replicated
        # global loss (scalar).
        loss = tf.reduce_sum(
            tf.nn.sparse_softmax_cross_entropy_with_logits(
                logits=logits, labels=y))
    parameters = model.trainable_variables
    gradients = tape.gradient(loss, parameters)
    # Plain SGD: p <- p - lr * grad, applied variable by variable.
    for parameter, parameter_gradient in zip(parameters, gradients):
        parameter.assign_sub(learning_rate * parameter_gradient)
    # Define some metrics
    accuracy = 1.0 - tf.reduce_sum(tf.cast(tf.argmax(logits, axis=-1, output_type=tf.int64) != y, tf.float32)) / x.shape[0]
    loss_per_sample = loss / len(x)
    return {'loss': loss_per_sample, 'accuracy': accuracy}
```
### Checkpointing
You can checkpoint a DTensor model using `dtensor.DTensorCheckpoint`. The format of a DTensor checkpoint is fully compatible with a Standard TensorFlow Checkpoint. There is ongoing work to consolidate `dtensor.DTensorCheckpoint` into `tf.train.Checkpoint`.
When a DTensor checkpoint is restored, `Layout`s of variables can be different from when the checkpoint is saved. This tutorial makes use of this feature to continue the training in the Model Parallel training and Spatial Parallel training sections.
```
CHECKPOINT_DIR = tempfile.mkdtemp()
def start_checkpoint_manager(mesh, model):
    """Create a CheckpointManager for `model` on `mesh`, restoring the
    latest checkpoint from CHECKPOINT_DIR when one exists."""
    ckpt = dtensor.DTensorCheckpoint(mesh, root=model)
    manager = tf.train.CheckpointManager(ckpt, CHECKPOINT_DIR, max_to_keep=3)
    if not manager.latest_checkpoint:
        print("new training")
    else:
        print("Restoring a checkpoint")
        ckpt.restore(manager.latest_checkpoint).assert_consumed()
    return manager
```
### Training loop
For the data parallel training scheme, train for epochs and report the progress. 3 epochs is insufficient for training the model -- an accuracy of 50% is as good as randomly guessing.
Enable checkpointing so that you can pick up the training later. In the following section, you will load the checkpoint and train with a different parallel scheme.
```
num_epochs = 2
manager = start_checkpoint_manager(mesh, model)
# One progress bar per epoch; metric values come straight from train_step.
for epoch in range(num_epochs):
    step = 0
    pbar = tf.keras.utils.Progbar(target=int(train_data_vec.cardinality()), stateful_metrics=[])
    metrics = {'epoch': epoch}
    for x, y in train_data_vec:
        x, y = repack_batch(x, y, mesh)
        metrics.update(train_step(model, x, y, 1e-2))
        pbar.update(step, values=metrics.items(), finalize=False)
        step += 1
    # Checkpoint at the end of every epoch so later sections can resume.
    manager.save()
    pbar.update(step, values=metrics.items(), finalize=True)
```
## Model Parallel Training
If you switch to a 2 dimensional `Mesh`, and shard the model variables along the second mesh dimension, then the training becomes Model Parallel.
In Model Parallel training, each model replica spans multiple devices (2 in this case):
- There are 4 model replicas, and the training data batch is distributed to the 4 replicas.
- The 2 devices within a single model replica receive replicated training data.
<img src="https://www.tensorflow.org/images/dtensor/dtensor_model_para.png" alt="Model parallel mesh" class="no-filter">
```
# 4 (batch) x 2 (model) mesh: each model replica spans 2 devices.
mesh = dtensor.create_mesh([("batch", 4), ("model", 2)], devices=DEVICES)
model = MLP([dtensor.Layout([dtensor.UNSHARDED, "model"], mesh),
             dtensor.Layout(["model", dtensor.UNSHARDED], mesh)])
```
As the training data is still sharded along the batch dimension, you can reuse the same `repack_batch` function as the Data Parallel training case. DTensor will automatically replicate the per-replica batch to all devices inside the replica along the `"model"` mesh dimension.
```
def repack_batch(x, y, mesh):
  """Repack a local (x, y) batch into DTensors sharded on the "batch" mesh dim."""
  x_layout = dtensor.Layout(['batch', dtensor.UNSHARDED], mesh)
  y_layout = dtensor.Layout(['batch'], mesh)
  return (repack_local_tensor(x, layout=x_layout),
          repack_local_tensor(y, layout=y_layout))
```
Next run the training loop. The training loop reuses the same checkpoint manager as the Data Parallel training example, and the code looks identical.
You can continue training the data parallel trained model under model parallel training.
```
# Continue training (now model-parallel) from the same checkpoint manager;
# the loop body is identical to the data-parallel case.
num_epochs = 2
manager = start_checkpoint_manager(mesh, model)
for epoch in range(num_epochs):
  step = 0
  pbar = tf.keras.utils.Progbar(target=int(train_data_vec.cardinality()))
  metrics = {'epoch': epoch}
  for x,y in train_data_vec:
    # `repack_batch` shards on "batch"; DTensor replicates across "model".
    x, y = repack_batch(x, y, mesh)
    metrics.update(train_step(model, x, y, 1e-2))
    pbar.update(step, values=metrics.items(), finalize=False)
    step += 1
  # Checkpoint each epoch so later schemes can resume from here.
  manager.save()
  pbar.update(step, values=metrics.items(), finalize=True)
```
## Spatial Parallel Training
When training data of very high dimensionality (e.g. a very large image or a video), it may be desirable to shard along the feature dimension. This is called [Spatial Partitioning](https://cloud.google.com/blog/products/ai-machine-learning/train-ml-models-on-large-images-and-3d-volumes-with-spatial-partitioning-on-cloud-tpus), which was first introduced into TensorFlow for training models with large 3-d input samples.
<img src="https://www.tensorflow.org/images/dtensor/dtensor_spatial_para.png" alt="Spatial parallel mesh" class="no-filter">
DTensor also supports this case. The only change you need to do is to create a Mesh that includes a `feature` dimension, and apply the corresponding `Layout`.
```
# 3-D mesh: "batch" x "feature" x "model", 2 devices each (2*2*2 = 8 total).
mesh = dtensor.create_mesh([("batch", 2), ("feature", 2), ("model", 2)], devices=DEVICES)
# First-layer weights are sharded on both the "feature" and "model" dims.
model = MLP([dtensor.Layout(["feature", "model"], mesh),
             dtensor.Layout(["model", dtensor.UNSHARDED], mesh)])
```
Shard the input data along the `feature` dimension when packing the input tensors to DTensors. You do this with a slightly different repack function, `repack_batch_for_spt`, where `spt` stands for Spatial Parallel Training.
```
def repack_batch_for_spt(x, y, mesh):
  """Repack (x, y) for spatial parallel training: x is sharded on the
  "batch" AND "feature" mesh dimensions, y only on "batch"."""
  layout_x = dtensor.Layout(["batch", 'feature'], mesh)
  layout_y = dtensor.Layout(["batch"], mesh)
  return (repack_local_tensor(x, layout=layout_x),
          repack_local_tensor(y, layout=layout_y))
```
Spatial parallel training can also continue from a checkpoint created with other parallel training schemes.
```
# Spatial-parallel training loop; resumes from the shared checkpoint manager.
num_epochs = 2
manager = start_checkpoint_manager(mesh, model)
for epoch in range(num_epochs):
  step = 0
  metrics = {'epoch': epoch}
  pbar = tf.keras.utils.Progbar(target=int(train_data_vec.cardinality()))
  for x, y in train_data_vec:
    # Shard x on both "batch" and "feature" mesh dimensions.
    x, y = repack_batch_for_spt(x, y, mesh)
    metrics.update(train_step(model, x, y, 1e-2))
    pbar.update(step, values=metrics.items(), finalize=False)
    step += 1
  manager.save()
  pbar.update(step, values=metrics.items(), finalize=True)
```
## SavedModel and DTensor
The integration of DTensor and SavedModel is still under development. This section only describes the current status quo for TensorFlow 2.9.0.
As of TensorFlow 2.9.0, `tf.saved_model` only accepts DTensor models with fully replicated variables.
As a workaround, you can convert a DTensor model to a fully replicated one by reloading a checkpoint. However, after a model is saved, all DTensor annotations are lost and the saved signatures can only be used with regular Tensors, not DTensors.
```
# Single-device, fully replicated mesh: reloading the checkpoint onto it
# produces a fully replicated model, the only kind tf.saved_model accepts
# as of TF 2.9.
mesh = dtensor.create_mesh([("world", 1)], devices=DEVICES[:1])
mlp = MLP([dtensor.Layout([dtensor.UNSHARDED, dtensor.UNSHARDED], mesh),
           dtensor.Layout([dtensor.UNSHARDED, dtensor.UNSHARDED], mesh)])
manager = start_checkpoint_manager(mesh, mlp)
# Bundle the text vectorizer with the MLP so the SavedModel takes raw strings.
model_for_saving = tf.keras.Sequential([
  text_vectorization,
  mlp
])
@tf.function(input_signature=[tf.TensorSpec([None], tf.string)])
def run(inputs):
  return {'result': model_for_saving(inputs)}
tf.saved_model.save(
    model_for_saving, "/tmp/saved_model",
    signatures=run)
```
As of TensorFlow 2.9.0, you can only call a loaded signature with a regular Tensor, or a fully replicated DTensor (which will be converted to a regular Tensor).
```
# Grab one raw batch to exercise the reloaded serving signature.
sample_batch = train_data.take(1).get_single_element()
sample_batch
loaded = tf.saved_model.load("/tmp/saved_model")
run_sig = loaded.signatures["serving_default"]
result = run_sig(sample_batch['text'])['result']
# Fraction of correct predictions on this batch (plain accuracy).
np.mean(tf.argmax(result, axis=-1) == sample_batch['label'])
```
## What's next?
This tutorial demonstrated building and training an MLP sentiment analysis model with DTensor.
Through `Mesh` and `Layout` primitives, DTensor can transform a TensorFlow `tf.function` to a distributed program suitable for a variety of training schemes.
In a real-world machine learning application, evaluation and cross-validation should be applied to avoid producing an over-fitted model. The techniques introduced in this tutorial can also be applied to introduce parallelism to evaluation.
Composing a model with `tf.Module` from scratch is a lot of work, and reusing existing building blocks such as layers and helper functions can drastically speed up model development.
As of TensorFlow 2.9, all Keras layers under `tf.keras.layers` accept DTensor layouts as their arguments, and can be used to build DTensor models. You can even directly reuse a Keras model with DTensor without modifying the model implementation. Refer to the [DTensor Keras Integration Tutorial](https://www.tensorflow.org/tutorials/distribute/dtensor_keras_tutorial) for information on using DTensor with Keras.
| github_jupyter |
SPARQL Transformer evaluation
=========================
This notebook contains some quantitative measures for the evaluation of SPARQL Transformer.
```
import json
import os
import time
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from ipywidgets import FloatProgress
from IPython.display import display
from SPARQLWrapper import SPARQLWrapper, JSON
from SPARQLTransformer import sparqlTransformer

# Local Virtuoso mirror of DBpedia; switch to the public endpoint if needed.
input_folder = './sparql'
ENDPOINT = 'http://0.0.0.0:7790/sparql'
# ENDPOINT = 'http://dbpedia.org/sparql'

# Pair each JSON query with its hand-written SPARQL (.rq) counterpart,
# in a stable (sorted) order.
json_queries_files = list(filter(lambda x: x.endswith('.json'), os.listdir(input_folder)))
json_queries_files.sort()
rq_queries_files = [f.replace('.json', '.rq') for f in json_queries_files]

def _read_json(filepath):
    # BUG FIX: use a context manager so file handles are closed;
    # `json.load(open(...))` leaked one handle per query file.
    with open(filepath, 'r') as f:
        return json.load(f)

def _read_text(filepath):
    with open(filepath, 'r') as f:
        return f.read()

json_queries = [_read_json('%s/%s' % (input_folder, f)) for f in json_queries_files]
rq_queries = [_read_text('%s/%s' % (input_folder, f)) for f in rq_queries_files]
json_queries_files
```
The test queries have been taken from the __[DBpedia wiki](https://wiki.dbpedia.org/OnlineAccess)__.
Those SELECT queries have been manually converted in json query, making sure that the transformed query was equal to the original one (variable names apart).
The following table shows, for each query:
- `n vars`, how many variables are selected
- `levels`, how many levels are present in the json prototype, considered that `1` refers to a flat object (all properties attached to the root) and `2` at one level of nested object
- `features` included in the query
| name | n vars | levels | features |
|--------------------------|--------|--------|----------------------|
|1.Born_in_Berlin | 4 | 1 | filter, orderby |
|2.German_musicians | 4 | 1 | lang filter, optional|
|3.Musicians_born_in_Berlin| 4 | 1 | lang filter |
|4.Soccer_players | 5 | 2 | filter, orderby |
|5.Games | 2 | 1 | orderby |
Functions for executing the query and returning the bindings.
- For JSON queries, we use **SPARQLTransformer**.
- For SPARQL queries, we use **SPARQLWrapper** (which is also internally used by SPARQLTransformer).
```
def sparql_exec(query):
    """Run a raw SPARQL `query` against ENDPOINT and return the result bindings."""
    wrapper = SPARQLWrapper(ENDPOINT)
    wrapper.setQuery(query)
    wrapper.setReturnFormat(JSON)
    response = wrapper.query().convert()
    return response["results"]["bindings"]
def json_exec(query, debug=False):
    """Run a JSON query through SPARQLTransformer against ENDPOINT."""
    options = {'endpoint': ENDPOINT, 'debug': debug}
    return sparqlTransformer(query, options)
```
Functions for running the test for a particular query (sparql or json).
The test measure the **execution time** of the query (including any parsing task) and the **number of results**.
```
def test_atom(query, typ='sparql'):
start = time.time()
if typ == 'sparql':
r = sparql_exec(query)
else:
r = json_exec(query)
end = time.time()
timing = end - start
return len(r), timing
```
We will execute the test multiple times for each query, to obtain an average result that is as uncorrelated as possible with the network/server workload.
In particular, each test would be executed `num_iteration` times. Each couple of consecutive iteration will be separated by `sleep_time` seconds.
```
num_iteration = 100
sleep_time = 5

def mean_without_outliers(x):
    """Mean of `x` after dropping IQR outliers.

    A value is kept only if it lies inside [Q1 - 1.5*IQR, Q3 + 1.5*IQR].
    BUG FIX: the original combined the two bounds with `|` (union), which is
    always True and therefore never excluded anything; the correct filter is
    the intersection (`&`).
    """
    df = pd.DataFrame(x)
    Q1 = df.quantile(0.25)
    Q3 = df.quantile(0.75)
    IQR = Q3 - Q1
    in_range = (df >= Q1 - 1.5 * IQR) & (df <= Q3 + 1.5 * IQR)
    # df[in_range] masks excluded entries to NaN; mean() skips NaN.
    return float(df[in_range].mean().iloc[0])
# Run the benchmark: for every query pair, execute the SPARQL version and the
# JSON (SPARQL Transformer) version `num_iteration` times each, recording
# per-run timings and the (last observed) result counts.
test_results = []
all_timings = []
for i, json_query in enumerate(json_queries):
    # queries
    json_query = json_queries[i]
    rq_query = rq_queries[i]
    title = rq_queries_files[i].replace('.rq', '')
    print(title)
    # progress bars
    fs = FloatProgress(min=0, max=num_iteration, description='SPARQL test:')
    display(fs)
    fj = FloatProgress(min=0, max=num_iteration, description='JSON test:')
    display(fj)
    sparql_time = []
    sparql_results = 0
    json_time = []
    json_results = 0
    for j in np.arange(num_iteration):
        # Sleep between runs (skipped only for the very first run overall) to
        # decouple consecutive requests from server/network load.
        if (i + j) > 0 :
            time.sleep(sleep_time)
        sparql_results, t = test_atom(rq_query, typ='sparql')
        sparql_time.append(t)
        fs.value += 1
    for j in np.arange(num_iteration):
        time.sleep(sleep_time)
        json_results, t = test_atom(json_query, typ='json')
        json_time.append(t)
        fj.value += 1
    # NOTE(review): plain means are used here rather than the
    # `mean_without_outliers` helper defined above -- confirm which
    # aggregation is intended.
    ts = np.mean(sparql_time)
    tj = np.mean(json_time)
    time_diff = (tj - ts)
    time_diff_percent = 100 * time_diff / np.mean([ts,tj])
    test_results.append({
        'name': title,
        'time_sparql': ts,
        'result_sparql': sparql_results,
        'time_json': tj ,
        'result_json': json_results,
        'time_diff': '{0:.2g}'.format(time_diff),
        'time_diff_percent': '{0:.2g}%'.format(time_diff_percent)
    });
    all_timings.append({
        'name': title,
        'json': json_time,
        'sparql': sparql_time
    })
```
Those plots show that over the whole test, some queries took much longer to execute. The **outliers** are clearly visible as dots.
When computing the mean, we excluded all the outliers, where an outlier stands outside the IQR (see [definition](https://www.purplemath.com/modules/boxwhisk3.htm)).
```
# One horizontal boxplot per query, comparing the SPARQL and JSON timing
# distributions; outliers render as individual dots.
for i, json_query in enumerate(json_queries):
    tim = all_timings[i]
    a = np.array([np.hstack(tim['sparql']), np.hstack(tim['json'])]).transpose()
    df = pd.DataFrame(a, columns=['SPARQL', 'JSON'])
    bp = df.boxplot(vert=False, figsize=(16,4))
    # Fetch the underlying figure so the query name can be used as the title.
    fig = np.asarray(bp).reshape(-1)[0].get_figure()
    fig.suptitle(tim['name'])
    plt.show()
pd.DataFrame.from_dict(test_results)
```
The table gives us two different pieces of information.
#### Time difference
The execution time of JSON queries (`time_json`) is quite close to that of SPARQL ones (`time_sparql`). The difference (`time_diff`) never exceeds a few hundredths of a second.
#### Result difference
The number of results (bindings) returned by SPARQL Transformer (`result_json`) is always lower than the number returned by the endpoint (`result_sparql`). This is due to the fact that the latter represents all the combinations of values as distinct bindings, while the former aggregates the results with the same id.
### Example of result for `1.Born_in_Berlin`.
An interesting case is the 2nd result about [Prince Adalbert of Prussia](http://dbpedia.org/resource/Prince_Adalbert_of_Prussia_(1811โ1873)), which has 4 names and 2 differently formatted death dates. This is represented with 4 * 2 = 8 bindings, which SPARQL Transformer then merges into a single object.
```
# SPARQL query: the raw endpoint returns one binding per combination of values
sparql_exec(rq_queries[0])[1:9]
# JSON query: SPARQL Transformer merges those bindings into a single object
json_exec(json_queries[0])[1]
test_results
```
| github_jupyter |
# WeatherPy
----
#### Note
* Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.
```
# Dependencies and Setup
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import requests
import time
from scipy.stats import linregress
from datetime import datetime
# Import API key
from api_keys import weather_api_key
# Incorporated citipy to determine city based on latitude and longitude
from citipy import citipy
# Output File (CSV)
output_data_file = "../output_data/cities.csv"
output_data_file_2 = "../output_data/cities_clean.csv"
# Range of latitudes and longitudes
lat_range = (-90, 90)
lng_range = (-180, 180)
# Disable interactive plotting so figures render only on explicit plt.show().
plt.ioff()
# Save config information.
url = "http://api.openweathermap.org/data/2.5/weather?"
units = "imperial"
# Build partial query URL; the city name is appended per request via `q=`.
query_url = f"{url}appid={weather_api_key}&units={units}&q="
```
## Generate Cities List
```
# List for holding lat_lngs and cities
lat_lngs = []
cities = []
# Create a set of random lat and lng combinations
lats = np.random.uniform(lat_range[0], lat_range[1], size=1500)
lngs = np.random.uniform(lng_range[0], lng_range[1], size=1500)
lat_lngs = zip(lats, lngs)
# Identify nearest city for each lat, lng combination
for lat_lng in lat_lngs:
    city = citipy.nearest_city(lat_lng[0], lat_lng[1]).city_name
    # Only add unique city names (many random points map to the same city)
    if city not in cities:
        cities.append(city)
# Print the city count to confirm sufficient count
len(cities)
#assign list variables to build data table.
# One parallel list per output column; rows stay aligned with `cities`.
City = []
Lat = []
Lng = []
Max_Temp= []
Humidity= []
Cloudiness= []
Wind_Speed= []
Country = []
Date = []
cities
#cities = ['new norfolk', 'barrow', 'barentsburg', 'staromaryevka', 'thompson', 'yumen', 'bathsheba',\
#          'ushuaia', 'yar-sale', 'nishihara', 'leningradskiy', 'iqaluit', 'severo-kurilsk']
```
### Perform API Calls
* Perform a weather check on each city using a series of successive API calls.
* Include a print log of each city as it's being processed (with the city number and city name).
```
url = "http://api.openweathermap.org/data/2.5/weather?"
# NOTE(review): the numpy unsigned counters below are immediately rebound to
# plain Python ints (`for i in range(...)`, `j=0`), so the fixed widths are
# cosmetic.
group_item_number = np.uint8(5)
i = np.uint8(0)
j = np.uint8(0)
item = np.uint16(0)
# Number of groups of `group_item_number` cities (round up on a remainder).
if len(cities)%group_item_number:
    group_number = int(len(cities) / group_item_number) + 1
else:
    # BUG FIX: true division produced a float here, and `range(float)` raises
    # TypeError whenever len(cities) is an exact multiple of the group size.
    group_number = len(cities) // group_item_number
for i in range(0, group_number):
    j=0
    # `&` on two bools acts like a non-short-circuiting `and`.
    while ((j < group_item_number) & (item < (len(cities)-1))):
        item = i * (group_item_number) + j
        city = cities[item]
        print(f'Processing record {item} of group {i} item {j} | {city}')
        try:
            response = requests.get(query_url + city).json()
            #City.append()
            Lat.append(response['coord']['lat'])
            Lng.append(response['coord']['lon'])
            Max_Temp.append(response['main']['temp_max'])
            Humidity.append(response['main']['humidity'])
            Cloudiness.append(response['clouds']['all'])
            Wind_Speed.append(response['wind']['speed'])
            Country.append(response['sys']['country'])
            Date.append(response['dt'])
        except KeyError:
            # Missing keys mean the city lookup failed; append NaN placeholders
            # so every column stays aligned with `cities`.
            print('Data for ',city,' not available. Skipping......')
            Lat.append(np.nan)
            Lng.append(np.nan)
            Max_Temp.append(np.nan)
            Humidity.append(np.nan)
            Cloudiness.append(np.nan)
            Wind_Speed.append(np.nan)
            Country.append(np.nan)
            Date.append(np.nan)
        j = j + 1
```
### Convert Raw Data to DataFrame
* Export the city data into a .csv.
* Display the DataFrame
```
# Assemble one row per requested city; cities whose API lookup failed carry
# NaN placeholders in every data column.
main_df = pd.DataFrame({'city' : cities, 'Latitude' : Lat, 'Longitude': Lng, "Max temp, F": Max_Temp, "Humidity %" : Humidity, \
                        "Cloudiness" : Cloudiness, 'Wind Speed, mph' : Wind_Speed, 'Country' : Country , 'Date' : Date})
len(main_df)
main_df
#clean up data by removing rows with NaN
main_df = main_df.drop(labels = main_df[pd.isna(main_df['Latitude'])]['Latitude'].index)
# BUG FIX: reset_index returns a new frame; the result was previously
# discarded, leaving gaps in the index after the drop above.
main_df = main_df.reset_index(drop = True)
main_df.to_csv(output_data_file)
```
## Inspect the data and remove the cities where the humidity > 100%.
----
Skip this step if there are no cities that have humidity > 100%.
```
#there are no cities with humidity over 100%
main_df.loc[(main_df['Humidity %'] > 100)][:]
# Get the indices of cities that have humidity over 100%.
humidity_index = main_df.loc[(main_df['Humidity %'] > 100)].index
humidity_index
# Make a new DataFrame equal to the city data to drop all humidity outliers by index.
# Passing "inplace=False" will make a copy of the city_data DataFrame, which we call "clean_city_data".
clean_city_data = main_df.drop(index = humidity_index)
clean_city_data
#This line is here because I had one datapoint with a 800 mph wind speed.
# Clamp implausible wind speeds (>100 mph) to 0 instead of dropping the row.
clean_city_data.loc[clean_city_data['Wind Speed, mph'] > 100,'Wind Speed, mph'] = 0
# Extract relevant fields from the data frame
# Export the City_Data into a csv
clean_city_data.to_csv(output_data_file_2, index = False)
```
## Plotting the Data
* Use proper labeling of the plots using plot titles (including date of analysis) and axes labels.
* Save the plotted figures as .pngs.
## Latitude vs. Temperature Plot
```
# Stamp the analysis date into the chart title.
now = datetime.now()
date = now.strftime("%m/%d/%Y")
temperature_title = "Latitude versus Temperature\n"+date
clean_city_data.plot(x = 'Latitude', y = 'Max temp, F', kind='scatter', title = temperature_title)
plt.show()
```
pretty clear correlation between latitude and temperature.
## Latitude vs. Humidity Plot
```
# Scatter of humidity against latitude, titled with the analysis date.
humidity_title = "Latitude versus Humidity\n"+date
clean_city_data.plot(x = 'Latitude', y = 'Humidity %', kind='scatter', title = humidity_title)
plt.show()
```
Weak correlation between Humidity and latitude
## Latitude vs. Cloudiness Plot
```
# Scatter of cloudiness against latitude, titled with the analysis date.
cloudiness_title = "Latitude versus Cloudiness\n"+date
clean_city_data.plot(x = 'Latitude', y = 'Cloudiness', kind='scatter', title = cloudiness_title)
plt.show()
```
No correlation between latitude and Cloudiness
## Latitude vs. Wind Speed Plot
```
# Scatter of wind speed against latitude, titled with the analysis date.
wind_speed_title = "Latitude versus Wind Speed\n"+date
clean_city_data.plot(x = 'Latitude', y = 'Wind Speed, mph', kind='scatter', title = wind_speed_title)
plt.show()
```
No correlation between Wind Speed and latitude
## Linear Regression
```
# OPTIONAL: Create a function to create Linear Regression plots
# NOTE(review): this stub only scatters the points -- `graph_title` is unused
# and no regression line is drawn; the real plotting happens in
# `graph_function` below.
def linear_regression(x_values, y_values, graph_title):
    slope, intercept, r_value, p_value, std_err = linregress(x_values, y_values)
    plt.scatter(x = x_values, y = y_values)
    return
# Create Northern and Southern Hemisphere DataFrames
#I created a dataframe of dataframes. One is 'north' and the other 'south'
# NOTE(review): cities exactly on the equator (Latitude == 0) fall in BOTH
# frames because both comparisons are inclusive.
city_north_df = clean_city_data.loc[clean_city_data['Latitude'] >= 0][:]
city_south_df = clean_city_data.loc[clean_city_data['Latitude'] <= 0][:]
city_north_df.head()
city_south_df.head()
data_df = {'North' : city_north_df, 'South' : city_south_df}
```
#### Generate all graphs
```
# These 2 lists drive the nested loops below: one plot is produced per
# (hemisphere, y-axis variable) combination.
hemisphere_list = ['North', 'South']
y_axis_data_list = ['Max temp, F', 'Humidity %', 'Cloudiness', 'Wind Speed, mph']
# Evaluate the regression line y = slope*x + intercept at the two latitude
# extremes; returns (y_at_x_max, y_at_x_min) for drawing the trend line.
def y_values(slope, intercept, x_max, x_min):
    endpoints = tuple(slope * x + intercept for x in (x_max, x_min))
    return endpoints
#main graphing function to complete all graphs.
def graph_function(hemisphere, y_axis_data):
    """Scatter `y_axis_data` vs. latitude for one hemisphere, overlay the
    linear-regression line with its equation and r**2, save the figure to
    ./plots/, then show it."""
    plt.figure()
    plt.scatter(x = data_df[hemisphere]['Latitude'], y = data_df[hemisphere][y_axis_data])
    plt.title('{}ern Hemisphere - {} vs. Latitude\n Linear Regression'.format(hemisphere, y_axis_data))
    slope, intercept, r_value, p_value, std_err = linregress(x = data_df[hemisphere]['Latitude'], y = data_df[hemisphere][y_axis_data])
    r_squared = r_value ** 2
    # Two endpoints suffice to draw the straight regression line.
    x_max = data_df[hemisphere]['Latitude'].max()
    x_min = data_df[hemisphere]['Latitude'].min()
    y_max, y_min = y_values(slope, intercept, x_max, x_min)
    x_line = [x_max, x_min]
    y_line = [y_max, y_min]
    #print(y_max)
    plt.plot(x_line, y_line, color = 'red')
    text_equation = 'y = {:.2f} * x + {:.2f}\nr**2 = {:.4f}'.format(slope, intercept, r_squared)
    plt.text(x_min, y_max,text_equation, color = 'r', bbox=dict(facecolor='w', alpha=0.5))
    plt.legend(['correlation', 'raw data'] , bbox_to_anchor=(1.05, 1), loc = 'upper left')
    filename = "./plots/graph%sern_hemisphere_and_%s.png" % (hemisphere, y_axis_data)
    plt.savefig(filename, bbox_inches='tight')
    #this line is key to making the graphs plot one after another
    plt.show()
# Iterate over both hemispheres and every required y-axis variable,
# producing (and saving) one regression scatter plot per combination.
# The lists and the dataframe-of-dataframes are defined in the cells above.
for hemisphere in hemisphere_list:
    for y_axis_data in y_axis_data_list:
        graph_function(hemisphere, y_axis_data)
```
The graphs pretty much speak for themselves. Only observed correlation with Max Temperature. All other variables seem to not correlate.
#### Southern Hemisphere - Max Temp vs. Latitude Linear Regression
#### Northern Hemisphere - Humidity (%) vs. Latitude Linear Regression
#### Southern Hemisphere - Humidity (%) vs. Latitude Linear Regression
#### Northern Hemisphere - Cloudiness (%) vs. Latitude Linear Regression
#### Southern Hemisphere - Cloudiness (%) vs. Latitude Linear Regression
#### Northern Hemisphere - Wind Speed (mph) vs. Latitude Linear Regression
#### Southern Hemisphere - Wind Speed (mph) vs. Latitude Linear Regression
| github_jupyter |
```
#Import the necessary methods from tweepy library
from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
from tweepy import Stream
#Variables that contains the user credentials to access Twitter API
access_token = "your_access_token"
access_token_secret = "your_access_secret_token"
consumer_key = "your_consumer_key"
consumer_secret = "your_consumer_secret"
#This is a basic listener that just prints received tweets to stdout.
class StdOutListener(StreamListener):
    """Minimal tweepy stream listener that echoes each tweet to stdout."""
    def on_data(self, data):
        # `data` is one raw tweet as a JSON string; returning True keeps the
        # stream connection open.
        print(data)
        return True
    def on_error(self, status):
        # Print the HTTP error status (e.g. 420 = rate limited) and carry on.
        print(status)
if __name__ == '__main__':
    # Handle Twitter authentication and the connection to the Streaming API.
    l = StdOutListener()
    auth = OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_token_secret)
    stream = Stream(auth, l)
    # Filter the stream by the keyword 'honda'; case variants are listed
    # explicitly to be safe about per-keyword matching.
    stream.filter(track=['honda','Honda','HONDA'])
```
### Then from your terminal, execute this script with output piped to a text file: your_script.py > tweets_data.txt
# Then run this script below to create a Python dataframe of the tweets data
```
%matplotlib inline
import json
import string
import pandas as pd
import matplotlib.pyplot as plt
from os import path
pd.set_option("display.max_rows",1000)
pd.set_option("display.max_columns",20)
pd.set_option("display.max_colwidth",150)
d = path.dirname('/home/pybokeh/temp/')
tweets_data = []
tweets_file = open(path.join(d, 'cancer_tweets_data.txt'),'r')
for line in tweets_file:
try:
tweet = json.loads(line)
tweets_data.append(tweet)
except:
continue
print(len(tweets_data))
tweets = pd.DataFrame()
tweets['text'] = [tweet['text'] for tweet in tweets_data]
tweets['lang'] = [tweet['lang'] for tweet in tweets_data]
tweets['retweeted'] = [tweet['retweeted'] for tweet in tweets_data]
tweets.head()
english_tweets = tweets[(tweets['lang']=='en') & (tweets['retweeted']==False)]
english_tweets.drop_duplicates(subset='text');
text = ''
for line in english_tweets['text']:
text = text + ' ' + line
text = text.replace("'s",'')
%matplotlib inline
from os import path
# NOTE(review): `scipy.misc.imread` was removed in SciPy >= 1.2, so this
# import fails on modern SciPy.  It is only needed by the commented-out mask
# code below -- consider deleting it or switching to `imageio.imread`.
from scipy.misc import imread
import matplotlib.pyplot as plt
from wordcloud import WordCloud, STOPWORDS
d = path.dirname('/home/pybokeh/Downloads/')
# Read the whole text.
#text = strWords
#text = open(path.join(d, 'alice.txt')).read()
# Twitter-specific noise tokens to exclude from the cloud, in addition to the
# default STOPWORDS set.
additional_words = [
'rt',
'ebay',
'co',
't',
'amp',
'https'
]
for word in additional_words:
    STOPWORDS.add(word)
# read the mask image
# taken from
# http://www.stencilry.org/stencils/movies/alice%20in%20wonderland/255fk.jpg
#honda_mask = imread(path.join(d, "honda_logo_mask.png"), flatten=True)
#wc = WordCloud(background_color="black", max_words=2000, mask=honda_mask, stopwords=STOPWORDS)
# generate word cloud
wc = WordCloud(width=800, height=600).generate(text)
# store to file
wc.to_file(path.join(d, "cancer_word_cloud.png"))
# show
plt.imshow(wc)
plt.axis("off")
#plt.figure()
#plt.imshow(honda_mask, cmap=plt.cm.gray)
#plt.axis("off")
plt.show()
# Tweets mentioning food/nutrients, for a quick qualitative look.
prevent = tweets[(tweets['text'].str.contains('food')) | (tweets['text'].str.contains('nutrient'))]
prevent['text']
# Top 50 (word, weight) pairs the cloud was built from.
wc.process_text(text)[:50]
STOPWORDS
```
| github_jupyter |
# Numerical Stability and Initialization
:label:`sec_numerical_stability`
Thus far, every model that we have implemented
required that we initialize its parameters
according to some pre-specified distribution.
Until now, we took the initialization scheme for granted,
glossing over the details of how these choices are made.
You might have even gotten the impression that these choices
are not especially important.
To the contrary, the choice of initialization scheme
plays a significant role in neural network learning,
and it can be crucial for maintaining numerical stability.
Moreover, these choices can be tied up in interesting ways
with the choice of the nonlinear activation function.
Which function we choose and how we initialize parameters
can determine how quickly our optimization algorithm converges.
Poor choices here can cause us to encounter
exploding or vanishing gradients while training.
In this section, we delve into these topics with greater detail
and discuss some useful heuristics
that you will find useful
throughout your career in deep learning.
## Vanishing and Exploding Gradients
Consider a deep network with $L$ layers,
input $\mathbf{x}$ and output $\mathbf{o}$.
With each layer $l$ defined by a transformation $f_l$
parameterized by weights $\mathbf{W}^{(l)}$,
whose hidden variable is $\mathbf{h}^{(l)}$ (let $\mathbf{h}^{(0)} = \mathbf{x}$),
our network can be expressed as:
$$\mathbf{h}^{(l)} = f_l (\mathbf{h}^{(l-1)}) \text{ and thus } \mathbf{o} = f_L \circ \ldots \circ f_1(\mathbf{x}).$$
If all the hidden variables and the input are vectors,
we can write the gradient of $\mathbf{o}$ with respect to
any set of parameters $\mathbf{W}^{(l)}$ as follows:
$$\partial_{\mathbf{W}^{(l)}} \mathbf{o} = \underbrace{\partial_{\mathbf{h}^{(L-1)}} \mathbf{h}^{(L)}}_{ \mathbf{M}^{(L)} \stackrel{\mathrm{def}}{=}} \cdot \ldots \cdot \underbrace{\partial_{\mathbf{h}^{(l)}} \mathbf{h}^{(l+1)}}_{ \mathbf{M}^{(l+1)} \stackrel{\mathrm{def}}{=}} \underbrace{\partial_{\mathbf{W}^{(l)}} \mathbf{h}^{(l)}}_{ \mathbf{v}^{(l)} \stackrel{\mathrm{def}}{=}}.$$
In other words, this gradient is
the product of $L-l$ matrices
$\mathbf{M}^{(L)} \cdot \ldots \cdot \mathbf{M}^{(l+1)}$
and the gradient vector $\mathbf{v}^{(l)}$.
Thus we are susceptible to the same
problems of numerical underflow that often crop up
when multiplying together too many probabilities.
When dealing with probabilities, a common trick is to
switch into log-space, i.e., shifting
pressure from the mantissa to the exponent
of the numerical representation.
Unfortunately, our problem above is more serious:
initially the matrices $\mathbf{M}^{(l)}$ may have a wide variety of eigenvalues.
They might be small or large, and
their product might be *very large* or *very small*.
The risks posed by unstable gradients
go beyond numerical representation.
Gradients of unpredictable magnitude
also threaten the stability of our optimization algorithms.
We may be facing parameter updates that are either
(i) excessively large, destroying our model
(the *exploding gradient* problem);
or (ii) excessively small
(the *vanishing gradient* problem),
rendering learning impossible as parameters
hardly move on each update.
### Vanishing Gradients
One frequent culprit causing the vanishing gradient problem
is the choice of the activation function $\sigma$
that is appended following each layer's linear operations.
Historically, the sigmoid function
$1/(1 + \exp(-x))$ (introduced in :numref:`sec_mlp`)
was popular because it resembles a thresholding function.
Since early artificial neural networks were inspired
by biological neural networks,
the idea of neurons that fire either *fully* or *not at all*
(like biological neurons) seemed appealing.
Let us take a closer look at the sigmoid
to see why it can cause vanishing gradients.
```
%matplotlib inline
from d2l import mxnet as d2l
from mxnet import autograd, np, npx
npx.set_np()
# Plot sigmoid(x) and its gradient over [-8, 8): the gradient vanishes for
# large |x|, illustrating the vanishing-gradient problem.
x = np.arange(-8.0, 8.0, 0.1)
x.attach_grad()
with autograd.record():
    y = npx.sigmoid(x)
y.backward()
d2l.plot(x, [y, x.grad], legend=['sigmoid', 'gradient'], figsize=(4.5, 2.5))
```
As you can see, the sigmoid's gradient vanishes
both when its inputs are large and when they are small.
Moreover, when backpropagating through many layers,
unless we are in the Goldilocks zone, where
the inputs to many of the sigmoids are close to zero,
the gradients of the overall product may vanish.
When our network boasts many layers,
unless we are careful, the gradient
will likely be cut off at some layer.
Indeed, this problem used to plague deep network training.
Consequently, ReLUs, which are more stable
(but less neurally plausible),
have emerged as the default choice for practitioners.
### Exploding Gradients
The opposite problem, when gradients explode,
can be similarly vexing.
To illustrate this a bit better,
we draw 100 Gaussian random matrices
and multiply them with some initial matrix.
For the scale that we picked
(the choice of the variance $\sigma^2=1$),
the matrix product explodes.
When this happens due to the initialization
of a deep network, we have no chance of getting
a gradient descent optimizer to converge.
```
# Draw an initial 4x4 Gaussian matrix, then right-multiply it by 100 more
# Gaussian matrices; with variance 1 the product's entries blow up,
# illustrating the exploding-gradient problem.
M = np.random.normal(size=(4, 4))
print('a single matrix', M)
for _ in range(100):
    M = M @ np.random.normal(size=(4, 4))
print('after multiplying 100 matrices', M)
```
### Breaking the Symmetry
Another problem in neural network design
is the symmetry inherent in their parametrization.
Assume that we have a simple MLP
with one hidden layer and two units.
In this case, we could permute the weights $\mathbf{W}^{(1)}$
of the first layer and likewise permute
the weights of the output layer
to obtain the same function.
There is nothing special differentiating
the first hidden unit vs. the second hidden unit.
In other words, we have permutation symmetry
among the hidden units of each layer.
This is more than just a theoretical nuisance.
Consider the aforementioned one-hidden-layer MLP
with two hidden units.
For illustration,
suppose that the output layer transforms the two hidden units into only one output unit.
Imagine what would happen if we initialized
all of the parameters of the hidden layer
as $\mathbf{W}^{(1)} = c$ for some constant $c$.
In this case, during forward propagation
either hidden unit takes the same inputs and parameters,
producing the same activation,
which is fed to the output unit.
During backpropagation,
differentiating the output unit with respect to parameters $\mathbf{W}^{(1)}$ gives a gradient whose elements all take the same value.
Thus, after gradient-based iteration (e.g., minibatch stochastic gradient descent),
all the elements of $\mathbf{W}^{(1)}$ still take the same value.
Such iterations would
never *break the symmetry* on its own
and we might never be able to realize
the network's expressive power.
The hidden layer would behave
as if it had only a single unit.
Note that while minibatch stochastic gradient descent would not break this symmetry,
dropout regularization would!
## Parameter Initialization
One way of addressing---or at least mitigating---the
issues raised above is through careful initialization.
Additional care during optimization
and suitable regularization can further enhance stability.
### Default Initialization
In the previous sections, e.g., in :numref:`sec_linear_concise`,
we used a normal distribution
to initialize the values of our weights.
If we do not specify the initialization method, the framework will
use a default random initialization method, which often works well in practice
for moderate problem sizes.
### Xavier Initialization
:label:`subsec_xavier`
Let us look at the scale distribution of
an output (e.g., a hidden variable) $o_{i}$ for some fully-connected layer
*without nonlinearities*.
With $n_\mathrm{in}$ inputs $x_j$
and their associated weights $w_{ij}$ for this layer,
an output is given by
$$o_{i} = \sum_{j=1}^{n_\mathrm{in}} w_{ij} x_j.$$
The weights $w_{ij}$ are all drawn
independently from the same distribution.
Furthermore, let us assume that this distribution
has zero mean and variance $\sigma^2$.
Note that this does not mean that the distribution has to be Gaussian,
just that the mean and variance need to exist.
For now, let us assume that the inputs to the layer $x_j$
also have zero mean and variance $\gamma^2$
and that they are independent of $w_{ij}$ and independent of each other.
In this case, we can compute the mean and variance of $o_i$ as follows:
$$
\begin{aligned}
E[o_i] & = \sum_{j=1}^{n_\mathrm{in}} E[w_{ij} x_j] \\&= \sum_{j=1}^{n_\mathrm{in}} E[w_{ij}] E[x_j] \\&= 0, \\
\mathrm{Var}[o_i] & = E[o_i^2] - (E[o_i])^2 \\
& = \sum_{j=1}^{n_\mathrm{in}} E[w^2_{ij} x^2_j] - 0 \\
& = \sum_{j=1}^{n_\mathrm{in}} E[w^2_{ij}] E[x^2_j] \\
& = n_\mathrm{in} \sigma^2 \gamma^2.
\end{aligned}
$$
One way to keep the variance fixed
is to set $n_\mathrm{in} \sigma^2 = 1$.
Now consider backpropagation.
There we face a similar problem,
albeit with gradients being propagated from the layers closer to the output.
Using the same reasoning as for forward propagation,
we see that the gradients' variance can blow up
unless $n_\mathrm{out} \sigma^2 = 1$,
where $n_\mathrm{out}$ is the number of outputs of this layer.
This leaves us in a dilemma:
we cannot possibly satisfy both conditions simultaneously.
Instead, we simply try to satisfy:
$$
\begin{aligned}
\frac{1}{2} (n_\mathrm{in} + n_\mathrm{out}) \sigma^2 = 1 \text{ or equivalently }
\sigma = \sqrt{\frac{2}{n_\mathrm{in} + n_\mathrm{out}}}.
\end{aligned}
$$
This is the reasoning underlying the now-standard
and practically beneficial *Xavier initialization*,
named after the first author of the paper that proposed it :cite:`Glorot.Bengio.2010`.
Typically, the Xavier initialization
samples weights from a Gaussian distribution
with zero mean and variance
$\sigma^2 = \frac{2}{n_\mathrm{in} + n_\mathrm{out}}$.
We can also adapt Xavier's intuition to
choose the variance when sampling weights
from a uniform distribution.
Note that the uniform distribution $U(-a, a)$ has variance $\frac{a^2}{3}$.
Plugging $\frac{a^2}{3}$ into our condition on $\sigma^2$
yields the suggestion to initialize according to
$$U\left(-\sqrt{\frac{6}{n_\mathrm{in} + n_\mathrm{out}}}, \sqrt{\frac{6}{n_\mathrm{in} + n_\mathrm{out}}}\right).$$
Though the assumption that no nonlinearities are present
in the above mathematical reasoning
is easily violated in neural networks,
the Xavier initialization method
turns out to work well in practice.
### Beyond
The reasoning above barely scratches the surface
of modern approaches to parameter initialization.
A deep learning framework often implements over a dozen different heuristics.
Moreover, parameter initialization continues to be
a hot area of fundamental research in deep learning.
Among these are heuristics specialized for
tied (shared) parameters, super-resolution,
sequence models, and other situations.
For instance,
Xiao et al. demonstrated the possibility of training
10000-layer neural networks without architectural tricks
by using a carefully-designed initialization method :cite:`Xiao.Bahri.Sohl-Dickstein.ea.2018`.
If the topic interests you we suggest
a deep dive into this module's offerings,
reading the papers that proposed and analyzed each heuristic,
and then exploring the latest publications on the topic.
Perhaps you will stumble across or even invent
a clever idea and contribute an implementation to deep learning frameworks.
## Summary
* Vanishing and exploding gradients are common issues in deep networks. Great care in parameter initialization is required to ensure that gradients and parameters remain well controlled.
* Initialization heuristics are needed to ensure that the initial gradients are neither too large nor too small.
* ReLU activation functions mitigate the vanishing gradient problem. This can accelerate convergence.
* Random initialization is key to ensure that symmetry is broken before optimization.
* Xavier initialization suggests that, for each layer, variance of any output is not affected by the number of inputs, and variance of any gradient is not affected by the number of outputs.
## Exercises
1. Can you design other cases where a neural network might exhibit symmetry requiring breaking besides the permutation symmetry in an MLP's layers?
1. Can we initialize all weight parameters in linear regression or in softmax regression to the same value?
1. Look up analytic bounds on the eigenvalues of the product of two matrices. What does this tell you about ensuring that gradients are well conditioned?
1. If we know that some terms diverge, can we fix this after the fact? Look at the paper on layer-wise adaptive rate scaling for inspiration :cite:`You.Gitman.Ginsburg.2017`.
[Discussions](https://discuss.d2l.ai/t/103)
| github_jupyter |
# hello paddle: ไปๆฎ้็จๅบ่ตฐๅๆบๅจๅญฆไน ็จๅบ
**ไฝ่
:** [PaddlePaddle](https://github.com/PaddlePaddle) <br>
**ๆฅๆ:** 2021.12 <br>
**ๆ่ฆ:** ่ฟ็ฏ็คบไพๅไฝ ไป็ปๆฎ้็จๅบ่ทๆบๅจๅญฆไน ็จๅบ็ๅบๅซ๏ผๅนถๅธฆ็ไฝ ็จ้ฃๆกจๆกๆถ๏ผๅฎ็ฐ็ฌฌไธไธชๆบๅจๅญฆไน ็จๅบใ
## ไธใๆฎ้็จๅบ่ทๆบๅจๅญฆไน ็จๅบ็้ป่พๅบๅซ
ไฝไธบไธๅๅผๅ่
๏ผไฝ ๆ็ๆ็ๅผๅงๅญฆไน ไธ้จ็ผ็จ่ฏญ่จ๏ผๆ่
ไธไธชๆทฑๅบฆๅญฆไน ๆกๆถ็ๆนๅผ๏ผๅฏ่ฝๆฏ้่ฟไธไธชhello world็จๅบใ
ๅญฆไน ้ฃๆกจไนๅฏไปฅ่ฟๆ ท๏ผ่ฟ็ฏๅฐ็คบไพๆ็จๅฐไผ้่ฟไธไธช้ๅธธ็ฎๅ็็คบไพๆฅๅไฝ ๅฑ็คบๅฆไฝๅผๅงไฝฟ็จ้ฃๆกจใ
ๆบๅจๅญฆไน ็จๅบ่ท้ๅธธ็็จๅบๆๅคง็ไธๅๆฏ๏ผ้ๅธธ็็จๅบๆฏๅจ็ปๅฎ่พๅ
ฅ็ๆ
ๅตไธ๏ผ้่ฟๅ่ฏ่ฎก็ฎๆบๅค็ๆฐๆฎ็่งๅ๏ผ็ถๅๅพๅฐๅค็ๅ็็ปๆใ่ๆบๅจๅญฆไน ็จๅบๅๆฏๅจๅนถไธ็ฅ้่ฟไบ่งๅ็ๆ
ๅตไธ๏ผ่ฎฉๆบๅจๆฅไปๆฐๆฎๅฝไธญ**ๅญฆไน **ๅบๆฅ่งๅใ
ไฝไธบ็ญ่บซ๏ผๅ
ๆฅ็็้ๅธธ็็จๅบๆๅ็ไบๆ
ใ
็ฐๅจ้ขไธด่ฟๆ ทไธไธชไปปๅก๏ผ
ไนๅๅบ็ง่ฝฆ็ๆถๅ๏ผไผๆไธไธช10ๅ
็่ตทๆญฅไปท๏ผๅช่ฆไธ่ฝฆๅฐฑ้่ฆๆถๅใๅบ็ง่ฝฆๆฏ่ก้ฉถ1ๅ
ฌ้๏ผ้่ฆๅๆฏไปๆฏๅ
ฌ้2ๅ
็่ก้ฉถ่ดน็จใๅฝไธไธชไนๅฎขๅๅฎๅบ็ง่ฝฆไนๅ๏ผ่ฝฆไธ็่ฎกไปทๅจ้่ฆ็ฎๅบๆฅ่ฏฅไนๅฎข้่ฆๆฏไป็ไน่ฝฆ่ดน็จใ
ๅฆๆ็จpythonๆฅๅฎ็ฐ่ฏฅๅ่ฝ๏ผไผๅฆไธๆ็คบ๏ผ
```
def calculate_fee(distance_travelled):
    """Taxi fare: a flat 10-yuan flagfall plus 2 yuan per kilometre travelled."""
    base_fare = 10
    per_km_rate = 2
    return base_fare + per_km_rate * distance_travelled

# Print the fare for a handful of sample trip distances.
for trip_km in (1.0, 3.0, 5.0, 9.0, 10.0, 20.0):
    print(calculate_fee(trip_km))
```
ๆฅไธๆฅ๏ผๆ้ฎ้ข็จๅพฎๅๆขไธไธ๏ผ็ฐๅจ็ฅ้ไนๅฎขๆฏๆฌกไนๅๅบ็ง่ฝฆ็ๅ
ฌ้ๆฐ๏ผไน็ฅ้ไนๅฎขๆฏๆฌกไธ่ฝฆ็ๆถๅๆฏไป็ปๅบ็ง่ฝฆๅธๆบ็ๆป่ดน็จใไฝๆฏๅนถไธ็ฅ้ไน่ฝฆ็่ตทๆญฅไปท๏ผไปฅๅๆฏๅ
ฌ้่ก้ฉถ่ดน็จๆฏๅคๅฐใๅธๆ่ฎฉๆบๅจไป่ฟไบๆฐๆฎๅฝไธญๅญฆไน ๅบๆฅ่ฎก็ฎๆป่ดน็จ็่งๅใ
ๆดๅ
ทไฝ็๏ผๆณ่ฆ่ฎฉๆบๅจๅญฆไน ็จๅบ้่ฟๆฐๆฎๅญฆไน ๅบๆฅไธ้ข็ๅ
ฌๅผๅฝไธญ็ๅๆฐ `w` ๅๅๆฐ `b`๏ผ่ฟๆฏไธไธช้ๅธธ็ฎๅ็็คบไพ๏ผๆไปฅ`w`ๅ`b`้ฝๆฏๆตฎ็นๆฐ๏ผ้็ๅฏนๆทฑๅบฆๅญฆไน ไบ่งฃ็ๆทฑๅ
ฅ๏ผไฝ ๅฐไผ็ฅ้`w`ๅ`b`้ๅธธๆ
ๅตไธไผๆฏ็ฉ้ตๅๅ้๏ผใ่ฟๆ ท๏ผๅฝไธๆฌกไน่ฝฆ็ๆถๅ๏ผ็ฅ้ไบ่ก้ฉถ้็จ`distance_travelled`็ๆถๅ๏ผๅฐฑๅฏไปฅไผฐ็ฎๅบๆฅ็จๆท็ๆป่ดน็จ`total_fee`ไบใ
```
total_fee = w * distance_travelled + b
```
ๆฅไธๆฅ๏ผ็็็จ้ฃๆกจๅฆไฝๅฎ็ฐ่ฟไธชhello, world็บงๅซ็ๆบๅจๅญฆไน ็จๅบใ
## ไบใๅฏผๅ
ฅ้ฃๆกจ
ไธบไบ่ฝๅคไฝฟ็จ้ฃๆกจ๏ผ้่ฆๅ
็จpython็`import`่ฏญๅฅๅฏผๅ
ฅ้ฃๆกจ`paddle`ใ
ๅๆถ๏ผไธบไบ่ฝๅคๆดๅฅฝ็ๅฏนๆฐ็ป่ฟ่ก่ฎก็ฎๅๅค็๏ผ่ฟ้่ฆๅฏผๅ
ฅ`numpy`ใ
ๅฆๆไฝ ๆฏๅจๆฌๆบ่ฟ่ก่ฟไธชnotebook๏ผ่ไธ่ฟๆฒกๆๅฎ่ฃ
้ฃๆกจ๏ผ่ฏทๅ
ๅ่ๅฎ็ฝ[ๅฎ่ฃ
](https://www.paddlepaddle.org.cn/install/quick) Paddle 2.2.0ใ
```
import paddle
# Confirm which paddle version is installed (this tutorial targets 2.2.0).
print("paddle " + paddle.__version__)
```
## ไธใๅๅคๆฐๆฎ
ๅจ่ฟไธชๆบๅจๅญฆไน ไปปๅกไธญ๏ผๅทฒ็ป็ฅ้ไบไนๅฎข็่ก้ฉถ้็จ`distance_travelled`๏ผๅๅฏนๅบ็๏ผ่ฟไบไนๅฎข็ๆป่ดน็จ`total_fee`ใ
้ๅธธๆ
ๅตไธ๏ผๅจๆบๅจๅญฆไน ไปปๅกไธญ๏ผๅ`distance_travelled`่ฟๆ ท็่พๅ
ฅๅผ๏ผไธ่ฌ่ขซ็งฐไธบ`x`๏ผๆ่
็นๅพ`feature`๏ผ๏ผๅ`total_fee`่ฟๆ ท็่พๅบๅผ๏ผไธ่ฌ่ขซ็งฐไธบ`y`๏ผๆ่
ๆ ็ญพ`label`)ใ
ๅฏไปฅ็จ`paddle.to_tensor`ๆ็คบไพๆฐๆฎ่ฝฌๆขไธบpaddle็Tensorๆฐๆฎใ
```
# Training data: distances travelled (features, x) and the fares actually
# paid (labels, y), wrapped as paddle Tensors of shape [6, 1].
x_data = paddle.to_tensor([[1.], [3.0], [5.0], [9.0], [10.0], [20.0]])
y_data = paddle.to_tensor([[12.], [16.0], [20.0], [28.0], [30.0], [50.0]])
```
## ๅใ็จ้ฃๆกจๅฎไนๆจกๅ็่ฎก็ฎ
ไฝฟ็จ้ฃๆกจๅฎไนๆจกๅ็่ฎก็ฎ็่ฟ็จ๏ผๆฌ่ดจไธ๏ผๆฏ็จpython๏ผ้่ฟ้ฃๆกจๆไพ็API๏ผๆฅๅ่ฏ้ฃๆกจ่ฎก็ฎ่งๅ็่ฟ็จใๅ้กพไธไธ๏ผๆณ่ฆ้่ฟ้ฃๆกจ็จๆบๅจๅญฆไน ๆนๆณ๏ผไปๆฐๆฎๅฝไธญๅญฆไน ๅบๆฅๅฆไธๅ
ฌๅผๅฝไธญ็`w`ๅ`b`ใ่ฟๆ ทๅจๆชๆฅ๏ผ็ปๅฎ`x`ๆถๅฐฑๅฏไปฅไผฐ็ฎๅบๆฅ`y`ๅผ๏ผไผฐ็ฎๅบๆฅ็`y`่ฎฐไธบ`y_predict`๏ผ
```
y_predict = w * x + b
```
ๅฐไผ็จ้ฃๆกจ็็บฟๆงๅๆขๅฑ๏ผ`paddle.nn.Linear`ๆฅๅฎ็ฐ่ฟไธช่ฎก็ฎ่ฟ็จ๏ผ่ฟไธชๅ
ฌๅผ้็ๅ้`x, y, w, b, y_predict`๏ผๅฏนๅบ็้ฃๆกจ้้ข็[Tensorๆฆๅฟต](https://www.paddlepaddle.org.cn/documentation/docs/zh/beginners_guide/basic_concept/tensor.html)ใ
**็จๅพฎ่กฅๅ
ไธไธ**
ๅจ่ฟ้็็คบไพไธญ๏ผๆ นๆฎ็ป้ช๏ผๅทฒ็ปไบๅ
็ฅ้ไบ`distance_travelled`ๅ`total_fee`ไน้ดๆฏ็บฟๆง็ๅ
ณ็ณป๏ผ่ๅจๆดๅฎ้
็้ฎ้ขๅฝไธญ๏ผ`x`ๅ`y`็ๅ
ณ็ณป้ๅธธๆฏ้็บฟๆง็๏ผๅ ๆญคไนๅฐฑ้่ฆไฝฟ็จๆดๅค็ฑปๅ๏ผไนๆดๅคๆ็็ฅ็ป็ฝ็ปใ(ๆฏๅฆ๏ผBMIๆๆฐ่ทไฝ ็่บซ้ซๅฐฑไธๆฏ็บฟๆงๅ
ณ็ณป๏ผไธๅผ ๅพ็้็ๆไธชๅ็ด ๅผ่ท่ฟไธชๅพ็ๆฏ็ซ่ฟๆฏ็ไนไธๆฏ็บฟๆงๅ
ณ็ณปใ๏ผ
```
linear = paddle.nn.Linear(in_features=1, out_features=1)
```
## ไบใๅๅคๅฅฝ่ฟ่ก้ฃๆกจ
ๆบๅจ๏ผ่ฎก็ฎๆบ๏ผๅจไธๅผๅง็ๆถๅไผ้ไพฟ็`w`ๅ`b`๏ผๅ
็็ๆบๅจ็็ๆไนๆ ทใไฝ ๅบ่ฏฅๅฏไปฅ็ๅฐ๏ผ่ฟๆถๅ็`w`ๆฏไธไธช้ๆบๅผ๏ผ`b`ๆฏ0.0๏ผ่ฟๆฏ้ฃๆกจ็ๅๅงๅ็ญ็ฅ๏ผไนๆฏ่ฟไธช้ขๅๅธธ็จ็ๅๅงๅ็ญ็ฅใ๏ผๅฆๆไฝ ๆฟๆ๏ผไนๅฏไปฅ้็จๅ
ถไป็ๅๅงๅ็ๆนๅผ๏ผไปๅไฝ ไนไผ็ๅฐ๏ผ้ๆฉไธๅ็ๅๅงๅ็ญ็ฅไนๆฏๅฏนไบๅๅฅฝๆทฑๅบฆๅญฆไน ไปปๅกๆฅ่ฏดๅพ้่ฆ็ไธ็น๏ผใ
```
# Inspect the parameters before any training: w is randomly initialized
# and b starts at 0.0 (the framework's default initialization strategy).
w_before_opt = linear.weight.numpy().item()
b_before_opt = linear.bias.numpy().item()
print("w before optimize: {}".format(w_before_opt))
print("b before optimize: {}".format(b_before_opt))
```
## ๅ
ญใๅ่ฏ้ฃๆกจๆไนๆ ทๅญฆไน
ๅ้ขๅฎไนๅฅฝไบ็ฅ็ป็ฝ็ป๏ผๅฐฝ็ฎกๆฏไธไธชๆ็ฎๅ็็ฅ็ป็ฝ็ป๏ผ๏ผ่ฟ้่ฆๅ่ฏ้ฃๆกจ๏ผๆไนๆ ทๅป**ๅญฆไน **๏ผไป่่ฝๅพๅฐๅๆฐ`w`ๅ`b`ใ
่ฟไธช่ฟ็จ็ฎๅ็ๆฅ้่ฟฐไธไธ๏ผไฝ ๅบ่ฏฅๅฐฑไผๅคง่ดๆ็ฝไบ๏ผๅฐฝ็ฎก่ๅ็็่ฎบๅ็ฅ่ฏ่ฟ้่ฆ้ๆญฅ็ๅปๅญฆไน ๏ผใๅจๆบๅจๅญฆไน /ๆทฑๅบฆๅญฆไน ๅฝไธญ๏ผๆบๅจ๏ผ่ฎก็ฎๆบ๏ผๅจๆๅผๅง็ๆถๅ๏ผๅพๅฐๅๆฐ`w`ๅ`b`็ๆนๅผๆฏ้ไพฟ็ไธไธ๏ผ็จ่ฟ็ง้ไพฟ็ๆตๅพๅฐ็ๅๆฐๅผ๏ผๅป่ฟ่ก่ฎก็ฎ๏ผ้ขๆต๏ผ็ๆถๅ๏ผๅพๅฐ็`y_predict`๏ผ่ทๅฎ้
็`y`ๅผไธๅฎๆฏๆ**ๅทฎ่ท**็ใๆฅไธๆฅ๏ผๆบๅจไผๆ นๆฎ่ฟไธชๅทฎ่ทๆฅ**่ฐๆด`w`ๅ`b`**๏ผ้็่ฟๆ ท็้ๆญฅ็่ฐๆด๏ผ`w`ๅ`b`ไผ่ถๆฅ่ถๆญฃ็กฎ๏ผ`y_predict`่ท`y`ไน้ด็ๅทฎ่ทไนไผ่ถๆฅ่ถๅฐ๏ผไป่ๆ็ป่ฝๅพๅฐๅฅฝ็จ็`w`ๅ`b`ใ่ฟไธช่ฟ็จๅฐฑๆฏๆบๅจ**ๅญฆไน **็่ฟ็จใ
็จๆดๅ ๆๆฏ็่ฏญ่จๆฅ่ฏด๏ผ่กก้**ๅทฎ่ท**็ๅฝๆฐ๏ผไธไธชๅ
ฌๅผ๏ผๅฐฑๆฏๆๅคฑๅฝๆฐ๏ผ็จๆฅ**่ฐๆด**ๅๆฐ็ๆนๆณๅฐฑๆฏไผๅ็ฎๆณใ
ๅจๆฌ็คบไพๅฝไธญ๏ผ็จๆ็ฎๅ็ๅๆน่ฏฏๅทฎ(mean square error)ไฝไธบๆๅคฑๅฝๆฐ(`paddle.nn.MSELoss`)๏ผๅๆๅธธ่ง็ไผๅ็ฎๆณSGD๏ผstocastic gradient descent)ไฝไธบไผๅ็ฎๆณ๏ผไผ ็ป`paddle.optimizer.SGD`็ๅๆฐ`learning_rate`๏ผไฝ ๅฏไปฅ็่งฃไธบๆงๅถๆฏๆฌก่ฐๆด็ๆญฅๅญๅคงๅฐ็ๅๆฐ๏ผใ
```
# Mean squared error measures the gap between predictions and labels;
# plain SGD with a small learning rate nudges w and b to close that gap.
mse_loss = paddle.nn.MSELoss()
sgd_optimizer = paddle.optimizer.SGD(learning_rate=0.001, parameters = linear.parameters())
```
## ไธใ่ฟ่กไผๅ็ฎๆณ
ๆฅไธๆฅ๏ผ่ฎฉ้ฃๆกจ่ฟ่กไธไธ่ฟไธชไผๅ็ฎๆณ๏ผ่ฟไผๆฏไธไธชๅ้ขไป็ป่ฟ็้ๆญฅ่ฐๆดๅๆฐ็่ฟ็จ๏ผไฝ ๅบ่ฏฅๅฏไปฅ็ๅฐlossๅผ๏ผ่กก้`y`ๅ`y_predict`็ๅทฎ่ท็`loss`)ๅจไธๆญ็้ไฝใ
```
total_epoch = 5000
for i in range(total_epoch):
    y_predict = linear(x_data)          # forward pass
    loss = mse_loss(y_predict, y_data)  # how far off are the predictions?
    loss.backward()                     # gradients of loss w.r.t. w and b
    sgd_optimizer.step()                # apply one SGD parameter update
    sgd_optimizer.clear_grad()          # reset gradients before the next epoch
    if i%1000 == 0:
        print("epoch {} loss {}".format(i, loss.numpy()))
print("finished training๏ผ loss {}".format(loss.numpy()))
```
## ๅ
ซใๆบๅจๅญฆไน ๅบๆฅ็ๅๆฐ
็ป่ฟไบ่ฟๆ ท็ๅฏนๅๆฐ`w`ๅ`b`็่ฐๆด๏ผ**ๅญฆไน **)๏ผๅ้่ฟไธ้ข็็จๅบ๏ผๆฅ็็็ฐๅจ็ๅๆฐๅๆไบๅคๅฐใไฝ ๅบ่ฏฅไผๅ็ฐ`w`ๅๆไบๅพๆฅ่ฟ2.0็ไธไธชๅผ๏ผ`b`ๅๆไบๆฅ่ฟ10.0็ไธไธชๅผใ่ฝ็ถๅนถไธๆฏๆญฃๅฅฝ็2ๅ10๏ผไฝๅดๆฏไปๆฐๆฎๅฝไธญๅญฆไน ๅบๆฅ็่ฟไธ้็ๆจกๅ็ๅๆฐ๏ผๅฏไปฅๅจๆชๆฅ็ๆถๅ๏ผ็จไป่ฟๆนๆฐๆฎๅฝไธญๅญฆไน ๅฐ็ๅๆฐๆฅ้ขไผฐไบใ๏ผๅฆๆไฝ ๆฟๆ๏ผไนๅฏไปฅ้่ฟ่ฎฉๆบๅจๅคๅญฆไน ไธๆฎตๆถ้ด๏ผไป่ๅพๅฐๆดๅ ๆฅ่ฟ2.0ๅ10.0็ๅๆฐๅผใ)
```
# Read the learned parameters back out; per the tutorial text they should be
# close to the generating values w = 2 and b = 10 used to create the data.
w_after_opt = linear.weight.numpy().item()
b_after_opt = linear.bias.numpy().item()
print("w after optimize: {}".format(w_after_opt))
print("b after optimize: {}".format(b_after_opt))
```
## ไนใhello paddle
้่ฟ่ฟไธชๅฐ็คบไพ๏ผๅธๆไฝ ๅทฒ็ปๅๆญฅไบ่งฃไบ้ฃๆกจ๏ผ่ฝๅจๆฅไธๆฅ้็ๅฏน้ฃๆกจ็ๆดๅคๅญฆไน ๏ผๆฅ่งฃๅณๅฎ้
้ๅฐ็้ฎ้ขใ
```
print("hello paddle")
```
| github_jupyter |
```
# Load shared helpers (pd, join, link_dt, prep_data_1d, prep_data_2d,
# write_to_excel, getSharesIdx, getSharesIdxCI95, ...) from the utility notebook.
%run ..\notebooks\Util_func.ipynb
# # Bay Area
# REG = 'BayArea'
# base_dir = r'Q:\Data\Surveys\HouseholdSurveys\MTC-SFCTA2018\Processing_20200228\2_tour_extract\wt_wkday'
# allwk_dir = r'Q:\Data\Surveys\HouseholdSurveys\MTC-SFCTA2018\Processing_20200228\2_tour_extract\wt_7day'
# raw_dir = r'Q:\Data\Surveys\HouseholdSurveys\MTC-SFCTA2018\Processing_20200228\spatial_join'
# out_file = r'out\%s_3_TNCTrips_Purpose.xlsx' %REG
# # SANDAG
# REG = 'SANDAG'
# base_dir = r'Q:\Data\Surveys\HouseholdSurveys\MTC-SFCTA2018\SANDAG\2_tour_extract\wt_wkday'
# allwk_dir = r'Q:\Data\Surveys\HouseholdSurveys\MTC-SFCTA2018\SANDAG\2_tour_extract\wt_7day'
# raw_dir = r'Q:\Data\Surveys\HouseholdSurveys\MTC-SFCTA2018\SANDAG'
# out_file = r'out2\%s_3_TNCTrips_Purpose.xlsx' %REG
# SCAG
# Active region; to switch regions, comment this group and uncomment one above.
REG = 'SCAG'
base_dir = r'Q:\Data\Surveys\HouseholdSurveys\MTC-SFCTA2018\SCAG_dataset_2020-02-27\2_tour_extract\wt_wkday'
allwk_dir = r'Q:\Data\Surveys\HouseholdSurveys\MTC-SFCTA2018\SCAG_dataset_2020-02-27\2_tour_extract\wt_7day'
raw_dir = r'Q:\Data\Surveys\HouseholdSurveys\MTC-SFCTA2018\SCAG_dataset_2020-02-27'
out_file = r'out2\%s_3_TNCTrips_Purpose.xlsx' %REG
## Process person records
# Raw person file with spatial-zone joins; the SANDAG/SCAG deliveries keep
# the imputed race/ethnicity in a separate ex2 file that must be merged on.
tmp_df = pd.read_csv(join(raw_dir, 'ex_person_wZones.csv'))
if REG=='SANDAG' or REG=='SCAG':
    tmp_df2 = pd.read_csv(join(raw_dir, 'ex2_person.tsv'), sep='\t')
    tmp_df = tmp_df.merge(tmp_df2[['hh_id','person_id','raceeth_new_imputed']], how='left')
# Keep demographics plus the seven day-of-week "all adult" weight columns.
tmp_df = tmp_df[['hh_id','person_id','person_num','raceeth_new_imputed','income_imputed','gender','age',
                 'wt_alladult_mon','wt_alladult_tue','wt_alladult_wed','wt_alladult_thu','wt_alladult_fri',
                 'wt_alladult_sat','wt_alladult_sun']]
tmp_df['person_id'] = tmp_df['person_id'].round().astype('int64')
# Rename to the column conventions used by the tour-extract files below.
tmp_df = tmp_df.rename(columns={'raceeth_new_imputed':'raceeth','income_imputed':'hinc'})
tmp_df = tmp_df.rename(columns={'hh_id':'hhno','person_num':'pno'})
per_df = pd.read_csv(join(base_dir,'survey2018_precx.dat'), sep=' ')
per_df = per_df.merge(tmp_df, how='left')
## Process trip records
# Raw trip attributes needed to classify TNC service type (pooled/regular/premium).
raw_trips = pd.read_csv(join(raw_dir, 'ex_trip_wZones.csv'))
raw_trips = raw_trips[['hh_id','person_num','trip_num','mode_uber','mode_lyft','mode_type_imputed']]
raw_trips = raw_trips.rename(columns={'hh_id':'hhno','person_num':'pno','trip_num':'tsvid'})
trip_df = pd.read_csv(join(base_dir,'survey2018_tripx.dat'), sep=' ')
# Person-level columns attached to every trip record in prep_df below.
req_percols = ['hhno','pno','raceeth','hinc','gender','age',
               'wt_alladult_mon','wt_alladult_tue','wt_alladult_wed','wt_alladult_thu','wt_alladult_fri',
               'wt_alladult_sat','wt_alladult_sun']
def prep_df(df):
    """Link trip legs, keep TNC (mode 9) trips, tag the TNC service type,
    attach person attributes, and drop records unusable for weighting.

    Relies on the module-level ``raw_trips``, ``per_df`` and ``req_percols``.
    """
    df = link_dt(df)
    df = df[df['mode'] == 9]
    df = df.merge(raw_trips, how='left')
    df = df[df['mode_type_imputed'] != 4]  # taxi is not TNC -- drop it
    # Classify TNC service type from the uber/lyft sub-mode answers.
    pooled_mask = (df['mode_uber'] == 1) | (df['mode_lyft'] == 1)
    regular_mask = (df['mode_uber'] == 2) | (df['mode_lyft'] == 2)
    df['tnc_type'] = 3               # premium / other
    df.loc[pooled_mask, 'tnc_type'] = 1    # pooled
    df.loc[regular_mask, 'tnc_type'] = 2   # regular
    df = df.merge(per_df[req_percols], how='left', on=['hhno', 'pno'])
    # Collapse special gender codes into "Other" / "Missing" buckets.
    df.loc[df['gender'] == 997, 'gender'] = 5                 # Other
    df.loc[df['gender'] == 999, 'gender'] = 6                 # NoAnswer
    df.loc[df['gender'].isin([-9998, 995]), 'gender'] = 6     # Missing
    df['dephr'] = (df['deptm'] / 100).astype(int)   # departure hour of day
    df['count'] = 1                                 # unweighted trip counter
    df = df[(df['trexpfac'] > 0) & (df['mode'] > 0)]
    df = df[(df['otaz'] > 0) & (df['dtaz'] > 0)]
    return df
trip_df = prep_df(trip_df)
# Tabulation metadata: for each dimension, the trip column to cut by, the
# category codes expected in the data ('vals'), and display labels (prefixed
# so they sort in the desired order in the Excel output).
col_dict = {
    'dpurp': {
        'desc': 'DPurp',
        'col': 'dpurp',
        'vals': range(0,8),
        'labels': ['1_Home','2_Work','3_School','4_Escort','5_PersBus','6_Shop','7_Meal','8_SocRec']
    },
    'raceeth': {
        'desc': 'RaceEth',
        'col': 'raceeth',
        'vals': range(1,6),
        'labels': ['1_Hispanic','2_Black','3_Asian/PI','4_White','5_Other']
    },
    'hinc': {
        'desc': 'HHInc',
        'col': 'hinc',
        'vals': range(1,9),
        'labels': ['1_25K','2_25_50K','3_50_75K','4_75_100K','5_100_150K','6_150_200K','7_200_250K','8_250K']
    },
    'age': {
        'desc': 'Age',
        'col': 'age',
        'vals': range(4,11),
        'labels': ['18-24','25-34','35-44','45-54','55-64','65-74','75+']
    },
    'gender': {
        'desc': 'Gend',
        'col': 'gender',
        'vals': range(1,7),
        'labels': ['1_F','2_M','3_Trns','4_NBin','5_Oth','6_Miss']
    },
    'tncmode': {
        'desc': 'TNCMode',
        'col': 'tnc_type',
        'vals': range(1,4),
        'labels': ['2_Pool','1_Reg','3_PremOth']
    },
    'tod': {
        'desc': 'TOD',
        'col': 'dephr',
        'vals': range(0,24),
        'labels': ['10_0AM','11_1AM','12_2AM','13_3AM','14_4AM','15_5AM','16_6AM','17_7AM',
                   '18_8AM','19_9AM','20_10AM','21_11AM','22_12AM','23_1PM','24_2PM','25_3PM',
                   '26_4PM','27_5PM','28_6PM','29_7PM','30_8PM','31_9PM','32_10PM','33_11PM']
    },
    'day': {
        'desc': 'DOW',
        'col': 'day',
        'vals': range(1,8),
        'labels': ['1_Mon','2_Tue','3_Wed','4_Thu','5_Fri','6_Sat','7_Sun']
    }
}
fname = out_file
writer = pd.ExcelWriter(fname, engine='xlsxwriter')
workbook = writer.book
format1 = workbook.add_format({'num_format': '#,##0.0'})  # one-decimal display format
from xlsxwriter.utility import xl_rowcol_to_cell
# --- Weekday sheet: 1-D tabulation of TNC trips by destination purpose ---
row = 0
sname = 'Weekday'
d1_dict = col_dict['dpurp']
title = 'TNC Trips by ' + d1_dict['desc']
tab = prep_data_1d(trip_df, d1_dict['desc'],d1_dict['col'], 'trexpfac', d1_dict['vals'], d1_dict['labels'])
row = write_to_excel(tab, sname, title, row)
title = 'Column Shares by ' + d1_dict['desc']
row = write_to_excel(getSharesIdx(tab.copy()), sname, title, row)
# Copy the table and move the first corner value into the last cell.
# NOTE(review): tab2 is not used again in this cell -- presumably prepared
# for the CI table but superseded; confirm before removing.
tab2 = tab.copy()
tab2.iloc[-1,-1] = tab2.iloc[-1,0]
title = 'Column Shares 95% CI by ' + d1_dict['desc']
row = write_to_excel(getSharesIdxCI95(tab.copy()), sname, title, row)
tab_range = xl_rowcol_to_cell(row,1) + ':' + xl_rowcol_to_cell(row,tab.shape[1])
_ = writer.sheets[sname].set_column(tab_range, 11, format1)
wt_cols = ['count', 'trexpfac']   # unweighted counts vs. expansion-weighted trips
wt_desc = ['(Unweighted)', '(Weighted)']
# --- One sheet per demographic/time dimension, crossed with trip purpose ---
for key in ['tod','raceeth', 'hinc', 'age', 'gender', 'tncmode']:
    d2_dict = col_dict[key]
    row = 0
    sname = d2_dict['desc']
    for wc, wd in zip(wt_cols, wt_desc):
        title = 'TNC Trips by ' + d2_dict['desc'] + ' and ' + d1_dict['desc'] + ' ' + wd
        tab, tab_fmt = prep_data_2d(trip_df,d1_dict['col'],d1_dict['vals'],d1_dict['labels'],
                                    d2_dict['col'],d2_dict['vals'],d2_dict['labels'],wc)
        row = write_to_excel(tab.astype('float64'), sname, title, row)
        # tab2 starts from the unweighted table, then has its interior
        # overwritten with the weighted values; it feeds the CI table below.
        if wc == 'count':
            tab2 = tab.copy()
        else:
            tab2.iloc[:-1,:-1] = tab.iloc[:-1,:-1]
        title = 'Column Shares by ' + d2_dict['desc'] + ' and ' + d1_dict['desc'] + ' ' + wd
        row = write_to_excel(getSharesIdx(tab.copy()), sname, title, row)
        title = 'Column Shares 95% CI by ' + d2_dict['desc'] + ' and ' + d1_dict['desc'] + ' ' + wd
        row = write_to_excel(getSharesIdxCI95(tab2.copy()), sname, title, row)
    tab_range = xl_rowcol_to_cell(row,1) + ':' + xl_rowcol_to_cell(row,tab.shape[1])
    _ = writer.sheets[sname].set_column(tab_range, 11, format1)
## Process all week trip records
trip_df = pd.read_csv(join(allwk_dir,'survey2018_tripx.dat'), sep=' ')
trip_df = prep_df(trip_df)
DOW_LOOKUP = {1:'mon',2:'tue',3:'wed',4:'thu',5:'fri',6:'sat',7:'sun'}
# Re-weight each trip with the day-of-week specific "all adult" weight.
trip_df['trexpfac'] = 0
for dow_num, dow in DOW_LOOKUP.items():
    trip_df.loc[trip_df['day']==dow_num, 'trexpfac'] = trip_df.loc[trip_df['day']==dow_num, 'wt_alladult_'+dow]
trip_df['trexpfac'] = trip_df['trexpfac'].fillna(0)
# --- Day-of-week sheet, crossed with trip purpose ---
row = 0
sname = 'DOW'
d2_dict = col_dict['day']
for wc, wd in zip(wt_cols, wt_desc):
    title = 'TNC Trips by ' + d2_dict['desc'] + ' and ' + d1_dict['desc'] + ' ' + wd
    tab, tab_fmt = prep_data_2d(trip_df,d1_dict['col'],d1_dict['vals'],d1_dict['labels'],
                                d2_dict['col'],d2_dict['vals'],d2_dict['labels'],wc)
    row = write_to_excel(tab.astype('float64'), sname, title, row)
    # tab2 starts from the unweighted table, then has its interior
    # overwritten with the weighted values; it feeds the CI table below.
    if wc == 'count':
        tab2 = tab.copy()
    else:
        tab2.iloc[:-1,:-1] = tab.iloc[:-1,:-1]
    title = 'Column Shares by ' + d2_dict['desc'] + ' and ' + d1_dict['desc'] + ' ' + wd
    row = write_to_excel(getSharesIdx(tab.copy()), sname, title, row)
    title = 'Column Shares 95% CI by ' + d2_dict['desc'] + ' and ' + d1_dict['desc'] + ' ' + wd
    row = write_to_excel(getSharesIdxCI95(tab2.copy()), sname, title, row)
tab_range = xl_rowcol_to_cell(row,1) + ':' + xl_rowcol_to_cell(row,tab.shape[1])
_ = writer.sheets[sname].set_column(tab_range, 11, format1)
# NOTE(review): ExcelWriter.save() was removed in pandas 2.0 (use .close());
# confirm the pandas version this runs against.
writer.save()
```
| github_jupyter |
# Accumulation of roundoff error
In this notebook we'll study some effects of the accumulation of roundoff error.
# Unstable Algorithms
We need to evaluate this integral for $n=1,2,\ldots,8$:
$$y_n=\int_0^1\frac{x^n}{x+5}\,dx$$
We write the equation like this:
$$y_n = \frac{1}{n} - 5y_{n-1}$$
$$y_{1}=1-5(y_{0}+\epsilon )=1-5y_{0}-5\epsilon$$
$$y_{2}={\frac {1}{2}}-5(1-5y_{0}-5\epsilon )={\frac {1}{2}}-5+25y_{0}+5^{2}\epsilon$$
$$\vdots$$
$$y_{n}=\ldots +5^{n}\epsilon$$
The roundoff error is amplified, $\mathcal{O}(5^n)$, in succeeding calculations, so this algorithm is unstable.
```
import numpy as np
import scipy as sc
import matplotlib.pyplot as plt
import sys
def function(y0, n):
    """Evaluate the unstable recurrence y_i = 1/i - 5*y_{i-1} for i = 1..n-1.

    Parameters
    ----------
    y0 : float
        Starting value y_0; any error in it is amplified by a factor of 5
        at every step, which is what makes this algorithm unstable.
    n : int
        Number of terms to compute (length of the returned array).

    Returns
    -------
    numpy.ndarray of length n with y_0 in position 0.
    """
    y_sol = np.zeros(n)
    y_sol[0] = y0
    for i in range(1, n):
        # Bug fix: the recurrence uses the running index i (1/i), not the
        # fixed array length n (the original computed 1/n every step).
        y_sol[i] = 1 / i - 5 * y_sol[i - 1]
    return y_sol
# Run the unstable recurrence from y0 = 0 and plot the wildly growing result.
n = 8
x = np.linspace(-1,1,8)
y0 = 0
y = function(y0, n)
plt.plot(x,y)
# The value of 'y' goes to infinity
```
# Conditioned problems
Even if a stable algorithm is used, the solution to a problem is still inaccurate due to the accumulation of roundoff error when the problem itself is ill-conditioned.
## Dangers of Higher-Order Polynomial Interpolation
In 1901, Carl Runge published a study on the dangers of higher-order
polynomial interpolation. He looked at the following simple-looking function:
$$f(x) = \frac{1}{1+25x^2}$$
which is now called Rungeโs function
```
x = np.linspace(-1,1,10)        # 10 equally spaced sample points
y = 1/(1 + 25*x**2)             # Runge's function at the sample points
xx = np.linspace(-1,1,100)      # dense grid for plotting the fit
p = np.polyfit(x,y,4)           # 4th-order least-squares polynomial
y4 = np.polyval(p,xx)
yr = 1/(1 + 25*xx**2)           # exact Runge function on the dense grid
plt.plot(x,y,'o')
plt.plot(xx,y4)
plt.plot(xx,yr,'--')
plt.legend(['','Polynomial fit','Runge function'])
# The polynomial does a poor job of following Runge's function
# Continuing with the analysis,
# the 20th-order polynomial can be generated and plotted
x = np.linspace(-1,1,10)
y = 1/(1 + 25*x**2)
xx = np.linspace(-1,1,100)
p = np.polyfit(x,y,20)          # degree 20: oscillates even more near the ends
y4 = np.polyval(p,xx)
yr = 1/(1+25*xx**2)
plt.plot(x,y,'o')
plt.plot(xx,y4)
plt.plot(xx,yr,'--')
plt.legend(['','Polynomial fit','Runge function'])
# The polynomial does a poor job of following Runge's function
```
Although there may be certain contexts where higher-order polynomials are
necessary, they are usually to be avoided. In most engineering and scientific
contexts, lower-order polynomials of the type described in this chapter can be
used effectively to capture the curving trends of data without suffering from
oscillations.
[Real world example: Patriot missile failure due to magnification of roundoff error](https://en.wikipedia.org/wiki/Round-off_error)
# Error Estimates for Iterative Methods
The approximation of $e$ using Maclaurin series expansion
$$e^x = 1+ x+ \frac{x^2}{2!}+\frac{x^3}{3!}+\frac{x^4}{4!} ... \frac{x^n}{n!}$$
```
def maclaurin(x, esp=0.0001, max_int=50):
    """
    Maclaurin series of the exponential function.

    input:
        x = value at which the series is evaluated
        esp = stopping criterion on the approximate relative error, in %
              (default = 0.0001)
        max_int = maximum number of iterations (default = 50)
    output:
        fx = estimated value of exp(x)
        ea = approximate relative error (%) after the last iteration
        iter = number of iterations performed
    """
    import math  # local import: the notebook's import cell does not load math

    iter = 1
    sol = 1.0    # partial sum; starts with the zeroth term x**0/0! = 1
    ea = 100.0
    while True:
        sol_old = sol
        # math.factorial replaces np.math.factorial (removed in NumPy 2.0).
        sol = sol + x**iter / math.factorial(iter)
        iter += 1
        if sol != 0:
            ea = np.abs((sol - sol_old) / sol) * 100
        # Bug fix: stop when the error is small enough OR the iteration
        # budget is exhausted.  The original used `and`, which forced the
        # loop to always run at least max_int iterations.
        if ea <= esp or iter >= max_int:
            break
    fx = sol
    return fx, ea, iter
# Approximate e with the Maclaurin series and compare against numpy's value.
e, a, inte = maclaurin(1, 1e-6, 100)
# np.exp(1) returns the true value of the number 'e'
# (at least, a better approximation than our method).
print('The error is: ' + str(np.exp(1) - e))
# Bug fix: the message previously read "The epsilon funciton build in python".
print("The machine epsilon built into python is: " + str(sys.float_info.epsilon))
```
The 52 bits used for the mantissa correspond to about 15 to 16 base-10 digits, so in our programming language the machine epsilon is about $10^{-16}$.
Remember that?
$$lim_{n\to\infty}(1 + \frac{1}{n})^n = e = 2.718281828...$$
Let's use the power of python to calculate
```
def euler(n):
    """Compound-interest approximation of e: (1 + 1/n) ** n."""
    growth_factor = 1 + 1/n
    return growth_factor**n

euler(10000)
# We can write 10^16 like 10E16 or 10e16
# What just happen?
euler(10e16)
```
When $n$ becomes bigger than $10^{15}$, our function stops increasing and starts oscillating.
```
x = np.linspace(1,1e16,100)
y = euler(x)                     # vectorized: numpy broadcasts (1 + 1/x)**x
y2 = np.exp(1)
plt.xscale('log')
# Reference line at the true value of e.
plt.axhline(y=y2, color='r', linestyle='--')
plt.plot(x,y)
plt.title("euler function in lin-log scale")
plt.legend(["Real Value of Euler Number", "f(n) "])
```
| github_jupyter |
# What does data look like
## What libraries should I import?
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
```
## How to read data?
Dummy data for the following exercises is provided [here](https://ruhr-uni-bochum.sciebo.de/s/Svwxncw01Ir9uxw).
```
# NOTE(review): hard-coded local path -- adjust to where you saved the file.
file = '/Users/guillermo/Downloads/pose-3d.csv'
data = pd.read_csv(file, header=0)
```
## How is my data structured?
```
data.info()        # column dtypes and non-null counts
np.shape(data)     # (rows, columns)
data
```
### Cleaning data
```
# Keep only raw coordinate columns; drop score/error/bookkeeping columns.
coords = data.loc[:, ~data.columns.str.contains(
    'score|error|ncams|fnum|center|M_')]
# Tracking-confidence columns, kept separately for quality checks.
scores = data.loc[:, data.columns.str.contains('score')]
```
### Changing the data structure
```
# Let us transform the data to be centered around a reference point
# Subtract the nose1 coordinate from every landmark, axis by axis, so all
# positions become relative to the nose (an egocentric reference frame).
centered_coords = coords.copy()
for i in range(centered_coords.shape[1]):
    if '_x' in centered_coords.columns[i]:
        centered_coords.loc[:, centered_coords.columns[i]] = centered_coords.loc[:,
            centered_coords.columns[i]].subtract(coords.loc[:, "nose1_x"].values)
    elif '_y' in centered_coords.columns[i]:
        centered_coords.loc[:, centered_coords.columns[i]] = centered_coords.loc[:,
            centered_coords.columns[i]].subtract(coords.loc[:, "nose1_y"].values)
    elif '_z' in centered_coords.columns[i]:
        centered_coords.loc[:, centered_coords.columns[i]] = centered_coords.loc[:,
            centered_coords.columns[i]].subtract(coords.loc[:, "nose1_z"].values)
    else:
        pass
centered_coords
# What is the difference between pandas Data Frame and numpy Array?
coords_egocentric = centered_coords.to_numpy()
coords_egocentric
```
## Reading DeepLabCut Data
Note that DeepLabCut files contain multiple headers
```
# .h5 vs csv with multiple headings
# NOTE(review): hard-coded local path -- adjust to where you saved the file.
file = '/Users/guillermo/Downloads/DLC_data.csv'
# Reading with a single header row mashes the extra header rows into the data.
data = pd.read_csv(file, header=0)
data
```
You can specify multiple headers in `pd.read_csv(file, header=[0,1,2])`, but your data frame will be a little more difficult to subset, as columns will be a MultiIndex array.
```
# Read all three DeepLabCut header rows; columns become a MultiIndex.
data = pd.read_csv(file, header=[0, 1, 2])
data
data.columns
data.columns.get_level_values(1)   # second header row
data.columns.get_level_values(2)   # third header row
```
Better rename the columns of your data frame to avoid MultiIndex
```
# NOTE(review): this first expression joins level 1 with itself -- presumably
# a demo/typo; the actual renaming below joins levels 1 and 2.
data.columns.get_level_values(1) + '_' + data.columns.get_level_values(1)
new_col_names = list(data.columns.get_level_values(
    1) + '_' + data.columns.get_level_values(2))
data.columns = new_col_names   # flatten the MultiIndex to single-level names
data
```
## What does my data tell me?
```
# Does this make sense?
coords.mean(axis='columns')   # row-wise mean: averages unrelated x/y/z columns
# What about this?
coords.mean(axis='index')     # column-wise mean: one value per landmark axis
coords['lefteye1_x'].mean()
coords.describe()
```
## How could my data look like
```
scores.hist(figsize=(20, 20))
scores.boxplot(column=['chin_score', 'lefteye1_score'], figsize=(10, 10))
# Split the coordinate columns by axis for 3D plotting.
x_coords = coords.loc[:, coords.columns.str.contains('_x')]
y_coords = coords.loc[:, coords.columns.str.contains('_y')]
z_coords = coords.loc[:, coords.columns.str.contains('_z')]
t = 0   # frame index to plot
fig = plt.figure(figsize=(6, 4), dpi=100)
ax = fig.add_subplot(projection='3d')
x_points = x_coords[t:t+1]
y_points = y_coords[t:t+1]
z_points = z_coords[t:t+1]
ax.scatter3D(x_points, y_points, z_points)
ax.view_init(11, 280)
ax.set(xlabel='X axis', ylabel='Y axis', zlabel='Z axis')
plt.title("My First Plot")
```
In the following section we will learn to calculate some easy kinematic features to better understand our data.
## Bonus
```
def face_skeleton(pose):
    """Build a wireframe "skeleton" for each pose by chaining facial landmarks.

    For every pose, returns a tuple of 7 polylines (left eye, right eye,
    left/right eyebrow, nose, lips, face outline), each given as
    ([xs], [ys], [zs]) lists ready for 3D plotting in plot_3Dpose.
    """
    # Landmark chains; repeating the first point closes a loop.
    chains = (
        ['lefteye1', 'lefteye2'],
        ['righteye1', 'righteye2'],
        ['leyebrow1', 'leyebrow2', 'leyebrow3'],
        ['reyebrow1', 'reyebrow2', 'reyebrow3'],
        ['nose1', 'nose3', 'nose2', 'nose4', 'nose1'],
        ['uplip', 'llip', 'lowlip', 'rlip', 'uplip'],
        ['rear', 'chin', 'lear'],
    )

    def polyline(single_pose, names):
        # One list of coordinate values per axis, in x/y/z order.
        return tuple([single_pose[name + '_' + axis] for name in names]
                     for axis in ('x', 'y', 'z'))

    skeletons = []
    for single_pose in pose:
        skeletons.append(tuple(polyline(single_pose, names) for names in chains))
    return skeletons
def plot_3Dpose(pose, elevation, azimuth):
    """
    This plot function takes the average pose coordinates of facial landmarks, creates a skeleton and visualizes the facial expression
    in a 3D coordinate system with predefined elevation and azimuth angles.
    """
    skeletons = face_skeleton(pose)
    # Grid layout: 3 columns, as many rows as needed for len(pose) panels.
    ncols = 3
    nrows = math.ceil(len(pose)/ncols)
    width = ncols*6
    height = nrows * 5
    fig, axes = plt.subplots(nrows, ncols, figsize=(
        width, height), subplot_kw=dict(projection='3d'))
    for ax, n in zip(axes.flat, range(len(pose))):
        # Select this pose's x/y/z values by index-name suffix.
        x_points = pose[n][['_x' in s for s in pose[n].index]]
        y_points = pose[n][['_y' in s for s in pose[n].index]]
        z_points = pose[n][['_z' in s for s in pose[n].index]]
        ax.scatter3D(x_points, y_points, z_points)
        ax.view_init(elevation, azimuth)
        ax.set(xlabel='X axis', ylabel='Y axis', zlabel='Z axis')
        ax.set_title('Predicted Pose: %d' % (n+1))
        # Draw each connecting polyline of the facial skeleton in green.
        for i in range(len(skeletons[0])):
            x = skeletons[n][i][0]
            y = skeletons[n][i][1]
            z = skeletons[n][i][2]
            ax.plot(x, y, z, color='g')
    plt.suptitle(
        'Hidden Markov Model predictions with N = %d Components' % len(pose))
    plt.show()
    return
def split_data(data, prediction):
    """Split time-series rows by predicted component and average each group.

    Adds a 'pred' column to *data* (in place), groups rows by it, and
    returns (predictions, pose): the per-component sub-frames and their
    column-wise mean Series.
    """
    n_components = max(prediction) + 1
    data['pred'] = prediction
    by_component = data.groupby(data.pred)
    predictions = [by_component.get_group(label) for label in range(n_components)]
    pose = [frame.mean() for frame in predictions]
    return predictions, pose
from hmmlearn import hmm
import math
# change the number of components you expect to find in your data
# Fit a Gaussian HMM to the raw coordinates, then visualize the mean
# (nose-centered) pose of each predicted hidden state.
model1 = hmm.GaussianHMM(n_components=9, covariance_type="full")
model1.fit(coords)
pred1 = model1.predict(coords)
_, pose1 = split_data(centered_coords, pred1)
plot_3Dpose(pose1, 11, 280)
```
| github_jupyter |
```
import pandas as pd
import re
from nltk import word_tokenize
from nltk.corpus import stopwords
from nltk.stem.wordnet import WordNetLemmatizer
from sklearn.feature_extraction.text import CountVectorizer
import numpy as np
import collections
```
**Load the data and split them**
```
oush_df = pd.read_csv('./reuters_10class.csv')
print(len(oush_df))
# reuters_df = pd.read_csv('./reuters1000.csv')
# reuters_df.to_csv('./reuters1000.csv',index=False)
# print('Finish')
# 70/30 split by row order; column 0 is the text, remaining columns are labels.
train_size = int(0.7 * len(oush_df))
oush_Y_train = oush_df.iloc[:train_size,1:].values
oush_Y_test = oush_df.iloc[train_size:,1:].values
# reuters_Y_train = reuters_df.iloc[:train_size,1:5].values
# reuters_Y_test = reuters_df.iloc[train_size:,1:5].values
# print(reuters_Y_train.shape)
def cleanse(text):
    """Lowercase *text*, strip HTML tags, and replace punctuation,
    tab/newline characters and stray cp1252 bytes with spaces.
    """
    text = text.lower()
    # Characters to blank out.  Built as an escaped character class:
    # the original joined them with '|', where the unescaped '^' acted as a
    # start-of-string anchor and injected a spurious leading space.
    filters = '"#$%&()*+,-./:;<=>?@[\\]^_{|}~\t\n\x97\x96 '
    text = re.sub("<.*?>", ' ', text)
    text = re.sub('[' + re.escape(filters) + ']', ' ', text)
    return text
def encoder(dataset, max_feature):
    """Vectorize the text column of *dataset* into bag-of-words count matrices.

    Each document is cleansed (lowercased, punctuation stripped), tokenized,
    filtered of English stopwords, lemmatized, and stripped of the 10 most
    frequent raw tokens.  A CountVectorizer capped at ``max_feature`` terms is
    then fit on all documents, and the counts are split into train/test at the
    module-level ``train_size`` row index.

    Returns (X_train, X_test) arrays of shape (rows, n_vocabulary_terms).
    """
    content = dataset.iloc[:, 0].values.tolist()
    # Raw-token frequencies, used to drop the 10 most common words below.
    freq = pd.Series(' '.join(content).split()).value_counts()
    new_contents = []
    for text in content:
        cleaned = cleanse(text)
        tokens = word_tokenize(cleaned)
        words = [word for word in tokens if word not in stopwords.words('english')]
        words = [WordNetLemmatizer().lemmatize(w) for w in words]
        # Membership test against the Series index, i.e. the top-10 words.
        words = [w for w in words if w not in freq[:10]]
        new_contents.append([' '.join(words)])
    documents = [new_content[0] for new_content in new_contents]
    # Bug fix: honor the max_feature argument instead of hard-coding 400.
    vectorize = CountVectorizer(max_features=max_feature)
    vectorize.fit(documents)
    features = len(vectorize.vocabulary_)
    X_train = np.ones(shape=[train_size, features], dtype=np.int64)
    X_test = np.ones(shape=[len(new_contents) - train_size, features])
    for idx, new_content in enumerate(new_contents):
        word_vector = vectorize.transform(new_content).toarray()
        if idx >= train_size:
            X_test[idx - train_size] = word_vector
        else:
            X_train[idx] = word_vector
    return X_train, X_test
# Vectorise the corpus and persist the train/test splits to .npy files.
max_feature = 400
oush_X_train,oush_X_test = encoder(oush_df,max_feature)
print(oush_X_train.shape,oush_Y_test.shape)
# reuters_X_train,reuters_X_test = encoder(reuters_df,max_feature)
oush_X_test.shape
np.save('./traindataReuters_10class.npy',oush_X_train)
np.save('./trainlabelReuters_10class.npy',oush_Y_train)
np.save('./testdataReuters_10class.npy',oush_X_test)
np.save('./testlabelReuters_10class.npy',oush_Y_test)
print('Finish')
```
| github_jupyter |
# Inventory Control with Lead Times and Multiple Suppliers
## Description
One potential application of reinforcement learning involves ordering supplies with multiple suppliers having various lead times and costs in order to meet a changing demand. Lead time in inventory management is the lapse in time between when an order is placed to replenish inventory and when the order is received. This affects the amount of stock a supplier needs to hold at any point in time. Moreover, due to having multiple suppliers, at every stage the supplier is faced with a decision on how much to order from each supplier, noting that more costly suppliers might have to be used to replenish the inventory from a shorter lead time.
The inventory control model addresses this by modeling an environment where there are multiple suppliers with different costs and lead times. Orders must be placed with these suppliers to have an on-hand inventory to meet a changing demand. However, both having supplies on backorder and holding unused inventory have associated costs. The goal of the agent is to choose the amount to order from each supplier to maximize the revenue earned.
At each time step, an order is placed to each supplier. If previous orders have waited for the length of their supplier's lead time, then these orders will become part of the on-hand inventory. The demand is then randomly chosen from a user-selected distribution and is subtracted from the on-hand inventory. If the on-hand inventory would become less than zero, than items are considered to be on backorder which decreases the reward. The demand is subtracted from the on-hand inventory to calculate on-hand inventory for the start of the next time step. A remaining inventory (a positive nonzero number) at the end of this calculation negatively influences the reward proportional to the holding costs. There are two ways that the inventory can be setup for the environment. The first allows negative inventory to be accumulated. In this case the on-hand inventory is offset by adding the value of the maximum inventory. This is done so that the observation space can be properly represented using AI Gym. This allows for backorder costs to be calculated if the inventory were to go become negative. The second way does not allow for inventory to become negative. Backorders are still calculated and they still negatively influence reward, but the inventory is reset to 0 for the next timestep after the reward calculation. The inventory is not offset by any number in this version of the environment.
## Model Assumptions
* Backorders are not retroactively fulfilled. If a high demand would cause inventory to become negative, this unfulfilled demand is not met later when there may be some inventory being held at the end of a timestep.
## Environment
### Dynamics
#### State Space
The state space is $S = [0,\text{Max-Order}]^{L_1} \times [0,\text{Max-Order}]^{L_2} \times ... \times [0,\text{Max-Order}]^{L_N} \times I$ where $N$ is the number of suppliers and $[0,\text{Max-Order}]^{L_i}$ represents a list of integers between zero and the max order amount, maxorder (specified in the configuration), with the length of the lead time of supplier $i$. This represents how many timesteps back each order is from being added to the inventory. $I$ represents the current on-hand inventory. To represent a timestep, an order will be moved up an index in the array unless it is added to the inventory, in which case it is removed from the array. Each supplier has their own set of indices in the array that represent its lead times. Each index in the list (except for $ I $) has a maximum value of the max_order parameter.
If negative inventory is allowed, the last index, the on-hand inventory, is offset by adding the maximum inventory value to it. It is in the range $[0, 2 * maxinventory]$ This is done so that a negative value of the on-hand inventory can be temporarily kept to use in reward calculations for backorders and so that the observation space can be represented properly. Before this value is used in any calculations, the value of the max inventory is subtracted so that the true value of the inventory is used. Otherwise if negative inventory is not allowed, the on-hand inventory must be in the range of $[0,maxinventory]$ and directly corresponds to the current inventory.
#### Action Space
The action space is $A = [0,\text{Max-Order}]^N$ where N is the number of suppliers. This represents the amount to order from each supplier for the current timestep. The order amount cannot be greater than the max_order parameter (set in the initialization of the environment).
#### Reward
The reward is $R = - (Order + holdcost \times max(0,I) + backordercost \times max(0, -I))$ where $Order = \sum_{i = 1}^{N} c_i \times a_i$ and represents the sum of the amount most recently ordered from each supplier, $a_i$, multiplied by the appropriate ordering cost, $c_i$. $holdcost$ represents the holding cost for excess inventory, and $backordercost$ represents the backorder cost for when the inventory would become negative.
#### Transitions
At each timestep, orders are placed into each supplier for a certain amount of resources. These orders are processed and will add to the on-hand inventory once the lead time for the appropriate supplier has passed. The time that has passed for each order is tracked using the state at each timestep. If any lead times have passed, the ordered amount is added to the on-hand inventory. Then, the randomly chosen demand is subtracted from the on-hand inventory. If the demand is higher than the current inventory, then the inventory does become negative for the next state. The reward is then calculated proportional to the revenue earned from meeting the demand, but is inversely proportional to the amount that is backordered (the difference between the inventory and demand). If the demand is lower than the current inventory, the inventory remains positive for the next state. The reward is still proportional to the revenue earned from meeting the demand, but is inversely proportional to the amount of inventory left over multiplied by the holding costs.
#### Configuration Parameters
* lead_times: array of ints representing the lead times of each supplier
* demand_dist: The random number sampled from the given distribution to be used to calculate the demand
* supplier_costs: array of ints representing the costs of each supplier
* hold_cost: The int holding cost.
* backorder_cost: The backorder holding cost.
* max_inventory: The maximum value (int) that can be held in inventory
* max_order: The maximum value (int) that can be ordered from each supplier
* epLen: The int number of time steps to run the experiment for.
* starting_state: An int list containing enough indices for the sum of all the lead times, plus an additional index for the initial on-hand inventory.
* neg_inventory: A bool that says whether the on-hand inventory can be negative or not.
## Heuristic Agents
### Random Agent
This agent randomly samples from the action space. For this environment, the amount ordered from each supplier is an integer from $[0, maxorder]$.
### Base Surge Agent (TBS)
The base surge agent has 2 parameters, $r$ and $S$. Each action is expressed as $[r,[orderamount]]$. $r$ is a vector of the order amounts for all suppliers except the one with the greatest lead time. $S$ represents the "order up to amount". orderamount is calculated by calculating $S - I$ where $I$ is the current on-hand inventory. This value is then made 0 if it is negative or is reduced to the $maxorder$ if it is greater. This order amount is used for the supplier with the greatest lead time.
| github_jupyter |
```
import numpy as np
import pandas as pd
import tensorflow as tf
from data_process import build_vocab, batch_iter, sentence_to_index
from models import LSTM, biLSTM, deepBiLSTM
# Tab-separated sentiment data: `document` is the text column, `label` the
# target — presumably binary sentiment; confirm against the data files.
train = pd.read_csv('./data/train-5T.txt', delimiter='\t')
test = pd.read_csv('./data/test-1T.txt', delimiter='\t')
X_train = train.document
Y_train = train.label
X_test = test.document
Y_test = test.label
max_vocab = 50000  # vocabulary-size cap passed to build_vocab
vocab, _, vocab_size = build_vocab(X_train, max_vocab)
```
# Sentiment Analysis with LSTM
```
# Train the plain LSTM classifier over 15 epochs of shuffled mini-batches,
# reporting train/test loss and accuracy every 100 batches.
batches = batch_iter(list(zip(X_train, Y_train)), batch_size=64, num_epochs=15)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True  # claim GPU memory on demand
tf.reset_default_graph()
sess = tf.Session(config=config)
model = LSTM(sess=sess, vocab_size=vocab_size, lr=1e-2)
train_acc = []  # per-batch accuracies since the last report
avgLoss = []    # per-batch losses since the last report
x_test = sentence_to_index(X_test, vocab)
for step, batch in enumerate(batches):
    x_train, y_train = zip(*batch)
    x_train = sentence_to_index(x_train, vocab)
    # NOTE(review): accuracy is measured before the train step, so it
    # reflects the pre-update parameters.
    acc = model.get_accuracy(x_train, y_train)
    l, _ = model.train(x_train, y_train)
    train_acc.append(acc)
    avgLoss.append(l)
    if step % 100 == 0:
        test_loss = model.get_loss(x_test, Y_test)
        print('batch:', '%04d' % step, '\ntrain loss:', '%.5f' % np.mean(avgLoss), '\ttest loss:', '%.5f' % test_loss)
        test_acc = model.get_accuracy(x_test, Y_test)
        print('train accuracy:', '%.3f' % np.mean(train_acc), '\ttest accuracy:', '%.3f' % test_acc, '\n')
        avgLoss = []
        train_acc = []
```
# Sentiment Analysis with biLSTM
```
# Same training loop as the LSTM cell above, but for the bidirectional LSTM.
batches = batch_iter(list(zip(X_train, Y_train)), batch_size=64, num_epochs=15)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True  # claim GPU memory on demand
tf.reset_default_graph()
sess = tf.Session(config=config)
model = biLSTM(sess=sess, vocab_size=vocab_size, lr=1e-2)
train_acc = []  # per-batch accuracies since the last report
avgLoss = []    # per-batch losses since the last report
x_test = sentence_to_index(X_test, vocab)
for step, batch in enumerate(batches):
    x_train, y_train = zip(*batch)
    x_train = sentence_to_index(x_train, vocab)
    acc = model.get_accuracy(x_train, y_train)  # pre-update accuracy
    l, _ = model.train(x_train, y_train)
    train_acc.append(acc)
    avgLoss.append(l)
    if step % 100 == 0:
        test_loss = model.get_loss(x_test, Y_test)
        print('batch:', '%04d' % step, '\ntrain loss:', '%.5f' % np.mean(avgLoss), '\ttest loss:', '%.5f' % test_loss)
        test_acc = model.get_accuracy(x_test, Y_test)
        print('train accuracy:', '%.3f' % np.mean(train_acc), '\ttest accuracy:', '%.3f' % test_acc, '\n')
        avgLoss = []
        train_acc = []
```
# Sentiment Analysis with deepBiLSTM
```
# Same training loop once more, for the deep (stacked) bidirectional LSTM.
batches = batch_iter(list(zip(X_train, Y_train)), batch_size=64, num_epochs=15)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True  # claim GPU memory on demand
tf.reset_default_graph()
sess = tf.Session(config=config)
model = deepBiLSTM(sess=sess, vocab_size=vocab_size, lr=1e-2)
train_acc = []  # per-batch accuracies since the last report
avgLoss = []    # per-batch losses since the last report
x_test = sentence_to_index(X_test, vocab)
for step, batch in enumerate(batches):
    x_train, y_train = zip(*batch)
    x_train = sentence_to_index(x_train, vocab)
    acc = model.get_accuracy(x_train, y_train)  # pre-update accuracy
    l, _ = model.train(x_train, y_train)
    train_acc.append(acc)
    avgLoss.append(l)
    if step % 100 == 0:
        test_loss = model.get_loss(x_test, Y_test)
        print('batch:', '%04d' % step, '\ntrain loss:', '%.5f' % np.mean(avgLoss), '\ttest loss:', '%.5f' % test_loss)
        test_acc = model.get_accuracy(x_test, Y_test)
        print('train accuracy:', '%.3f' % np.mean(train_acc), '\ttest accuracy:', '%.3f' % test_acc, '\n')
        avgLoss = []
        train_acc = []
```
| github_jupyter |
<a href="https://colab.research.google.com/github/aksha1234/Deep-learning-tutorials/blob/main/code_04_XX_Iris_Deep_Learning_Classification_with_penguin_Example.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Deep Learning Example - Iris
This examples demonstrates the core deep learning model building concepts using the Keras library. The Iris flower dataset is used to build the model and perform classification tasks
### 5.1 Setup
```
#Install related libraries for the course.
#This is a common requirement for all other examples too
!pip install pandas
!pip install tensorflow
!pip install sklearn
!pip install matplotlib
```
### 4.2. Prepare Input Data for Deep Learning
Perform the following steps for preparing data
1. Load data into a pandas dataframe
2. Convert the dataframe to a numpy array
3. Scale the feature dataset
4. Use one-hot-encoding for the target variable
5. Split into training and test datasets
```
# Load the iris dataset and prepare feature/target arrays for Keras.
import pandas as pd
import os
import tensorflow as tf
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
import seaborn as sns
import keras
df=sns.load_dataset('iris')
df.head()
from sklearn.preprocessing import LabelEncoder
# Encode the species strings as integer class ids.
encoder=LabelEncoder().fit(df['species'])
encoder.classes_
encoder.transform(df.species)
df['species']=encoder.transform(df.species)
df
## Convert the dataframe into arrays:
## the first four columns are the features, the fifth is the target.
df_numpy=df.to_numpy()
train_inputs=df_numpy[:,:4]
train_targets=df_numpy[:,4]
```
> Standardise the data to a normal distribution with mean 0 and variance 1, i.e. ~N(0,1)
```
# Standardise the features and one-hot encode the 3 iris classes.
from sklearn.preprocessing import StandardScaler
scaler=StandardScaler().fit(train_inputs)
# BUG FIX: the original called scaler.transform(train_inputs, 3); the stray
# positional 3 was silently bound to the `copy` keyword.  transform takes
# only the data.
train_inputs=scaler.transform(train_inputs)
train_inputs  # notebook display of the scaled features
# 3 = number of target classes
train_targets=tf.keras.utils.to_categorical(train_targets,3)
print("\nFeatures after scaling :\n------------------------------------")
print(train_inputs[:5,:])
print("\nTarget after one-hot-encoding :\n------------------------------------")
print(train_targets[:5,:])
#Split training and test data (10% held out, fixed seed for reproducibility)
from sklearn.model_selection import train_test_split
train_inputs,test_inputs,train_targets,test_targets=train_test_split(train_inputs,train_targets,test_size=0.1,random_state=12)
```
### 4.3. Creating a Model
Creating a model in Keras requires defining the following
1. Number of hidden layers
2. Number of nodes in each layer
3. Activation functions
4. Loss Function & Accuracy measurements
```
from tensorflow import keras
#Number of classes in the target variable
NB_CLASSES=3
## Create a sequential model in Keras
model=tf.keras.models.Sequential()
## Add the first hidden layer
model.add(keras.layers.Dense(128, ## Number of nodes
                             input_shape=(4,), ## Number of input variables
                             name='Hiddden_layer-1', ## Logical name (runtime layer name; typo kept intentionally)
                             activation='relu')) ## Activation function
```
> Adding the second hidden layer with 128 nodes; there is no need to **specify the input size again.**
```
# Second hidden layer: the input size is inferred from the previous layer.
model.add(keras.layers.Dense(128,name='Hidden-Layer-2',activation='relu'))
```
> Creating the output layers
```
# Output layer: one unit per class, softmax for multi-class probabilities.
model.add(keras.layers.Dense(NB_CLASSES,
                             name='Output-Layer',
                             activation='softmax')) ## As we have multiclass variables
## Compile the model with loss and metrics (optimizer falls back to the
## Keras default since none is specified).
model.compile(loss='categorical_crossentropy',metrics=['accuracy','MSE'])
model.summary()
```
### 4.4. Training and evaluating the Model
Training the model involves defining various training models and then perform
forward and back propagation.
```
#Make it verbose so we can see the progress
VERBOSE=1
#Setup Hyper Parameters for training
#Set Batch size (powers of two are conventional)
BATCH_SIZE=16
#Set number of epochs
EPOCHS=10
#Set validation split. 20% of the training data will be used for validation
#after each epoch
VALIDATION_SPLIT=0.2
print("\nTraining Progress:\n------------------------------------")
#Fit the model. This will perform the entire training cycle, including
#forward propagation, loss computation, backward propagation and gradient descent.
#Execute for the specified batch sizes and epoch
#Perform validation after each epoch
history=model.fit(train_inputs,train_targets,batch_size=BATCH_SIZE,epochs=EPOCHS,verbose=VERBOSE,validation_split=VALIDATION_SPLIT,workers=-1)
## history.history holds the per-epoch metrics as a dict of lists
pd.DataFrame(history.history)
pd.DataFrame(history.history)['accuracy'].plot(figsize=(8,5))
import matplotlib.pyplot as plt
#Plot accuracy of the model after each epoch.
#NOTE(review): this repeats the plot produced just above.
pd.DataFrame(history.history)["accuracy"].plot(figsize=(8, 5))
plt.title("Accuracy improvements with Epoch")
plt.show()
#Evaluate the model against the test dataset and print results
print("\nEvaluation against Test Dataset :\n------------------------------------")
model.evaluate(test_inputs,test_targets)
```
### 4.5. Saving and Loading Models
The training and inference environments are usually separate. Models need to be saved after they are validated. They are then loaded into the inference environments for actual prediction
```
#Saving a model to disk (TensorFlow SavedModel directory)
model.save("iris_save")
#Loading a Model back from the saved directory
loaded_model = keras.models.load_model("iris_save")
#Print Model Summary to confirm the architecture round-tripped
loaded_model.summary()
```
### 4.6. Predictions with Deep Learning Models
```
#Raw prediction data: one sample with the four iris measurements
prediction_input = [[6.6, 3. , 4.4, 1.4]]
#Scale prediction data with the same scaler fitted on the training set
scaled_input = scaler.transform(prediction_input)
#Get raw prediction probabilities (softmax output, one value per class)
raw_prediction = model.predict(scaled_input)
print("Raw Prediction Output (Probabilities) :" , raw_prediction)
#Find prediction: index of the most probable class
prediction = np.argmax(raw_prediction)
print("Prediction is ", encoder.inverse_transform([prediction]))
```
## Modelling new dataset
```
# Load the penguins dataset and build a classifier for `species`.
df=sns.load_dataset('penguins')
df
df.dropna(inplace=True)
## Confirm no missing data remains after dropna
df.isnull().sum()
input_cols=df.columns.tolist()[1:]   # all columns except the first
output_cols=df.columns.tolist()[0]   # first column ('species') is the target
input_cols,output_cols
## Convert the categorical inputs to numeric values via one-hot encoding
categorical_cols=df[input_cols].select_dtypes(include='object').columns.tolist()
numerical_cols=df[input_cols].select_dtypes(include=np.number).columns.tolist()
## One-hot encode the categorical columns
from sklearn.preprocessing import OneHotEncoder
encoder=OneHotEncoder(sparse=False,handle_unknown='ignore').fit(df[categorical_cols])
# NOTE(review): get_feature_names was renamed get_feature_names_out in
# newer scikit-learn — confirm the installed version supports this call.
encoded_cols=encoder.get_feature_names(categorical_cols).tolist()
df[encoded_cols]=encoder.transform(df[categorical_cols])
df
from sklearn.preprocessing import StandardScaler
scaler=StandardScaler().fit(df[numerical_cols+encoded_cols])
df[numerical_cols+encoded_cols]=scaler.transform(df[numerical_cols+encoded_cols])
inputs_df=df[numerical_cols+encoded_cols].copy().to_numpy() ## deep learning expects arrays, not DataFrames
targets_df=df[output_cols].copy()
## Label-encode the target column
encoder2=LabelEncoder().fit(targets_df)
targets_df=encoder2.transform(targets_df)
encoder2.classes_
## One-hot encode the targets: a matrix with 3 columns (3 species classes)
targets_df=tf.keras.utils.to_categorical(targets_df)
train_inputs,test_inputs,train_targets,test_targets=train_test_split(inputs_df,targets_df,test_size=0.2,random_state=0)
inputs_df.shape
## Design the model
model=tf.keras.models.Sequential()
model.add(keras.layers.Dense(32,activation='relu',input_shape=(9,),name='Hidden_Layer1'))
model.add(keras.layers.Dense(32,activation='relu',name='Hidden_Layer2'))
model.add(keras.layers.Dense(3,activation='softmax',name='Output_layer'))
## Compile the model with loss and metrics
model.compile(loss='categorical_crossentropy',## multi-class target
              metrics=['accuracy','mse'])
model.summary()
```
## Training and evaluating the model
```
# Train for 16 epochs, plot the accuracy curve, then evaluate on the
# held-out split.
history=model.fit(train_inputs,train_targets,verbose=1,batch_size=16,epochs=16,validation_split=0.2,workers=-1)
pd.DataFrame(history.history)['accuracy'].plot(figsize=(12,9))
model.evaluate(test_inputs,test_targets)
def classify_species(df):
    """Predict the penguin species for the raw-feature frame *df*.

    Applies the fitted one-hot encoder and scaler, then decodes the model's
    most confident class back to a species label.
    NOTE: np.argmax over the whole prediction matrix is only meaningful for
    a single-row *df*.
    """
    data=df.copy()
    data[encoded_cols]=encoder.transform(data[categorical_cols])
    data[numerical_cols+encoded_cols]=scaler.transform(data[numerical_cols+encoded_cols])
    data=data[numerical_cols+encoded_cols].to_numpy()
    predict=model.predict(data)
    arg_max=np.argmax(predict)
    # BUG FIX: inverse_transform expects an array-like; the bare scalar
    # raised an error (the redefinition later in the notebook already
    # carries this fix).
    return encoder2.inverse_transform([arg_max])
df  # display the engineered frame
sns.load_dataset('penguins')  # display the raw dataset for comparison
input_cols  # the raw feature column names
def classify_species(df):
    """Return the decoded species prediction for the raw-feature frame *df*."""
    features = df.copy()
    # Re-use the encoder/scaler fitted on the training data.
    features[encoded_cols] = encoder.transform(features[categorical_cols])
    model_cols = numerical_cols + encoded_cols
    features[model_cols] = scaler.transform(features[model_cols])
    probabilities = model.predict(features[model_cols].to_numpy())
    best_class = np.argmax(probabilities)
    return encoder2.inverse_transform([best_class])
# Build a single-row frame of raw features to classify.
# NOTE(review): this first frame is immediately overwritten by the next
# assignment, so only the second sample is actually classified.
df=pd.DataFrame([{'island':'Gentoo',
                  'bill_length_mm':58,
                  'bill_depth_mm':20,
                  'flipper_length_mm':200,
                  'body_mass_g':3450,
                  'sex':'Male'}])
df=pd.DataFrame([{'island':'Adelie',
                  'bill_length_mm':30,
                  'bill_depth_mm':23,
                  'flipper_length_mm':200,
                  'body_mass_g':4000,
                  'sex':'Female'}])
classify_species(df)
# Persist the fitted preprocessing objects and model together.
import joblib
assignment={'encoder_cat_cols':encoder,'encoder_species':encoder2,'scaler':scaler,'model':model}
joblib.dump(assignment,filename='model_params.joblib')
# Shell commands to version the notebook with git from inside Colab.
! git
! git init ## initialise a new repository
! git clone https://github.com/aksha1234/Deep-learning-tutorials.git
! pwd
%cd Deep-learning-tutorials/
! git remote -v
! git status
## create a file just as an example
! touch firstpy.py
! git status
## stage all files for tracking
! git add -A
!git status
! git commit -a -m 'first_commit'
# Configure the commit identity.
uname = "aksha1234"
!git config --global user.email '$uname@gmail.com'
!git config --global user.name '$uname'
! git config --list
! git status
```
To push all the required changes to GitHub we have to:
```
username= input('Enter username')
from getpass import getpass
password=getpass(' Enter password:')  # prompt without echoing
!git remote add origin https://$username:$password@github.com/$username/Deep-learning-tutorials.git
! git remote rm origin
# SECURITY: a GitHub personal access token is hard-coded below and is now
# part of the notebook history — it should be revoked and supplied via an
# environment variable or getpass instead of being committed.
api='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'
!git remote add origin https://$username:$api@github.com/$username/Deep-learning-tutorials.git
! git push origin main
! git log
```
| github_jupyter |
```
# Import needed packages
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
# If you're working in Jupyter Notebook, include the following so that plots will display:
%matplotlib inline
# Load the student performance (math) dataset and eyeball the value
# distribution of every column.
dataframe = pd.read_csv('student-mat.csv')
print(dataframe.head())
for c in dataframe.columns:
    print(c)
    print(dataframe[c].value_counts(dropna=False))
print(dataframe.columns)
from sklearn.model_selection import train_test_split
# Target: whether the student has home internet access (yes -> 1, no -> 0).
target = 'internet'
y = dataframe[target].map({'yes': 1, 'no': 0})
X = dataframe.drop(labels = [target], axis='columns')
# binary yes/no feature columns
yes_no = ['activities','schoolsup','famsup','paid','nursery','higher','romantic']
# map yes/no to 1/0
for c in yes_no:
    X[c] = X[c].map({'yes': 1, 'no': 0})
# translate the remaining non-numeric fields to integer category codes
non_numeric = ['school', 'sex', 'address', 'famsize', 'Pstatus', 'Mjob',
               'Fjob', 'reason', 'guardian']
for c in non_numeric:
    X[c] = X[c].astype('category').cat.codes
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
# split the 30% test portion into validation and holdout halves
validate_train,holdout_train,validate_test,holdout_test = train_test_split(X_test, y_test, test_size=0.5)
baseline_classifier_model = RandomForestClassifier()
# Train the baseline model on training data
baseline_classifier_model = baseline_classifier_model.fit(X_train, y_train)
# Use the forest's predict method on the holdout
predictions = baseline_classifier_model.predict(holdout_train)
# Calculate the accuracy_score (accuracy is symmetric, so the swapped
# (y_pred, y_true) order is harmless here, though (y_true, y_pred) is the
# documented convention)
accuracy_score(predictions, holdout_test)
# Train 5 independent random forests and keep the best scorer on the
# validation split.
best_model = None
for i in range(5):
    classifier_model = RandomForestClassifier()
    classifier_model = classifier_model.fit(X_train, y_train)
    # NOTE(review): this rebinds `predictions` (previously the holdout
    # predictions), which the confusion-matrix cell below then misuses.
    predictions = classifier_model.predict(validate_train)
    score = accuracy_score(validate_test, predictions)
    print("%d times running, scoring %f"%(i+1,score))
    if best_model is None or best_model[1] < score:
        best_model = (classifier_model, score)
model, score = best_model
# Evaluate the selected model on the held-out split.
# BUG FIX: the original reused `predictions` (computed on the *validation*
# rows) against the *holdout* labels, and the final classification_report
# referenced an undefined name `y_pred_test` (NameError).  Predictions are
# now computed on the holdout rows and used consistently.
holdout_predictions = model.predict(holdout_train)
# View confusion matrix for holdout data and predictions
confusion_matrix(holdout_test, holdout_predictions)
# Get and reshape confusion matrix data (row-normalised to proportions)
matrix = confusion_matrix(holdout_test, holdout_predictions)
matrix = matrix.astype('float') / matrix.sum(axis=1)[:, np.newaxis]
# Build the plot
plt.figure(figsize=(16,7))
sns.set(font_scale=1.4)
sns.heatmap(matrix, annot=True, annot_kws={'size':10},
            cmap=plt.cm.Greens, linewidths=0.2)
# Add labels to the plot.
# BUG FIX: the target is the binary `internet` column (no -> 0, yes -> 1),
# so the matrix is 2x2; the original labelled it with 7 unrelated feature
# names.
class_names = ['no', 'yes']
tick_marks = np.arange(len(class_names))
tick_marks2 = tick_marks + 0.5
plt.xticks(tick_marks, class_names, rotation=25)
plt.yticks(tick_marks2, class_names, rotation=0)
plt.xlabel('Predicted label')
plt.ylabel('True label')
plt.title('Confusion Matrix for Random Forest Model')
plt.show()
# View the classification report for holdout data and predictions
print(classification_report(holdout_test, holdout_predictions))
```
| github_jupyter |
[Table of Contents](http://nbviewer.ipython.org/github/rlabbe/Kalman-and-Bayesian-Filters-in-Python/blob/master/table_of_contents.ipynb)
# Nonlinear Filtering
```
#format the book: enable inline plots and apply the book's notebook CSS
%matplotlib inline
from __future__ import division, print_function
from book_format import load_style
load_style()
```
## Introduction
The Kalman filter that we have developed uses linear equations, and so the filter can only handle linear problems. But the world is nonlinear, and so the classic filter that we have been studying to this point can have very limited utility.
There can be nonlinearity in the process model. Suppose we want to track an object falling through the atmosphere. The acceleration of the object depends on the drag it encounters. Drag depends on air density, and the air density decreases with altitude. In one dimension this can be modelled with the nonlinear differential equation
$$\ddot x = \frac{0.0034ge^{-x/22000}\dot x^2}{2\beta} - g$$
A second source of nonlinearity comes from the measurements. For example, radars measure the slant range to an object, and we are typically interested in the aircraft's position over the ground. We invoke Pythagoras and get the nonlinear equation:
$$x=\sqrt{\mathtt{slant}^2 - \mathtt{altitude}^2}$$
These facts were not lost on the early adopters of the Kalman filter. Soon after Dr. Kalman published his paper people began working on how to extend the Kalman filter for nonlinear problems.
It is almost true to state that the only equation anyone knows how to solve is $\mathbf{Ax}=\mathbf{b}$. We only really know how to do linear algebra. I can give you any linear set of equations and you can either solve it or prove that it has no solution.
Anyone with formal education in math or physics has spent years learning various analytic ways to solve integrals, differential equations and so on. Yet even trivial physical systems produce equations that cannot be solved analytically. I can take an equation that you are able to integrate, insert a $\log$ term, and render it insolvable. This leads to jokes about physicists stating "assume a spherical cow on a frictionless surface in a vacuum...". Without making extreme simplifications most physical problems do not have analytic solutions.
How do we do things like model airflow over an aircraft in a computer, or predict weather, or track missiles with a Kalman filter? We retreat to what we know: $\mathbf{Ax}=\mathbf{b}$. We find some way to linearize the problem, turning it into a set of linear equations, and then use linear algebra software packages to compute an approximate solution.
Linearizing a nonlinear problem gives us inexact answers, and in a recursive algorithm like a Kalman filter or weather tracking system these small errors can sometimes reinforce each other at each step, quickly causing the algorithm to spit out nonsense.
What we are about to embark upon is a difficult problem. There is not one obvious, correct, mathematically optimal solution anymore. We will be using approximations, we will be introducing errors into our computations, and we will forever be battling filters that *diverge*, that is, filters whose numerical errors overwhelm the solution.
In the remainder of this short chapter I will illustrate the specific problems the nonlinear Kalman filter faces. You can only design a filter after understanding the particular problems the nonlinearity in your problem causes. Subsequent chapters will then teach you how to design and implement different kinds of nonlinear filters.
## The Problem with Nonlinearity
The mathematics of the Kalman filter is beautiful in part due to the Gaussian equation being so special. It is nonlinear, but when we add and multiply them we get another Gaussian as a result. That is very rare. $\sin{x}*\sin{y}$ does not yield a $\sin$ as an output.
What I mean by linearity may be obvious, but there are some subtleties. The mathematical requirements are twofold:
* additivity: $f(x+y) = f(x) + f(y)$
* homogeneity: $f(ax) = af(x)$
This leads us to say that a linear system is defined as a system whose output is linearly proportional to the sum of all its inputs. A consequence of this is that, to be linear, if the input is zero then the output must also be zero. Consider an audio amp - if I sing into a microphone, and you start talking, the output should be the sum of our voices (input) scaled by the amplifier gain. But if the amplifier outputs a nonzero signal such as a hum for a zero input the additive relationship no longer holds. This is because linearity requires that $amp(voice) = amp(voice + 0)$. This clearly should give the same output, but if amp(0) is nonzero, then
$$
\begin{aligned}
amp(voice) &= amp(voice + 0) \\
&= amp(voice) + amp(0) \\
&= amp(voice) + non\_zero\_value
\end{aligned}
$$
which is clearly nonsense. Hence, an apparently linear equation such as
$$L(f(t)) = f(t) + 1$$
is not linear because $L(0) = 1$. Be careful!
## An Intuitive Look at the Problem
I particularly like the following way of looking at the problem, which I am borrowing from Dan Simon's *Optimal State Estimation* [[1]](#[1]). Consider a tracking problem where we get the range and bearing to a target, and we want to track its position. The reported distance is 50 km, and the reported angle is 90$^\circ$. Assume that the errors in both range and angle are distributed in a Gaussian manner. Given an infinite number of measurements what is the expected value of the position?
I have been recommending using intuition to gain insight, so let's see how it fares for this problem. We might reason that since the mean of the range will be 50 km, and the mean of the angle will be 90$^\circ$, that the answer will be x=0 km, y=50 km.
Let's plot that and find out. Here are 3000 points plotted with a normal distribution of the distance of 0.4 km, and the angle having a normal distribution of 0.35 radians. We compute the average of the all of the positions, and display it as a star. Our intuition is displayed with a large circle.
```
import numpy as np
from numpy.random import randn
import matplotlib.pyplot as plt
# Number of simulated range/bearing measurements.
N = 3000
# Bearing: mean pi/2 rad (straight up), std dev 0.35 rad.
a = np.pi/2. + (randn(N) * 0.35)
# Range: mean 50 km, std dev 0.4 km.
r = 50.0 + (randn(N) * 0.4)
# Convert the polar (r, a) samples to Cartesian coordinates.
xs = r * np.cos(a)
ys = r * np.sin(a)
plt.figure()
plt.scatter(xs, ys, label='Sensor', color='k', marker='.', s=2)
# Sample mean of the point cloud (note: rebinds xs, ys to scalars).
xs, ys = sum(xs)/N, sum(ys)/N
plt.scatter(xs, ys, c='r', marker='*', s=200, label='Mean')
# The "intuitive" answer (x=0, y=50) for comparison.
plt.scatter(0, 50, c='k', marker='o', s=300, label='Intuition')
plt.axis('equal')
plt.legend();
```
We can see that our intuition failed us because the nonlinearity of the problem forced all of the errors to be biased in one direction. This bias, over many iterations, can cause the Kalman filter to diverge. Even if it doesn't diverge the solution will not be optimal. Linear approximations applied to nonlinear problems yield inaccurate results.
## The Effect of Nonlinear Functions on Gaussians
Gaussians are not closed under an arbitrary nonlinear function. Recall the equations of the Kalman filter - at each evolution we pass the Gaussian representing the state through the process function to get the Gaussian at time $k$. Our process function was always linear, so the output was always another Gaussian. Let's look at that on a graph. I will take an arbitrary Gaussian and pass it through the function $f(x) = 2x + 1$ and plot the result. We know how to do this analytically, but let's use sampling. I will generate 500,000 points with a normal distribution, pass them through $f(x)$, and plot the results. I do it this way because the next example will be nonlinear, and we will have no way to compute this analytically.
```
import numpy as np
from numpy.random import normal
# (mean, standard deviation) of the input Gaussian.
gaussian = (0., 1.)
# Draw 500,000 samples from N(0, 1).
data = normal(loc=gaussian[0], scale=gaussian[1], size=500000)
plt.figure()
# Histogram of the samples passed through f(x) = 2x + 1.
plt.hist(2*data + 1, 1000);
```
This is an unsurprising result. The result of passing the Gaussian through $f(x)=2x+1$ is another Gaussian centered around 1. Let's look at the input, nonlinear function, and output at once.
```
from kf_book.book_plots import set_figsize, figsize
from kf_book.nonlinear_plots import plot_nonlinear_func
def g1(x):
    """Linear transfer function f(x) = 2x + 1."""
    return x * 2 + 1
plt.figure()
# Show input distribution, the linear function g1, and the output distribution.
plot_nonlinear_func(data, g1, gaussian)
```
> I explain how to plot Gaussians, and much more, in the Notebook *Computing_and_Plotting_PDFs* in the
Supporting_Notebooks folder. You can also read it online [here](https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python/blob/master/Supporting_Notebooks/Computing_and_plotting_PDFs.ipynb)[1]
The plot labeled 'Input' is the histogram of the original data. This is passed through the function $f(x)=2x+1$ which is displayed in the chart on the bottom left. The red line shows how one value, $x=0$, is passed through the function. Each value from input is passed through in the same way to the output function on the right. For the output I computed the mean by taking the average of all the points, and drew the results with the dotted blue line. A solid blue line shows the actual mean for the point $x=0$. The output looks like a Gaussian, and is in fact a Gaussian. We can see that the variance in the output is larger than the variance in the input, and the mean has been shifted from 0 to 1, which is what we would expect given the transfer function $f(x)=2x+1$. The $2x$ affects the variance, and the $+1$ shifts the mean. The computed mean, represented by the dotted blue line, is nearly equal to the actual mean. If we used more points in our computation we could get arbitrarily close to the actual value.
Now let's look at a nonlinear function and see how it affects the probability distribution.
```
def g2(x):
    """A mildly nonlinear transfer function used to distort the Gaussian."""
    oscillation = np.cos(3 * (x / 2 + 0.7)) * np.sin(0.3 * x)
    return oscillation - 1.6 * x
plt.figure()
# Show how the nonlinear g2 distorts the input Gaussian.
plot_nonlinear_func(data, g2, gaussian)
```
This result may be somewhat surprising to you. The function looks "fairly" linear, but the probability distribution of the output is completely different from a Gaussian. Recall the equations for multiplying two univariate Gaussians:
$$\begin{aligned}
\mu &=\frac{\sigma_1^2 \mu_2 + \sigma_2^2 \mu_1} {\sigma_1^2 + \sigma_2^2} \\
\sigma &= \frac{1}{\frac{1}{\sigma_1^2} + \frac{1}{\sigma_2^2}}
\end{aligned}$$
These equations do not hold for non-Gaussians, and certainly do not hold for the probability distribution shown in the 'Output' chart above.
Think of what this implies for the Kalman filter algorithm of the previous chapter. All of the equations assume that a Gaussian passed through the process function results in another Gaussian. If this is not true then all of the assumptions and guarantees of the Kalman filter do not hold. Let's look at what happens when we pass the output back through the function again, simulating the next time step of the Kalman filter.
```
# Pass the samples through g2 once, then fit a (mean, variance) Gaussian
# to the already-distorted output and feed it through again.
y = g2(data)
gaussian2 = (np.mean(y), np.var(y))
plt.figure()
plot_nonlinear_func(y, g2, gaussian2)
```
As you can see the probability function is further distorted from the original Gaussian. However, the graph is still somewhat symmetric around x=0, let's see what the mean is.
```
# Compare first and second moments before and after the nonlinear transform.
print('input mean, variance: %.4f, %.4f' %
(np.mean(data), np.var(data)))
print('output mean, variance: %.4f, %.4f' %
(np.mean(y), np.var(y)))
```
Let's compare that to the linear function that passes through (-2,3) and (2,-3), which is very close to the nonlinear function we have plotted. Using the equation of a line we have
$$m=\frac{-3-3}{2-(-2)}=-1.5$$
```
def g3(x):
    """Linear function with slope -1.5 through the origin."""
    slope = -1.5
    return slope * x
plt.figure()
# The linear approximation of g2: compare its output moments with g2's.
plot_nonlinear_func(data, g3, gaussian)
out = g3(data)
print('output mean, variance: %.4f, %.4f' %
(np.mean(out), np.var(out)))
```
Although the shapes of the output are very different, the mean and variance of each are almost the same. This may lead us to reasoning that perhaps we can ignore this problem if the nonlinear equation is 'close to' linear. To test that, we can iterate several times and then compare the results.
```
out = g3(data)
out2 = g2(data)
# Feed each output back through its own function 10 more times,
# simulating repeated prediction steps of a filter.
for i in range(10):
out = g3(out)
out2 = g2(out2)
print('linear output mean, variance: %.4f, %.4f' %
(np.average(out), np.std(out)**2))
print('nonlinear output mean, variance: %.4f, %.4f' %
(np.average(out2), np.std(out2)**2))
```
Unfortunately the nonlinear version is not stable. It drifted significantly from the mean of 0, and the variance is half an order of magnitude larger.
I minimized the issue by using a function that is quite close to a straight line. What happens if the function is $y(x)=x^2$?
```
def g3(x):
    """Negated parabola: f(x) = -x**2 (reuses the name g3 from earlier)."""
    return -(x ** 2)
# Input Gaussian centered at 1 with std dev 1.
x0 = (1, 1)
data = normal(loc=x0[0], scale=x0[1], size=500000)
plt.figure()
plot_nonlinear_func(data, g3, gaussian=x0)
```
Despite the curve being smooth and reasonably straight at $x=1$ the probability distribution of the output doesn't look anything like a Gaussian and the computed mean of the output is quite different than the value computed directly. This is not an unusual function - a ballistic object moves in a parabola, and this is the sort of nonlinearity your filter will need to handle. If you recall we've tried to track a ball and failed miserably. This graph should give you insight into why the filter performed so poorly.
## A 2D Example
It is hard to look at probability distributions and reason about what will happen in a filter. So let's think about tracking an aircraft with radar. The estimate may have a covariance that looks like this:
```
import kf_book.nonlinear_internal as nonlinear_internal
# Covariance ellipse of the aircraft position estimate.
nonlinear_internal.plot1()
```
What happens when we try to linearize this problem? The radar gives us a range to the aircraft. Suppose the radar is directly under the aircraft (x=10) and the next measurement states that the aircraft is 3 miles away (y=3). The positions that could match that measurement form a circle with radius 3 miles, like so.
```
nonlinear_internal.plot2()
```
We can see by inspection that the probable position of the aircraft is somewhere near x=11.4, y=2.7 because that is where the covariance ellipse and range measurement overlap. But the range measurement is nonlinear so we have to linearize it. We haven't covered this material yet, but the Extended Kalman filter will linearize at the last position of the aircraft - (10,2). At x=10 the range measurement has y=3, and so we linearize at that point.
```
nonlinear_internal.plot3()
```
Now we have a linear representation of the problem (literally a straight line) which we can solve. Unfortunately you can see that the intersection of the line and the covariance ellipse is a long way from the actual aircraft position.
```
nonlinear_internal.plot4()
```
That sort of error often leads to disastrous results. The error in this estimate is large. But in the next innovation of the filter that very bad estimate will be used to linearize the next radar measurement, so the next estimate is likely to be markedly worse than this one. After only a few iterations the Kalman filter will diverge, and start producing results that have no correspondence to reality.
This covariance ellipse spans miles. I exaggerated the size to illustrate the difficulties of highly nonlinear systems. In real radar tracking problems the nonlinearity is usually not that bad, but the errors will still accumulate. Other systems you may work with could have this amount of nonlinearity - this was not an exaggeration only to make a point. You will always be battling divergence when working with nonlinear systems.
## The Algorithms
You may be impatient to solve a specific problem, and wondering which filter to use. I will quickly survey the options. The subsequent chapters are somewhat independent of each other, and you can fruitfully skip around, though I recommend reading linearly if you truly want to master all of the material.
The workhorses of nonlinear filters are the *linearized Kalman filter* and *extended Kalman filter* (EKF). These two techniques were invented shortly after Kalman published his paper and they have been the main techniques used since then. The flight software in airplanes, the GPS in your car or phone almost certainly use one of these techniques.
However, these techniques are extremely demanding. The EKF linearizes the differential equations at one point, which requires you to find a solution to a matrix of partial derivatives (a Jacobian). This can be difficult or impossible to do analytically. If impossible, you have to use numerical techniques to find the Jacobian, but this is expensive computationally and introduces more error into the system. Finally, if the problem is quite nonlinear the linearization leads to a lot of error being introduced in each step, and the filters frequently diverge. You can not throw some equations into some arbitrary solver and expect it to get good results. It's a difficult field for professionals. I note that most Kalman filtering textbooks merely gloss over the EKF despite it being the most frequently used technique in real world applications.
Recently the field has been changing in exciting ways. First, computing power has grown to the point that we can use techniques that were once beyond the ability of a supercomputer. These use *Monte Carlo* techniques - the computer generates thousands to tens of thousands of random points and tests all of them against the measurements. It then probabilistically kills or duplicates points based on how well they match the measurements. A point far away from the measurement is unlikely to be retained, whereas a point very close is quite likely to be retained. After a few iterations there is a clump of particles closely tracking your object, and a sparse cloud of points where there is no object.
This has two benefits. First, the algorithm is robust even for extremely nonlinear problems. Second, the algorithm can track arbitrarily many objects at once - some particles will match the behavior on one object, and other particles will match other objects. So this technique is often used to track automobile traffic, people in crowds, and so on.
The costs should be clear. It is computationally expensive to test tens of thousands of points for every step in the filter. But modern CPUs are very fast, and this is a good problem for GPUs because the part of the algorithm is parallelizable. Another cost is that the answer is not mathematical. With a Kalman filter my covariance matrix gives me important information about the amount of error in the estimate. The particle filter does not give me a rigorous way to compute this. Finally, the output of the filter is a cloud of points; I then have to figure out how to interpret it. Usually you will be doing something like taking the mean and standard deviations of the points, but this is a difficult problem. There are still many points that do not 'belong' to a tracked object, so you first have to run some sort of clustering algorithm to first find the points that seem to be tracking an object, and then you need another algorithm to produce an state estimate from those points. None of this is intractable, but it is all quite computationally expensive.
Finally, we have a new algorithm called the *unscented Kalman filter* (UKF). It does not require you to find analytic solutions to nonlinear equations, and yet almost always performs better than the EKF. It does well with nonlinear problems - problems where the EKF has significant difficulties. Designing the filter is extremely easy. Some will say the jury is still out on the UKF, but to my mind the UKF is superior in almost every way to the EKF. I suggest that the UKF should be the starting point for any implementation, especially if you are not a Kalman filter professional with a graduate degree in control theory. The main downside is that the UKF can be a few times slower than the EKF, but this really depends on whether the EKF solves the Jacobian analytically or numerically. If numerically the UKF is almost certainly faster. It has not been proven (and probably it cannot be proven) that the UKF always yields more accurate results than the EKF. In practice it almost always does, often significantly so. It is very easy to understand and implement, and I strongly suggest this filter as your starting point.
## Summary
The world is nonlinear, but we only really know how to solve linear problems. This introduces significant difficulties for Kalman filters. We've looked at how nonlinearity affects filtering in 3 different but equivalent ways, and I've given you a brief summary of the major approaches: the linearized Kalman filter, the extended Kalman filter, the Unscented Kalman filter, and the particle filter.
Until recently the linearized Kalman filter and EKF have been the standard way to solve these problems. They are very difficult to understand and use, and they are also potentially very unstable.
Recent developments have offered what are to my mind superior approaches. The UKF dispenses with the need to find solutions to partial differential equations, yet it is also usually more accurate than the EKF. It is easy to use and understand. I can get a basic UKF going in a few minutes by using FilterPy. The particle filter dispenses with mathematical modeling completely in favor of a Monte Carlo technique of generating a random cloud of thousands of points. It runs slowly, but it can solve otherwise intractable problems with relative ease.
I get more email about the EKF than anything else; I suspect that this is because most treatments in books, papers, and on the internet use the EKF. If your interest is in mastering the field of course you will want to learn about the EKF. But if you are just trying to get good results I point you to the UKF and particle filter first. They are much easier to implement, understand, and use, and they are typically far more stable than the EKF.
Some will quibble with that advice. A lot of recent publications are devoted to a comparison of the EKF, UKF, and perhaps a few other choices for a given problem. Do you not need to perform a similar comparison for your problem? If you are sending a rocket to Mars then of course you do. You will be balancing issues such as accuracy, round off errors, divergence, mathematical proof of correctness, and the computational effort required. I can't imagine not knowing the EKF intimately.
On the other hand the UKF works spectacularly! I use it at work for real world applications. I mostly haven't even tried to implement an EKF for these applications because I can verify that the UKF is working fine. Is it possible that I might eke out another 0.2% of performance from the EKF in certain situations? Sure! Do I care? No! I completely understand the UKF implementation, it is easy to test and verify, I can pass the code to others and be confident that they can understand and modify it, and I am not a masochist that wants to battle difficult equations when I already have a working solution. If the UKF or particle filters start to perform poorly for some problem then I will turn to other techniques, but not before then. And realistically, the UKF usually provides substantially better performance than the EKF over a wide range of problems and conditions. If "really good" is good enough I'm going to spend my time working on other problems.
I'm belaboring this point because in most textbooks the EKF is given center stage, and the UKF is either not mentioned at all or just given a 2 page gloss that leaves you completely unprepared to use the filter. The UKF is still relatively new, and it takes time to write new editions of books. At the time many books were written the UKF was either not discovered yet, or it was just an unproven but promising curiosity. But I am writing this now, the UKF has had enormous success, and it needs to be in your toolkit. That is what I will spend most of my effort trying to teach you.
## References
<A name="[1]">[1]</A> https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python/blob/master/Supporting_Notebooks/Computing_and_plotting_PDFs.ipynb
| github_jupyter |
# Fig. 6
This notebook load and combine the output files produced by the script `power.py` to produce the figure shown in the paper.
Output from ```power.py``` are saved in pickle files with name `"theta_"+str(theta)+':_eta_'+str(eta)+specifier+".pkl"`.
Multiple outputs at the same theta are saved with different unique identifiers.
```
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import matplotlib as mpl
import pickle
from matplotlib.colors import LogNorm
import os
import re
import sys
sys.path.insert(0, "../../lib") # add the library folder to the path I look for modules
sys.path.insert(0, "../heterogeneity/")
import latexify
from utilities import make_network
#import random_regular
#import dynamical_cavity as cavity
def load_obj(theta, eta, specifier=''):
    """Unpickle the results dictionary saved by power.py for (theta, eta).

    Relies on the module-level ``directory`` variable for the base path.
    """
    fname = f"dic-theta_{theta}_eta_{eta}{specifier}.pkl"
    with open(directory + '/data/' + fname, 'rb') as f:
        return pickle.load(f)
def load_data(directory):
# Gather all "dic-theta_*_eta_*.pkl" result files under directory/data and
# return (params DataFrame, cavity means, naive-mean-field means).
# NOTE(review): the regex uses \d in a non-raw string (deprecation warning)
# and the alternation "|\d.pkl" looks unintended — verify the pattern.
filenames=os.listdir(directory+"/data")
pattern = re.compile("dic-theta_\d*\.\d_eta_\d*\.\d|\d.pkl")
dictnames=[name for name in filenames if pattern.match(name)]# select only dictionary files
print(' Results are available in the files:')
params = []
for filename in dictnames:
# NOTE(review): lstrip/rstrip strip *character sets*, not prefixes/suffixes;
# this works for these filenames but removeprefix/removesuffix is safer.
params+=[filename.lstrip('dic-theta_').rstrip('.pkl').split('_eta_')]
params = pd.DataFrame(params,columns= ['theta','eta'],dtype=float).sort_values(by = ['theta','eta'])
latexify.latexify(columns = 2)
mean_mean = []
mean_cav = []
# Load the stored mean activities for each (theta, eta) pair.
for theta, eta in params.values:
dic = load_obj(theta,eta)
mean_mean+= [dic['mean_mean']]
mean_cav += [dic['mean_cav']]
mean_cav = np.array(mean_cav)
mean_mean = np.array(mean_mean)
return params,mean_cav,mean_mean
# Plot <P> vs eta for the power-law (gamma=3) network, comparing the
# cavity method against naive mean field at theta=0 and theta/J=0.5.
directory = 'gamma=3'
params,mean_cav,mean_mean = load_data(directory)
prop_cycle = plt.rcParams['axes.prop_cycle']
colors = prop_cycle.by_key()['color']
etas = 1-np.array(list(params['eta'][params['theta']==0]))
plt.plot(etas,mean_cav[params['theta']==0],'--^',c = colors[0],label = 'cav.')
plt.plot(etas,mean_mean[params['theta']==0],'s',c = colors[0],mfc= 'w',alpha = 0.5,label = 'nMF')
etas = 1-np.array(list(params['eta'][params['theta']==0.5]))
plt.plot(etas,mean_cav[params['theta']==0.5],'--^',c = colors[2])
plt.plot(etas,mean_mean[params['theta']==0.5],'s',c = colors[2],mfc= 'w',alpha = 0.5)
# NOTE(review): indexing with a nested list of boolean masks is deprecated
# in NumPy; use a plain boolean mask instead.
plt.text(0.65,mean_cav[[(params['theta']==0)&(params['eta']==0.5)]]+0.0,'$\\vartheta = 0$',c = colors[0],fontsize = 12)
plt.text(0.65,mean_cav[[(params['theta']==0.5)&(params['eta']==0.5)]]-0.1, '$\\vartheta/J = 0.5$',c = colors[2],fontsize = 12)
plt.ylabel('$\\langle P\\rangle$',fontsize = 13)
plt.xlabel('$\\eta$',fontsize = 13)
plt.legend(ncol = 2,fontsize = 10.5,numpoints = 1,columnspacing=1)
plt.tight_layout()
#plt.savefig(directory+'_mean_field.pdf')
params,mean_cav,mean_mean = load_data(directory)
params
# Repeat the same comparison for the regular in-degree (kin=3) network.
directory = 'kin=3'
params,mean_cav,mean_mean = load_data(directory)
prop_cycle = plt.rcParams['axes.prop_cycle']
colors = prop_cycle.by_key()['color']
params
list(params['eta'][params['theta']==0.5])
etas = 1-np.array(list(params['eta'][params['theta']==0]))
plt.plot(etas,mean_cav[params['theta']==0],'--^',c = colors[0],label = 'cav.')
plt.plot(etas,mean_mean[params['theta']==0],'s',c = colors[0],mfc= 'w',alpha = 0.5,label = 'nMF')
etas = 1-np.array(list(params['eta'][params['theta']==0.5]))
plt.plot(etas,mean_cav[params['theta']==0.5],'--^',c = colors[2])
plt.plot(etas,mean_mean[params['theta']==0.5],'s',c = colors[2],mfc= 'w',alpha = 0.5)
# NOTE(review): nested-list boolean indexing is deprecated in NumPy.
plt.text(0.35,mean_cav[[(params['theta']==0)&(params['eta']==0.3)]]-0.15,'$\\vartheta = 0$',c = colors[0],fontsize = 12)
plt.text(0.35,mean_cav[[(params['theta']==0.5)&(params['eta']==0.3)]]-0.52,'$\\vartheta/J = 0.5$',c = colors[2],fontsize = 12)
plt.ylabel('$\\langle P\\rangle$',fontsize = 13)
plt.xlabel('$\\eta$',fontsize = 13)
plt.legend(ncol = 2,fontsize = 10.5,numpoints = 1)
plt.tight_layout()
#plt.savefig(directory+'_mean_field.pdf')
prop_cycle = plt.rcParams['axes.prop_cycle']
colors = prop_cycle.by_key()['color']
colors[0]
```
$$
\frac{1}{2}\left[ 1+P_j \tanh\frac{\beta(\pm J-\theta)}{2}-(1-P_j)\tanh\frac{\beta(\theta)}{2}\right]
$$
```
def mean_field(P, js, T, interaction, N, Ks, theta=0, precision=1e-4, max_iter=50):
"""
Run the dynamical cavity with recursive calls.
:param P_init: list of floats of length N
:param T: float
:param J: sparse.csr_matrix
:param theta: float (in units of 1/sqrt(<K>))
:param max_iter: int
:param precision: float
:return: P_new it is a list of dimensions N which contains the probability of active state for each gene.
In order to help storing, couplings are taken to be +-1, bias is then rescaled by 1/sqrt(<|J_{ij}|>)
"""
avg_degree = np.mean(Ks)
P_new = np.zeros(N)
for count in range(max_iter):
for i,(inter,j) in enumerate(zip(interaction,js)):
P_new[i] = 0.5*(1+np.tanh((sum(inter*P[j])-theta)/2/T/ np.sqrt(avg_degree)))
if max(np.abs(np.array(P) - np.array(P_new))) < precision:
P = P_new
print('finishing after', count, 'iterations')
break
if count == max_iter-1:
print("Maximum number of repetition reached, but target precision has not been reached. ")
P = P_new.copy()
return P
# Build a random regular network of N genes with +-1 couplings.
N = 100000
gamma =3#1.81
bias = 0.3 #0.379
#J = make_network(N,gamma,bias)
# NOTE(review): the `import random_regular` line is commented out in the
# imports cell above — uncomment it before running this cell.
J = random_regular.make_network(N,3,bias)
N = J.shape[0]
# Binarize the couplings to +-1.
J.data = np.where(J.data > 0, 1, -1)
N = J.shape[0]
J_transpose = J.transpose().tolil()
js = J_transpose.rows # list of list, structure is [el[i]] where el[i]
# is the list of predecessors of gene i ( the index)
interaction = J_transpose.data # list of list, structure is [el[i]]
# where el[i] is the list of predecessors of gene i (interaction strength with sign)
Ks = np.array([len(neigh) for neigh in js]) # in degree of each gene
J0 = 1/np.sqrt(Ks.mean())
```
Comparison of mean field vs heterogeneous. Mean field is:
$$
P_i = \frac{1}{2}\left(1+\tanh \frac{\beta}{2}\sum_j J_{ij}P_j \right)
$$
```
# Compare the cavity solution with naive mean field on the same network.
theta=0.
T=0.2
# NOTE(review): `cavity` comes from the commented-out `dynamical_cavity`
# import above — uncomment it before running. %time is an IPython magic.
%time P=cavity.cavity(np.random.rand(N), js, T*J0, interaction, N, Ks, theta*J0,J0)
%time P_mean=mean_field(np.random.rand(N), js, T*J0, interaction, N, Ks, theta*J0)
latexify.latexify(columns = 2)
avg_degree = np.mean(Ks)
# One-step update maps for a predecessor firing (+J) or not (-J);
# both close over the globals theta, T and avg_degree defined above.
def plus(P):
return 0.5*(1-np.tanh(theta/np.sqrt(avg_degree)/2/T)+P*(np.tanh((1-theta)/np.sqrt(avg_degree)/2/T)+np.tanh(theta/np.sqrt(avg_degree)/2/T)))
def minus(P):
return 0.5*(1-np.tanh(theta/np.sqrt(avg_degree)/2/T)+P*(np.tanh((-1-theta)/np.sqrt(avg_degree)/2/T)+np.tanh(theta/np.sqrt(avg_degree)/2/T)))
# Histogram the per-gene activation probabilities for both methods.
h,b = np.histogram(P_mean,np.linspace(0,1,100),density=True)
plt.plot(b[:-1],h,label = 'nMF')
h,b = np.histogram(P,bins = b,density=True)
plt.plot(b[:-1],h,'-',alpha = 1.,label = 'cav.')
plt.legend(loc = 'upper center',ncol = 2,fontsize = 10.5)
'''
x = (1-np.tanh(theta/np.sqrt(avg_degree)/2/T))/(2-(np.tanh((1-theta)/np.sqrt(avg_degree)/2/T)+np.tanh(theta/np.sqrt(avg_degree)/2/T)))
y = (1-np.tanh(theta/np.sqrt(avg_degree)/2/T))/(2-(np.tanh((-1-theta)/np.sqrt(avg_degree)/2/T)+np.tanh(theta/np.sqrt(avg_degree)/2/T)))
plt.axvline(x)
plt.axvline(y, ls = ':', c= 'r')
plt.axvline(plus(y), ls = ':', c= 'r')
plt.axvline(minus(x),ls = '--')
plt.axvline(minus(minus(x)),ls = '-.')
plt.axvline(plus(minus(x)),ls = '-.')
'''
plt.semilogy()
plt.xlabel('$P$',fontsize = 13)
plt.ylabel('$\\Pi(P)$',fontsize = 13)
plt.tight_layout()
#plt.savefig('random_regular_comparison.pdf')
#plt.savefig('power_law_comparison.pdf')
# Mean activities and their relative discrepancy.
np.mean(P),np.mean(P_mean),1-abs(np.mean(P)/np.mean(P_mean))
1/np.sqrt(np.mean(Ks)),1/np.sqrt(3)
_ = plt.hist(Ks,100)
params
```
| github_jupyter |
# Artificial Intelligence Nanodegree
## Machine Translation Project
In this notebook, sections that end with **'(IMPLEMENTATION)'** in the header indicate that the following blocks of code will require additional functionality which you must provide. Please be sure to read the instructions carefully!
## Introduction
In this notebook, you will build a deep neural network that functions as part of an end-to-end machine translation pipeline. Your completed pipeline will accept English text as input and return the French translation.
- **Preprocess** - You'll convert text to sequence of integers.
- **Models** Create models which accepts a sequence of integers as input and returns a probability distribution over possible translations. After learning about the basic types of neural networks that are often used for machine translation, you will engage in your own investigations, to design your own model!
- **Prediction** Run the model on English text.
```
!git clone https://github.com/ap-nlp-research/aind2-nlp-capstone.git
import os
from tqdm import tqdm, tqdm_notebook
import numpy as np
from tensorflow import keras
import tensorflow as tf
from tensorflow.contrib.rnn import GRUCell
```
### Verify access to the GPU
The following test applies only if you expect to be using a GPU, e.g., while running in a Udacity Workspace or using an AWS instance with GPU support. Run the next cell, and verify that the device_type is "GPU".
- If the device is not GPU & you are running from a Udacity Workspace, then save your workspace with the icon at the top, then click "enable" at the bottom of the workspace.
- If the device is not GPU & you are running from an AWS instance, then refer to the cloud computing instructions in the classroom to verify your setup steps.
```
from tensorflow.python.client import device_lib
# List local compute devices; look for device_type "GPU" in the output.
print(device_lib.list_local_devices())
```
## Dataset
We begin by investigating the dataset that will be used to train and evaluate your pipeline. The most common datasets used for machine translation are from [WMT](http://www.statmt.org/). However, that will take a long time to train a neural network on. We'll be using a dataset we created for this project that contains a small vocabulary. You'll be able to train your model in a reasonable time with this dataset.
### Load Data
The data is located in `data/small_vocab_en` and `data/small_vocab_fr`. The `small_vocab_en` file contains English sentences with their French translations in the `small_vocab_fr` file. Load the English and French data from these files from running the cell below.
```
def load_data(path):
    """Read the text file at ``path`` and return its contents as a list of lines."""
    with open(os.path.join(path), "r") as f:
        contents = f.read()
    return contents.split('\n')
# Load English data
english_sentences = load_data('aind2-nlp-capstone/data/small_vocab_en')
# Load French data
french_sentences = load_data('aind2-nlp-capstone/data/small_vocab_fr')
print('Dataset Loaded')
```
### Files
Each line in `small_vocab_en` contains an English sentence with the respective translation in each line of `small_vocab_fr`. View the first two lines from each file.
```
# Show the first two English/French sentence pairs.
for sample_i in range(2):
print('small_vocab_en Line {}: {}'.format(sample_i + 1, english_sentences[sample_i]))
print('small_vocab_fr Line {}: {}'.format(sample_i + 1, french_sentences[sample_i]))
```
From looking at the sentences, you can see they have been preprocessed already. The punctuation has been delimited using spaces. All the text has been converted to lowercase. This should save you some time, but the text requires more preprocessing.
### Vocabulary
The complexity of the problem is determined by the complexity of the vocabulary. A more complex vocabulary is a more complex problem. Let's look at the complexity of the dataset we'll be working with.
For comparison, _Alice's Adventures in Wonderland_ contains 2,766 unique words of a total of 15,500 words.
## Preprocess
For this project, you won't use text data as input to your model. Instead, you'll convert the text into sequences of integers using the following preprocess methods:
1. Tokenize the words into ids
2. Add padding to make all the sequences the same length.
Time to start preprocessing the data...
### Tokenize (IMPLEMENTATION)
For a neural network to predict on text data, it first has to be turned into data it can understand. Text data like "dog" is a sequence of ASCII character encodings. Since a neural network is a series of multiplication and addition operations, the input data needs to be number(s).
We can turn each character into a number or each word into a number. These are called character and word ids, respectively. Character ids are used for character level models that generate text predictions for each character. A word level model uses word ids that generate text predictions for each word. Word level models tend to learn better, since they are lower in complexity, so we'll use those.
Turn each sentence into a sequence of words ids using Keras's [`Tokenizer`](https://keras.io/preprocessing/text/#tokenizer) function. Use this function to tokenize `english_sentences` and `french_sentences` in the cell below.
Running the cell will run `tokenize` on sample data and show output for debugging.
```
def tokenize(x):
    """Fit a Keras ``Tokenizer`` on ``x`` and convert it to word-id sequences.

    :param x: list of sentences/strings to be tokenized
    :return: tuple (sequences of word ids for x, the fitted tokenizer)
    """
    tokenizer = keras.preprocessing.text.Tokenizer()
    tokenizer.fit_on_texts(x)
    sequences = tokenizer.texts_to_sequences(x)
    return sequences, tokenizer
# Tokenize Example output
# Sanity-check tokenize() on three sample sentences.
text_sentences = [
'The quick brown fox jumps over the lazy dog .',
'By Jove , my quick study of lexicography won a prize .',
'This is a short sentence .']
text_tokenized, text_tokenizer = tokenize(text_sentences)
print(text_tokenizer.word_index)
print()
for sample_i, (sent, token_sent) in enumerate(zip(text_sentences, text_tokenized)):
print('Sequence {} in x'.format(sample_i + 1))
print(' Input: {}'.format(sent))
print(' Output: {}'.format(token_sent))
```
### Padding (IMPLEMENTATION)
When batching the sequence of word ids together, each sequence needs to be the same length. Since sentences are dynamic in length, we can add padding to the end of the sequences to make them the same length.
Make sure all the English sequences have the same length and all the French sequences have the same length by adding padding to the **end** of each sequence using Keras's [`pad_sequences`](https://keras.io/preprocessing/sequence/#pad_sequences) function.
```
def pad(x, length=None):
    """
    Pad every sequence in x to a common length (zeros appended at the end).

    :param x: List of sequences.
    :param length: Length to pad the sequence to. If None, use length of longest sequence in x.
    :return: Padded numpy array of sequences
    """
    target_length = length if length is not None else max(len(seq) for seq in x)
    return keras.preprocessing.sequence.pad_sequences(x, maxlen=target_length, padding='post')
# Pad Tokenized output
test_pad = pad(text_tokenized)
# Compare each id sequence before and after padding.
for sample_i, (token_sent, pad_sent) in enumerate(zip(text_tokenized, test_pad)):
    print('Sequence {} in x'.format(sample_i + 1))
    print('  Input:  {}'.format(np.array(token_sent)))
    print('  Output: {}'.format(pad_sent))
```
### Preprocess Pipeline
Your focus for this project is to build neural network architecture, so we won't ask you to create a preprocess pipeline. Instead, we've provided you with the implementation of the `preprocess` function.
```
def preprocess(x, y):
    """
    Tokenize and pad both the feature and the label sentences.

    :param x: Feature List of sentences
    :param y: Label List of sentences
    :return: Tuple of (Preprocessed x, Preprocessed y, x tokenizer, y tokenizer)
    """
    x_sequences, x_tokenizer = tokenize(x)
    y_sequences, y_tokenizer = tokenize(y)
    return pad(x_sequences), pad(y_sequences), x_tokenizer, y_tokenizer
preproc_english_sentences, preproc_french_sentences, english_tokenizer, french_tokenizer =\
    preprocess(english_sentences, french_sentences)
# Padded sequence length (axis 1 is the time/word axis).
max_english_sequence_length = preproc_english_sentences.shape[1]
max_french_sequence_length = preproc_french_sentences.shape[1]
# NOTE: word_index does not include the padding id 0; model vocab sizes
# below are therefore passed as len(word_index) + 1.
english_vocab_size = len(english_tokenizer.word_index)
french_vocab_size = len(french_tokenizer.word_index)
print('Data Preprocessed')
print("Max English sentence length:", max_english_sequence_length)
print("Max French sentence length:", max_french_sequence_length)
print("English vocabulary size:", english_vocab_size)
print("French vocabulary size:", french_vocab_size)
```
## Models
In this section, you will experiment with various neural network architectures.
You will begin by training four relatively simple architectures.
- Model 1 is a simple RNN
- Model 2 is a RNN with Embedding
- Model 3 is a Bidirectional RNN
- Model 4 is an optional Encoder-Decoder RNN
After experimenting with the four simple architectures, you will construct a deeper architecture that is designed to outperform all four models.
### Ids Back to Text
The neural network will be translating the input to words ids, which isn't the final form we want. We want the French translation. The function `logits_to_text` will bridge the gap between the logits from the neural network and the French translation. You'll be using this function to better understand the output of the neural network.
```
def logits_to_text(logits, tokenizer):
    """
    Turn logits from a neural network into text using the tokenizer.

    :param logits: Logits from a neural network, shape (sequence_length, vocab_size)
    :param tokenizer: Keras Tokenizer fit on the labels
    :return: String that represents the text of the logits
    """
    # Invert the tokenizer's word -> id mapping. `idx` instead of the
    # original `id`, which shadowed the builtin of the same name.
    index_to_words = {idx: word for word, idx in tokenizer.word_index.items()}
    index_to_words[0] = '<PAD>'  # id 0 is reserved for padding
    return ' '.join([index_to_words[prediction] for prediction in np.argmax(logits, 1)])
print('`logits_to_text` function loaded.')
def train(train_ops: list, metrics: list, inputs: np.ndarray, targets: np.ndarray, epochs: int, batch_size: int):
    """Run a mini-batch training loop over the default TensorFlow (1.x) graph.

    :param train_ops: ops to run once per batch (optimizer step, metric updates, ...)
    :param metrics: metric tensors evaluated and displayed for every batch
    :param inputs: input samples; the first axis is the sample axis
    :param targets: target samples aligned with `inputs`
    :param epochs: number of full passes over the data
    :param batch_size: samples per batch (the last batch may be smaller)
    """
    n_samples = len(inputs)
    # Ceiling division so the final partial batch is not dropped.
    n_batches = (n_samples + batch_size - 1) // batch_size
    metric_names = [m.op.name for m in metrics]
    with tf.get_default_graph().as_default() as graph:
        # Placeholders were registered in graph collections when the graph was built.
        input_tensors = tf.get_collection('inputs')[0]
        target_tensors = tf.get_collection('targets')[0]
        # Get a TensorFlow session managed by the supervisor.
        with tf.Session(graph=graph) as sess:
            # Initialize all global variables
            _ = sess.run([tf.global_variables_initializer(), tf.local_variables_initializer()])
            for epoc in range(epochs):
                pbar = tqdm(range(n_batches), desc="Epoch {}".format(epoc))
                for it in pbar:
                    start = it * batch_size
                    end = min(n_samples, start + batch_size)
                    x, y = inputs[start:end], targets[start:end]
                    _, metrics_output = sess.run([train_ops, metrics], feed_dict={input_tensors: x, target_tensors: y})
                    # Surface the latest metric values in the progress bar.
                    pbar.set_postfix(dict([(m, v) for m, v in zip(metric_names, metrics_output)]), refresh=True)
```
### Model 1: RNN (IMPLEMENTATION)

A basic RNN model is a good baseline for sequence data. In this model, you'll build a RNN that translates English to French.
```
def embed_input(x: tf.Tensor, n_words: int, embedding_size: int) -> tf.Tensor:
    """Look up a trainable embedding vector for every word id in `x`."""
    embedding_table = tf.get_variable(name="embedding", shape=(n_words, embedding_size))
    return tf.nn.embedding_lookup(params=embedding_table, ids=x)
def create_encoder(emdedded_input: tf.Tensor, num_units: int = 64) -> tf.Tensor:
    """Encode an embedded input sequence with a single GRU layer.

    :param emdedded_input: embedded input batch (sic: parameter name kept for
        compatibility), shape (batch, time, embedding_size)
    :param num_units: number of GRU units in the encoder cell
    :return: the encoder's final hidden state, shape (batch, num_units)
    """
    with tf.variable_scope("encoder"):
        cell = GRUCell(num_units=num_units)
        # Only the final state is needed; per-step outputs are discarded.
        _, encoder_final_state = tf.nn.dynamic_rnn(cell=cell, inputs=emdedded_input, dtype=tf.float32)
    return encoder_final_state
def create_decoder(encoder_hs: tf.Tensor, sequence_length: int) -> tf.Tensor:
    """Decode a fixed-length output sequence starting from the encoder state.

    Uses tf.nn.raw_rnn with a custom loop function that feeds each cell output
    back in as the next input.

    :param encoder_hs: encoder final hidden state, shape (batch, encoder_units)
    :param sequence_length: number of decoding steps to unroll
    :return: decoder outputs stacked time-major, shape (time, batch, encoder_units)
    """
    batch_size = tf.shape(encoder_hs)[0]
    encoder_units = encoder_hs.get_shape().as_list()[-1]
    dtype = encoder_hs.dtype

    # create a decoder cell
    # NOTE(review): despite the name, this loop feeds the model's *own* output
    # back as the next input, not the ground truth — confirm intent.
    def teacher_forcing_loop(time, cell_output=None, cell_state=None, loop_state=None):
        # raw_rnn contract: called once with cell_output=None to produce the
        # initial state/input, then once per time step.
        emit_output = cell_output  # == None for time == 0
        elements_finished = (time >= sequence_length)
        # time == 0 initialize the sequence with encoder hidden state
        # otherwise, force the cell output as RNNCell input
        if cell_output is None:  # time == 0
            next_cell_state = encoder_hs
            next_input = tf.zeros([batch_size, encoder_units], dtype=dtype)
        else:
            next_cell_state = cell_state
            finished = tf.reduce_all(elements_finished)
            # Once every sequence in the batch is finished, feed zeros.
            next_input = tf.cond(
                finished,
                lambda: tf.zeros([batch_size, encoder_units], dtype=dtype),
                lambda: cell_output)
        next_loop_state = None
        return (elements_finished, next_input, next_cell_state,
                emit_output, next_loop_state)

    with tf.variable_scope("decoder"):
        cell = GRUCell(num_units=encoder_units)
        # unroll the sequence reusing cell output as the next input
        outputs_ta, decoder_final_state, _ = tf.nn.raw_rnn(cell, loop_fn=teacher_forcing_loop)
        # outputs provided in the form of tensor array that should be converted back into a tensor
        decoder_output = outputs_ta.stack()
    return decoder_output
def create_encoder_decoder_model(inputs: tf.Tensor,
                                 source_size: int,
                                 target_size: int,
                                 target_length: int,
                                 embedding_size: int = 16) -> tf.Tensor:
    """Wire embedding -> encoder -> decoder -> dense projection into output logits."""
    embedded = embed_input(inputs, n_words=source_size, embedding_size=embedding_size)
    encoder_state = create_encoder(embedded)
    decoded = create_decoder(encoder_state, sequence_length=target_length)
    # Project decoder outputs onto the target vocabulary.
    return tf.layers.Dense(units=target_size)(decoded)
# refresh the graph to make sure nothing was left there from prior runs
tf.reset_default_graph()
# Placeholders for a batch of padded id sequences; registered in collections
# so the `train` loop can retrieve them from the graph later.
inputs = tf.placeholder(tf.int32, [None, max_english_sequence_length], name='inputs')
tf.add_to_collection(name="inputs", value=inputs)
targets = tf.placeholder(tf.int32, [None, max_french_sequence_length], name='targets')
tf.add_to_collection(name="targets", value=targets)
# +1 accounts for the reserved padding id 0, which word_index does not contain.
logits = create_encoder_decoder_model(inputs=inputs,
                                      source_size=english_vocab_size+1,
                                      target_size=french_vocab_size+1,
                                      target_length=max_french_sequence_length)
# To be consumed correctly by TF metrics and losses, logits should be transposed [1, 0, 2]
# (decoder emits time-major output; losses expect batch-major).
logits = tf.transpose(logits, [1, 0, 2])
# build a loss function
loss = tf.reduce_mean(tf.losses.sparse_softmax_cross_entropy(labels=targets, logits=logits), name='acc_loss')
# build accuracy metric
predictions = tf.argmax(logits, axis=2, name="prediction")
accuracy, update_count_op = tf.metrics.accuracy(labels=targets, predictions=predictions)
# Clip gradients by global norm to stabilize RNN training.
variables = tf.trainable_variables()
gradients = tf.gradients(loss, variables)
clipped_gradients, _ = tf.clip_by_global_norm(gradients, 5.0)
# Optimization
global_step = tf.train.get_or_create_global_step()
optimizer = tf.train.AdamOptimizer(learning_rate = 0.001)
update_step = optimizer.apply_gradients(zip(clipped_gradients, variables))
train(train_ops=[update_step, update_count_op, global_step],
      metrics=[loss, accuracy],
      inputs=preproc_english_sentences,
      targets=preproc_french_sentences,
      epochs=10,
      batch_size=1024)
```
## Prediction (IMPLEMENTATION)
```
def final_predictions(x, y, x_tk, y_tk):
    """
    Gets predictions using the final model
    :param x: Preprocessed English data
    :param y: Preprocessed French data
    :param x_tk: English tokenizer
    :param y_tk: French tokenizer
    """
    # Pass pretrained model
    # NOTE(review): `bd_embedded_rnn_model` must already exist (trained in an
    # earlier cell); it is not defined in this cell.
    model = bd_embedded_rnn_model

    ## DON'T EDIT ANYTHING BELOW THIS LINE
    # Invert the French tokenizer so ids can be mapped back to words.
    y_id_to_word = {value: key for key, value in y_tk.word_index.items()}
    y_id_to_word[0] = '<PAD>'
    sentence = 'he saw a old yellow truck'
    # NOTE(review): assumes every word is in x_tk.word_index (KeyError otherwise).
    sentence = [x_tk.word_index[word] for word in sentence.split()]
    sentence = pad_sequences([sentence], maxlen=x.shape[-1], padding='post')
    sentences = np.array([sentence[0], x[0]])
    predictions = model.predict(sentences, len(sentences))

    print('Sample 1:')
    # The comprehension variable `x` below shadows the parameter `x`.
    print(' '.join([y_id_to_word[np.argmax(x)] for x in predictions[0]]))
    print('Il a vu un vieux camion jaune')
    print('Sample 2:')
    print(' '.join([y_id_to_word[np.argmax(x)] for x in predictions[1]]))
    print(' '.join([y_id_to_word[np.max(x)] for x in y[0]]))

final_predictions(preproc_english_sentences, preproc_french_sentences, english_tokenizer, french_tokenizer)
```
## Submission
When you're ready to submit, complete the following steps:
1. Review the [rubric](https://review.udacity.com/#!/rubrics/1004/view) to ensure your submission meets all requirements to pass
2. Generate an HTML version of this notebook
- Run the next cell to attempt automatic generation (this is the recommended method in Workspaces)
- Navigate to **FILE -> Download as -> HTML (.html)**
- Manually generate a copy using `nbconvert` from your shell terminal
```
$ pip install nbconvert
$ python -m nbconvert machine_translation.ipynb
```
3. Submit the project
- If you are in a Workspace, simply click the "Submit Project" button (bottom towards the right)
- Otherwise, add the following files into a zip archive and submit them
- `helper.py`
- `machine_translation.ipynb`
- `machine_translation.html`
- You can export the notebook by navigating to **File -> Download as -> HTML (.html)**.
```
!!python -m nbconvert *.ipynb
```
## Optional Enhancements
This project focuses on learning various network architectures for machine translation, but we don't evaluate the models according to best practices by splitting the data into separate test & training sets -- so the model accuracy is overstated. Use the [`sklearn.model_selection.train_test_split()`](http://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html) function to create separate training & test datasets, then retrain each of the models using only the training set and evaluate the prediction accuracy using the hold out test set. Does the "best" model change?
| github_jupyter |
<a href="http://cocl.us/pytorch_link_top">
<img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DL0110EN/notebook_images%20/Pytochtop.png" width="750" alt="IBM Product " />
</a>
<img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DL0110EN/notebook_images%20/cc-logo-square.png" width="200" alt="cognitiveclass.ai logo" />
<h1>Convolutional Neural Network with Small Images</h1>
<h2>Table of Contents</h2>
<p>In this lab, we will use a Convolutional Neural Network to classify handwritten digits from the MNIST database. We will reshape the images to make them faster to process </p>
<ul>
<li><a href="#Makeup_Data">Get Some Data</a></li>
<li><a href="#CNN">Convolutional Neural Network</a></li>
<li><a href="#Train">Define Softmax, Criterion function, Optimizer and Train the Model</a></li>
<li><a href="#Result">Analyze Results</a></li>
</ul>
<p>Estimated Time Needed: <strong>25 min</strong> (about 14 min of that is spent training the model)</p>
<hr>
<h2>Preparation</h2>
```
# Import the libraries we need to use in this lab
# Using the following line code to install the torchvision library
# !conda install -y torchvision
import torch
import torch.nn as nn
import torchvision.transforms as transforms
import torchvision.datasets as dsets
import matplotlib.pylab as plt
import numpy as np
```
Define the function <code>plot_channels</code> to plot out the kernel parameters of each channel
```
# Define the function for plotting the channels
def plot_channels(W):
    """Plot every (output, input) kernel of a conv weight tensor on a shared color scale."""
    n_out, n_in = W.shape[0], W.shape[1]
    w_min, w_max = W.min().item(), W.max().item()
    fig, axes = plt.subplots(n_out, n_in)
    fig.subplots_adjust(hspace=0.1)
    # Axes are laid out row-major: outputs are rows, inputs are columns.
    for flat_index, ax in enumerate(axes.flat):
        row, col = divmod(flat_index, n_in)
        ax.imshow(W[row, col, :, :], vmin=w_min, vmax=w_max, cmap='seismic')
        ax.set_yticklabels([])
        ax.set_xticklabels([])
    plt.show()
```
Define the function <code>plot_parameters</code> to plot out the kernel parameters of each channel with Multiple outputs .
```
# Define the function for plotting the parameters
def plot_parameters(W, number_rows=1, name="", i=0):
    """Plot the kernels for input channel `i` of a conv weight tensor, arranged in `number_rows` rows."""
    kernels = W.data[:, i, :, :]
    n_filters = kernels.shape[0]
    w_min, w_max = kernels.min().item(), kernels.max().item()
    fig, axes = plt.subplots(number_rows, n_filters // number_rows)
    fig.subplots_adjust(hspace=0.4)
    # `idx` avoids reusing the parameter name `i` as the loop variable.
    for idx, ax in enumerate(axes.flat):
        if idx < n_filters:
            # Label and draw this kernel; extra axes (if any) stay empty.
            ax.set_xlabel("kernel:{0}".format(idx + 1))
            ax.imshow(kernels[idx, :], vmin=w_min, vmax=w_max, cmap='seismic')
            ax.set_xticks([])
            ax.set_yticks([])
    plt.suptitle(name, fontsize=10)
    plt.show()
```
Define the function <code>plot_activation</code> to plot out the activations of the Convolutional layers
```
# Define the function for plotting the activations
def plot_activations(A, number_rows=1, name="", i=0):
    """Plot the activation maps of the first sample of a batch, arranged in `number_rows` rows."""
    maps = A[0, :, :, :].detach().numpy()
    n_activations = maps.shape[0]
    a_min, a_max = maps.min().item(), maps.max().item()
    fig, axes = plt.subplots(number_rows, n_activations // number_rows)
    fig.subplots_adjust(hspace=0.4)
    for idx, ax in enumerate(axes.flat):
        if idx < n_activations:
            # Label and draw this activation map; extra axes (if any) stay empty.
            ax.set_xlabel("activation:{0}".format(idx + 1))
            ax.imshow(maps[idx, :], vmin=a_min, vmax=a_max, cmap='seismic')
            ax.set_xticks([])
            ax.set_yticks([])
    plt.show()
```
Define the function <code>show_data</code> to plot out data samples as images.
```
def show_data(data_sample):
    """Display one (image, label) sample as a grayscale image with the label as title."""
    image, label = data_sample[0], data_sample[1]
    plt.imshow(image.numpy().reshape(IMAGE_SIZE, IMAGE_SIZE), cmap='gray')
    plt.title('y = '+ str(label))
```
<!--Empty Space for separating topics-->
<h2 id="Makeup_Data">Get the Data</h2>
We create a transform to resize the image and convert it to a tensor.
```
IMAGE_SIZE = 16
composed = transforms.Compose([transforms.Resize((IMAGE_SIZE, IMAGE_SIZE)), transforms.ToTensor()])
```
Load the training dataset by setting the parameters <code>train </code> to <code>True</code>. We use the transform defined above.
```
train_dataset = dsets.MNIST(root='../data', train=True, download=True, transform=composed)
```
Load the testing dataset by setting the parameters train <code>False</code>.
```
# Make the validating
validation_dataset = dsets.MNIST(root='../data', train=False, download=True, transform=composed)
```
We can see the data type is long.
```
# Show the data type for each element in dataset
print(train_dataset[1][0].type())
train_dataset[1][1]
```
Each element in the rectangular tensor corresponds to a number representing a pixel intensity as demonstrated by the following image.
<img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DL0110EN/notebook_images%20/chapter%206/6.2.1imagenet.png" width="550" alt="MNIST data image">
Print out the fourth label
```
# The label for the fourth data element
train_dataset[3][1]
```
Plot the fourth sample
```
# The image for the fourth data element
show_data(train_dataset[3])
```
The fourth sample is a "1".
<!--Empty Space for separating topics-->
<h2 id="CNN">Build a Convolutional Neural Network Class</h2>
Build a Convolutional Network class with two Convolutional layers and one fully connected layer. Pre-determine the size of the final output matrix. The parameters in the constructor are the number of output channels for the first and second layer.
```
class CNN(nn.Module):
    """Two conv/pool stages followed by one fully connected classifier.

    Expects 1-channel 16x16 inputs; each conv preserves the spatial size
    (kernel 5, padding 2) and each max-pool halves it, so the flattened
    feature size is out_2 * 4 * 4.
    """

    # Contructor
    def __init__(self, out_1=16, out_2=32):
        """
        :param out_1: number of output channels of the first conv layer
        :param out_2: number of output channels of the second conv layer
        """
        super(CNN, self).__init__()
        self.cnn1 = nn.Conv2d(in_channels=1, out_channels=out_1, kernel_size=5, padding=2)
        self.maxpool1 = nn.MaxPool2d(kernel_size=2)
        self.cnn2 = nn.Conv2d(in_channels=out_1, out_channels=out_2, kernel_size=5, stride=1, padding=2)
        self.maxpool2 = nn.MaxPool2d(kernel_size=2)
        self.fc1 = nn.Linear(out_2 * 4 * 4, 10)

    # Prediction
    def forward(self, x):
        """Return class logits for a batch of images, shape (batch, 10)."""
        x = self.cnn1(x)
        x = torch.relu(x)
        x = self.maxpool1(x)
        x = self.cnn2(x)
        x = torch.relu(x)
        x = self.maxpool2(x)
        x = x.view(x.size(0), -1)  # flatten for the fully connected layer
        x = self.fc1(x)
        return x

    # Outputs in each steps
    def activations(self, x):
        """Return the intermediate activations for visualization.

        :return: (z1, a1, z2, a2, out1, flattened) where z/a are the pre/post
            ReLU conv outputs, out1 is the second pooling output, and
            flattened is out1 reshaped to (batch, out_2 * 4 * 4).
        """
        z1 = self.cnn1(x)
        a1 = torch.relu(z1)
        out = self.maxpool1(a1)
        z2 = self.cnn2(out)
        a2 = torch.relu(z2)
        out1 = self.maxpool2(a2)
        # Bug fix: flatten the *final* pooled activation (out1), matching
        # forward(); the original flattened the first pooling output instead.
        flat = out1.view(out1.size(0), -1)
        return z1, a1, z2, a2, out1, flat
```
<h2 id="Train">Define the Convolutional Neural Network Classifier, Criterion function, Optimizer and Train the Model</h2>
There are 16 output channels for the first layer, and 32 output channels for the second layer
```
# Create the model object using CNN class
model = CNN(out_1=16, out_2=32)
```
Plot the model parameters for the kernels before training the kernels. The kernels are initialized randomly.
```
# Plot the parameters
plot_parameters(model.state_dict()['cnn1.weight'], number_rows=4, name="1st layer kernels before training ")
plot_parameters(model.state_dict()['cnn2.weight'], number_rows=4, name='2nd layer kernels before training' )
```
Define the loss function, the optimizer and the dataset loader
```
criterion = nn.CrossEntropyLoss()
learning_rate = 0.1
optimizer = torch.optim.SGD(model.parameters(), lr = learning_rate)
train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=100)
validation_loader = torch.utils.data.DataLoader(dataset=validation_dataset, batch_size=5000)
```
Train the model and determine the validation accuracy (technically, test accuracy) **(This may take a long time)**
```
# Train the model
n_epochs=3
cost_list=[]
accuracy_list=[]
N_test=len(validation_dataset)
COST=0

def train_model(n_epochs):
    """Train `model` on train_loader; record per-epoch cost and validation accuracy."""
    for epoch in range(n_epochs):
        # Accumulated training loss for this epoch (local; shadows the global COST).
        COST=0
        for x, y in train_loader:
            optimizer.zero_grad()
            z = model(x)
            loss = criterion(z, y)
            loss.backward()
            optimizer.step()
            # NOTE(review): `.data` is legacy; `loss.item()` would append plain floats.
            COST+=loss.data
        cost_list.append(COST)
        correct=0
        #perform a prediction on the validation data
        for x_test, y_test in validation_loader:
            z = model(x_test)
            _, yhat = torch.max(z.data, 1)
            correct += (yhat == y_test).sum().item()
        accuracy = correct / N_test
        accuracy_list.append(accuracy)
        print('epoch:'+str(epoch)+'/'+str(n_epochs)+' cost: '+str(COST)+' acc: '+str(accuracy))

train_model(n_epochs)
```
<!--Empty Space for separating topics-->
<h2 id="Result">Analyze Results</h2>
Plot the loss and accuracy on the validation data:
```
# Plot the loss and accuracy
fig, ax1 = plt.subplots()
color = 'tab:red'
ax1.plot(cost_list, color=color)
ax1.set_xlabel('epoch', color=color)
ax1.set_ylabel('Cost', color=color)
ax1.tick_params(axis='y', color=color)
ax2 = ax1.twinx()
color = 'tab:blue'
ax2.set_ylabel('accuracy', color=color)
ax2.set_xlabel('epoch', color=color)
ax2.plot( accuracy_list, color=color)
ax2.tick_params(axis='y', color=color)
fig.tight_layout()
```
View the results of the parameters for the Convolutional layers
```
# Plot the channels
plot_channels(model.state_dict()['cnn1.weight'])
plot_channels(model.state_dict()['cnn2.weight'])
train_dataset[1]
```
Consider the following sample
```
# Show the second image
show_data(train_dataset[1])
```
Determine the activations
```
# Use the CNN activations class to see the steps
out = model.activations(train_dataset[1][0].view(1, 1, IMAGE_SIZE, IMAGE_SIZE))
```
Plot out the first set of activations
```
# Plot the outputs after the first CNN
plot_activations(out[0], number_rows=4, name="Output after the 1st CNN")
```
The image below is the result after applying the relu activation function
```
# Plot the outputs after the first Relu
plot_activations(out[1], number_rows=4, name="Output after the 1st Relu")
```
The image below is the result of the activation map after the second output layer.
```
# Plot the outputs after the second CNN
plot_activations(out[2], number_rows=32 // 4, name="Output after the 2nd CNN")
```
The image below is the result of the activation map after applying the second relu
```
# Plot the outputs after the second Relu
plot_activations(out[3], number_rows=4, name="Output after the 2nd Relu")
```
We can see the result for the third sample
```
# Show the third image
show_data(train_dataset[2])
# Use the CNN activations class to see the steps
out = model.activations(train_dataset[2][0].view(1, 1, IMAGE_SIZE, IMAGE_SIZE))
# Plot the outputs after the first CNN
plot_activations(out[0], number_rows=4, name="Output after the 1st CNN")
# Plot the outputs after the first Relu
plot_activations(out[1], number_rows=4, name="Output after the 1st Relu")
# Plot the outputs after the second CNN
plot_activations(out[2], number_rows=32 // 4, name="Output after the 2nd CNN")
# Plot the outputs after the second Relu
plot_activations(out[3], number_rows=4, name="Output after the 2nd Relu")
```
Plot the first five mis-classified samples:
```
# Plot the mis-classified samples
count = 0
# Batch size 1 so each iteration yields a single sample; stop after five misses.
for x, y in torch.utils.data.DataLoader(dataset=validation_dataset, batch_size=1):
    z = model(x)
    _, yhat = torch.max(z, 1)
    if yhat != y:
        show_data((x, y))
        plt.show()
        print("yhat: ",yhat)
        count += 1
    if count >= 5:
        break
```
<!--Empty Space for separating topics-->
<a href="http://cocl.us/pytorch_link_bottom">
<img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DL0110EN/notebook_images%20/notebook_bottom%20.png" width="750" alt="PyTorch Bottom" />
</a>
<h2>About the Authors:</h2>
<a href="https://www.linkedin.com/in/joseph-s-50398b136/">Joseph Santarcangelo</a> has a PhD in Electrical Engineering, his research focused on using machine learning, signal processing, and computer vision to determine how videos impact human cognition. Joseph has been working for IBM since he completed his PhD.
Other contributors: <a href="https://www.linkedin.com/in/michelleccarey/">Michelle Carey</a>, <a href="www.linkedin.com/in/jiahui-mavis-zhou-a4537814a">Mavis Zhou</a>
Thanks to Magnus <a href="http://www.hvass-labs.org/">Erik Hvass Pedersen</a> whose tutorials helped me understand convolutional Neural Network
<hr>
Copyright © 2018 <a href="cognitiveclass.ai?utm_source=bducopyrightlink&utm_medium=dswb&utm_campaign=bdu">cognitiveclass.ai</a>. This notebook and its source code are released under the terms of the <a href="https://bigdatauniversity.com/mit-license/">MIT License</a>.
| github_jupyter |
# Classifying Business Documents using Deep Learning
## IBM Coursera Advanced Data Science Capstone - Results Demo
## Sumudu Tennakoon
```
import pandas as pd
import numpy as np
import sys
import os
import re
import matplotlib.pyplot as plt
from datetime import date
from sklearn.model_selection import train_test_split
import tensorflow as tf
import tensorflow.keras as keras
print('TensorFlow Version: ', tf.__version__)
from DocumentClassifierV1 import * # Custom library created for the Capstone project.
```
## 1. Read Pre-saved Input dataset (Test Sample-not used in modeling)
```
DocumentFilesData = pd.read_pickle('Data/DocumentClassification_IBM_ADV_DS_Capstone_TestSample_128x128_20190316.pkl')
```
## 2. Organize Class Labels
```
# Map each class label string to an integer id (order of first appearance).
ClassLabels = list(DocumentFilesData.FileClass.unique())
ClassNumbers = list(range(len(ClassLabels)))
ClassLabelMap = list((zip(ClassLabels, ClassNumbers)))
print(ClassLabelMap)
# Write the numeric class id back onto every row.
for clm in ClassLabelMap:
    DocumentFilesData.loc[DocumentFilesData['FileClass']==clm[0] , 'ClassNumber'] = clm[1]
```
## 3. Separate Features and Response
```
NClasses = len(ClassLabels)
imgRows = 128
imgCols = 128
X = np.asarray(list(DocumentFilesData['DocumentMatrix'].values), dtype ='int')
y = DocumentFilesData['ClassNumber'].values
#Shape of datasets
print(X.shape)
print(y.shape)
```
## 4. Plot sample image
```
#Plot sample image with scale
plt.imshow(X[10000])
plt.colorbar()
```
## 5. Send data into the Model
```
# Keras expects an explicit channel axis; its position depends on the backend.
if keras.backend.image_data_format() == 'channels_first':
    X = X.reshape(X.shape[0], 1, imgRows, imgCols)
    input_shape = (1, imgRows, imgCols)
else:
    X = X.reshape(X.shape[0], imgRows, imgCols, 1)
    input_shape = (imgRows, imgCols, 1)

X = X.astype('float32') #convert integer image tensor to float
X = X/255 # Normalize grayscale to a number between 0 and 1
print(X.shape[0], 'samples')

# Record actuals
y_act = y
# One-hot encode the labels for the softmax output.
y = keras.utils.to_categorical(y, NClasses)

ClassificationModel = TFModel(ModelFile='Models/DocumentClassification_IBM_ADV_DS_Capstone_CNN_V03_128x128_20190316.pkl', Model=keras.models.load_model('Models/DocumentClassification_IBM_ADV_DS_Capstone_CNN_V03_128x128_20190316.h5'))

Output = ClassificationModel.Classify(InputFiles=X, size=(imgRows,imgCols), ActualClasses=list(y_act),
                                      ReturnImageMatrix=True, ReturnJSON=False, ReturnFullPath=True, TransformedData=True)
```
## 6. Process output
```
Output['actual'] = Output['actual'].astype('int')
for clm in ClassLabelMap:
Output.loc[Output['actual']==clm[1] , 'actual'] = clm[0]
Output.head()
```
## 7. Performance Evaluation
### Confusion Matrix
```
cf = pd.crosstab(Output.actual, Output.prediction, margins=True)
cf
import seaborn as sns
sns.heatmap(pd.crosstab(Output.actual, Output.prediction, margins=False), annot=True)
```
### Accuracy
```
CorrectPredictions = np.sum(np.diagonal(pd.crosstab(Output.actual, Output.prediction, margins=False).values))
TotalDocuments = np.sum(pd.crosstab(Output.actual, Output.prediction, margins=False).values)
Accuracy = CorrectPredictions/TotalDocuments
print('CorrectPredictions= {}'.format(CorrectPredictions))
print('TotalDocuments= {}'.format(TotalDocuments))
print('Accuracy= {}'.format(Accuracy))
```
### Model Robustness
```
# Bucket predictions by their max probability score (0.1-wide bins).
bins=np.array([0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0])
labels=np.array([ 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0])
Output['MaxProbabilityScore']=pd.cut(Output.probability, bins=bins) #, labels=labels)
Output['PredictedCorrect'] = np.where(Output['actual']==Output['prediction'], 1, 0)
# Per bucket: mean confidence, number predicted correctly, and bucket size.
Robustness = Output.groupby(by='MaxProbabilityScore').agg({'probability':'mean', 'PredictedCorrect':'sum', 'filename':'count'})
Robustness.columns = ['MeanProbability', 'PredictedCorrect', 'BucketCount']
# Precision within the bucket and the fraction of all samples it holds.
Robustness['BucketPrecision']=Robustness['PredictedCorrect']/Robustness['BucketCount']
Robustness['BucketFraction']=Robustness['BucketCount']/(Robustness['BucketCount'].sum())
Robustness
```
## 8. Run the model on sample Image file
```
InputFiles = ['Data/test1.png']
Output_single = ClassificationModel.Classify(InputFiles=InputFiles, size=(imgRows,imgCols), ActualClasses=None,
ReturnImageMatrix=True, ReturnJSON=True, ReturnFullPath=False, TransformedData=False)
Output_single
OutputDashboard = Dashboard()
fig = OutputDashboard.ImageOutput(Output_single, NSamples=1, Format='JSON', ClassLabels=ClassificationModel.ClassLabels)
plt.show()
```
<hr>
<p> This notebook and related materials were developed by <b> Sumudu Tennakoon</b> for the capstone project in partial fulfillment of the requirements for the <b> Advanced Data Science with IBM Specialization</b>. <br>
March 2019. <br>
Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0)</p>
| github_jupyter |
# Louvain Community Detection
In this notebook, we will use cuGraph to identify the cluster in a test graph using the Louvain algorithm
Notebook Credits
* Original Authors: Bradley Rees and James Wyles
* Created: 08/01/2019
* Last Edit: 03/03/2020
RAPIDS Versions: 0.13
Test Hardware
* GV100 32G, CUDA 10.2
## Introduction
The Louvain method of community detection is a greedy hierarchical clustering algorithm which seeks to optimize Modularity as it progresses. Louvain starts with each vertex in its own cluster and iteratively merges groups using graph contraction.
For a detailed description of the algorithm see: https://en.wikipedia.org/wiki/Louvain_Modularity
It takes as input a cugraph.Graph object and returns as output a
cudf.Dataframe object with the id and assigned partition for each
vertex as well as the final modularity score
To compute the Louvain cluster in cuGraph use: <br>
__df, mod = cugraph.louvain(G)__
Parameters
----------
input_graph : cugraph.Graph
cuGraph graph descriptor, should contain the connectivity information
as an edge list. The adjacency list will be computed if not already present.
The graph should be undirected where an undirected edge is represented by a
directed edge in both direction.
max_iter : integer
This controls the maximum number of levels/iterations of the Louvain
algorithm. When specified the algorithm will terminate after no more
than the specified number of iterations. No error occurs when the
algorithm terminates early in this manner.
Returns
-------
parts : cudf.DataFrame
A GPU data frame of size V containing two columns the vertex id and the
partition id it is assigned to.
modularity_score : float
a floating point number containing the modularity score of the
partitioning.
All vertices with the same partition ID are in the same cluster
#### Note
Parallel Louvain produces different modularity scores than serial Louvain. A complete technical write-up is being produced and will be linked here when available.
### References
* Blondel, V. D., Guillaume, J.-L., Lambiotte, R., and Lefebvre, E. Fast unfolding of communities in large networks. Journal of statistical mechanics: theory and experiment 2008, 10 (2008), P10008.
## cuGraph Notice
The current version of cuGraph has some limitations:
* Vertex IDs need to be 32-bit integers.
* Vertex IDs are expected to be contiguous integers starting from 0.
cuGraph provides the renumber function to mitigate this problem. Input vertex IDs for the renumber function can be either 32-bit or 64-bit integers, can be non-contiguous, and can start from an arbitrary number. The renumber function maps the provided input vertex IDs to 32-bit contiguous integers starting from 0. cuGraph still requires the renumbered vertex IDs to be representable in 32-bit integers. These limitations are being addressed and will be fixed soon.
### Test Data
We will be using the Zachary Karate club dataset
*W. W. Zachary, An information flow model for conflict and fission in small groups, Journal of
Anthropological Research 33, 452-473 (1977).*

### Prep
```
# Import needed libraries
import cugraph
import cudf
```
## Read data using cuDF
```
# Test file
datafile='../data//karate-data.csv'

# read the data using cuDF
gdf = cudf.read_csv(datafile, delimiter='\t', names=['src', 'dst'], dtype=['int32', 'int32'] )

# The algorithm also requires that there are vertex weights.  Just use 1.0
gdf["data"] = 1.0

# just for fun, let's look at the data types in the dataframe
gdf.dtypes

# create a Graph - since the data does not start at '0', use the auto-renumbering feature
G = cugraph.Graph()
G.from_cudf_edgelist(gdf, source='src', destination='dst', edge_attr='data', renumber=True)

# Call Louvain on the graph
df, mod = cugraph.louvain(G)

# Print the modularity score
print('Modularity was {}'.format(mod))
print()
df.dtypes

# How many partitions were found
part_ids = df["partition"].unique()
print(str(len(part_ids)) + " partition detected")

# print the clusters.
# NOTE(review): this nested loop is O(partitions * vertices); a groupby on
# 'partition' would collect the clusters in a single pass.
for p in range(len(part_ids)):
    part = []
    for i in range(len(df)):
        if (df['partition'][i] == p):
            part.append(df['vertex'][i] )
    print("Partition " + str(p) + ":")
    print(part)
```
___
Copyright (c) 2019, NVIDIA CORPORATION.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
___
| github_jupyter |
```
# default_exp callback.noisy_student
```
# Noisy student
> Callback to apply noisy student self-training (a semi-supervised learning approach) based on: Xie, Q., Luong, M. T., Hovy, E., & Le, Q. V. (2020). Self-training with noisy student improves imagenet classification. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (pp. 10687-10698).
```
#export
from tsai.imports import *
from tsai.utils import *
from tsai.data.preprocessing import *
from tsai.data.transforms import *
from tsai.models.layers import *
from fastai.callback.all import *
#export
# Use the file-system strategy for sharing tensors between DataLoader worker
# processes; avoids "too many open files" errors with the default
# file-descriptor strategy on some systems.
import torch.multiprocessing
torch.multiprocessing.set_sharing_strategy('file_system')
#export
# This is an unofficial implementation of noisy student based on:
# Xie, Q., Luong, M. T., Hovy, E., & Le, Q. V. (2020). Self-training with noisy student improves imagenet classification.
# In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (pp. 10687-10698).
# Official tensorflow implementation available in https://github.com/google-research/noisystudent
class NoisyStudent(Callback):
    """A callback to implement the Noisy Student approach. In the original paper this was used in combination with noise:
        - stochastic depth: .8
        - RandAugment: N=2, M=27
        - dropout: .5
    Steps:
        1. Build the dl you will use as a teacher
        2. Create dl2 with the pseudolabels (either soft or hard preds)
        3. Pass any required batch_tfms to the callback
    """
    def __init__(self, dl2:DataLoader, bs:Optional[int]=None, l2pl_ratio:int=1, batch_tfms:Optional[list]=None, do_setup:bool=True,
                 pseudolabel_sample_weight:float=1., verbose=False):
        r'''
        Args:
            dl2: dataloader with the pseudolabels
            bs: batch size of the new, combined dataloader. If None, it will pick the bs from the labeled dataloader.
            l2pl_ratio: ratio between labels and pseudolabels in the combined batch
            batch_tfms: transforms applied to the combined batch. If None, it will pick the batch_tfms from the labeled dataloader (if any)
            do_setup: perform a transform setup on the labeled dataset.
            pseudolabel_sample_weight: weight of each pseudolabel sample relative to the labeled one of the loss.
        '''
        self.dl2, self.bs, self.l2pl_ratio, self.batch_tfms, self.do_setup, self.verbose = dl2, bs, l2pl_ratio, batch_tfms, do_setup, verbose
        self.pl_sw = pseudolabel_sample_weight
    def before_fit(self):
        # Fall back to the labeled dataloader's batch tfms when none were given.
        if self.batch_tfms is None: self.batch_tfms = self.dls.train.after_batch
        self.old_bt = self.dls.train.after_batch # Remove and store dl.train.batch_tfms
        self.old_bs = self.dls.train.bs
        # Disable the train dataloader's own batch tfms: they will be applied
        # in before_batch to the combined labeled+pseudolabeled batch instead.
        self.dls.train.after_batch = noop
        if self.do_setup and self.batch_tfms:
            for bt in self.batch_tfms:
                bt.setup(self.dls.train)
        if self.bs is None: self.bs = self.dls.train.bs
        # Split the combined batch between the two loaders according to
        # l2pl_ratio (labeled samples get l2pl_ratio/(1+l2pl_ratio) of bs).
        self.dl2.bs = min(len(self.dl2.dataset), int(self.bs / (1 + self.l2pl_ratio)))
        self.dls.train.bs = self.bs - self.dl2.bs
        pv(f'labels / pseudolabels per training batch : {self.dls.train.bs} / {self.dl2.bs}', self.verbose)
        rel_weight = (self.dls.train.bs/self.dl2.bs) * (len(self.dl2.dataset)/len(self.dls.train.dataset))
        pv(f'relative labeled/ pseudolabel sample weight in dataset: {rel_weight:.1f}', self.verbose)
        self.dl2iter = iter(self.dl2)
        # Swap in the combined loss; the original is restored in after_fit.
        self.old_loss_func = self.learn.loss_func
        self.learn.loss_func = self.loss
    def before_batch(self):
        if self.training:
            X, y = self.x, self.y
            # Cycle through the pseudolabel dataloader indefinitely.
            try: X2, y2 = next(self.dl2iter)
            except StopIteration:
                self.dl2iter = iter(self.dl2)
                X2, y2 = next(self.dl2iter)
            # If labels are hard (1d) but pseudolabels are soft (2d), one-hot the labels so they concatenate.
            # NOTE(review): `device` is not defined in this method - presumably a global from tsai.imports; confirm.
            if y.ndim == 1 and y2.ndim == 2: y = torch.eye(self.learn.dls.c)[y].to(device) # ensure both
            X_comb, y_comb = concat(X, X2), concat(y, y2)
            if self.batch_tfms is not None:
                X_comb = compose_tfms(X_comb, self.batch_tfms, split_idx=0)
                y_comb = compose_tfms(y_comb, self.batch_tfms, split_idx=0)
            self.learn.xb = (X_comb,)
            self.learn.yb = (y_comb,)
            pv(f'\nX: {X.shape} X2: {X2.shape} X_comb: {X_comb.shape}', self.verbose)
            pv(f'y: {y.shape} y2: {y2.shape} y_comb: {y_comb.shape}', self.verbose)
    def loss(self, output, target):
        # Collapse soft (2d) targets to hard class indices for the wrapped loss.
        if target.ndim == 2: _, target = target.max(dim=1)
        if self.training and self.pl_sw != 1:
            # Weighted sum: the first dls.train.bs samples of the batch are
            # labeled, the remainder are pseudolabeled.
            loss = (1 - self.pl_sw) * self.old_loss_func(output[:self.dls.train.bs], target[:self.dls.train.bs])
            loss += self.pl_sw * self.old_loss_func(output[self.dls.train.bs:], target[self.dls.train.bs:])
            return loss
        else:
            return self.old_loss_func(output, target)
    def after_fit(self):
        # Restore the dataloader and learner state modified in before_fit.
        self.dls.train.after_batch = self.old_bt
        self.learn.loss_func = self.old_loss_func
        self.dls.train.bs = self.old_bs
        self.dls.bs = self.old_bs
from tsai.data.all import *
from tsai.models.all import *
# Load the NATOPS multivariate time-series dataset (UCR archive)
dsid = 'NATOPS'
X, y, splits = get_UCR_data(dsid, return_split=False)
tfms = [None, Categorize()]
dsets = TSDatasets(X, y, tfms=tfms, splits=splits)
dls = TSDataLoaders.from_dsets(dsets.train, dsets.valid, batch_tfms=[TSStandardize(), TSRandomSize(.5)])
# Demo 1: pseudolabels as integer class indices
# NOTE(review): with soft_preds=True this takes the ToNumpyCategory (hard,
# integer) branch - the condition looks inverted relative to its name; confirm.
pseudolabeled_data = X
soft_preds = True
pseudolabels = ToNumpyCategory()(y) if soft_preds else OneHot()(y)
dsets2 = TSDatasets(pseudolabeled_data, pseudolabels)
dl2 = TSDataLoader(dsets2)
model = create_model(InceptionTime, dls=dls)
noisy_student_cb = NoisyStudent(dl2, bs=256, l2pl_ratio=2, verbose=True)
learn = Learner(dls, model, cbs=noisy_student_cb, metrics=accuracy)
learn.fit_one_cycle(1)
# Demo 2: pseudolabels as one-hot vectors
pseudolabeled_data = X
soft_preds = False
pseudolabels = ToNumpyCategory()(y) if soft_preds else OneHot()(y)
dsets2 = TSDatasets(pseudolabeled_data, pseudolabels)
dl2 = TSDataLoader(dsets2)
model = create_model(InceptionTime, dls=dls)
noisy_student_cb = NoisyStudent(dl2, bs=256, l2pl_ratio=2, verbose=True)
learn = Learner(dls, model, cbs=noisy_student_cb, metrics=accuracy)
learn.fit_one_cycle(1)
#hide
# nbdev: export the cells marked #export and beep when done
out = create_scripts(); beep(out)
```
| github_jupyter |
# Numpy
The basis of most scientific programming in Python is the *numerical Python* library, `numpy`. NumPy gives us many tools - including a fast and efficient data type, the `numpy Array` - for working with numerical data.
## Numpy Array
NumPy is built around the `array`. This is a data structure defined in NumPy which is *ordered* and *mutable*, much like the `list`. Although very similar to the list, the numpy array only allows *numerical* data as elements, like the `int` and `float`. Let's explore!
```
# First we need to import the numpy package. It is commonly shortened to "np"
import numpy as np
```
The easiest way to define numpy arrays is to define a list or tuple, and convert it to an array with the `numpy.array()` function.
```
# Build a plain Python list, then convert it to a NumPy array.
a = list(range(5))
b = np.asarray(a)
# The list and the array are distinct types.
print(type(a))
print(type(b))
```
We can index and slice numpy arrays much like lists:
```
print(b[0], b[1:3], b[-1])
```
Try running the following to get help on the NumPy array
```
help(np.ndarray)
```
Woah. That's a really long help page. Often when you are working with a new package, `help()` won't be the most convenient or easy to read way to get help. Instead, we can search for online *documentation* for the package we are using.
If you Google **numpy documentation**, you will likely see links to info about *numpy* and another package we will explore later, *scipy*. If you follow the links to **NumPy**, you should find a [NumPy user Guide](https://docs.scipy.org/doc/numpy-1.15.0/user/index.html) and from there, several pages of tutorials and documentation about the package. The [Quickstart tutorial](https://docs.scipy.org/doc/numpy-1.15.0/user/quickstart.html), will give a much more legible intro to the package.
## Numpy Attributes
NumPy arrays have some built in **attributes**, i.e. info stored in an object, accessible with `object.attribute` (note: no parentheses after).
```
# Let's print some attributes of our b array
print("Num dimensions:", b.ndim,
"\nShape:", b.shape,
"\nSize:", b.size)
```
A common way to define NumPy arrays is with the `arange` function.
```
# arange(10) builds the integer array [0, 1, ..., 9]
np.arange(10)
help(np.arange)
```
The numpy `arange` function allows us to quickly build integer arrays. It takes `start`, `stop`, and `step` as arguments.
```
# arange(start, stop[, step]) - the stop value is exclusive, like range()
x = np.arange(1, 10)
y = np.arange(2, 20, 2)
for seq in (x, y):
    print(seq)
```
We can apply any mathematical operation to a NumPy array, and it will apply that operation to every element in the array.
```
# Mathematical operations apply elementwise across the whole array
x = np.arange(-3, 4)
y = np.square(x)  # same as x**2
print(y)
```
Another way to make NumPy arrays is with the `linspace()` function. This allows us to choose the bounds of an interval and the number of points we want to divide it into. Numpy also has useful math constants like `pi` and `e` and math functions like `sin`, `cos`, `tan`.
```
import matplotlib.pyplot as plt
# linspace(start, stop, num) splits [start, stop] into `num` evenly spaced points
x = np.linspace(-2*np.pi, 2*np.pi, 100)
y = np.sin(x)
plt.plot(x, y)
# Linspace can be useful for adding more resolution to continuous functions
# arange with the default step of 1 yields only ~6 points over [-pi, pi)...
xarange = np.arange(-np.pi, np.pi)
yarange = np.cos(xarange)
# ...while linspace with 1000 points draws a smooth curve
xlinspace = np.linspace(-np.pi, np.pi, 1000)
ylinspace = np.cos(xlinspace)
# Side-by-side comparison: coarse (left) vs. smooth (right)
plt.subplot(1, 2, 1)
plt.plot(xarange, yarange)
plt.subplot(1, 2, 2)
plt.plot(xlinspace, ylinspace)
```
If we want to plot a bell curve we can use the `np.random` module to randomly sample a normal distribution.
```
norm = np.random.standard_normal(100000) # Draw 100,000 random points from the standard normal distribution
hist, bins = np.histogram(norm, bins=10, density=True) # Make histogram of our samples
# With only 10 bins the bell curve is coarse...
plt.plot(bins[1:], hist)
# ...100 bins gives a much smoother curve
hist, bins = np.histogram(norm, bins=100, density=True)
plt.plot(bins[1:], hist)
```
This is barely scratching the surface of the `numpy` package, but should be enough to get you started. The [Quickstart tutorial](https://docs.scipy.org/doc/numpy-1.15.0/user/quickstart.html) is a great resource for more of the basics and some more advanced usage. Finally, don't forget to use the most powerful tool at our disposal: *Google*. Most programmers only have the most common syntax memorized, everything else can be found with Google!
Next we will further explore the `matplotlib` package that we briefly introduced above!
| github_jupyter |
# Adquisición de datos `DIRECTO`
- [X] descarga directa
- petición GET a través de API de terceros (ej. AEMET, Ayto. Barcelona....)
- web crawling (que es una práctica ilegal... ¡pero muy de moda entre los hackers!)
***
## Primer paso
Es trabajar con los datos en formato `JSON`
```
# First, let's understand how JSON works by way of Python dictionaries (dict).
# Build an example dictionary and show the type and contents of the variable.
diccionario_ejemplo = {"nombre": "Yann", "apellidos": {"apellido1": "LeCun", "apellido2": "-"}, "edad": 56}
print(type(diccionario_ejemplo))
print(diccionario_ejemplo)
# Build an example list and show the type and contents of the variable.
lista_ejemplo = [1, 2, 3]
print(type(lista_ejemplo))
print(lista_ejemplo)
# A list containing the dictionary: a nested structure
nested_dict = [diccionario_ejemplo]
nested_dict
print(type(nested_dict))
nested_dict
type(nested_dict[0])
# JSON handling uses the standard-library json module
import json
# Serialize the dictionary to its JSON representation (always a str)
json_dict = json.dumps(diccionario_ejemplo)
# Show its structure
print(type(json_dict))
print(json_dict)
# Serialize the list to its JSON representation
json_list = json.dumps(lista_ejemplo)
print(type(json_list))
print(json_list)
```
Este proceso a través de la función `json.dumps` es **serializar** el objeto; el resultado siempre será en formato 'string'.
***
El proceso inverso, conocido como **deserializar**, crea objetos Python (`list` y `dict`) a través de la función `json.loads`.
```
# As in the previous case, convert the JSON strings back into dict and list
json2dict = json.loads(json_dict)
print(json2dict)
print(type(json2dict))
# json.loads only accepts str, bytes or bytearray - passing a list/dict raises
# a TypeError. NOTE: this line fails on purpose to demonstrate that behaviour.
json2dict_2 = json.loads(nested_dict)
# Deserialize the JSON string (originally a list) back into a list
json2list = json.loads(json_list)
print(json2list)
print(type(json2list))
```
***
Para mejorar la legibilidad de los datos que obtendremos de las API, definiremos una función que mostrará cadenas JSON por pantalla formateadas para mejorar la lectura. La función aceptará tanto cadenas de caracteres con contenido JSON como objetos Python, y mostrará el contenido por pantalla.
Además, la función recibirá un parámetro opcional que nos permitirá indicar el número máximo de líneas que hay que mostrar. Así, podremos usar la función para visualizar las primeras líneas de un JSON largo, sin tener que mostrar el JSON completo por pantalla.
```
def json_print(json_data, limit=None):
    """Pretty-print JSON data for easier reading.

    Accepts either a JSON string or a Python object (dict/list). The data is
    printed with a 3-space indent, keeping the original key order
    (sort_keys=False). If `limit` is given, only the first `limit` lines are
    shown, followed by a '[....]' truncation marker.

    Bug fix: the marker is now printed only when lines were actually cut off
    (the original printed '[....]' whenever `limit` was given, even for short
    documents).

    Args:
        json_data: JSON string, or a JSON-serializable Python object.
        limit: optional maximum number of lines to print.
    """
    if isinstance(json_data, str):
        json_data = json.loads(json_data)
    nice = json.dumps(json_data, sort_keys=False, indent=3, separators=(',', ':'))
    lines = nice.split("\n")
    print("\n".join(lines[0:limit]))
    if limit is not None and len(lines) > limit:
        print("[....]")
# Apply the function to a sample tweet
tweet = {
    "created_at": "Thu Apr 06 15:24:15 +0000 2017",
    "id_str": "850006245121695744",
    "text": "1\/ Today we\u2019re sharing our vision for the future of the Twitter API platform!\nhttps:\/\/t.co\/XweGngmxlP",
    "user": {
        "id": 2244994945,
        "name": "Twitter Dev",
        "screen_name": "TwitterDev",
        "location": "Internet",
        "url": "https:\/\/dev.twitter.com\/",
        "description": "Your official source for Twitter Platform news, updates & events. Need technical help? Visit https:\/\/twittercommunity.com\/ \u2328\ufe0f #TapIntoTwitter"
    },
    "place": {
    },
    "entities": {
        "hashtags": [
        ],
        "urls": [
            {
                "url": "https:\/\/t.co\/XweGngmxlP",
                "unwound": {
                    "url": "https:\/\/cards.twitter.com\/cards\/18ce53wgo4h\/3xo1c",
                    "title": "Building the Future of the Twitter API Platform"
                }
            }
        ],
        "user_mentions": [
        ]
    }
}
tweet
type(tweet)
# Pretty-print the tweet as formatted JSON
json_print(tweet)
print(json_dict)
print(type(json_dict))
print(diccionario_ejemplo)
print(type(diccionario_ejemplo))
json_print(diccionario_ejemplo)
json_print(lista_ejemplo)
diccionario_ejemplo
# json_print returns None, so the type of its result is <class 'NoneType'>
print(type(json_print(diccionario_ejemplo, 3)))
```
| github_jupyter |
# Character-Level LSTM in PyTorch
In this notebook, I'll construct a character-level LSTM with PyTorch. The network will train character by character on some text, then generate new text character by character. As an example, I will train on Anna Karenina. **This model will be able to generate new text based on the text from the book!**
This network is based off of Andrej Karpathy's [post on RNNs](http://karpathy.github.io/2015/05/21/rnn-effectiveness/) and [implementation in Torch](https://github.com/karpathy/char-rnn). Below is the general architecture of the character-wise RNN.
<img src="assets/charseq.jpeg" width="500">
First let's load in our required resources for data loading and model creation.
```
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
```
## Load in Data
Then, we'll load the Anna Karenina text file and convert it into integers for our network to use.
```
# open text file and read in data as `text`
with open('data/anna.txt', 'r') as f:
text = f.read()
```
Let's check out the first 100 characters, make sure everything is peachy. According to the [American Book Review](http://americanbookreview.org/100bestlines.asp), this is the 6th best first line of a book ever.
```
text[:100]
```
### Tokenization
In the cells, below, I'm creating a couple **dictionaries** to convert the characters to and from integers. Encoding the characters as integers makes it easier to use as input in the network.
```
# encode the text and map each character to an integer and vice versa
# we create two dictionaries:
# 1. int2char, which maps integers to characters
# 2. char2int, which maps characters to unique integers
# NOTE: set() ordering varies between runs, so this mapping is rebuilt per
# session (it is saved alongside the model weights in the checkpoint below).
chars = tuple(set(text))
int2char = dict(enumerate(chars))
char2int = {ch: ii for ii, ch in int2char.items()}
# encode the text
encoded = np.array([char2int[ch] for ch in text])
```
And we can see those same characters from above, encoded as integers.
```
encoded[:100]
```
## Pre-processing the data
As you can see in our char-RNN image above, our LSTM expects an input that is **one-hot encoded** meaning that each character is converted into an integer (via our created dictionary) and *then* converted into a column vector where only it's corresponding integer index will have the value of 1 and the rest of the vector will be filled with 0's. Since we're one-hot encoding the data, let's make a function to do that!
```
def one_hot_encode(arr, n_labels):
    """Return a one-hot float32 encoding of an integer array.

    The result has shape arr.shape + (n_labels,): every integer is replaced
    by a vector of zeros with a single 1. at its own index.
    """
    flat = arr.flatten()
    # Build an (N, n_labels) zero matrix and set one entry per row
    encoded = np.zeros((flat.size, n_labels), dtype=np.float32)
    encoded[np.arange(flat.size), flat] = 1.
    # Restore the original dimensions, with the label axis appended
    return encoded.reshape((*arr.shape, n_labels))
# check that the function works as expected
test_seq = np.array([[3, 5, 1]])
one_hot = one_hot_encode(test_seq, 8)
# expect shape (1, 3, 8) with ones at columns 3, 5 and 1
print(one_hot)
```
## Making training mini-batches
To train on this data, we also want to create mini-batches for training. Remember that we want our batches to be multiple sequences of some desired number of sequence steps. Considering a simple example, our batches would look like this:
<img src="assets/sequence_batching@1x.png" width=500px>
<br>
In this example, we'll take the encoded characters (passed in as the `arr` parameter) and split them into multiple sequences, given by `batch_size`. Each of our sequences will be `seq_length` long.
### Creating Batches
**1. The first thing we need to do is discard some of the text so we only have completely full mini-batches. **
Each batch contains $N \times M$ characters, where $N$ is the batch size (the number of sequences in a batch) and $M$ is the seq_length or number of time steps in a sequence. Then, to get the total number of batches, $K$, that we can make from the array `arr`, you divide the length of `arr` by the number of characters per batch. Once you know the number of batches, you can get the total number of characters to keep from `arr`, $N * M * K$.
**2. After that, we need to split `arr` into $N$ batches. **
You can do this using `arr.reshape(size)` where `size` is a tuple containing the dimensions sizes of the reshaped array. We know we want $N$ sequences in a batch, so let's make that the size of the first dimension. For the second dimension, you can use `-1` as a placeholder in the size, it'll fill up the array with the appropriate data for you. After this, you should have an array that is $N \times (M * K)$.
**3. Now that we have this array, we can iterate through it to get our mini-batches. **
The idea is each batch is a $N \times M$ window on the $N \times (M * K)$ array. For each subsequent batch, the window moves over by `seq_length`. We also want to create both the input and target arrays. Remember that the targets are just the inputs shifted over by one character. The way I like to do this window is use `range` to take steps of size `n_steps` from $0$ to `arr.shape[1]`, the total number of tokens in each sequence. That way, the integers you get from `range` always point to the start of a batch, and each window is `seq_length` wide.
> **TODO:** Write the code for creating batches in the function below. The exercises in this notebook _will not be easy_. I've provided a notebook with solutions alongside this notebook. If you get stuck, checkout the solutions. The most important thing is that you don't copy and paste the code into here, **type out the solution code yourself.**
```
def get_batches(arr, batch_size, seq_length):
    '''Create a generator that returns batches of size
       batch_size x seq_length from arr.

       Arguments
       ---------
       arr: Array you want to make batches from
       batch_size: Batch size, the number of sequences per batch
       seq_length: Number of encoded chars in a sequence
    '''
    chars_per_batch = batch_size * seq_length
    # Drop the tail of the array so every batch is completely full
    n_batches = len(arr) // chars_per_batch
    arr = arr[:n_batches * chars_per_batch]
    # One row per sequence; each row holds n_batches * seq_length characters
    arr = arr.reshape((batch_size, -1))
    # Slide a seq_length-wide window across the columns
    for start in range(0, arr.shape[1], seq_length):
        x = arr[:, start:start + seq_length]
        # Targets are the inputs shifted left by one character
        y = np.zeros_like(x)
        y[:, :-1] = x[:, 1:]
        try:
            y[:, -1] = arr[:, start + seq_length]
        except IndexError:
            # Last window: wrap around to the start of each row
            y[:, -1] = arr[:, 0]
        yield x, y
```
### Test Your Implementation
Now I'll make some data sets and we can check out what's going on as we batch data. Here, as an example, I'm going to use a batch size of 8 and 50 sequence steps.
```
# Grab one batch of 8 sequences x 50 steps from the encoded text
batches = get_batches(encoded, 8, 50)
x, y = next(batches)
# printing out the first 10 items in a sequence
print('x\n', x[:10, :10])
print('\ny\n', y[:10, :10])
```
If you implemented `get_batches` correctly, the above output should look something like
```
x
[[25 8 60 11 45 27 28 73 1 2]
[17 7 20 73 45 8 60 45 73 60]
[27 20 80 73 7 28 73 60 73 65]
[17 73 45 8 27 73 66 8 46 27]
[73 17 60 12 73 8 27 28 73 45]
[66 64 17 17 46 7 20 73 60 20]
[73 76 20 20 60 73 8 60 80 73]
[47 35 43 7 20 17 24 50 37 73]]
y
[[ 8 60 11 45 27 28 73 1 2 2]
[ 7 20 73 45 8 60 45 73 60 45]
[20 80 73 7 28 73 60 73 65 7]
[73 45 8 27 73 66 8 46 27 65]
[17 60 12 73 8 27 28 73 45 27]
[64 17 17 46 7 20 73 60 20 80]
[76 20 20 60 73 8 60 80 73 17]
[35 43 7 20 17 24 50 37 73 36]]
```
although the exact numbers may be different. Check to make sure the data is shifted over one step for `y`.
---
## Defining the network with PyTorch
Below is where you'll define the network.
<img src="assets/charRNN.png" width=500px>
Next, you'll use PyTorch to define the architecture of the network. We start by defining the layers and operations we want. Then, define a method for the forward pass. You've also been given a method for predicting characters.
### Model Structure
In `__init__` the suggested structure is as follows:
* Create and store the necessary dictionaries (this has been done for you)
* Define an LSTM layer that takes as params: an input size (the number of characters), a hidden layer size `n_hidden`, a number of layers `n_layers`, a dropout probability `drop_prob`, and a batch_first boolean (True, since we are batching)
* Define a dropout layer with `drop_prob`
* Define a fully-connected layer with params: input size `n_hidden` and output size (the number of characters)
* Finally, initialize the weights (again, this has been given)
Note that some parameters have been named and given in the `__init__` function, and we use them and store them by doing something like `self.drop_prob = drop_prob`.
---
### LSTM Inputs/Outputs
You can create a basic [LSTM layer](https://pytorch.org/docs/stable/nn.html#lstm) as follows
```python
self.lstm = nn.LSTM(input_size, n_hidden, n_layers,
dropout=drop_prob, batch_first=True)
```
where `input_size` is the number of characters this cell expects to see as sequential input, and `n_hidden` is the number of units in the hidden layers in the cell. And we can add dropout by adding a dropout parameter with a specified probability; this will automatically add dropout to the inputs or outputs. Finally, in the `forward` function, we can stack up the LSTM cells into layers using `.view`. With this, you pass in a list of cells and it will send the output of one cell into the next cell.
We also need to create an initial hidden state of all zeros. This is done like so
```python
self.init_hidden()
```
```
# check if GPU is available
train_on_gpu = torch.cuda.is_available()
message = 'Training on GPU!' if train_on_gpu else 'No GPU available, training on CPU; consider making n_epochs very small.'
print(message)
class CharRNN(nn.Module):
    """Character-level LSTM language model.

    Pipeline: one-hot characters -> multi-layer LSTM -> dropout ->
    fully-connected layer producing a score per character.
    """
    def __init__(self, tokens, n_hidden=256, n_layers=2,
                 drop_prob=0.5, lr=0.001):
        super().__init__()
        # Store hyperparameters (also used when checkpointing the model)
        self.drop_prob = drop_prob
        self.n_layers = n_layers
        self.n_hidden = n_hidden
        self.lr = lr
        # Character <-> integer lookup tables
        self.chars = tokens
        self.char2int = {ch: ii for ii, ch in enumerate(self.chars)}
        self.int2char = {ii: ch for ch, ii in self.char2int.items()}
        # LSTM over one-hot character vectors, batch dimension first
        self.lstm = nn.LSTM(len(self.chars), n_hidden, n_layers,
                            dropout=drop_prob, batch_first=True)
        # Regularization between the LSTM and the output layer
        self.dropout = nn.Dropout(drop_prob)
        # Project hidden states onto per-character scores
        self.fc = nn.Linear(n_hidden, len(self.chars))
    def forward(self, x, hidden):
        ''' Forward pass through the network.
            These inputs are x, and the hidden/cell state `hidden`. '''
        lstm_out, hidden = self.lstm(x, hidden)
        dropped = self.dropout(lstm_out)
        # Flatten (batch, seq, hidden) to (batch*seq, hidden) for the linear layer
        flat = dropped.contiguous().view(-1, self.n_hidden)
        return self.fc(flat), hidden
    def init_hidden(self, batch_size):
        ''' Initializes hidden state '''
        # Two zero tensors (hidden state, cell state) with the same dtype as
        # the model weights, shaped n_layers x batch_size x n_hidden
        weight = next(self.parameters()).data
        shape = (self.n_layers, batch_size, self.n_hidden)
        h0 = weight.new(*shape).zero_()
        c0 = weight.new(*shape).zero_()
        if train_on_gpu:
            h0, c0 = h0.cuda(), c0.cuda()
        return (h0, c0)
```
## Time to train
The train function gives us the ability to set the number of epochs, the learning rate, and other parameters.
Below we're using an Adam optimizer and cross entropy loss since we are looking at character class scores as output. We calculate the loss and perform backpropagation, as usual!
A couple of details about training:
>* Within the batch loop, we detach the hidden state from its history; this time setting it equal to a new *tuple* variable because an LSTM has a hidden state that is a tuple of the hidden and cell states.
* We use [`clip_grad_norm_`](https://pytorch.org/docs/stable/_modules/torch/nn/utils/clip_grad.html) to help prevent exploding gradients.
```
def train(net, data, epochs=10, batch_size=10, seq_length=50, lr=0.001, clip=5, val_frac=0.1, print_every=10):
    ''' Training a network

        Arguments
        ---------
        net: CharRNN network
        data: text data to train the network
        epochs: Number of epochs to train
        batch_size: Number of mini-sequences per mini-batch, aka batch size
        seq_length: Number of character steps per mini-batch
        lr: learning rate
        clip: gradient clipping
        val_frac: Fraction of data to hold out for validation
        print_every: Number of steps for printing training and validation loss
    '''
    net.train()
    opt = torch.optim.Adam(net.parameters(), lr=lr)
    criterion = nn.CrossEntropyLoss()
    # create training and validation data (last val_frac of the text held out)
    val_idx = int(len(data)*(1-val_frac))
    data, val_data = data[:val_idx], data[val_idx:]
    if(train_on_gpu):
        net.cuda()
    counter = 0
    n_chars = len(net.chars)
    for e in range(epochs):
        # initialize hidden state once per epoch; it is carried across batches
        h = net.init_hidden(batch_size)
        for x, y in get_batches(data, batch_size, seq_length):
            counter += 1
            # One-hot encode our data and make them Torch tensors
            x = one_hot_encode(x, n_chars)
            inputs, targets = torch.from_numpy(x), torch.from_numpy(y)
            if(train_on_gpu):
                inputs, targets = inputs.cuda(), targets.cuda()
            # Creating new variables for the hidden state, otherwise
            # we'd backprop through the entire training history
            h = tuple([each.data for each in h])
            # zero accumulated gradients
            net.zero_grad()
            # get the output from the model
            output, h = net(inputs, h)
            # calculate the loss and perform backprop
            # (targets flattened to match the (batch*seq, n_chars) output)
            loss = criterion(output, targets.view(batch_size*seq_length).long())
            loss.backward()
            # `clip_grad_norm` helps prevent the exploding gradient problem in RNNs / LSTMs.
            nn.utils.clip_grad_norm_(net.parameters(), clip)
            opt.step()
            # loss stats
            if counter % print_every == 0:
                # Get validation loss
                val_h = net.init_hidden(batch_size)
                val_losses = []
                net.eval()
                for x, y in get_batches(val_data, batch_size, seq_length):
                    # One-hot encode our data and make them Torch tensors
                    x = one_hot_encode(x, n_chars)
                    x, y = torch.from_numpy(x), torch.from_numpy(y)
                    # Creating new variables for the hidden state, otherwise
                    # we'd backprop through the entire training history
                    val_h = tuple([each.data for each in val_h])
                    inputs, targets = x, y
                    if(train_on_gpu):
                        inputs, targets = inputs.cuda(), targets.cuda()
                    output, val_h = net(inputs, val_h)
                    val_loss = criterion(output, targets.view(batch_size*seq_length).long())
                    val_losses.append(val_loss.item())
                net.train() # reset to train mode after iterating through validation data
                print("Epoch: {}/{}...".format(e+1, epochs),
                      "Step: {}...".format(counter),
                      "Loss: {:.4f}...".format(loss.item()),
                      "Val Loss: {:.4f}".format(np.mean(val_losses)))
```
## Instantiating the model
Now we can actually train the network. First we'll create the network itself, with some given hyperparameters. Then, define the mini-batches sizes, and start training!
```
# define and print the net
n_hidden=512
n_layers=2
net = CharRNN(chars, n_hidden, n_layers)
print(net)
# training hyperparameters: 128 sequences of 100 characters per mini-batch
batch_size = 128
seq_length = 100
n_epochs = 20 # start smaller if you are just testing initial behavior
# train the model
train(net, encoded, epochs=n_epochs, batch_size=batch_size, seq_length=seq_length, lr=0.001, print_every=10)
```
## Getting the best model
To set your hyperparameters to get the best performance, you'll want to watch the training and validation losses. If your training loss is much lower than the validation loss, you're overfitting. Increase regularization (more dropout) or use a smaller network. If the training and validation losses are close, you're underfitting so you can increase the size of the network.
## Hyperparameters
Here are the hyperparameters for the network.
In defining the model:
* `n_hidden` - The number of units in the hidden layers.
* `n_layers` - Number of hidden LSTM layers to use.
We assume that dropout probability and learning rate will be kept at the default, in this example.
And in training:
* `batch_size` - Number of sequences running through the network in one pass.
* `seq_length` - Number of characters in the sequence the network is trained on. Larger is better typically, the network will learn more long range dependencies. But it takes longer to train. 100 is typically a good number here.
* `lr` - Learning rate for training
Here's some good advice from Andrej Karpathy on training the network. I'm going to copy it in here for your benefit, but also link to [where it originally came from](https://github.com/karpathy/char-rnn#tips-and-tricks).
> ## Tips and Tricks
>### Monitoring Validation Loss vs. Training Loss
>If you're somewhat new to Machine Learning or Neural Networks it can take a bit of expertise to get good models. The most important quantity to keep track of is the difference between your training loss (printed during training) and the validation loss (printed once in a while when the RNN is run on the validation data (by default every 1000 iterations)). In particular:
> - If your training loss is much lower than validation loss then this means the network might be **overfitting**. Solutions to this are to decrease your network size, or to increase dropout. For example you could try dropout of 0.5 and so on.
> - If your training/validation loss are about equal then your model is **underfitting**. Increase the size of your model (either number of layers or the raw number of neurons per layer)
> ### Approximate number of parameters
> The two most important parameters that control the model are `n_hidden` and `n_layers`. I would advise that you always use `n_layers` of either 2/3. The `n_hidden` can be adjusted based on how much data you have. The two important quantities to keep track of here are:
> - The number of parameters in your model. This is printed when you start training.
> - The size of your dataset. 1MB file is approximately 1 million characters.
>These two should be about the same order of magnitude. It's a little tricky to tell. Here are some examples:
> - I have a 100MB dataset and I'm using the default parameter settings (which currently print 150K parameters). My data size is significantly larger (100 mil >> 0.15 mil), so I expect to heavily underfit. I am thinking I can comfortably afford to make `n_hidden` larger.
> - I have a 10MB dataset and running a 10 million parameter model. I'm slightly nervous and I'm carefully monitoring my validation loss. If it's larger than my training loss then I may want to try to increase dropout a bit and see if that helps the validation loss.
> ### Best models strategy
>The winning strategy to obtaining very good models (if you have the compute time) is to always err on making the network larger (as large as you're willing to wait for it to compute) and then try different dropout values (between 0,1). Whatever model has the best validation performance (the loss, written in the checkpoint filename, low is good) is the one you should use in the end.
>It is very common in deep learning to run many different models with many different hyperparameter settings, and in the end take whatever checkpoint gave the best validation performance.
>By the way, the size of your training and validation splits are also parameters. Make sure you have a decent amount of data in your validation set or otherwise the validation performance will be noisy and not very informative.
## Checkpoint
After training, we'll save the model so we can load it again later if we need too. Here I'm saving the parameters needed to create the same architecture, the hidden layer hyperparameters and the text characters.
```
# change the name, for saving multiple files
model_name = 'rnn_20_epoch.net'
# Save everything needed to rebuild the model: architecture hyperparameters,
# learned weights, and the character vocabulary (token ordering must match).
checkpoint = {'n_hidden': net.n_hidden,
              'n_layers': net.n_layers,
              'state_dict': net.state_dict(),
              'tokens': net.chars}
with open(model_name, 'wb') as f:
    torch.save(checkpoint, f)
```
---
## Making Predictions
Now that the model is trained, we'll want to sample from it and make predictions about next characters! To sample, we pass in a character and have the network predict the next character. Then we take that character, pass it back in, and get another predicted character. Just keep doing this and you'll generate a bunch of text!
### A note on the `predict` function
The output of our RNN is from a fully-connected layer and it outputs a **distribution of next-character scores**.
> To actually get the next character, we apply a softmax function, which gives us a *probability* distribution that we can then sample to predict the next character.
### Top K sampling
Our predictions come from a categorical probability distribution over all the possible characters. We can make the sample text and make it more reasonable to handle (with less variables) by only considering some $K$ most probable characters. This will prevent the network from giving us completely absurd characters while allowing it to introduce some noise and randomness into the sampled text. Read more about [topk, here](https://pytorch.org/docs/stable/torch.html#torch.topk).
```
def predict(net, char, h=None, top_k=None):
    ''' Given a character, predict the next character.
        Returns the predicted character and the hidden state.

        Arguments
        ---------
        net:   trained CharRNN model
        char:  the single input character
        h:     LSTM hidden-state tuple, or None to start fresh.
               BUG FIX: the original unconditionally did
               `tuple(each.data for each in h)`, which raised a
               TypeError when called with the documented default
               h=None. PyTorch recurrent layers accept hidden=None
               (zero-initialized state), so we only detach when a
               state is actually provided.
        top_k: if given, sample only among the K most likely characters
    '''
    # encode the character index and one-hot it: shape (1, 1, n_chars)
    x = np.array([[net.char2int[char]]])
    x = one_hot_encode(x, len(net.chars))
    inputs = torch.from_numpy(x)
    if(train_on_gpu):
        inputs = inputs.cuda()
    # detach hidden state from history so we don't backprop through time
    if h is not None:
        h = tuple([each.data for each in h])
    # get the output of the model
    out, h = net(inputs, h)
    # get the character probabilities
    p = F.softmax(out, dim=1).data
    if(train_on_gpu):
        p = p.cpu()  # move to cpu
    # restrict to the top K characters when requested
    if top_k is None:
        top_ch = np.arange(len(net.chars))
    else:
        p, top_ch = p.topk(top_k)
        top_ch = top_ch.numpy().squeeze()
    # select the likely next character with some element of randomness
    p = p.numpy().squeeze()
    char = np.random.choice(top_ch, p=p/p.sum())
    # return the decoded value of the predicted char and the hidden state
    return net.int2char[char], h
```
### Priming and generating text
Typically you'll want to prime the network so you can build up a hidden state. Otherwise the network will start out generating characters at random. In general the first bunch of characters will be a little rough since it hasn't built up a long history of characters to predict from.
```
def sample(net, size, prime='The', top_k=None):
    """Generate `size` characters of text from a trained CharRNN.

    The network is first "primed" on the characters of `prime` to build
    up a hidden state, then repeatedly fed its own last prediction.
    """
    # place the model on the right device and switch to inference mode
    if(train_on_gpu):
        net.cuda()
    else:
        net.cpu()
    net.eval()

    # run the priming characters through the network first
    generated = list(prime)
    hidden = net.init_hidden(1)
    for prime_ch in prime:
        next_ch, hidden = predict(net, prime_ch, hidden, top_k=top_k)
    generated.append(next_ch)

    # now keep feeding back the most recent character to grow the text
    for _ in range(size):
        next_ch, hidden = predict(net, generated[-1], hidden, top_k=top_k)
        generated.append(next_ch)
    return ''.join(generated)
print(sample(net, 1000, prime='Anna', top_k=5))
```
## Loading a checkpoint
```
# Here we have loaded in a model that trained over 20 epochs `rnn_20_epoch.net`
with open('rnn_20_epoch.net', 'rb') as f:
checkpoint = torch.load(f)
loaded = CharRNN(checkpoint['tokens'], n_hidden=checkpoint['n_hidden'], n_layers=checkpoint['n_layers'])
loaded.load_state_dict(checkpoint['state_dict'])
# Sample using a loaded model
print(sample(loaded, 2000, top_k=5, prime="And Levin said"))
```
| github_jupyter |
# Ray RLlib - Sample Application: CartPole
© 2019-2021, Anyscale. All Rights Reserved

We were briefly introduced to the `CartPole` example and the OpenAI gym `CartPole-v1` environment ([gym.openai.com/envs/CartPole-v1/](https://gym.openai.com/envs/CartPole-v1/)) in the [reinforcement learning introduction](../01-Introduction-to-Reinforcement-Learning.ipynb). This lesson uses [RLlib](https://ray.readthedocs.io/en/latest/rllib.html) to train a policy for `CartPole`.
Recall that the `gym` Python module provides MDP interfaces to a variety of simulators, like the simple simulator for the physics of balancing a pole on a cart that is used by the CartPole environment. The `CartPole` problem is described at https://gym.openai.com/envs/CartPole-v1.

([source](https://gym.openai.com/envs/CartPole-v1/))
Even though this is a relatively simple and quick example to run, its results can be understood quite visually. `CartPole` is one of OpenAI Gym's ["classic control"](https://gym.openai.com/envs/#classic_control) examples.
For more background about this problem, see:
* ["Neuronlike Adaptive Elements That Can Solve Difficult Learning Control Problem"](https://ieeexplore.ieee.org/document/6313077), AG Barto, RS Sutton, and CW Anderson, *IEEE Transactions on Systems, Man, and Cybernetics* (1983). The same Sutton and Barto who wrote [*Reinforcement Learning: An Introduction*](https://mitpress.mit.edu/books/reinforcement-learning-second-edition).
* ["Cartpole - Introduction to Reinforcement Learning (DQN - Deep Q-Learning)"](https://towardsdatascience.com/cartpole-introduction-to-reinforcement-learning-ed0eb5b58288), [Greg Surma](https://twitter.com/GSurma).
First, import Ray and the PPO module in RLlib, then start Ray.
```
import ray
import ray.rllib.agents.ppo as ppo
import pandas as pd
import json
import os
import shutil
import sys
```
Model *checkpoints* will get saved after each iteration into directories under `tmp/ppo/cart`, i.e., relative to this directory.
The default directories for checkpoints are `$HOME/ray_results/<algo_env>/.../checkpoint_N`.
> **Note:** If you prefer to use a different directory root, change it in the next cell _and_ in the `rllib rollout` command below.
```
checkpoint_root = "tmp/ppo/cart"
```
Clean up output of previous lessons (optional):
```
# Where checkpoints are written:
shutil.rmtree(checkpoint_root, ignore_errors=True, onerror=None)
# Where some data will be written and used by Tensorboard below:
ray_results = f'{os.getenv("HOME")}/ray_results/'
shutil.rmtree(ray_results, ignore_errors=True, onerror=None)
```
Start Ray:
```
info = ray.init(ignore_reinit_error=True)
```
The Ray Dashboard is useful for monitoring Ray:
```
print("Dashboard URL: http://{}".format(info["webui_url"]))
```
Next we'll train an RLlib policy with the [`CartPole-v1` environment](https://gym.openai.com/envs/CartPole-v1/).
If you've gone through the _Multi-Armed Bandits_ lessons, you may recall that we used [Ray Tune](http://tune.io), the Ray Hyperparameter Tuning system, to drive training. Here we'll do it ourselves.
By default, training runs for `10` iterations. Increase the `N_ITER` setting if you want to train longer and see the resulting rewards improve. However, if the max score of `200` is achieved early, you can use a smaller number of iterations.
- `num_workers` is the number of actors that the agent will create. This determines the degree of parallelism that will be used. In a cluster, these actors will be spread over the available nodes.
- `num_sgd_iter` is the number of epochs of SGD (stochastic gradient descent, i.e., passes through the data) that will be used to optimize the PPO surrogate objective at each iteration of PPO, for each _minibatch_ ("chunk") of training data. Using minibatches is more efficient than training with one record at a time.
- `sgd_minibatch_size` is the SGD minibatch size (batches of data) that will be used to optimize the PPO surrogate objective.
- `model` contains a dictionary of parameters describing the neural net used to parameterize the policy. The `fcnet_hiddens` parameter is a list of the sizes of the hidden layers. Here, we have two hidden layers of size 100, each.
- `num_cpus_per_worker` when set to 0 prevents Ray from pinning a CPU core to each worker, which means we could run out of workers in a constrained environment like a laptop or a cloud VM.
> **Note:** If you change the values shown for `config['model']['fcnet_hiddens']`, make the same change in the `rllib rollout` command below!
```
SELECT_ENV = "CartPole-v1" # Specifies the OpenAI Gym environment for Cart Pole
N_ITER = 10 # Number of training runs.
config = ppo.DEFAULT_CONFIG.copy() # PPO's default configuration. See the next code cell.
config["log_level"] = "WARN" # Suppress too many messages, but try "INFO" to see what can be printed.
# Other settings we might adjust:
config["num_workers"] = 1 # Use > 1 for using more CPU cores, including over a cluster
config["num_sgd_iter"] = 10 # Number of SGD (stochastic gradient descent) iterations per training minibatch.
# I.e., for each minibatch of data, do this many passes over it to train.
config["sgd_minibatch_size"] = 250 # The amount of data records per minibatch
config["model"]["fcnet_hiddens"] = [100, 50] #
config["num_cpus_per_worker"] = 0 # This avoids running out of resources in the notebook environment when this cell is re-executed
```
Out of curiosity, let's see what configuration settings are defined for PPO. Note in particular the parameters for the deep learning `model`:
```
ppo.DEFAULT_CONFIG

# Build the PPO trainer for the chosen environment, then run N_ITER
# training iterations, collecting per-iteration reward statistics and
# checkpointing the policy after every iteration.
agent = ppo.PPOTrainer(config, env=SELECT_ENV)
results = []
episode_data = []
episode_json = []
for n in range(N_ITER):
    result = agent.train()
    results.append(result)
    # keep only the headline metrics for plotting later
    episode = {
        "n": n,
        "episode_reward_min": result["episode_reward_min"],
        "episode_reward_mean": result["episode_reward_mean"],
        "episode_reward_max": result["episode_reward_max"],
        "episode_len_mean": result["episode_len_mean"],
    }
    episode_data.append(episode)
    episode_json.append(json.dumps(episode))
    # save a checkpoint each iteration so any run can be rolled out later
    file_name = agent.save(checkpoint_root)
    print(f'{n:3d}: Min/Mean/Max reward: {result["episode_reward_min"]:8.4f}/{result["episode_reward_mean"]:8.4f}/{result["episode_reward_max"]:8.4f}. Checkpoint saved to {file_name}')
```
The episode rewards should increase after multiple iterations. Try tweaking the config parameters. Smaller values for the `num_sgd_iter`, `sgd_minibatch_size`, or the `model`'s `fcnet_hiddens` will train faster, but take longer to improve the policy.
```
df = pd.DataFrame(data=episode_data)
df
df.plot(x="n", y=["episode_reward_mean", "episode_reward_min", "episode_reward_max"], secondary_y=True)
```
Also, print out the policy and model to see the results of training in detail…
```
import pprint
policy = agent.get_policy()
model = policy.model
pprint.pprint(model.variables())
pprint.pprint(model.value_function())
print(model.base_model.summary())
```
## Rollout
Next we'll use the [RLlib rollout CLI](https://ray.readthedocs.io/en/latest/rllib-training.html#evaluating-trained-policies), to evaluate the trained policy.
This visualizes the `CartPole` agent operating within the simulation: moving the cart left or right to avoid having the pole fall over.
We'll use the last saved checkpoint, `checkpoint_10` (or whatever you set for `N_ITER` above) for the rollout, evaluated through `2000` steps.
> **Notes:**
>
> 1. If you changed `checkpoint_root` above to be different than `tmp/ppo/cart`, then change it here, too. Note that due to bugs in variable substitution in Jupyter notebooks, we can't use variables in the next cell, unfortunately.
> 2. If you changed the model parameters, specifically the `fcnet_hiddens` array in the `config` object above, make the same change here.
You may need to make one more modification, depending on how you are running this tutorial:
1. Running on your laptop? - Remove the line `--no-render`.
2. Running on the Anyscale Service? The popup windows that would normally be created by the rollout can't be viewed in this case. Hence, the `--no-render` flag suppresses them. The code cell afterward provides a sample video. You can try adding `--video-dir tmp/ppo/cart`, which will generate MP4 videos, then download them to view them. Or copy the `Video` cell below and use it to view the movies.
```
!rllib rollout tmp/ppo/cart/checkpoint_10/checkpoint-10 \
--config "{\"env\": \"CartPole-v1\", \"model\": {\"fcnet_hiddens\": [100, 50]}}" \
--run PPO \
--no-render \
--steps 2000
```
Here is a sample episode.
> **Note:** This video was created by running the previous `rllib rollout` command with the argument `--video-dir some_directory`. It creates one video per episode.
```
from IPython.display import Video
cart_pole_sample_video = "../images/rllib/Cart-Pole-Example-Video.mp4"
Video(cart_pole_sample_video)
```
Finally, launch [TensorBoard](https://ray.readthedocs.io/en/latest/rllib-training.html#getting-started). Select the Cart Pole runs and visualize the key metrics from training with RLlib.
```shell
tensorboard --logdir=$HOME/ray_results
```
```
ray.shutdown()
```
| github_jupyter |
```
# Unzip the bundled PaddleSeg once; after the first run this line can be commented out
!unzip -oq /home/aistudio/PaddleSeg.zip
# Unzip the datasets into the data/ directory
!unzip -qo data/data95249/train_50k_mask.zip -d data/
!unzip -oq data/data100087/Bๆฆๆต่ฏๆฐๆฎ้.zip -d data/
!unzip -oq data/data95249/train_image.zip -d data/
import sys
sys.path.append("PaddleSeg")
import paddleseg
import paddle
import numpy as np
import os
import matplotlib.pyplot as plt
from PIL import Image
from tqdm import tqdm
import random
# Set the random seed for reproducible train/val splits
random.seed(2021)
def write_txt(file_name, imgs_path, labels_path=None, mode='train', val_pro=0.2):
    """Write PaddleSeg-style dataset list file(s).

    mode='train': `imgs_path`/`labels_path` are parallel lists of class
    sub-folders; every "<image> <label> <class_idx>" triple is written.
    When 0 <= val_pro <= 1 the samples are shuffled and split into
    `file_name[0]` (train) and `file_name[1]` (val); otherwise everything
    goes to `file_name[0]`. Returns (train_count, val_count).

    mode='test': `imgs_path` is a list of file names, written one per
    line rooted at the module-level global `test_path` (set by
    create_txt). Returns None.
    """
    assert mode in ("train", "test"), "ERROR:mode must be train or test."

    if mode == "test":
        # one absolute-ish path per line, rooted at the global test_path
        with open(file_name, 'w') as out:
            for name in imgs_path:
                out.write(os.path.join(test_path, name) + "\n")
        return

    # collect (image, label, class-index) triples, class per sub-folder
    samples = []
    for class_idx, img_dir in enumerate(imgs_path):
        for fname in sorted(os.listdir(img_dir)):
            samples.append((os.path.join(img_dir, fname),
                            os.path.join(labels_path[class_idx], fname),
                            str(class_idx)))

    def _dump(path, rows):
        # one "<image> <label> <class>" line per sample
        with open(path, 'w') as out:
            for img, lbl, cls in rows:
                out.write(img + " " + lbl + " " + cls + "\n")

    if 0 <= val_pro <= 1:
        # shuffle before splitting so the val set is a random sample
        random.shuffle(samples)
        val_len = int(len(samples) * val_pro)
        val_split, train_split = samples[:val_len], samples[val_len:]
        _dump(file_name[0], train_split)
        _dump(file_name[1], val_split)
        return len(train_split), val_len

    # out-of-range val_pro: write everything, unshuffled, to one file
    _dump(file_name[0], samples)
    return len(samples), 0
def create_txt(data_root, train_imgs_dir=None, train_labels_dir=None, test_dir=None, val_pro=0.2):
    """Build train/val/test list files for the segmentation dataset.

    data_root:        root folder of the dataset
    train_imgs_dir:   sub-folder with training images (skipped if None)
    train_labels_dir: sub-folder with the matching mask images
    test_dir:         sub-folder with test images (skipped if None)
    val_pro:          fraction of training samples moved to val.txt

    Side effect: sets the module-level global `test_path`, which
    write_txt() reads when writing test.txt.
    """
    if train_imgs_dir is not None:
        # start from a clean slate so stale list files are never reused
        if os.path.exists("train.txt"):
            os.remove("train.txt")
        if os.path.exists("val.txt"):
            os.remove("val.txt")
        train_imgs_dir = os.path.join(data_root, train_imgs_dir)
        train_labels_dir = os.path.join(data_root, train_labels_dir)
        file_names = os.listdir(train_imgs_dir)
        file_names = sorted(file_names)
        # images and labels live in same-named sub-folders, one per class
        train_imgs_path, train_labels_path = [], []
        for na in file_names:
            train_imgs_path.append(os.path.join(train_imgs_dir, na))
            train_labels_path.append(os.path.join(train_labels_dir, na))
        train_len, val_len = write_txt(["train.txt", "val.txt"], train_imgs_path, train_labels_path, mode='train', val_pro=val_pro)
        print("่ฎญ็ปๆฐๆฎๆด็ๅฎๆฏ๏ผ่ฎญ็ป้้ฟๅบฆ๏ผ{}๏ผ้ช่ฏ้้ฟๅบฆ๏ผ{}๏ผ ็ฑปๅซๆฐ๏ผ{}".format(train_len, val_len, len(file_names)))
    if test_dir is not None:
        if os.path.exists("test.txt"):
            os.remove("test.txt")
        # write_txt() reads this global when mode == "test"
        global test_path
        test_path = os.path.join(data_root, test_dir)
        test_imgs_path_list = sorted(os.listdir(test_path))
        write_txt("test.txt", test_imgs_path_list, mode="test")
        print("ๆต่ฏๆฐๆฎๆด็ๅฎๆฏ๏ผๆต่ฏ้้ฟๅบฆ๏ผ{}".format(len(test_imgs_path_list)))
data_root = "data"
train_imgs_dir = "train_image"
train_labels_dir = "train_50k_mask"
test_dir = "test_image"
create_txt(data_root, train_imgs_dir, train_labels_dir, test_dir, val_pro=0.2)
# Check that the list files were written correctly; you can open them directly,
# or read the file contents to verify.
#ไปฅtrain.txtไธบไพ๏ผๅช็ๅ5่ก้ช่ฏ
count = 5
with open('train.txt', 'r') as f:
for line in f.readlines():
print(line)
count -= 1
if count==0:
break
# ่ฎญ็ปๆจกๅ
!python PaddleSeg/train.py --config my_deeplabv3.yml --do_eval --use_vdl --save_dir /home/aistudio/output_deeplabv3_1 --save_interval 2000
#ๆจ็
!python PaddleSeg/predict.py --config my_deeplabv3.yml --model_path output_deeplabv3_1/best_model/model.pdparams --image_path data/test_image --save_dir output/result_1 #--aug_pred --flip_horizontal --flip_vertical
import os
import cv2
import numpy as np

# Post-process the raw predictions: erode to remove small speckles, then
# smooth the mask edges with a Gaussian blur before writing final output.
filePath = 'output/result_1'
filenames = os.listdir(filePath)
kernel = np.ones((3,3),np.uint8)
os.makedirs('./pred', exist_ok=True)  # make sure the output dir exists
for filename in filenames:
    # BUG FIX: the original used `filePath + filename`, which produced
    # paths like 'output/result_1a.png' (missing separator); cv2.imread
    # then returned None and cv2.erode raised on every file.
    img = cv2.imread(os.path.join(filePath, filename))
    erosion = cv2.erode(img, kernel, iterations=1)
    gaussian = cv2.GaussianBlur(erosion, (5, 5), 2)
    cv2.imwrite(os.path.join('./pred', filename), gaussian)
%cd output/result_1/results
!zip -r -oq /home/aistudio/pred.zip ./
%cd /home/aistudio
```
| github_jupyter |
```
import tensorflow as tf
tf.config.experimental.list_physical_devices()
tf.test.is_built_with_cuda()
```
# Importing Libraries
```
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import os.path as op
import pickle
import tensorflow as tf
from tensorflow import keras
from keras.models import Model,Sequential,load_model
from keras.layers import Input, Embedding
from keras.layers import Dense, Bidirectional
from keras.layers.recurrent import LSTM
import keras.metrics as metrics
import itertools
from tensorflow.python.keras.utils.data_utils import Sequence
from decimal import Decimal
from keras import backend as K
from keras.layers import Conv1D,MaxPooling1D,Flatten,Dense
```
# Data Fetching
```
# Load the per-node, per-month sensor CSVs: columns [1,2,3,15,16] become
# the model inputs A1 (5 features) and [5,6,7,8,17,18,19] the targets U1.
node=['150','149','147','144','142','140','136','61']
mon=['Apr','Mar','Aug','Jun','Jul','Sep','May','Oct']
inp_parts, out_parts = [], []
for j in node:
    for i in mon:
        # build the file name once and reuse it for both column subsets
        csv_path = 'data_gkv/AT510_Node_' + str(j) + '_' + str(i) + '19_OutputFile.csv'
        inp = pd.read_csv(csv_path, usecols=[1, 2, 3, 15, 16])
        out = pd.read_csv(csv_path, usecols=[5, 6, 7, 8, 17, 18, 19])
        inp_parts.append(np.array(inp, dtype='float32'))
        out_parts.append(np.array(out, dtype='float32'))
# Concatenate once at the end: the original np.append inside the loop
# re-copied the whole accumulated array on every iteration (quadratic).
A1 = np.concatenate(inp_parts, axis=0) if inp_parts else np.empty((0, 5), dtype='float32')
U1 = np.concatenate(out_parts, axis=0) if out_parts else np.empty((0, 7), dtype='float32')
print(A1)
print(U1)
```
# Min Max Scaler
```
from sklearn.preprocessing import MinMaxScaler
import warnings
scaler_obj=MinMaxScaler()
X1=scaler_obj.fit_transform(A1)
Y1=scaler_obj.fit_transform(U1)
warnings.filterwarnings(action='ignore', category=UserWarning)
X1=X1[:,np.newaxis,:]
Y1=Y1[:,np.newaxis,:]
def rmse(y_true, y_pred):
    """Root-mean-squared error along the last axis (Keras backend ops)."""
    squared_error = K.square(y_pred - y_true)
    return K.sqrt(K.mean(squared_error, axis=-1))
def coeff_determination(y_true, y_pred):
    """R^2 (coefficient of determination) metric using Keras backend ops.

    K.epsilon() in the denominator guards against division by zero when
    the targets are constant.
    """
    residual_ss = K.sum(K.square(y_true - y_pred))
    total_ss = K.sum(K.square(y_true - K.mean(y_true)))
    return 1 - residual_ss / (total_ss + K.epsilon())
```
# Model
```
model1 = Sequential()
model1.add(keras.Input(shape=(1,5)))
model1.add(tf.keras.layers.LSTM(7,activation="tanh",use_bias=True,kernel_initializer="glorot_uniform",bias_initializer="zeros"))
model1.add(Dense(7))
model1.add(keras.layers.BatchNormalization(axis=-1,momentum=0.99,epsilon=0.001,center=True,scale=True,
beta_initializer="zeros",gamma_initializer="ones",
moving_mean_initializer="zeros",moving_variance_initializer="ones",trainable=True))
model1.add(keras.layers.ReLU())
model1.compile(optimizer=keras.optimizers.Adam(learning_rate=1e-5), loss='binary_crossentropy',metrics=['accuracy','mse','mae',rmse])
model1.summary()
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(X1, Y1, test_size=0.25, random_state=42)
model_fit8 = model1.fit(x_train,y_train,batch_size=256,epochs=50, validation_split=0.1)
model1.evaluate(x_test,y_test)
model1.evaluate(x_train,y_train)
```
# Saving Model as File
```
model_json = model1.to_json()
with open("Model_File/lstm_tanh.json", "w") as json_file:
json_file.write(model_json)
# serialize weights to HDF5
model1.save_weights("Model_File/lstm_tanh.h5")
print("Saved model to disk")
from keras.models import model_from_json
json_file = open('Model_File/lstm_tanh.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
# load weights into new model
loaded_model.load_weights("Model_File/lstm_tanh.h5")
print("Loaded model from disk")
loaded_model.compile(optimizer=keras.optimizers.Adam(learning_rate=0.001), loss='binary_crossentropy',metrics=['accuracy','mse','mae',rmse])
```
# Error Analysis
```
# summarize history for loss
plt.plot(model_fit8.history['loss'])
plt.plot(model_fit8.history['val_loss'])
plt.title('Model Loss',fontweight ='bold',fontsize = 15)
plt.ylabel('Loss',fontweight ='bold',fontsize = 15)
plt.xlabel('Epoch',fontweight ='bold',fontsize = 15)
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()
# summarize history for accuracy
plt.plot(model_fit8.history['accuracy'])
plt.plot(model_fit8.history['val_accuracy'])
plt.title('Model accuracy',fontweight ='bold',fontsize = 15)
plt.ylabel('Accuracy',fontweight ='bold',fontsize = 15)
plt.xlabel('Epoch',fontweight ='bold',fontsize = 15)
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()
#Creating csv file of prediction
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(X1, Y1, test_size=0.25, random_state=42)
y_test_pred=loaded_model.predict(x_test)
y_test_pred
y_test
y_test=y_test[:,0]
from numpy import savetxt
savetxt('ARRAY_DATA/lstm_y_test_pred.csv', y_test_pred[:1001], delimiter=',')
from numpy import savetxt
savetxt('ARRAY_DATA/lstm_y_test.csv', y_test[:1001], delimiter=',')
#completed
```
| github_jupyter |
# Paper Trends
## Imports
```
%load_ext autoreload
%autoreload 2
%aimport
%matplotlib inline
import os
import sys
nb_dir = os.path.dirname(os.path.split(os.getcwd())[0])
if nb_dir not in sys.path:
sys.path.append(nb_dir)
from tqdm import tqdm_notebook as tqdm
import pandas as pd
from turicreate import SFrame, load_sframe
from pathlib import Path
import turicreate.aggregate as agg
import numpy as np
import json
import os
import matplotlib.pyplot as plt
import pandas as pd
import math
import glob
import ntpath
from tqdm import tqdm
import seaborn as sns
from matplotlib.ticker import FuncFormatter
import datetime
from matplotlib.backends.backend_pdf import PdfPages
import seaborn as sns
```
## Utility Functions
```
def convert_to_barchart_format(sf, x, year_column="Year", count_column="count", year_range=(1786,2019)):
    """Reshape per-(x, year) counts into the format used by bar-chart-race plots.

    Every value of `x` gets one row per year in `year_range` (missing
    years are zero-filled), counts become running (cumulative) totals,
    and each year's values are ranked descending.

    Returns a pandas DataFrame with columns: year, name, value
    (cumulative count), lastValue (previous year's value), rank
    (1 = largest value that year).
    """
    # one row per year in the requested range
    year_sf = SFrame()
    year_sf[year_column] = np.linspace(year_range[0], year_range[1], year_range[1]-year_range[0]+1).tolist()
    year_sf[year_column] = year_sf[year_column]
    sf[year_column] = sf[year_column].astype(float)
    # cross every entity with every year so gaps can be zero-filled
    res_sf = SFrame()
    for d in tqdm(sf[x].unique()):
        temp_sf = SFrame()
        temp_sf[x] = [d]*len(year_sf)
        temp_sf[year_column] = year_sf[year_column]
        res_sf = res_sf.append(temp_sf)
    sf = sf.join(res_sf, how="right").sort(year_column)
    sf = sf.fillna(count_column, 0)
    df = sf.to_dataframe()
    df = df.sort_values([x, year_column])
    # cumulative count per entity, plus the previous year's value
    df['value'] = df.groupby([x])[count_column].cumsum()
    df["lastValue"] = df.groupby([x])["value"].shift(1)
    df = df.fillna(0)
    # rank entities within each year (1 = highest cumulative count)
    df["rank"] = df.groupby([year_column])["value"].rank(ascending=False)
    return df.rename(columns={x:"name", year_column: "year", count_column:"count"})[["year","name","value","lastValue","rank"]]
def chunks(l, n):
    """Yield successive n-sized slices of sequence `l` (last may be shorter)."""
    for start in range(0, len(l), n):
        yield l[start:start + n]
def get_d(sf_corr, diseases_id):
    """Yield (label, rows) for each id in `sf_corr` that has more than 5 rows.

    The label is "<id>: <Disease Title>", looked up in the `diseases_id`
    SFrame; the rows are that id's records sorted by "year".
    """
    for data in sf_corr.groupby("id"):
        if len(data[1]) > 5:
            # [0] picks the first matching row of the filtered SFrame
            yield f"{data[0]}: {diseases_id[diseases_id['id']==data[0]][0]['Disease'].title()}", data[1].sort_values("year")
sns.set(style="ticks")
def create_gird(df, col, hue, x, y, sharey=True, legend=False):
    """Plot a faceted grid of line plots, one panel per value of `col`.

    NOTE(review): "gird" looks like a typo for "grid", but the name is
    kept because later cells call it.

    df:      tidy DataFrame with the data to plot
    col:     column that defines the facets (one subplot per value)
    hue:     column used to colour lines within a facet
    x, y:    columns plotted on the axes
    sharey:  whether facets share the y axis
    legend:  add a figure-level legend if True
    """
    # Initialize a grid of plots with an Axes for each walk
    grid = sns.FacetGrid(df, col=col, hue=hue, palette=sns.color_palette("hls", 4), sharey=sharey,
                         col_wrap=3, height=4.5)
    # show plain integer years on the x axis
    plt.gca().xaxis.set_major_formatter(FuncFormatter(lambda x, _: int(x)))
    # Draw a horizontal line to show the starting point
    grid.map(plt.axhline, y=0, ls=":", c=".5")
    # Draw a line plot to show the trajectory of each random walk
    grid.map(plt.plot, x, y)
    grid.set_titles("{col_name}")
    if legend:
        grid.add_legend()
    # Adjust the arrangement of the plots
    grid.fig.tight_layout(w_pad=1)
    return grid
```
## Analysis
```
spothlight = ["SARS","MERS Coronavirus", "Avian Influenza","Ebola", "Influenza", "HIV/AIDS","Hepatitis B","Hepatitis C", "Swine Flu"]
years = [2002,2012,1878,1976,1878,1981,1966,1987,1918 ]
min_refs = 5
```
### Data Loading
```
diseases_id= load_sframe("Data/diseases_id.csv")
disease_names = SFrame.read_csv("Data/disease_names.csv")
```
General MAG Medicine Publications:
```
med_mag = load_sframe("Data/mag/med_mag.sframe")
len(med_mag)
```
MAG Medicine Publications about the specific diseases:
```
diseases_mag = load_sframe("Data/mag/diseases_med_mag.sframe")
```
General MAG Virology Publications:
```
len(diseases_mag)
viro_mag = load_sframe("Data/mag/viro_mag.sframe")
```
MAG Virology Publications about the specific diseases:
```
len(viro_mag)
diseases_viro_mag = load_sframe("Data/mag/diseases_viro_mag.sframe")
len(diseases_viro_mag)
```
### Number of papers by diseases from 2001
```
diseases = diseases_mag[(diseases_mag["Year"]>2001)&(diseases_mag["Ref Number"]>min_refs)]
diseases = diseases.filter_by(spothlight, "disease")["disease"].value_counts()
diseases = diseases.rename({"value":"Disease", "count": "Numer of Papers"})
plt.figure(figsize=(20,10))
sns.set()
colors = ["#4374B3", "#4374B3"]
# Set your custom color palette
sns.set_palette(sns.color_palette(colors))
ax = sns.barplot(x="Disease", y="Numer of Papers", data=diseases.to_dataframe(), color="#4374B3")
ax.set_xticklabels(ax.get_xticklabels(),rotation=90)
plt.tight_layout()
plt.savefig("output/Papers/disease_count.svg")
```
We filter all publication that are not academic papers (editorials, letters, etc.).
This type of publication rarely cites other papers, so filtering on the number of references removes this kind of publication from the dataset.
```
med_mag = med_mag[med_mag["Ref Number"]>min_refs]
viro_mag = viro_mag[viro_mag["Ref Number"]>min_refs]
diseases_mag = diseases_mag[diseases_mag["Ref Number"]>min_refs].filter_by(spothlight, "disease")
diseases_viro_mag = diseases_viro_mag[diseases_viro_mag["Ref Number"]>min_refs].filter_by(spothlight, "disease")
```
### Publications - Citation
#### NPR
Publication data normalization
```
def nomalize_disease_publications(diseases_sf, general_sf):
    """Compute the Normalized Paper Rate (NPR) per disease per year.

    NPR = (papers about the disease that year) / (all papers that year),
    using `general_sf` as the reference corpus.
    NOTE(review): the name has a typo ("nomalize") but is kept because
    later cells call it.
    """
    diseases_pub_count = diseases_sf.groupby(["disease","Year"], {"Number of papers": agg.COUNT()})
    papers_year = general_sf.groupby("Year", {"Total Number of papers": agg.COUNT()})
    diseases_pub_count = diseases_pub_count.join(papers_year, {"Year":"Year"})
    diseases_pub_count["NPR"] = diseases_pub_count["Number of papers"] / diseases_pub_count["Total Number of papers"]
    diseases_pub_count = diseases_pub_count.rename({"disease":"Disease"})
    return diseases_pub_count.sort(["Disease","Year"])
diseases_pub_count_viro = nomalize_disease_publications(diseases_viro_mag, viro_mag)
diseases_pub_count_med = nomalize_disease_publications(diseases_mag, med_mag)
diseases_pub_count_viro["Type"] = "Virolgy"
diseases_pub_count_med["Type"] = "Medicine"
diseases_pub_count = diseases_pub_count_viro.append(diseases_pub_count_med)
def chunks(l, n):
    """Split `l` into consecutive chunks of length n (last may be shorter).

    NOTE(review): duplicates the earlier chunks() helper; kept so this
    cell stays self-contained when run out of order.
    """
    idx = 0
    while idx < len(l):
        yield l[idx:idx + n]
        idx += n
def get_data(sf_corr):
    """Yield each Disease's rows (sorted by Year) when it has more than 5 rows."""
    for _, group in sf_corr.groupby("Disease"):
        if len(group) > 5:
            yield group.sort_values("Year")
```
Filter the data:
```
pub = SFrame()
for d,y in zip(spothlight, years):
pub = pub.append( diseases_pub_count[(diseases_pub_count["Disease"]==d)&(diseases_pub_count["Year"]>=y)])
pub["Normalized Paper Rate"] = pub["NPR"]
```
Generate SVG
```
sns.set(font_scale=1.3)
plt.rc('text', usetex=False)
plt.figure(figsize=(16, 12))
des = list(get_data(pub[(pub["Year"]>=1980)&(pub["Type"]== "Virolgy")].to_dataframe()))
for i, curr_f in enumerate(tqdm(chunks(des, 20), total=((len(des) // 20)+1))):
create_gird(pd.concat(curr_f),"Disease","Type","Year", "Normalized Paper Rate",False,False)
plt.savefig(f"output/Papers/Virolgy_NPR_{i}.svg")
# plt.close()
sns.set(font_scale=1.3)
plt.rc('text', usetex=False)
plt.figure(figsize=(16, 12))
des = list(get_data(pub[(pub["Year"]>=1980)&(pub["Type"]== "Medicine")].to_dataframe()))
for i, curr_f in enumerate(tqdm(chunks(des, 20), total=((len(des) // 20)+1))):
create_gird(pd.concat(curr_f),"Disease","Type","Year", "Normalized Paper Rate",False,False)
plt.savefig(f"output/Papers/Medicine_NPR_{i}.svg")
# plt.close()
```
Generate multi-page PDF
```
sns.set(font_scale=1.3)
# Create the PdfPages object to which we will save the pages:
# The with statement makes sure that the PdfPages object is closed properly at
# the end of the block, even if an Exception occurs.
with PdfPages('output/Papers/Medicine_NPR.pdf') as pdf:
# if LaTeX is not installed or error caught, change to `usetex=False`
plt.rc('text', usetex=False)
plt.figure(figsize=(8, 6))
des = list(get_data(pub[(pub["Year"]>=1980)&(pub["Type"]== "Medicine")].to_dataframe()))
for i, curr_f in enumerate(tqdm(chunks(des, 20), total=((len(des) // 20)+1))):
create_gird(pd.concat(curr_f),"Disease","Type","Year", "Normalized Paper Rate",False,False)
pdf.savefig()
plt.close()
pub["Normalized Paper Rate"] = np.log(pub["NPR"])
import plotly.express as px
fig = px.line(pub[(pub["Type"]=="Virolgy")&(pub["Year"]>1959)].to_dataframe(), x="Year", y="Normalized Paper Rate",color="Disease", width=1600, height=800)
fig.update_layout({"legend":{"x":0,"y":1.1}, "legend_orientation":"h"}, font=dict(
size=20,
))
fig.show()
# import plotly.io as pio
# pio.orca.config.server_url = "http://localhost:9091"
# fig.write_image("output/Papers/disease-npr.svg")
```
Plot Similarity Using DTW
```
data = pub[(pub["Year"]>=1980)&(pub["Type"]== "Virolgy")&(pub["Year"]<2019)][["Disease","Year","NPR"]].to_dataframe()
data = data.sort_values(["Disease","Year"])
from tslearn.metrics import dtw
res= {"Disease1":[], "Disease2":[], "dtw":[]}
for d1, df1 in data.groupby("Disease"):
for d2, df2 in data.groupby("Disease"):
res["Disease1"].append(d1)
res["Disease2"].append(d2)
disease1 = df1["NPR"].values
disease2 = df2["NPR"].values
res["dtw"].append(dtw(disease1, disease2))
piv_data = []
for d, df in data.groupby("Disease"):
piv_data.append(df["NPR"].values)
sns.set(font_scale=2.0)
corr = pd.DataFrame(res).pivot(index='Disease1', columns='Disease2', values='dtw')
mask = np.zeros_like(corr)
mask[np.triu_indices_from(mask)] = True
with sns.axes_style("white"):
plt.figure(figsize=(40,20))
ax = sns.heatmap(corr, mask=mask, vmax=.3, square=True, annot=True, fmt='0.3f', cmap=sns.light_palette("#cc0000" , reverse=True, as_cmap=True))
plt.savefig("output/Papers/dtw_npr.svg")
from tslearn.utils import to_time_series_dataset
from tslearn.clustering import TimeSeriesKMeans
km = TimeSeriesKMeans(n_clusters=2, metric="dtw", max_iter=10, tol=1e-5).fit(to_time_series_dataset(piv_data))
from collections import defaultdict
clusters = defaultdict(lambda: [])
for d, c in zip(corr.index, km.labels_):
clusters[c].append(d)
clusters
```
#### NCR
```
# Calculate the number of citations for each disease per year.
def diseses_citations_year(publication_sf):
    """Sum citation counts per (disease, year).

    Stacks the per-paper {year: citation count} dict column into rows,
    then aggregates total citations for each disease in each year.
    NOTE(review): the name has a typo ("diseses") but is kept because
    later cells call it.
    """
    disease_citations = publication_sf.stack("Dict of Year_Citation Number", new_column_name=["cite year", "Citations"], drop_na=True)
    disease_citations = disease_citations.groupby(["disease","cite year"], {"Citations": agg.SUM("Citations")})
    # the stacked year key comes out as a string; make it numeric
    disease_citations["cite year"] = disease_citations["cite year"].astype(int)
    return disease_citations.rename({"cite year": "year"})
disease_citations_viro = diseses_citations_year(diseases_viro_mag)
disease_citations_med = diseses_citations_year(diseases_mag)
# The total number of citations per year, used to normalize the data.
def citaion_year_mag(publication_sf):
    """Total citations per year across all papers in `publication_sf`.

    NOTE(review): the name has a typo ("citaion") but is kept because
    later cells call it.
    """
    med_citations = publication_sf.stack("Dict of Year_Citation Number", new_column_name=["cite year", "Citations"], drop_na=True)
    med_citations = med_citations.rename({"cite year": "year"})
    return med_citations.groupby(["year"], operations={"Total Citations": agg.SUM("Citations")})
citations_year_viro = citaion_year_mag(viro_mag)
citations_year_med = citaion_year_mag(med_mag)
citations_year_med["year"] = citations_year_med["year"].astype(int)
citations_year_med.sort("Total Citations",False)
```
Medicine citations over time
```
citations_year_med.to_dataframe().sort_values("year").plot(x="year", y="Total Citations")
```
Citation data normalization
```
def norm_disease_citations(disease_citations, citations_year):
    """Normalize per-disease citation counts by the year's total citations.

    Joins the per-(disease, year) citation sums with the overall
    citations per year, adds the "Citations Norm" fraction column, and
    attaches the human-readable disease names.
    """
    disease_citations = disease_citations.join(citations_year, on="year")
    disease_citations["Citations Norm"] = disease_citations["Citations"]/disease_citations["Total Citations"]
    # NOTE(review): `disease_names` is the module-level SFrame loaded earlier
    return disease_citations.join(disease_names)
disease_citations_med = norm_disease_citations(disease_citations_med, citations_year_med)
disease_citations_viro = norm_disease_citations(disease_citations_viro, citations_year_viro)
def clean_disease_citations(disease_citations):
    """Rename columns for display, attach disease names, and return a pandas
    DataFrame sorted by (Disease, Year), keeping only rows with a known
    Year before 2019."""
    renamed = disease_citations.rename(
        {"year": "Year", "Citations Norm": "NCR", "disease": "Disease"}
    )
    renamed = renamed.join(disease_names, {"id": "id"})
    frame = renamed.sort(["Disease", "Year"]).to_dataframe()
    frame = frame[frame["Year"].notna()]
    frame = frame[frame["Year"] < 2019]
    return frame.reset_index()
disease_citations_med = clean_disease_citations(disease_citations_med)
disease_citations_viro = clean_disease_citations(disease_citations_viro)
disease_citations_med["Type"] = "Medicine"
disease_citations_viro["Type"] = "Virology"
# DataFrame.append was deprecated and removed in pandas 2.0; concatenate instead.
disease_citations = pd.concat([disease_citations_med, disease_citations_viro])
# Keep only the spotlight diseases, each from its own starting year.
# Collecting the slices in a list and concatenating once avoids the
# quadratic copy cost of growing a DataFrame one append at a time.
selected = [
    disease_citations[
        (disease_citations["Disease"] == d) & (disease_citations["Year"] >= y)
    ]
    for d, y in zip(spothlight, years)
]
cite = pd.concat(selected) if selected else pd.DataFrame()
# Expose NCR under its display name (fixing the "Citaion" typo in one step,
# where the original created the misspelled column and then renamed it).
cite["Normalized Citation Rate"] = cite["NCR"]
sns.set(font_scale=1.3)
# sns.set(style="ticks")
plt.rc('text', usetex=False)
plt.figure(figsize=(8, 6))
# Small-multiple grids of NCR over time for medicine diseases, 20 per figure.
des = list(get_data(cite[(cite["Year"]>=1980)&(cite["Type"]== "Medicine")]))
for i, curr_f in enumerate(tqdm(chunks(des, 20), total=((len(des) // 20)+1))):
    create_gird(pd.concat(curr_f),"Disease","Type","Year", "Normalized Citation Rate", False, legend=False)
    plt.savefig(f"output/Papers/Medicine_NCR_{i}.svg")
# plt.close()
sns.set(font_scale=1.3)
plt.rc('text', usetex=False)
plt.figure(figsize=(8, 6))
# Same small-multiple grids for the virology diseases.
des = list(get_data(cite[(cite["Year"]>=1980)&(cite["Type"]== "Virology")]))
for i, curr_f in enumerate(tqdm(chunks(des, 20), total=((len(des) // 20)+1))):
    create_gird(pd.concat(curr_f),"Disease","Type","Year", "Normalized Citation Rate", False, legend=False)
    plt.savefig(f"output/Papers/Virolgy_NCR_{i}.svg")
# plt.close()
# Scratch checks of the log scaling.
np.log(10)
10 ** np.log(6)
# Plot the log-transformed normalized citation rate per disease.
cite["Normalized Citation Rate"] = np.log(cite["NCR"])
import plotly.express as px
# Fix: the column is spelled "Normalized Citation Rate" — the misspelled
# "Citaion" variant was renamed away earlier, so referencing it here
# raised a KeyError in plotly/pandas.
fig = px.line(cite, x="Year", y="Normalized Citation Rate", color="Disease", width=1600, height=800)
fig.show()
# Virology NCR series (one per disease) restricted to 1980-2018 for DTW.
data = cite[(cite["Year"]>=1980)&(cite["Type"]== "Virology")&(cite["Year"]<2019)][["Disease","Year","NCR"]]
data = data.sort_values(["Disease","Year"])
from tslearn.metrics import dtw
# Pairwise DTW distance between every ordered pair of disease series
# (the full matrix is computed; the symmetric half is redundant but cheap).
res= {"Disease1":[], "Disease2":[], "dtw":[]}
for d1, df1 in data.groupby("Disease"):
    for d2, df2 in data.groupby("Disease"):
        res["Disease1"].append(d1)
        res["Disease2"].append(d2)
        disease1 = df1["NCR"].values
        disease2 = df2["NCR"].values
        res["dtw"].append(dtw(disease1, disease2))
# One NCR value array per disease, in groupby (sorted-key) order.
piv_data = []
for d, df in data.groupby("Disease"):
    piv_data.append(df["NCR"].values)
sns.set( font_scale=2.0)
# Square DTW distance matrix; mask the upper triangle since it mirrors the lower.
corr = pd.DataFrame(res).pivot(index='Disease1', columns='Disease2', values='dtw')
mask = np.zeros_like(corr)
mask[np.triu_indices_from(mask)] = True
with sns.axes_style("white"):
    plt.figure(figsize=(40,20))
    ax = sns.heatmap(corr, mask=mask, vmax=.3, square=True, annot=True, fmt='0.3f', cmap=sns.light_palette("#cc0000" , reverse=True, as_cmap=True))
    plt.savefig("output/Papers/dtw-ncr.svg")
from tslearn.generators import random_walks
from tslearn.clustering import TimeSeriesKMeans
# X = random_walks(n_ts=50, sz=32, d=1)
# Cluster the disease NCR series into two groups using DTW k-means.
km = TimeSeriesKMeans(n_clusters=2, metric="dtw", max_iter=10, tol=1e-5).fit(to_time_series_dataset(piv_data))
from collections import defaultdict
# Map each cluster label to the diseases assigned to it.
clusters = defaultdict(lambda: [])
for d, c in zip(corr.index, km.labels_):
    clusters[c].append(d)
clusters
```
### Data and Code in research
```
from ScienceDynamics.datasets.microsoft_academic_graph import MicrosoftAcademicGraph
from ScienceDynamics.config.configs import DATASETS_BASE_DIR
# Join each disease paper with its linked MAG resources (projects/data/code).
mag = MicrosoftAcademicGraph(DATASETS_BASE_DIR)
resources = diseases_mag.join(mag.paper_resources, on="PaperId")
```
ResourceType. 1 = Project, 2 = Data, 4 = Code
```
# Papers with linked datasets (ResourceType == 2), broken down by disease.
resources[resources["ResourceType"]==2]["disease"].value_counts()
len(resources[resources["ResourceType"]==2]["disease"])
# Papers with linked code (ResourceType == 4).
len(resources[resources["ResourceType"]==4]["disease"])
resources[resources["ResourceType"]==4]["disease"].value_counts()
# Papers with linked projects (ResourceType == 1).
resources[resources["ResourceType"]==1]["disease"].value_counts()
```
## Data Fusion
```
diseases_pubmed = load_sframe("Data/pubmed/diseases_pubmed.sframe")
pubmed = load_sframe("Data/pubmed/pubmed.sframe")
# Yearly paper counts in each full corpus. (The original first computed
# these from the disease subsets and immediately overwrote them with the
# full-corpus versions; the dead assignments are removed here.)
pubmed_papers_year = pubmed.groupby("year",{"PubMed":agg.COUNT()})
mag_papers_year = med_mag.groupby("Year",{"MAG":agg.COUNT()})
df = pubmed_papers_year.join(mag_papers_year,{"year":"Year"}).sort("year")
df = df.rename({"year":"Year"})
# Reshape wide -> long: one row per (Year, Dataset) with the paper total.
df2 = df.pack_columns(column_names=["MAG","PubMed"], dtype=dict, new_column_name='Papers').stack("Papers", new_column_name=['Dataset', 'Total Papers'])
import plotly.express as px
fig = px.line(df2[df2["Year"]<2016].to_dataframe(), x="Year", y="Total Papers",color="Dataset", width=1600, height=800)
fig.update_layout({"legend":{"x":0,"y":1.1}, "legend_orientation":"h"}, font=dict(
    size=20,
))
fig.show()
# fig.write_image("output/Papers/Total Papers.svg")
```
| github_jupyter |
### This is an example of where Scipy default optimizer (L-BFGS-B) does not correctly estimate the inefficiency variance. Even with $\gamma$ set to 0 and small measurement error, it estimates $\eta = 0$.
#### January 22, 2021 (after SFMA meeting)
Install the latest commit of `anml` from GitHub and the `logerf` branch from `SFMA`
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sfma.api import SFMAModel
```
## Make Simulations
```
np.random.seed(10)
# Simulation settings: n observations of a log frontier on x_domain.
n = 100
intercept = 1.5
slope = 5
x_domain = [0, 10]
# Mean of the exponential (one-sided) inefficiency term.
ineff = 0.4
# Negative-binomial (n, p) parameters for the two sample-size regimes,
# giving half the observations large samples and half small ones.
sample_size_1 = [1000, 0.7]
sample_size_2 = [1000, 0.3]
def frontier(x):
    """Log frontier curve at input(s) x, using the module-level intercept/slope."""
    linear_part = intercept + slope * x
    return np.log(linear_part)
def simulate():
    """Simulate n noisy observations lying below the log frontier.

    Draws inputs uniformly on x_domain, subtracts an exponential
    inefficiency term from the frontier, then estimates each observation's
    mean and standard error from a simulated sample whose size comes from
    one of two negative-binomial regimes (module-level settings).

    Returns:
        pd.DataFrame with columns: output (estimated mean), se (standard
        error of the mean), input, ones, frontier, truth, sample_size.
    """
    x = np.random.uniform(low=x_domain[0], high=x_domain[1], size=n)
    # Half the observations get large effective sample sizes and half small,
    # so measurement error varies across observations.
    sample_sizes_1 = np.random.negative_binomial(
        n=sample_size_1[0], p=sample_size_1[1], size=int(n / 2)
    )
    sample_sizes_2 = np.random.negative_binomial(
        n=sample_size_2[0], p=sample_size_2[1], size=int(n / 2)
    )
    sample_sizes = np.append(sample_sizes_1, sample_sizes_2)
    the_frontier = frontier(x)
    # One-sided inefficiency pushes observations below the frontier.
    inefficiency = np.random.exponential(ineff, size=n)
    means = the_frontier - inefficiency
    samples = [np.random.normal(m, scale=4, size=s) for m, s in zip(means, sample_sizes)]
    est_means = np.array([np.mean(s) for s in samples])
    # Standard error of the mean: sample std (ddof=1) / sqrt(sample size).
    # The original wrapped the squared deviations in a redundant double sum
    # (np.sum(sum(...))); this is the same estimator written directly.
    est_sterr = np.array([np.std(s, ddof=1) / np.sqrt(len(s)) for s in samples])
    df = pd.DataFrame({
        'output': est_means,
        'se': est_sterr,
        'input': x,
        'ones': np.ones(len(x)),
        'frontier': the_frontier,
        'truth': means,
        'sample_size': sample_sizes
    })
    return df
sim = simulate()
the_frontier = sim['frontier']
# Plot the true frontier against the simulated noisy observations.
linspace = np.linspace(x_domain[0], x_domain[1])
front = frontier(linspace)
fig, ax = plt.subplots(1, 1, figsize=(8, 4))
ax.plot(linspace, front, linestyle='solid')
ax.scatter(sim.input, sim.output, color='orange')
ax.errorbar(sim.input, sim.output, yerr=sim.se, linestyle='None')
# Basic SFMA fit (default scipy L-BFGS-B solver).
model = SFMAModel(
    df=sim,
    col_output='output',
    col_se='se',
    col_input='input',
    include_gamma=True
)
# Constrained variant: right-linear, concave frontier.
concave = SFMAModel(
    df=sim,
    col_output='output',
    col_se='se',
    col_input='input',
    r_linear=True,
    concave=True,
    include_gamma=True,
)
model.fit(options={'solver_options': {}})
# concave.fit(options={'solver_options': {}})
sim['base_predictions'] = model.predict()
# sim['concave_predictions'] = concave.predict()
sim.sort_values('input', inplace=True)
```
#### The last entry is $\eta$ and you can see that it's 0 for the `model` object but non-zero (correct) for the `concave` object.
```
# Inspect solver state: starting point and optimum for both fits.
# (Per the note above, the last entry of x_opt is the eta estimate.)
model.x_init
model.solver.x_opt
concave.solver.x_opt
# Overlay the data, true frontier, and the fitted basic model.
fig, axes = plt.subplots(1, 1, figsize=(8, 4))
axes.plot(linspace, front, linestyle='dashed', color='black')
axes.scatter(sim.input, sim.output, color='grey', alpha=0.4, label="data")
# axes.scatter(sim.input, sim.output + concave.inefficiencies, color='#008080', alpha=0.4, label="data + inefficiency")
axes.errorbar(sim.input, sim.output, yerr=sim.se, linestyle='None', color='grey', alpha=0.4)
axes.plot(sim.input, sim.base_predictions, color='red', label='basic (L-BFGS-B)')
# axes.plot(sim.input, sim.concave_predictions, color='green', label='concave (trust-constr)')
axes.legend()
plt.savefig("results.png", bbox_inches="tight")
model.solver.result
# Compare analytic vs. autodiff gradients at a random point to check the
# objective implementation (presumably 8 is the parameter count; verify).
p = np.random.uniform(size=8)
p
model.marginal_model.gradient(x=p, data=model.data)
model.marginal_model.gradient_ad(x=p, data=model.data)
model.data.obs.shape[0]
```
| github_jupyter |
# Residual Networks
Welcome to the second assignment of this week! You will learn how to build very deep convolutional networks, using Residual Networks (ResNets). In theory, very deep networks can represent very complex functions; but in practice, they are hard to train. Residual Networks, introduced by [He et al.](https://arxiv.org/pdf/1512.03385.pdf), allow you to train much deeper networks than were previously practically feasible.
**In this assignment, you will:**
- Implement the basic building blocks of ResNets.
- Put together these building blocks to implement and train a state-of-the-art neural network for image classification.
This assignment will be done in Keras.
Before jumping into the problem, let's run the cell below to load the required packages.
```
import numpy as np
from keras import layers
from keras.layers import Input, Add, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D, AveragePooling2D, MaxPooling2D, GlobalMaxPooling2D
from keras.models import Model, load_model
from keras.preprocessing import image
from keras.utils import layer_utils
from keras.utils.data_utils import get_file
from keras.applications.imagenet_utils import preprocess_input
import pydot
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot
from keras.utils import plot_model
from resnets_utils import *
from keras.initializers import glorot_uniform
import scipy.misc
from matplotlib.pyplot import imshow
%matplotlib inline
import keras.backend as K
# Use NHWC ("channels last") tensor layout, matching the layer definitions below.
K.set_image_data_format('channels_last')
# Put the backend in training mode (affects BatchNorm/Dropout behavior).
K.set_learning_phase(1)
```
## 1 - The problem of very deep neural networks
Last week, you built your first convolutional neural network. In recent years, neural networks have become deeper, with state-of-the-art networks going from just a few layers (e.g., AlexNet) to over a hundred layers.
The main benefit of a very deep network is that it can represent very complex functions. It can also learn features at many different levels of abstraction, from edges (at the lower layers) to very complex features (at the deeper layers). However, using a deeper network doesn't always help. A huge barrier to training them is vanishing gradients: very deep networks often have a gradient signal that goes to zero quickly, thus making gradient descent unbearably slow. More specifically, during gradient descent, as you backprop from the final layer back to the first layer, you are multiplying by the weight matrix on each step, and thus the gradient can decrease exponentially quickly to zero (or, in rare cases, grow exponentially quickly and "explode" to take very large values).
During training, you might therefore see the magnitude (or norm) of the gradient for the earlier layers decrease to zero very rapidly as training proceeds:
<img src="images/vanishing_grad_kiank.png" style="width:450px;height:220px;">
<caption><center> <u> <font color='purple'> **Figure 1** </u><font color='purple'> : **Vanishing gradient** <br> The speed of learning decreases very rapidly for the early layers as the network trains </center></caption>
You are now going to solve this problem by building a Residual Network!
## 2 - Building a Residual Network
In ResNets, a "shortcut" or a "skip connection" allows the gradient to be directly backpropagated to earlier layers:
<img src="images/skip_connection_kiank.png" style="width:650px;height:200px;">
<caption><center> <u> <font color='purple'> **Figure 2** </u><font color='purple'> : A ResNet block showing a **skip-connection** <br> </center></caption>
The image on the left shows the "main path" through the network. The image on the right adds a shortcut to the main path. By stacking these ResNet blocks on top of each other, you can form a very deep network.
We also saw in lecture that having ResNet blocks with the shortcut also makes it very easy for one of the blocks to learn an identity function. This means that you can stack on additional ResNet blocks with little risk of harming training set performance. (There is also some evidence that the ease of learning an identity function--even more than skip connections helping with vanishing gradients--accounts for ResNets' remarkable performance.)
Two main types of blocks are used in a ResNet, depending mainly on whether the input/output dimensions are same or different. You are going to implement both of them.
### 2.1 - The identity block
The identity block is the standard block used in ResNets, and corresponds to the case where the input activation (say $a^{[l]}$) has the same dimension as the output activation (say $a^{[l+2]}$). To flesh out the different steps of what happens in a ResNet's identity block, here is an alternative diagram showing the individual steps:
<img src="images/idblock2_kiank.png" style="width:650px;height:150px;">
<caption><center> <u> <font color='purple'> **Figure 3** </u><font color='purple'> : **Identity block.** Skip connection "skips over" 2 layers. </center></caption>
The upper path is the "shortcut path." The lower path is the "main path." In this diagram, we have also made explicit the CONV2D and ReLU steps in each layer. To speed up training we have also added a BatchNorm step. Don't worry about this being complicated to implement--you'll see that BatchNorm is just one line of code in Keras!
In this exercise, you'll actually implement a slightly more powerful version of this identity block, in which the skip connection "skips over" 3 hidden layers rather than 2 layers. It looks like this:
<img src="images/idblock3_kiank.png" style="width:650px;height:150px;">
<caption><center> <u> <font color='purple'> **Figure 4** </u><font color='purple'> : **Identity block.** Skip connection "skips over" 3 layers.</center></caption>
Here're the individual steps.
First component of main path:
- The first CONV2D has $F_1$ filters of shape (1,1) and a stride of (1,1). Its padding is "valid" and its name should be `conv_name_base + '2a'`. Use 0 as the seed for the random initialization.
- The first BatchNorm is normalizing the channels axis. Its name should be `bn_name_base + '2a'`.
- Then apply the ReLU activation function. This has no name and no hyperparameters.
Second component of main path:
- The second CONV2D has $F_2$ filters of shape $(f,f)$ and a stride of (1,1). Its padding is "same" and its name should be `conv_name_base + '2b'`. Use 0 as the seed for the random initialization.
- The second BatchNorm is normalizing the channels axis. Its name should be `bn_name_base + '2b'`.
- Then apply the ReLU activation function. This has no name and no hyperparameters.
Third component of main path:
- The third CONV2D has $F_3$ filters of shape (1,1) and a stride of (1,1). Its padding is "valid" and its name should be `conv_name_base + '2c'`. Use 0 as the seed for the random initialization.
- The third BatchNorm is normalizing the channels axis. Its name should be `bn_name_base + '2c'`. Note that there is no ReLU activation function in this component.
Final step:
- The shortcut and the input are added together.
- Then apply the ReLU activation function. This has no name and no hyperparameters.
**Exercise**: Implement the ResNet identity block. We have implemented the first component of the main path. Please read over this carefully to make sure you understand what it is doing. You should implement the rest.
- To implement the Conv2D step: [See reference](https://keras.io/layers/convolutional/#conv2d)
- To implement BatchNorm: [See reference](https://faroit.github.io/keras-docs/1.2.2/layers/normalization/) (axis: Integer, the axis that should be normalized (typically the channels axis))
- For the activation, use: `Activation('relu')(X)`
- To add the value passed forward by the shortcut: [See reference](https://keras.io/layers/merge/#add)
```
# GRADED FUNCTION: identity_block
def identity_block(X, f, filters, stage, block):
    """ResNet identity block (Figure 3): three Conv/BatchNorm components on
    the main path, with the unchanged input added back in before the final
    ReLU. Input and output activations must have the same shape.

    Arguments:
    X -- input tensor of shape (m, n_H_prev, n_W_prev, n_C_prev)
    f -- integer, window size of the middle CONV on the main path
    filters -- list of three integers, the filter counts of the CONV layers
    stage -- integer, position of the block in the network (used in layer names)
    block -- string/character, sub-position within the stage (used in layer names)

    Returns:
    X -- output tensor of the identity block, shape (n_H, n_W, n_C)
    """
    # Layer-name prefixes follow the grader's convention.
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    # Keep the input around so it can be re-added after the main path.
    X_shortcut = X

    # (filter count, kernel size, padding, name suffix, apply ReLU?) for the
    # three main-path components; the last one has no activation because the
    # ReLU comes after the shortcut addition.
    components = [
        (filters[0], (1, 1), 'valid', '2a', True),
        (filters[1], (f, f), 'same',  '2b', True),
        (filters[2], (1, 1), 'valid', '2c', False),
    ]
    for n_filters, kernel, pad, suffix, relu in components:
        X = Conv2D(filters=n_filters, kernel_size=kernel, strides=(1, 1),
                   padding=pad, name=conv_name_base + suffix,
                   kernel_initializer=glorot_uniform(seed=0))(X)
        X = BatchNormalization(axis=3, name=bn_name_base + suffix)(X)
        if relu:
            X = Activation('relu')(X)

    # Merge the skip connection back in, then apply the final ReLU.
    X = Add()([X, X_shortcut])
    X = Activation('relu')(X)

    return X
# Smoke test: run the identity block on random input in a fresh TF1 graph.
# NOTE(review): `tf` is not imported in this notebook's import cell —
# presumably it arrives via `from resnets_utils import *`; confirm.
tf.reset_default_graph()
with tf.Session() as test:
    np.random.seed(1)
    A_prev = tf.placeholder("float", [3, 4, 4, 6])
    X = np.random.randn(3, 4, 4, 6)
    A = identity_block(A_prev, f = 2, filters = [2, 4, 6], stage = 1, block = 'a')
    test.run(tf.global_variables_initializer())
    # Evaluate at learning phase 0 (inference) so BatchNorm uses moving stats.
    out = test.run([A], feed_dict={A_prev: X, K.learning_phase(): 0})
    print("out = " + str(out[0][1][1][0]))
```
**Expected Output**:
<table>
<tr>
<td>
**out**
</td>
<td>
[ 0.94822985 0. 1.16101444 2.747859 0. 1.36677003]
</td>
</tr>
</table>
## 2.2 - The convolutional block
You've implemented the ResNet identity block. Next, the ResNet "convolutional block" is the other type of block. You can use this type of block when the input and output dimensions don't match up. The difference with the identity block is that there is a CONV2D layer in the shortcut path:
<img src="images/convblock_kiank.png" style="width:650px;height:150px;">
<caption><center> <u> <font color='purple'> **Figure 4** </u><font color='purple'> : **Convolutional block** </center></caption>
The CONV2D layer in the shortcut path is used to resize the input $x$ to a different dimension, so that the dimensions match up in the final addition needed to add the shortcut value back to the main path. (This plays a similar role as the matrix $W_s$ discussed in lecture.) For example, to reduce the activation dimensions's height and width by a factor of 2, you can use a 1x1 convolution with a stride of 2. The CONV2D layer on the shortcut path does not use any non-linear activation function. Its main role is to just apply a (learned) linear function that reduces the dimension of the input, so that the dimensions match up for the later addition step.
The details of the convolutional block are as follows.
First component of main path:
- The first CONV2D has $F_1$ filters of shape (1,1) and a stride of (s,s). Its padding is "valid" and its name should be `conv_name_base + '2a'`.
- The first BatchNorm is normalizing the channels axis. Its name should be `bn_name_base + '2a'`.
- Then apply the ReLU activation function. This has no name and no hyperparameters.
Second component of main path:
- The second CONV2D has $F_2$ filters of (f,f) and a stride of (1,1). Its padding is "same" and its name should be `conv_name_base + '2b'`.
- The second BatchNorm is normalizing the channels axis. Its name should be `bn_name_base + '2b'`.
- Then apply the ReLU activation function. This has no name and no hyperparameters.
Third component of main path:
- The third CONV2D has $F_3$ filters of (1,1) and a stride of (1,1). Its padding is "valid" and it's name should be `conv_name_base + '2c'`.
- The third BatchNorm is normalizing the channels axis. Its name should be `bn_name_base + '2c'`. Note that there is no ReLU activation function in this component.
Shortcut path:
- The CONV2D has $F_3$ filters of shape (1,1) and a stride of (s,s). Its padding is "valid" and its name should be `conv_name_base + '1'`.
- The BatchNorm is normalizing the channels axis. Its name should be `bn_name_base + '1'`.
Final step:
- The shortcut and the main path values are added together.
- Then apply the ReLU activation function. This has no name and no hyperparameters.
**Exercise**: Implement the convolutional block. We have implemented the first component of the main path; you should implement the rest. As before, always use 0 as the seed for the random initialization, to ensure consistency with our grader.
- [Conv Hint](https://keras.io/layers/convolutional/#conv2d)
- [BatchNorm Hint](https://keras.io/layers/normalization/#batchnormalization) (axis: Integer, the axis that should be normalized (typically the features axis))
- For the activation, use: `Activation('relu')(X)`
- [Addition Hint](https://keras.io/layers/merge/#add)
```
# GRADED FUNCTION: convolutional_block
def convolutional_block(X, f, filters, stage, block, s = 2):
    """ResNet convolutional block (Figure 4): like the identity block, but a
    strided 1x1 Conv/BatchNorm on the shortcut path resizes the input so it
    can be added to the (possibly reshaped) main-path output.

    Arguments:
    X -- input tensor of shape (m, n_H_prev, n_W_prev, n_C_prev)
    f -- integer, window size of the middle CONV on the main path
    filters -- list of three integers, the filter counts of the CONV layers
    stage -- integer, position of the block in the network (used in layer names)
    block -- string/character, sub-position within the stage (used in layer names)
    s -- Integer, the stride applied by the first main-path CONV and the shortcut

    Returns:
    X -- output tensor of the convolutional block, shape (n_H, n_W, n_C)
    """
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    # Keep the input for the (resized) shortcut path.
    X_shortcut = X

    # (filters, kernel, strides, padding, name suffix, ReLU?) for the main
    # path; only the first component is strided, and the last has no
    # activation because the ReLU comes after the shortcut addition.
    main_path = [
        (filters[0], (1, 1), (s, s), 'valid', '2a', True),
        (filters[1], (f, f), (1, 1), 'same',  '2b', True),
        (filters[2], (1, 1), (1, 1), 'valid', '2c', False),
    ]
    for n_filters, kernel, strides, pad, suffix, relu in main_path:
        X = Conv2D(n_filters, kernel, strides=strides, padding=pad,
                   name=conv_name_base + suffix,
                   kernel_initializer=glorot_uniform(seed=0))(X)
        X = BatchNormalization(axis=3, name=bn_name_base + suffix)(X)
        if relu:
            X = Activation('relu')(X)

    # Shortcut path: strided 1x1 conv + BatchNorm match the main-path shape.
    X_shortcut = Conv2D(filters[2], (1, 1), strides=(s, s), padding='valid',
                        name=conv_name_base + '1',
                        kernel_initializer=glorot_uniform(seed=0))(X_shortcut)
    X_shortcut = BatchNormalization(axis=3, name=bn_name_base + '1')(X_shortcut)

    # Add the shortcut to the main path, then apply the final ReLU.
    X = Add()([X, X_shortcut])
    X = Activation('relu')(X)

    return X
# Smoke test: run the convolutional block on random input in a fresh TF1 graph.
tf.reset_default_graph()
with tf.Session() as test:
    np.random.seed(1)
    A_prev = tf.placeholder("float", [3, 4, 4, 6])
    X = np.random.randn(3, 4, 4, 6)
    A = convolutional_block(A_prev, f = 2, filters = [2, 4, 6], stage = 1, block = 'a')
    test.run(tf.global_variables_initializer())
    # Evaluate at learning phase 0 (inference) so BatchNorm uses moving stats.
    out = test.run([A], feed_dict={A_prev: X, K.learning_phase(): 0})
    print("out = " + str(out[0][1][1][0]))
```
**Expected Output**:
<table>
<tr>
<td>
**out**
</td>
<td>
[ 0.09018463 1.23489773 0.46822017 0.0367176 0. 0.65516603]
</td>
</tr>
</table>
## 3 - Building your first ResNet model (50 layers)
You now have the necessary blocks to build a very deep ResNet. The following figure describes in detail the architecture of this neural network. "ID BLOCK" in the diagram stands for "Identity block," and "ID BLOCK x3" means you should stack 3 identity blocks together.
<img src="images/resnet_kiank.png" style="width:850px;height:150px;">
<caption><center> <u> <font color='purple'> **Figure 5** </u><font color='purple'> : **ResNet-50 model** </center></caption>
The details of this ResNet-50 model are:
- Zero-padding pads the input with a pad of (3,3)
- Stage 1:
- The 2D Convolution has 64 filters of shape (7,7) and uses a stride of (2,2). Its name is "conv1".
- BatchNorm is applied to the channels axis of the input.
- MaxPooling uses a (3,3) window and a (2,2) stride.
- Stage 2:
- The convolutional block uses three set of filters of size [64,64,256], "f" is 3, "s" is 1 and the block is "a".
- The 2 identity blocks use three set of filters of size [64,64,256], "f" is 3 and the blocks are "b" and "c".
- Stage 3:
- The convolutional block uses three set of filters of size [128,128,512], "f" is 3, "s" is 2 and the block is "a".
- The 3 identity blocks use three set of filters of size [128,128,512], "f" is 3 and the blocks are "b", "c" and "d".
- Stage 4:
- The convolutional block uses three set of filters of size [256, 256, 1024], "f" is 3, "s" is 2 and the block is "a".
- The 5 identity blocks use three set of filters of size [256, 256, 1024], "f" is 3 and the blocks are "b", "c", "d", "e" and "f".
- Stage 5:
- The convolutional block uses three set of filters of size [512, 512, 2048], "f" is 3, "s" is 2 and the block is "a".
- The 2 identity blocks use three set of filters of size [512, 512, 2048], "f" is 3 and the blocks are "b" and "c".
- The 2D Average Pooling uses a window of shape (2,2) and its name is "avg_pool".
- The flatten doesn't have any hyperparameters or name.
- The Fully Connected (Dense) layer reduces its input to the number of classes using a softmax activation. Its name should be `'fc' + str(classes)`.
**Exercise**: Implement the ResNet with 50 layers described in the figure above. We have implemented Stages 1 and 2. Please implement the rest. (The syntax for implementing Stages 3-5 should be quite similar to that of Stage 2.) Make sure you follow the naming convention in the text above.
You'll need to use this function:
- Average pooling [see reference](https://keras.io/layers/pooling/#averagepooling2d)
Here're some other functions we used in the code below:
- Conv2D: [See reference](https://keras.io/layers/convolutional/#conv2d)
- BatchNorm: [See reference](https://keras.io/layers/normalization/#batchnormalization) (axis: Integer, the axis that should be normalized (typically the features axis))
- Zero padding: [See reference](https://keras.io/layers/convolutional/#zeropadding2d)
- Max pooling: [See reference](https://keras.io/layers/pooling/#maxpooling2d)
- Fully connected layer: [See reference](https://keras.io/layers/core/#dense)
- Addition: [See reference](https://keras.io/layers/merge/#add)
```
# GRADED FUNCTION: ResNet50
def ResNet50(input_shape = (64, 64, 3), classes = 6):
    """Build the ResNet-50 architecture:
    CONV2D -> BATCHNORM -> RELU -> MAXPOOL -> CONVBLOCK -> IDBLOCK*2 -> CONVBLOCK -> IDBLOCK*3
    -> CONVBLOCK -> IDBLOCK*5 -> CONVBLOCK -> IDBLOCK*2 -> AVGPOOL -> TOPLAYER

    Arguments:
    input_shape -- shape of the images of the dataset
    classes -- integer, number of classes

    Returns:
    model -- a Model() instance in Keras
    """
    # Input tensor, zero-padded by (3, 3) before the first convolution.
    X_input = Input(input_shape)
    X = ZeroPadding2D((3, 3))(X_input)

    # Stage 1: strided 7x7 conv, BatchNorm, ReLU, 3x3 max pool.
    X = Conv2D(64, (7, 7), strides=(2, 2), name='conv1',
               kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3, name='bn_conv1')(X)
    X = Activation('relu')(X)
    X = MaxPooling2D((3, 3), strides=(2, 2))(X)

    # Stages 2-5: each is one convolutional block ('a') followed by a run of
    # identity blocks ('b', 'c', ...). (stage, filters, stride, #id blocks)
    stages = [
        (2, [64, 64, 256],    1, 2),
        (3, [128, 128, 512],  2, 3),
        (4, [256, 256, 1024], 2, 5),
        (5, [512, 512, 2048], 2, 2),
    ]
    for stage, stage_filters, stride, n_identity in stages:
        X = convolutional_block(X, f=3, filters=stage_filters,
                                stage=stage, block='a', s=stride)
        for blk in 'bcdef'[:n_identity]:
            X = identity_block(X, 3, stage_filters, stage=stage, block=blk)

    # Head: average pool, flatten, softmax classifier named 'fc<classes>'.
    X = AveragePooling2D((2, 2), name='avg_pool')(X)
    X = Flatten()(X)
    X = Dense(classes, activation='softmax', name='fc' + str(classes),
              kernel_initializer=glorot_uniform(seed=0))(X)

    model = Model(inputs=X_input, outputs=X, name='ResNet50')

    return model
```
Run the following code to build the model's graph. If your implementation is not correct you will know it by checking your accuracy when running `model.fit(...)` below.
```
# Build the ResNet-50 graph for 64x64 RGB inputs and 6 output classes.
model = ResNet50(input_shape = (64, 64, 3), classes = 6)
```
As seen in the Keras Tutorial Notebook, prior to training a model, you need to configure the learning process by compiling the model.
```
# Configure training: Adam optimizer, cross-entropy loss, accuracy metric.
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
```
The model is now ready to be trained. The only thing you need is a dataset.
Let's load the SIGNS Dataset.
<img src="images/signs_data_kiank.png" style="width:450px;height:250px;">
<caption><center> <u> <font color='purple'> **Figure 6** </u><font color='purple'> : **SIGNS dataset** </center></caption>
```
# Load the SIGNS dataset (hand-sign digit images, 6 classes).
X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = load_dataset()
# Normalize image vectors to [0, 1]
X_train = X_train_orig/255.
X_test = X_test_orig/255.
# Convert training and test labels to one hot matrices
Y_train = convert_to_one_hot(Y_train_orig, 6).T
Y_test = convert_to_one_hot(Y_test_orig, 6).T
print ("number of training examples = " + str(X_train.shape[0]))
print ("number of test examples = " + str(X_test.shape[0]))
print ("X_train shape: " + str(X_train.shape))
print ("Y_train shape: " + str(Y_train.shape))
print ("X_test shape: " + str(X_test.shape))
print ("Y_test shape: " + str(Y_test.shape))
```
Run the following cell to train your model on 2 epochs with a batch size of 32. On a CPU it should take you around 5min per epoch.
```
# Train briefly (2 epochs) just to sanity-check the graph.
model.fit(X_train, Y_train, epochs = 2, batch_size = 32)
```
**Expected Output**:
<table>
<tr>
<td>
** Epoch 1/2**
</td>
<td>
loss: between 1 and 5, acc: between 0.2 and 0.5, although your results can be different from ours.
</td>
</tr>
<tr>
<td>
** Epoch 2/2**
</td>
<td>
loss: between 1 and 5, acc: between 0.2 and 0.5, you should see your loss decreasing and the accuracy increasing.
</td>
</tr>
</table>
Let's see how this model (trained on only two epochs) performs on the test set.
```
preds = model.evaluate(X_test, Y_test)
print ("Loss = " + str(preds[0]))
print ("Test Accuracy = " + str(preds[1]))
```
**Expected Output**:
<table>
<tr>
<td>
**Test Accuracy**
</td>
<td>
between 0.16 and 0.25
</td>
</tr>
</table>
For the purpose of this assignment, we've asked you to train the model only for two epochs. You can see that it achieves poor performances. Please go ahead and submit your assignment; to check correctness, the online grader will run your code only for a small number of epochs as well.
After you have finished this official (graded) part of this assignment, you can also optionally train the ResNet for more iterations, if you want. We get a lot better performance when we train for ~20 epochs, but this will take more than an hour when training on a CPU.
Using a GPU, we've trained our own ResNet50 model's weights on the SIGNS dataset. You can load and run our trained model on the test set in the cells below. It may take about 1 minute to load the model.
```
model = load_model('ResNet50.h5')
preds = model.evaluate(X_test, Y_test)
print ("Loss = " + str(preds[0]))
print ("Test Accuracy = " + str(preds[1]))
```
ResNet50 is a powerful model for image classification when it is trained for an adequate number of iterations. We hope you can use what you've learnt and apply it to your own classification problem to perform state-of-the-art accuracy.
Congratulations on finishing this assignment! You've now implemented a state-of-the-art image classification system!
## 4 - Test on your own image (Optional/Ungraded)
If you wish, you can also take a picture of your own hand and see the output of the model. To do this:
1. Click on "File" in the upper bar of this notebook, then click "Open" to go on your Coursera Hub.
2. Add your image to this Jupyter Notebook's directory, in the "images" folder
3. Write your image's name in the following code
4. Run the code and check if the algorithm is right!
```
img_path = 'images/my_image.jpg'
# Load the image resized to the 64x64 input the model expects.
img = image.load_img(img_path, target_size=(64, 64))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)  # add the batch dimension: (1, 64, 64, 3)
x = preprocess_input(x)
print('Input image shape:', x.shape)
# NOTE(review): scipy.misc.imread was removed in SciPy 1.2; on a modern SciPy
# use imageio.imread or matplotlib.image.imread instead.
my_image = scipy.misc.imread(img_path)
imshow(my_image)
print("class prediction vector [p(0), p(1), p(2), p(3), p(4), p(5)] = ")
print(model.predict(x))
```
You can also print a summary of your model by running the following code.
```
model.summary()
```
Finally, run the code below to visualize your ResNet50. You can also download a .png picture of your model by going to "File -> Open...-> model.png".
```
plot_model(model, to_file='model.png')
SVG(model_to_dot(model).create(prog='dot', format='svg'))
```
<font color='blue'>
**What you should remember:**
- Very deep "plain" networks don't work in practice because they are hard to train due to vanishing gradients.
- The skip-connections help to address the Vanishing Gradient problem. They also make it easy for a ResNet block to learn an identity function.
- There are two main type of blocks: The identity block and the convolutional block.
- Very deep Residual Networks are built by stacking these blocks together.
### References
This notebook presents the ResNet algorithm due to He et al. (2015). The implementation here also took significant inspiration and follows the structure given in the github repository of Francois Chollet:
- Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun - [Deep Residual Learning for Image Recognition (2015)](https://arxiv.org/abs/1512.03385)
- Francois Chollet's github repository: https://github.com/fchollet/deep-learning-models/blob/master/resnet50.py
| github_jupyter |
```
# Install the RDF, mapping, and coordinate-projection libraries used below.
!pip install rdflib
!pip install folium
!pip install pyproj
storage = "https://raw.githubusercontent.com/opencitydata/guia-rdf-datosgob/main/rdf-explotacion/terrazas-madrid.nt"  # point this at your data on GitHub (raw file link)
from rdflib import Graph, Namespace, Literal
from rdflib.plugins.sparql import prepareQuery
import folium
from pyproj import Transformer
# Load the N-Triples dump of Madrid terrace data into an in-memory graph.
g = Graph()
g.parse(storage, format="ntriples")  # this may take a while if the file is large
```
# Cambio de serializaciรณn del grafo creado
```
# Write the graph as RDF/XML to the file terrazas-madrid.xml
g.serialize(destination='terrazas-madrid.xml',format="xml")
# Write the graph as Turtle to the file terrazas-madrid.ttl
g.serialize(destination='terrazas-madrid.ttl',format="ttl")
```
# Consulta 1: Listado de terrazas y sus horarios de lunes a viernes anualmente
```
from rdflib import XSD
# Namespace of the open-cities commerce vocabulary (Terraza, etc.).
ESCOM = Namespace("http://vocab.ciudadesabiertas.es/def/comercio/tejido-comercial/")
# Query 1: terraces and their opening hours, keeping only the annual
# Monday-Thursday schedules (case-insensitive regex on the literal).
q1 = prepareQuery('''
SELECT
?terraza ?horario
WHERE {
?terraza rdf:type escom:Terraza;
<http://schema.org/openingHours> ?horario .
FILTER(regex(?horario, "Anual Lun-Juev.*", "i" ))
}
''',
initNs = { "escom": ESCOM}
)
for r in g.query(q1):
    print(r.terraza, r.horario)
```
# Consulta 2: Listado de terrazas que tengan mรกs de 15 mesas autorizadas
```
from rdflib import XSD
# Namespace of the open-cities commerce vocabulary.
ESCOM = Namespace("http://vocab.ciudadesabiertas.es/def/comercio/tejido-comercial/")
# Query 2: terraces with more than 15 authorised tables
# (typed xsd:integer comparison in the FILTER).
q1 = prepareQuery('''
SELECT
?terraza ?mesas
WHERE {
?terraza rdf:type escom:Terraza;
escom:numeroMesasAutorizadas ?mesas .
FILTER(?mesas > "15"^^<http://www.w3.org/2001/XMLSchema#integer>)
}
''',
initNs = { "escom": ESCOM}
)
for r in g.query(q1):
    print(r.terraza, r.mesas)
```
# Consulta 3: Listado de terrazas con actividad en el periodo anual
```
from rdflib import XSD
# Namespace of the open-cities commerce vocabulary.
ESCOM = Namespace("http://vocab.ciudadesabiertas.es/def/comercio/tejido-comercial/")
# Query 3: terraces whose operating period is the "anual" SKOS concept.
q1 = prepareQuery('''
SELECT
?terraza
WHERE {
?terraza rdf:type escom:Terraza;
escom:periodoFuncionamiento <http://vocab.linkeddata.es/datosabiertos/kos/comercio/periodo-funcionamiento/anual> .
}
''',
initNs = { "escom": ESCOM}
)
for r in g.query(q1):
    print(r.terraza)
```
# Ejemplo real: Dibujando puntos geogrรกficos a partir de RDF
```
# Namespace of the open-cities commerce vocabulary.
ESCOM = Namespace("http://vocab.ciudadesabiertas.es/def/comercio/tejido-comercial/")
# Query: each terrace, its annual Mon-Thu opening hours, and the x/y
# coordinates of the point geometry of its associated LocalComercial.
q1 = prepareQuery('''
SELECT
?horario ?lat ?lon
WHERE {
?terraza rdf:type escom:Terraza .
?terraza <http://schema.org/openingHours> ?horario .
?terraza escom:perteneceA ?local .
?local rdf:type escom:LocalComercial .
?local <http://www.opengis.net/ont/geosparql#hasGeometry> ?point .
?point rdf:type <http://www.opengis.net/ont/sf#Point> .
?point <https://datos.ign.es/def/geo_core#xETRS89> ?lat .
?point <https://datos.ign.es/def/geo_core#yETRS89> ?lon .
FILTER(regex(?horario, "Anual Lun-Juev.*", "i" ))
} LIMIT 10
''',
initNs = { "escom": ESCOM}
)
# Inspect the rows the query returns.
# NOTE(review): ?lat/?lon are bound to xETRS89/yETRS89, i.e. UTM
# easting/northing in EPSG:25830, not true lat/lon -- the transform
# below converts them to WGS 84; verify the axis order is correct.
for r in g.query(q1):
    print(r.lat, r.lon, r.horario)
# Convert coordinates from UTM ETRS89 zone 30N (EPSG:25830) to WGS 84 (EPSG:4326).
transformer = Transformer.from_crs('epsg:25830','epsg:4326')
# Base map centred on Madrid.
mapa = folium.Map(location=[40.4167, -3.70325])
for r in g.query(q1):
    x,y = transformer.transform(float(r.lat),float(r.lon))
    horario = (r.horario).replace("Anual Lun-Juev ","")
    folium.Marker([x,y], popup=horario, tooltip=horario).add_to(mapa)
mapa
```
| github_jupyter |
<h1>Table of Contents<span class="tocSkip"></span></h1>
<div class="toc"><ul class="toc-item"><li><span><a href="#1.-Addition-and-subtraction" data-toc-modified-id="1.-Addition-and-subtraction-1">1. Addition and subtraction</a></span></li><li><span><a href="#2.-Multiplication-and-division-(element-by-element)" data-toc-modified-id="2.-Multiplication-and-division-(element-by-element)-2">2. Multiplication and division (element-by-element)</a></span></li><li><span><a href="#3.-Square-roots-and-other-powers" data-toc-modified-id="3.-Square-roots-and-other-powers-3">3. Square roots and other powers</a></span></li><li><span><a href="#4.-Trigonometric-and-other-functions" data-toc-modified-id="4.-Trigonometric-and-other-functions-4">4. Trigonometric and other functions</a></span></li><li><span><a href="#5.-Enrichment" data-toc-modified-id="5.-Enrichment-5">5. Enrichment</a></span></li></ul></div>
>All content is released under Creative Commons Attribution [CC-BY 4.0](https://creativecommons.org/licenses/by/4.0/) and all source code is released under a [BSD-3 clause license](https://en.wikipedia.org/wiki/BSD_licenses).
>
>Please reuse, remix, revise, and reshare this content in any way, keeping this notice. [Report issues please](https://github.com/kgdunn/digital-skills-module5/issues).
>
><img style="float: right;" width="150px" src="images/jupyter-logo.png">**Are you viewing this on jupyter.org?** Then this notebook will be read-only. <br>
>See how you can interactively run the code in this notebook by visiting our [instruction page about Notebooks](https://yint.org/notebooks).
# Simple elementwise functions and operations on a NumPy array
Once we have created an array - [see the prior notebooks](./) - we are then ready to actually use them for calculations!
Let us consider these calculations:
1. Addition and subtraction
2. Multiplication and division (element-by-element)
3. Square roots and other powers
4. Trigonometric and other functions
## 1. Addition and subtraction
NumPy can add to, or subtract from two arrays with the same shape. We will use array ``A`` and ``B`` in these examples.
```
import numpy as np
# Two 5x5 arrays of ones, used to demonstrate elementwise addition.
A = np.ones(shape=(5,5))
B = np.ones(shape=(5,5))
print('A = \n{}\n\nB = \n{}'.format(A, B))
print(A + B)  # elementwise sum: a 5x5 array of 2's
```
The ``+`` operation on two arrays is actually just a convenience. The actual function in NumPy which is being called to do the work is the ``np.add(...)`` function.
Try this to verify:
```
print(np.add(A, B))
```
Similarly, we have the `-` and `.subtract()` functions that serve the same purpose:
```
print(A - B)
print(np.subtract(A, B)) # does the same thing as the prior line of code
print(np.add(A, -B)) # and this produces the same result
```
These are element-by-element operations. That means, NumPy performed the operation of addition on each corresponding element in the arrays `A` and `B` and then repeats that entry-by-entry. This is also called elementwise in NumPy's documentation.
NumPy will also allow you to take shortcuts. Imagine that you want to subtract the value of 3 from every entry in matrix `A`. You do not first need to create a matrix with the same shape as ``A`` containing the value of 3, and then subtract that.
**There is a shortcut:**
```
print(A - 3) # still does element-by-element calculations
```
## 2. Multiplication and division (element-by-element)
Multiplication and division can also be done element-by-element.
```
import numpy as np
C = np.reshape(np.linspace(1, 25, 25), (5, 5))
print(C)
```
Now go multiply every value in matrix `C` by 2.0 as follows:
```
doubled = C * 2
print(doubled)
print(np.multiply(C, 2)) # does exactly the same as the prior code
# Also try this:
print(C * 0.0)
```
What happens if you multiply matrix ``C`` by itself, i.e. if you want to calculate $C^2$?
```
print(C * C)
```
The multiply operator `*` is shorthand for ``numpy.multiply()`` and works on an element-by-element basis. Similarly the `/` operator is shorthand for `numpy.divide()`
```
print(C / C)
print(np.divide(C, C)) # both give you what you expect - a matrix of 1's
# Advanced: add some code to see what happens if you divide by zero: C/0.0
```
## 3. Square roots and other powers
There are other elementwise operations that can be done on matrices. These often involve raising the individual matrix elements to a certain power, or taking a square root (which is the same as raising a number to the power of $0.5$), or calculating the logarithm.
Let's try it out interactively.
### To try:
> 1. Use the ``**`` operation to raise to a power
> 2. Use the ``.square()`` function
> 3. Use the ``.power()`` function
> 4. Use the ``.sqrt()`` function
> 5. Verify that `**(0.5)` gives the same values as the `.sqrt()` function
```
# Step 1:
import numpy as np
D = np.reshape(np.linspace(-1, 1, 15), (3, 5)) # create a 3x5 matrix with positive and negative values
print(D**2)
print('-------')
# Step 2
print(np.square(D)) # you should see the same as above
print('-------')
# Step 3
D_squared = np.power(D, 2)
print(D_squared)
print('-------')
# Step 4: remember there are some negative values in D
# The square root is undefined for negative values (exception for complex values)
print(np.sqrt(D))
print('-------')
# Step 5: raising something to the power of 0.5 is the same as square rooting
print(np.power(D, 0.5))
```
## 4. Trigonometric and other functions
A wide variety of mathematical functions are possible. See the full list in the [NumPy documentation](https://docs.scipy.org/doc/numpy/reference/routines.math.html).
You will self-discover these function by running the code below.
### Some questions to try answering below:
>1. The standard trigonometric functions: ``np.sin(...)``, ``np.tan(...)``, etc
>2. Rounding off to the closest integer. Do negative values round up towards zero, or away from zero?
>3. Rounding off to a certain number of ``decimals``; try rounding to 1 decimal place. Are the results what you expect?
>4. Similar to rounding: try the ``np.floor(...)`` and ``np.ceil(...)``: what is the difference between the floor and the ceiling? Hint: read the documentation for [`floor`](https://docs.scipy.org/doc/numpy/reference/generated/numpy.floor.html) and [`ceil`](https://docs.scipy.org/doc/numpy/reference/generated/numpy.ceil.html).
>5. Logarithms and exponents are also part of the standard calculations we expect to do with matrices using the ``np.log(...)`` and ``np.exp(...)`` functions. Recall that $\log(\exp(x)) = x$.
```
import numpy as np
radians = np.reshape(np.linspace(-2, +2, 16), (4, 4)) # create a 4x4 matrix with positive and negative values
print(radians)
print('-----')
# Step 1
print(np.sin(radians))
print('-----')
print(np.tan(radians))
# Step 2
print(np.around(radians)) # rounds to the closest integer. Check what happens with negatives!
# Step 3
print(np.around(radians, decimals=1)) # rounds to the closest 0.1
# Advanced: try this code: np.around(radians*100, decimals=-2)
# What does it mean to round to a negative number of decimals?
# Step 4
print(np.floor(radians)) # compare this output to the original matrix
print(np.ceil(radians))
# Step 5
exponent = np.exp(radians)
print(exponent)
print('-----')
recovered = np.log(exponent)
print(recovered)
print('-----')
# Does "recovered" match the original "radians" matrix?
# It should: we first took the exponent, then the logarithm.
# This subtraction should be a matrix of all zeros:
print(recovered - radians)
```
The last matrix in your printout above should be all zeros, but is not exactly equal to zero (it is very, very close to zero though).
To test that we can use the ``np.isclose(...)`` function. It is another elementwise function that you can add to your toolbox. It tests if the entries in an array are close to another:
```
np.isclose(recovered - radians, 0)
# There is a function to check if the entries are all `True`
np.allclose(recovered - radians, 0)
```
## 5. Enrichment
### Try these elementwise operations on arrays yourself
>1. Calculating the absolute values: `np.fabs(...)` and `np.absolute(...)`
>2. Comparing two arrays and return the minimum `np.fmin(...)` and maximum `np.fmax(...)`
>3. The reciprocal value of $x$ is equal to $1/x$. You can calculate it using `np.reciprocal(...)`
>4. The sign of the values in the array: `np.sign(...)`
| github_jupyter |
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib import rcParams
rcParams['figure.figsize'] = 11.7,8.27 # figure size in inches
pd.options.mode.chained_assignment = None # default='warn'; silenced because cells below assign into frame slices
pd.set_option('display.max_colwidth', None)
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 30)
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"  # echo every expression in a cell, not just the last
%config Completer.use_jedi = False
from sklearn.impute import KNNImputer
from sklearn.preprocessing import LabelEncoder
```
# Note
* Aggregate data by every 180 days
```
# Load each longitudinal measurement table and rename its generic 'value'
# column after the measurement it holds, so the tables can be merged later.
df_creatinine = pd.read_csv('CSV/T_creatinine.csv'); df_creatinine.rename(columns = {'value': 'creatinine'}, inplace=True)
df_dbp = pd.read_csv('CSV/T_DBP.csv'); df_dbp.rename(columns = {'value': 'dbp'}, inplace=True)
df_glucose = pd.read_csv('CSV/T_glucose.csv'); df_glucose.rename(columns = {'value': 'glucose'}, inplace=True)
df_hgb = pd.read_csv('CSV/T_HGB.csv'); df_hgb.rename(columns = {'value': 'hgb'}, inplace=True)
df_ldl = pd.read_csv('CSV/T_ldl.csv'); df_ldl.rename(columns = {'value': 'ldl'}, inplace=True)
df_meds = pd.read_csv('CSV/T_meds.csv')  # medication table keeps its own schema (drug, start_day, end_day, daily_dosage)
df_sbp = pd.read_csv('CSV/T_sbp.csv'); df_sbp.rename(columns = {'value': 'sbp'}, inplace=True)
```
# Compute maximum time point (day) for each subject
```
# Last observation day per subject for every measurement table;
# the medication table contributes its last end_day instead.
df_creatinine_d = df_creatinine.groupby(['id'])['time'].max()
df_dbp_d = df_dbp.groupby(['id'])['time'].max()
df_glucose_d = df_glucose.groupby(['id'])['time'].max()
df_hgb_d = df_hgb.groupby(['id'])['time'].max()
df_ldl_d = df_ldl.groupby(['id'])['time'].max()
df_sbp_d = df_sbp.groupby(['id'])['time'].max()
df_meds_d = df_meds.groupby(['id'])['end_day'].max()
df_meds_d = df_meds_d.rename('time')  # align the name so the concat below stacks cleanly
# Stack the per-table maxima and keep the overall maximum per subject.
df_d_merge = pd.DataFrame(pd.concat([df_creatinine_d, df_dbp_d, df_glucose_d, df_hgb_d, df_ldl_d, df_sbp_d, df_meds_d])).reset_index()
df_d_merge = df_d_merge.groupby(['id']).max().reset_index()
df_d_merge = df_d_merge.sort_values('time')
print('Minimum = ' + str(df_d_merge['time'].min()) + ', Maximum = ' + str(df_d_merge['time'].max()))
print('Mean = ' + str(df_d_merge['time'].mean()) + ', Median = ' + str(df_d_merge['time'].median()))
# Plot each subject's record length, sorted ascending.
plt.plot(list(range(df_d_merge.shape[0])), df_d_merge['time'], '-p', markersize=1)
plt.xlabel("Subject")
plt.ylabel("Days")
plt.title("Days of record")
df_d_merge.to_csv('CSV/days_of_record.csv', index=False)
```
# Process med data
```
# Ignore medication that ended before day 0, i.e. before the observation window.
df_meds = df_meds[df_meds['end_day'] >= 0]
df_meds.head(10)
period_bin = 180

def generate_bin(n_start, n_end):
    """Map a medication interval [n_start, n_end] (in days) to a 1-based
    `period_bin`-day bin.

    Returns the bin index when the whole interval falls inside a single
    bin, or the sentinel 99 when the interval straddles a bin boundary
    (those rows are split and re-binned by the fix-up code that follows
    the apply() call).
    """
    global period_bin
    n = 1
    boundary = period_bin * n  # upper edge (in days) of the bin being tested
    # keep scanning upward until a code is assigned
    while True:
        if n_end <= boundary:
            # The interval ends inside this bin; since n_start <= n_end it
            # also starts inside it, so it is fully contained.
            if n_start <= (boundary + 1):
                return int(boundary / period_bin)
        elif n_start <= boundary:
            # Starts in this bin but ends in a later one (e.g. 90 < 180 < 280):
            # flag it for the split-and-rebin fix-up performed later.
            return 99
        else:
            # Interval lies entirely beyond this bin: try the next one.
            # Fix: the original advanced with `start_count *= n` (180, 360,
            # 1080, 4320, ...), skipping bins 3, 4, 5, ... and mis-binning
            # long-running intervals; advance one period at a time instead.
            n += 1
            boundary = period_bin * n
df_meds['days_bin'] = df_meds.apply(lambda x: generate_bin(x['start_day'], x['end_day']), axis=1)
# Fix the rows flagged 99: intervals that straddle a bin boundary.
MID = df_meds['days_bin'] == 99
# Copy the straddling rows; the copy holds the part after the boundary and
# is concatenated back onto the main frame below.
df_temp = df_meds[MID]
# Bin of the interval's tail, derived from end_day.
df_temp['days_bin'] = (df_temp['end_day'] / period_bin).astype(int) + 1
# Largest multiple of period_bin not exceeding end_day: the original row is
# truncated to end at v and the copied row starts at v + 1.
v = (np.floor(df_meds.loc[MID, 'end_day'] / period_bin) * period_bin).astype(int)
df_meds.loc[MID, 'end_day'] = v
# Re-derive days_bin for every row from its (possibly truncated) end_day.
# NOTE(review): an end_day exactly on a bin boundary gets bin end/180 + 1
# here, whereas generate_bin assigns it to the lower bin -- confirm which
# convention is intended. Also, an interval spanning more than two bins is
# split only once, so its head may still straddle further boundaries.
df_meds['days_bin'] = (df_meds['end_day'] / period_bin).astype(int) + 1
df_temp['start_day'] = (v + 1).astype(int)
df_meds = pd.concat([df_meds, df_temp], axis=0)
df_meds['days_bin'].value_counts().sort_index()
df_meds['end_day'].max()
# Total dosage delivered within each (possibly split) interval.
df_meds['total_day'] = df_meds['end_day'] - df_meds['start_day'] + 1
df_meds['total_dosage'] = df_meds['total_day'] * df_meds['daily_dosage']
# Sum dosage per subject, bin, and drug.
df_med_binned = df_meds.groupby(['id', 'days_bin', 'drug'])['total_dosage'].sum().reset_index()
df_med_binned.head()
# Wide format: one column per drug; a drug not taken in a bin becomes 0.
df_med_wide = df_med_binned.pivot(index=['id', 'days_bin'],columns='drug',values='total_dosage').reset_index().fillna(0)
df_med_wide.head()
```
# Merge the raw measurements
```
# Check how many hgb measurements fall between day 699 and day 720.
df_hgb[(df_hgb['time']> 699) & (df_hgb['time'] <= 720)].shape[0]
# Sort columns to id, time, value first
# First values are blood pressure, and systolic comes before diastolic
df_sbp = df_sbp[['id', 'time', 'sbp']]
# Outer-join every measurement table on (id, time) so no reading is lost.
df_merged = df_sbp.merge(df_dbp, on = ['id','time'], how='outer')
df_merged = df_merged.merge(df_creatinine, on = ['id','time'], how='outer')
df_merged = df_merged.merge(df_glucose, on = ['id','time'], how='outer')
df_merged = df_merged.merge(df_ldl, on = ['id','time'], how='outer')
df_merged = df_merged.merge(df_hgb, on = ['id','time'], how='outer')
df_merged = df_merged.sort_values(['id','time'])
df_merged.head()
# Assign each reading to a 1-based 180-day bin.
df_merged['days_bin'] = (df_merged['time'] / period_bin).astype(int) + 1
df_merged = df_merged.drop('time', axis=1)
df_merged['days_bin'].value_counts().sort_index()
# Aggregate by days_bin and take the median (the code uses median, not mean,
# which is robust to outlier readings).
df_merged = df_merged.groupby(['id', 'days_bin']).median().reset_index()
df_merged.head()
# Merge with the wide per-bin medication dosages.
df_merged = df_merged.merge(df_med_wide, on = ['id','days_bin'], how='outer')
df_merged.head()
# Save output for modelling
df_merged.to_csv('CSV/df_daybin.csv', index=False)
# Only first 4 bins (720 days)
df_merged_4 = df_merged[df_merged['days_bin'] <= 4]
# Change NA to 0 for drug columns (8:29): a missing drug means it was not taken.
df_merged_4.iloc[:, 8:29] = df_merged_4.iloc[:, 8:29].fillna(0)
# Use KNNImputer to fill missing continuous measurements, separately per bin.
imputer = KNNImputer(n_neighbors=3)
for day in range(1,5):
    DID = df_merged_4['days_bin'] == day
    df_day = df_merged_4[DID]
    # Exclude id and days_bin from imputation (columns 2:8 are the measurements)
    df_day.iloc[:,2:8] = pd.DataFrame(imputer.fit_transform(df_day.iloc[:,2:8]), index = df_day.index, columns = df_day.columns[2:8])
    df_merged_4[DID] = df_day
# Merge with demographic
df_demo = pd.read_csv('CSV/T_demo.csv')
# Change the unknown in df_demo race to the mode (White)
df_demo.loc[df_demo['race'] == 'Unknown','race'] = 'White'
df_merged_4 = df_merged_4.merge(df_demo, on='id')
# Merge with the outcome label
df_stage = pd.read_csv('CSV/T_stage.csv')
# Encode the stage-progression flag as 0/1
df_stage['Stage_Progress'] = np.where(df_stage['Stage_Progress'] == True, 1, 0)
df_merged_4 = df_merged_4.merge(df_stage, on='id')
# Save output for modelling
df_merged_4.to_csv('CSV/df_daybin_4.csv', index=False)
df_merged_4.head()
```
# Aggregated data
```
# Collapse the four 180-day bins into one row per subject.
df_agg = df_merged_4.copy()
# Take out demographic and outcome
df_agg.drop( ['race', 'gender', 'age', 'Stage_Progress'], axis=1, inplace=True)
df_agg_mean = df_agg.groupby('id').mean().reset_index()
df_agg_mean.head()
# Mean sbp, dbp, creatinine, glucose, ldl, hgb
# (recomputes the groupby above; the first computation is only inspected)
df_agg_mean = df_agg.groupby('id').mean().reset_index()
df_agg_mean = df_agg_mean.iloc[:, np.r_[0, 2:8]]  # keep id plus the six measurements, drop days_bin
df_agg_mean.head()
df_agg_mean.shape
# Sum drug dosages across bins
df_agg_sum = df_agg.groupby('id').sum().reset_index()
df_agg_sum = df_agg_sum.iloc[:, 8:]  # drug-dosage columns only
df_agg_sum.head()
df_agg_sum.shape
# Side-by-side concat; both frames come from the same groupby so rows align.
df_agg_fixed = pd.concat([df_agg_mean, df_agg_sum], axis=1)
df_agg_fixed.shape
# Put back demographics
df_agg_fixed = df_agg_fixed.merge(df_demo, on = 'id')
# Put back the outcome
df_agg_fixed = df_agg_fixed.merge(df_stage, on = 'id')
df_agg_fixed.head()
df_agg_fixed.shape
df_agg_fixed.to_csv('CSV/df_agg.csv', index=False)
```
# Temporal data
* Only use first 2 years of data (most measurements stop at day 699)
```
df_temporal = df_merged_4.copy()
df_temporal.head()
# Take out demographic and outcome
df_temporal.drop( ['race', 'gender', 'age', 'Stage_Progress'], axis=1, inplace=True)
# Convert to wide format: one column per (measurement, bin), e.g. sbp_1..sbp_4
df_temporal = df_temporal.set_index(['id','days_bin']).unstack()
df_temporal.columns = df_temporal.columns.map(lambda x: '{}_{}'.format(x[0], x[1]))
# Some subjects have no data at all in a bin; KNN-impute those cells.
df_temporal = pd.DataFrame(imputer.fit_transform(df_temporal), index = df_temporal.index, columns = df_temporal.columns)
df_temporal = df_temporal.reset_index()
# Put back demographics
df_temporal = df_temporal.merge(df_demo, on = 'id')
# Put back the outcome
df_temporal = df_temporal.merge(df_stage, on = 'id')
df_temporal.head()
# Save output for modelling
df_temporal.to_csv('CSV/df_temporal.csv', index=False)
```
# Categorize measurements
* Set continuous readings to 1=low, 2=normal, 3=high
* Categorize medicine by tertile split total dosage to categorize severity (1=low, 2=normal, 3=high)
* Categorize medicine by the treatment target, sum binary code
```
# Remove 0, get 75th percentile as threshold for high dosage
# Set normal as 1, high as 2
def categorize_drug(df):
    """Recode a dosage series in place: zeros stay 0; non-zero values at or
    below the 75th percentile of the non-zero entries become 1 (normal),
    and values above it become 2 (high). Returns the mutated series."""
    positive = df > 0
    if positive.any():
        cutoff = np.percentile(df[positive], 75)
        df.loc[positive] = np.where(df.loc[positive] > cutoff, 2, 1)
    return df
```
## Day_bin
```
df_merged_4_cat = df_merged_4.copy()
df_merged_4_cat.head()
# Cut each measurement into 1=low, 2=normal, 3=high using reference ranges;
# creatinine and hgb use sex-specific ranges.
names = ['1', '2', '3']
bins = [0, 90, 120, np.inf]
df_merged_4_cat['sbp'] = pd.cut(df_merged_4['sbp'], bins, labels=names)
bins = [0, 60, 80, np.inf]
df_merged_4_cat['dbp'] = pd.cut(df_merged_4['dbp'], bins, labels=names)
bins = [0, 3.9, 7.8, np.inf]
df_merged_4_cat['glucose'] = pd.cut(df_merged_4['glucose'], bins, labels=names)
bins = [0, 100, 129, np.inf]
df_merged_4_cat['ldl'] = pd.cut(df_merged_4['ldl'], bins, labels=names)
MID = df_merged_4['gender'] == 'Male'
bins = [0, 0.74, 1.35, np.inf]
df_merged_4_cat.loc[MID, 'creatinine'] = pd.cut(df_merged_4.loc[MID, 'creatinine'], bins, labels=names)
bins = [0, 0.59, 1.04, np.inf]
df_merged_4_cat.loc[~MID, 'creatinine'] = pd.cut(df_merged_4.loc[~MID, 'creatinine'], bins, labels=names)
bins = [0, 14, 17.5, np.inf]
df_merged_4_cat.loc[MID, 'hgb'] = pd.cut(df_merged_4.loc[MID, 'hgb'], bins, labels=names)
bins = [0, 12.3, 15.3, np.inf]
df_merged_4_cat.loc[~MID, 'hgb'] = pd.cut(df_merged_4.loc[~MID, 'hgb'], bins, labels=names)
df_merged_4_cat.head()
# Remove 0, get 75th percentile as threshold for high dosage, set normal as 1, high as 2
# Need to compute separately for different days_bin
for day in range(1, 5):
    DID = df_merged_4_cat['days_bin'] == day
    df_day = df_merged_4_cat[DID]
    df_merged_4_cat = df_merged_4_cat[~DID]
    df_day.iloc[:, 8:29] = df_day.iloc[:, 8:29].apply(lambda x: categorize_drug(x)).astype(int)
    df_merged_4_cat = pd.concat([df_merged_4_cat, df_day])
# Label encode race and gender
le = LabelEncoder()
df_merged_4_cat['race'] = le.fit_transform(df_merged_4_cat['race'])
df_merged_4_cat['gender'] = le.fit_transform(df_merged_4_cat['gender'])
# Group age to young-old (<=74 y.o.) as 1, middle-old (75 to 84 y.o.) as 2, and old-old (>=85 y.o.) as 3
# NOTE(review): pd.qcut splits into data-driven tertiles, not the fixed age
# bands the comment above describes -- confirm which grouping is intended.
df_merged_4_cat['age'] = pd.qcut(df_merged_4['age'], 3, labels=[1,2,3])
df_merged_4_cat['age'].value_counts()
df_merged_4_cat.to_csv('CSV/df_merged_4_cat.csv', index=False)
# Group drugs by treatment target (sum the per-drug codes)
df_merged_4_cat_drug = df_merged_4_cat.copy()
glucose_col = ['canagliflozin', 'dapagliflozin', 'metformin']
df_merged_4_cat_drug['glucose_treatment'] = df_merged_4_cat_drug[glucose_col].sum(axis=1).astype(int)
df_merged_4_cat_drug.drop(glucose_col, axis=1, inplace=True)
bp_col = ['atenolol','bisoprolol','carvedilol','irbesartan','labetalol','losartan','metoprolol','nebivolol','olmesartan','propranolol','telmisartan','valsartan']
df_merged_4_cat_drug['bp_treatment'] = df_merged_4_cat_drug[bp_col].sum(axis=1).astype(int)
df_merged_4_cat_drug.drop(bp_col, axis=1, inplace=True)
cholesterol_col = ['atorvastatin','lovastatin','pitavastatin','pravastatin','rosuvastatin','simvastatin']
df_merged_4_cat_drug['cholesterol_treatment'] = df_merged_4_cat_drug[cholesterol_col].sum(axis=1).astype(int)
df_merged_4_cat_drug.drop(cholesterol_col, axis=1, inplace=True)
df_merged_4_cat_drug.head()
df_merged_4_cat_drug.to_csv('CSV/df_merged_4_cat_drug.csv', index=False)
```
## Aggregated
```
# Categorize the aggregated features, mirroring the per-bin cell above.
# Fix: the original did `df_agg_cat = df_agg_fixed`, which is an alias, not a
# copy -- the categorical cuts below then clobbered df_agg_fixed itself, which
# the aggregated-GFR cell still needs with numeric creatinine/age. Use .copy()
# for consistency with the sibling cells (df_merged_4.copy(), df_temporal.copy()).
df_agg_cat = df_agg_fixed.copy()
names = ['1', '2', '3']
bins = [0, 90, 120, np.inf]
df_agg_cat['sbp'] = pd.cut(df_agg_fixed['sbp'], bins, labels=names)
bins = [0, 60, 80, np.inf]
df_agg_cat['dbp'] = pd.cut(df_agg_fixed['dbp'], bins, labels=names)
bins = [0, 3.9, 7.8, np.inf]
df_agg_cat['glucose'] = pd.cut(df_agg_fixed['glucose'], bins, labels=names)
bins = [0, 100, 129, np.inf]
df_agg_cat['ldl'] = pd.cut(df_agg_fixed['ldl'], bins, labels=names)
# Sex-specific reference ranges for creatinine and hgb.
MID = df_agg_fixed['gender'] == 'Male'
bins = [0, 0.74, 1.35, np.inf]
df_agg_cat.loc[MID, 'creatinine'] = pd.cut(df_agg_fixed.loc[MID, 'creatinine'], bins, labels=names)
bins = [0, 0.59, 1.04, np.inf]
df_agg_cat.loc[~MID, 'creatinine'] = pd.cut(df_agg_fixed.loc[~MID, 'creatinine'], bins, labels=names)
bins = [0, 14, 17.5, np.inf]
df_agg_cat.loc[MID, 'hgb'] = pd.cut(df_agg_fixed.loc[MID, 'hgb'], bins, labels=names)
bins = [0, 12.3, 15.3, np.inf]
df_agg_cat.loc[~MID, 'hgb'] = pd.cut(df_agg_fixed.loc[~MID, 'hgb'], bins, labels=names)
df_agg_cat.head()
# Remove 0, get 75th percentile as threshold for high dosage, set normal as 1, high as 2
df_agg_cat.iloc[:,7:28] = df_agg_fixed.iloc[:,7:28].apply(lambda x: categorize_drug(x)).astype(int)
# Label encode race and gender
le = LabelEncoder()
df_agg_cat['race'] = le.fit_transform(df_agg_cat['race'])
df_agg_cat['gender'] = le.fit_transform(df_agg_cat['gender'])
# Split age into tertiles labelled 1 (youngest) to 3 (oldest).
df_agg_cat['age'] = pd.qcut(df_agg_cat['age'], 3, labels=[1,2,3])
df_agg_cat['age'].value_counts()
df_agg_cat.to_csv('CSV/df_agg_cat.csv', index=False)
# Group drugs by treatment target (sum the per-drug codes)
df_agg_cat_drug = df_agg_cat.copy()
glucose_col = ['canagliflozin', 'dapagliflozin', 'metformin']
df_agg_cat_drug['glucose_treatment'] = df_agg_cat_drug[glucose_col].sum(axis=1).astype(int)
df_agg_cat_drug.drop(glucose_col, axis=1, inplace=True)
bp_col = ['atenolol','bisoprolol','carvedilol','irbesartan','labetalol','losartan','metoprolol','nebivolol','olmesartan','propranolol','telmisartan','valsartan']
df_agg_cat_drug['bp_treatment'] = df_agg_cat_drug[bp_col].sum(axis=1).astype(int)
df_agg_cat_drug.drop(bp_col, axis=1, inplace=True)
cholesterol_col = ['atorvastatin','lovastatin','pitavastatin','pravastatin','rosuvastatin','simvastatin']
df_agg_cat_drug['cholesterol_treatment'] = df_agg_cat_drug[cholesterol_col].sum(axis=1).astype(int)
df_agg_cat_drug.drop(cholesterol_col, axis=1, inplace=True)
df_agg_cat_drug.head()
df_agg_cat_drug.to_csv('CSV/df_agg_cat_drug.csv', index=False)
```
## Temporal
```
df_temporal_cat = df_temporal.copy()
# Categorize each binned measurement column (xxx_1..xxx_4) into
# 1=low, 2=normal, 3=high; creatinine and hgb use sex-specific ranges.
names = ['1', '2', '3']
bins = [0, 90, 120, np.inf]
for colname in ['sbp_1', 'sbp_2', 'sbp_3', 'sbp_4']:
    df_temporal_cat[colname] = pd.cut(df_temporal_cat[colname], bins, labels=names)
bins = [0, 60, 80, np.inf]
for colname in ['dbp_1', 'dbp_2', 'dbp_3', 'dbp_4']:
    df_temporal_cat[colname] = pd.cut(df_temporal_cat[colname], bins, labels=names)
bins = [0, 3.9, 7.8, np.inf]
for colname in ['glucose_1', 'glucose_2', 'glucose_3', 'glucose_4']:
    df_temporal_cat[colname] = pd.cut(df_temporal_cat[colname], bins, labels=names)
bins = [0, 100, 129, np.inf]
for colname in ['ldl_1', 'ldl_2', 'ldl_3', 'ldl_4']:
    df_temporal_cat[colname] = pd.cut(df_temporal_cat[colname], bins, labels=names)
MID = df_temporal_cat['gender'] == 'Male'
bins = [0, 0.74, 1.35, np.inf]
for colname in ['creatinine_1', 'creatinine_2', 'creatinine_3', 'creatinine_4']:
    df_temporal_cat.loc[MID, colname] = pd.cut(df_temporal_cat.loc[MID, colname], bins, labels=names)
bins = [0, 0.59, 1.04, np.inf]
for colname in ['creatinine_1', 'creatinine_2', 'creatinine_3', 'creatinine_4']:
    df_temporal_cat.loc[~MID, colname] = pd.cut(df_temporal_cat.loc[~MID, colname], bins, labels=names)
bins = [0, 14, 17.5, np.inf]
for colname in ['hgb_1', 'hgb_2', 'hgb_3', 'hgb_4']:
    df_temporal_cat.loc[MID, colname] = pd.cut(df_temporal_cat.loc[MID, colname], bins, labels=names)
bins = [0, 12.3, 15.3, np.inf]
for colname in ['hgb_1', 'hgb_2', 'hgb_3', 'hgb_4']:
    df_temporal_cat.loc[~MID, colname] = pd.cut(df_temporal_cat.loc[~MID, colname], bins, labels=names)
df_temporal_cat.head()
# Remove 0, get 75th percentile as threshold for high dosage, set normal as 1, high as 2
df_temporal_cat.iloc[:,25:109] = df_temporal_cat.iloc[:,25:109].apply(lambda x: categorize_drug(x)).astype(int)
# Label encode race and gender
le = LabelEncoder()
df_temporal_cat['race'] = le.fit_transform(df_temporal_cat['race'])
df_temporal_cat['gender'] = le.fit_transform(df_temporal_cat['gender'])
# Split age into tertiles labelled 1 (youngest) to 3 (oldest).
df_temporal_cat['age'] = pd.qcut(df_temporal_cat['age'], 3, labels=[1,2,3])
df_temporal_cat['age'].value_counts()
df_temporal_cat.to_csv('CSV/df_temporal_cat.csv', index=False)
# Group drugs by treatment target (sum the per-drug codes), separately per bin i.
df_temporal_cat_drug = df_temporal_cat.copy()
for i in range(1,5):
    glucose_col = ['canagliflozin_' + str(i), 'dapagliflozin_' + str(i), 'metformin_' + str(i)]
    df_temporal_cat_drug['glucose_treatment_'+ str(i)] = df_temporal_cat_drug[glucose_col].sum(axis=1).astype(int)
    df_temporal_cat_drug.drop(glucose_col, axis=1, inplace=True)
    bp_col = ['atenolol_' + str(i),'bisoprolol_' + str(i),'carvedilol_' + str(i),'irbesartan_' + str(i),'labetalol_' + str(i),'losartan_' + str(i),'metoprolol_' + str(i),'nebivolol_' + str(i),'olmesartan_' + str(i),'propranolol_' + str(i),'telmisartan_' + str(i),'valsartan_' + str(i)]
    df_temporal_cat_drug['bp_treatment_'+ str(i)] = df_temporal_cat_drug[bp_col].sum(axis=1).astype(int)
    df_temporal_cat_drug.drop(bp_col, axis=1, inplace=True)
    cholesterol_col = ['atorvastatin_' + str(i),'lovastatin_' + str(i),'pitavastatin_' + str(i),'pravastatin_' + str(i),'rosuvastatin_' + str(i),'simvastatin_' + str(i)]
    df_temporal_cat_drug['cholesterol_treatment_'+ str(i)] = df_temporal_cat_drug[cholesterol_col].sum(axis=1).astype(int)
    df_temporal_cat_drug.drop(cholesterol_col, axis=1, inplace=True)
df_temporal_cat_drug.head()
df_temporal_cat_drug.to_csv('CSV/df_temporal_cat_drug.csv', index=False)
```
# Compute GFR
* CKD-EPI equations
```
def computeGFR(df):
    """Compute estimated GFR for one patient row using the CKD-EPI (2009) equation.

    `df` must expose 'gender', 'race', 'creatinine' (mg/dL) and 'age' (years)
    via item access (a pandas row or a plain dict). Returns a float.
    """
    # Sex-specific constants: kappa (creatinine divisor), alpha (exponent of the
    # low-creatinine term) and the female multiplier.
    if df['gender'] == 'Male':
        k = 0.9
        a = -0.411
        f_constant = 1
    else:
        k = 0.7
        a = -0.329
        f_constant = 1.018
    # Race multiplier per the original CKD-EPI formulation.
    b_constant = 1.159 if df['race'] == 'Black' else 1
    # BUG FIX: the low-creatinine term must be raised to the power `a` (alpha);
    # the original assigned `a` but never applied it, silently dropping the term.
    gfr = (141
           * min(df['creatinine'] / k, 1) ** a
           * max(df['creatinine'] / k, 1) ** (-1.209)
           * 0.993 ** df['age']
           * f_constant * b_constant)
    return gfr
```
## 180-day bin
```
# Keep only the inputs the GFR computation needs, plus id / bin / outcome label.
col_gfr = ['id', 'days_bin', 'creatinine', 'race', 'gender', 'age', 'Stage_Progress']
df_merged_4_gfr = df_merged_4[col_gfr].copy()
# Row-wise CKD-EPI computation (computeGFR reads gender/race/creatinine/age).
df_merged_4_gfr['gfr'] = df_merged_4_gfr.apply(lambda x: computeGFR(x), axis=1)
# Drop the raw inputs once GFR has been derived.
df_merged_4_gfr.drop(['creatinine', 'race', 'gender', 'age'], axis=1, inplace=True)
# Categorize GFR: 1 if gfr < 60 else 2.
df_merged_4_gfr['gfr_cat'] = np.where(df_merged_4_gfr['gfr'] < 60, 1, 2)
df_merged_4_gfr['gfr_cat'].value_counts()
df_merged_4_gfr.to_csv('CSV/df_merged_4_gfr.csv', index=False)
df_merged_4.head()
df_merged_4_gfr.head()
```
## Aggregated
```
# Same GFR derivation as above, on the aggregated (one-row-per-patient) frame.
col_gfr = ['id', 'creatinine', 'race', 'gender', 'age', 'Stage_Progress']
df_agg_gfr = df_agg_fixed[col_gfr].copy()
df_agg_gfr['gfr'] = df_agg_gfr.apply(lambda x: computeGFR(x), axis=1)
# Drop the raw inputs once GFR has been derived.
df_agg_gfr.drop(['creatinine', 'race', 'gender', 'age'], axis=1, inplace=True)
# Categorize GFR: 1 if gfr < 60 else 2.
df_agg_gfr['gfr_cat'] = np.where(df_agg_gfr['gfr'] < 60, 1, 2)
df_agg_gfr['gfr_cat'].value_counts()
df_agg_gfr.to_csv('CSV/df_agg_gfr.csv', index=False)
```
## Temporal
```
def computeGFR_temporal(df, i):
    """Compute estimated GFR (CKD-EPI 2009) for time bin `i` of one patient row.

    Reads 'creatinine_<i>' (mg/dL) plus 'gender', 'race' and 'age' from `df`
    (a pandas row or a plain dict). Returns a float.
    """
    # Sex-specific constants: kappa, alpha and the female multiplier.
    if df['gender'] == 'Male':
        k = 0.9
        a = -0.411
        f_constant = 1
    else:
        k = 0.7
        a = -0.329
        f_constant = 1.018
    # Race multiplier per the original CKD-EPI formulation.
    b_constant = 1.159 if df['race'] == 'Black' else 1
    scr_ratio = df['creatinine_' + str(i)] / k
    # BUG FIX: the low-creatinine term must be raised to the power `a` (alpha);
    # the original assigned `a` but never applied it, silently dropping the term.
    gfr = (141
           * min(scr_ratio, 1) ** a
           * max(scr_ratio, 1) ** (-1.209)
           * 0.993 ** df['age']
           * f_constant * b_constant)
    return gfr
col_gfr = ['id', 'creatinine_1', 'creatinine_2', 'creatinine_3', 'creatinine_4', 'race', 'gender', 'age', 'Stage_Progress']
df_temporal_gfr = df_temporal[col_gfr].copy()
# Derive one GFR column per time bin, dropping each raw creatinine column once consumed.
for i in range(1, 5):
    df_temporal_gfr['gfr_' + str(i)] = df_temporal_gfr.apply(lambda x: computeGFR_temporal(x, i), axis=1)
    df_temporal_gfr.drop('creatinine_' + str(i), axis=1, inplace=True)
df_temporal_gfr.drop(['race', 'gender', 'age'], axis=1, inplace=True)
# Categorize GFR per bin: 1 if gfr < 60 else 2.
for i in range(1, 5):
    df_temporal_gfr['gfr_cat_' + str(i)] = np.where(df_temporal_gfr['gfr_' + str(i)] < 60, 1, 2)
df_temporal_gfr.to_csv('CSV/df_temporal_gfr.csv', index=False)
```
| github_jupyter |
# Training Job in Internet-free Mode
If you want to isolate your training data and training container from the rest of the Internet, then you should create the training job in a private subnet. A private subnet is a subnet in your VPC without a route to an Internet Gateway. This means, by default, no inbound calls to your container from the Internet is possible and your container cannot make outbound calls to the Internet. If you need the training container to access your S3 resource, you need to **explicitly** add a VPC endpoint and attach it to the route table of your private subnet to allow traffic to your S3 bucket.
In this notebook, you will walk through an example of creating such a training job. You will
- Build a simple training image
- Set up a VPC
- Set up a private subnet in the VPC
- Set up a security group in the VPC
- Create a training job in your private subnet and security group and watch it fail (because it cannot access your S3 resource)
- Add a VPC endpoint to allow traffic to S3
- Create another training job in your private subnet and watch it succeed
If you are not familiar with VPC security configuration, the following materials can help you
- [Security in Amazon Virtual Private Cloud](https://docs.aws.amazon.com/vpc/latest/userguide/security.html)
- [Training and Inference Containers in Internet-Free Mode](https://docs.aws.amazon.com/sagemaker/latest/dg/mkt-algo-model-internet-free.html)
It's okay if you don't understand everything from the official docs above. The code samples you will see in this notebook will help you grasp those concepts.
```
# import libraries
import boto3
import pprint
import datetime
import time
# Shared pretty-printer for the API responses inspected throughout this notebook.
pp = pprint.PrettyPrinter(indent=1)
```
## Permissions
If you are running this notebook on an EC2 instance with an IAM user (you) as the default profile, then you will need policies to allow you to create VPC / Subnet / Security group / VPC endpoint. Likewise, if you are running this notebook on a SageMaker notebook instance or Studio, the service role needs to have those permissions as well.
## Build a training image
You will follow the same procedure for building a training image as in [this notebook](https://github.com/hsl89/amazon-sagemaker-examples/blob/sagemaker-fundamentals/sagemaker-fundamentals/create-training-job/create_training_job.ipynb). We will refer to this image as `example-image`. Please go through that notebook if you are not familiar with `CreateTrainingJob` API.
```
# create a repo in your ECR
ecr = boto3.client("ecr")
try:
    # The repository might already exist
    # in your ECR
    cr_res = ecr.create_repository(repositoryName="example-image")
    pp.pprint(cr_res)
except Exception as e:
    # Best-effort: print the error (e.g. RepositoryAlreadyExistsException)
    # instead of raising, so the notebook can be re-run end to end.
    print(e)
%%sh
# build the image
cd container/
# tag it as example-image:latest
docker build -t example-image:latest .
# test the container
python local_test/test_container.py
# Caller's account id, with the surrounding JSON quotes stripped by sed
account=$(aws sts get-caller-identity --query Account | sed -e 's/^"//' -e 's/"$//')
region=$(aws configure get region)
# Registry host for this account/region
ecr_account=${account}.dkr.ecr.${region}.amazonaws.com
# Give docker your ECR login password
aws ecr get-login-password --region $region | docker login --username AWS --password-stdin $ecr_account
# Fullname of the repo
fullname=$ecr_account/example-image:latest
# Tag the image with the fullname
docker tag example-image:latest $fullname
# Push to ECR
docker push $fullname
```
## Create a VPC
You can think of Amazon VPC as the traditional network in a data center in the cloud.
The following are the key concepts for VPCs:
* Virtual private cloud (VPC) โ A virtual network dedicated to your AWS account.
* Subnet โ A range of IP addresses in your VPC.
* Route table โ A set of rules, called routes, that are used to determine where network traffic is directed.
* Internet gateway โ A gateway that you attach to your VPC to enable communication between resources in your VPC and the internet.
* VPC endpoint โ Enables you to privately connect your VPC to supported AWS services and VPC endpoint services powered by PrivateLink without requiring an internet gateway, NAT device, VPN connection, or AWS Direct Connect connection. Instances in your VPC do not require public IP addresses to communicate with resources in the service. Traffic between your VPC and the other service does not leave the Amazon network. For more information, see AWS PrivateLink and VPC endpoints.
* CIDR block โClassless Inter-Domain Routing. An internet protocol address allocation and route aggregation methodology. For more information, see [Classless Inter-Domain Routing](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing#CIDR_notation) in Wikipedia.
All of these concepts are explained in the [official docs](https://docs.aws.amazon.com/vpc/latest/userguide/what-is-amazon-vpc.html).
```
# Create a VPC in your default region
ec2 = boto3.client("ec2")
vpc_res = ec2.create_vpc(
    CidrBlock="10.0.0.0/20", # 2^(32 - 20) = 4096 private ipv4 addrs
    AmazonProvidedIpv6CidrBlock=False,
    DryRun=False,
    TagSpecifications=[
        {
            "ResourceType": "vpc",
            "Tags": [
                # Name tag makes the VPC easy to find in the console
                {"Key": "Name", "Value": "hello-world"},
            ],
        },
    ],
)
pp.pprint(vpc_res)
# inspect this VPC in details
vpc_des = ec2.describe_vpcs(VpcIds=[vpc_res["Vpc"]["VpcId"]])
pp.pprint(vpc_des["Vpcs"])
```
## Create a subnet
The VPC you just created has the capacity to host 4,096 compute instances. Think of the VPC you just created as the entire data center for your organization. Of course, you did not spin up any instances yet, so you are not billed for 4,096 instances (rest assured). Suppose you are running a real data center: part of your cluster might be public-facing (for example, machines that host your frontend applications), and part of your cluster might be insulated from the internet and only accessible from other machines in your data center (for example, your backend or database servers). You can define the scope of your cluster (public / private) via a **subnet**. Using subnets, you can define which parts of your VPC (via its CIDR block) are public and which parts are private.
If you want to run a SageMaker training job in network isolation mode, then you will need to pass a private subnet id to the `CreateTrainingJob` API. The SageMaker service will then start instances in the private subnet that run your training container.
So first off, let's create a private subnet. A subnet is defined within an availability zone, whereas a VPC is defined within a region.
```
# create subnet and associate it with route table
def get_first_availability_zone():
    """Return the first true availability zone of the current region, or None."""
    region_name = boto3.Session().region_name
    avz_res = ec2.describe_availability_zones(
        Filters=[{"Name": "region-name", "Values": [region_name]}],
        AllAvailabilityZones=True,
    )
    # Skip Local/Wavelength zones; keep only plain availability zones.
    zones = (az for az in avz_res["AvailabilityZones"] if az["ZoneType"] == "availability-zone")
    return next(zones, None)
def create_subnet(vpc_id, cidr_block, dry_run):
    """Create a subnet in the first availability zone of the current region.

    Returns the CreateSubnet response. Raises RuntimeError if the region
    reports no availability zone.
    """
    az = get_first_availability_zone()
    if az is None:
        # BUG FIX: `raise "..."` raises a TypeError in Python 3 (only exception
        # instances/classes can be raised); raise a proper exception instead.
        raise RuntimeError("No availability zone")
    return ec2.create_subnet(
        AvailabilityZone=az["ZoneName"], VpcId=vpc_id, CidrBlock=cidr_block, DryRun=dry_run
    )
# Carve a small private subnet out of the VPC's 10.0.0.0/20 block.
sn_res = create_subnet(
    vpc_id=vpc_res["Vpc"]["VpcId"],
    cidr_block="10.0.0.0/28", # 2 ^ (32 - 28) = 16 private ipv4 addrs in this subnet
    dry_run=False,
)
pp.pprint(sn_res)
```
## Create a security group
A [security group](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html) is another layer of security configuration for instances running in your VPC. It acts as a firewall for your instance that controls its inbound and outbound calls. You need a security group for a SageMaker training job, because in complicated training job that involves distributed training, you need a security group configuration that allows traffics between instances that runs the training job. For the purpose of this notebook, the default setting of a security group (deny all inbound traffic; allow all outbound traffic) is enough. For more complicated training job, you will need to configure the security group accordingly. This will be discussed in more advanced notebooks for `CreateTrainingJob`.
```
# create a security group
sg_res = ec2.create_security_group(
Description="security group for SageMaker instances",
GroupName="sagemaker-private",
VpcId=vpc_res["Vpc"]["VpcId"],
TagSpecifications=[
{
"ResourceType": "security-group",
"Tags": [
{
"Key": "Service", # Tag the sec gp by service, this can be used to filter sec gps
"Value": "SageMaker",
}
],
}
],
)
pp.pprint(sg_res)
# inspect the security group in detail
ec2.describe_security_groups(GroupIds=[sg_res["GroupId"]])
```
## Create a training job
Now let's create a training job within your private subnet you just created. First, let's download some helper functions for creating service role for SageMaker.
```
%%bash
# Reuse the IAM helper functions from the execution-role example notebook.
cp ../execution-role/iam_helpers.py .
# set up service role for SageMaker
from iam_helpers import create_execution_role
iam = boto3.client("iam")
sts = boto3.client("sts")
caller = sts.get_caller_identity()
if ":user/" in caller["Arn"]: # as IAM user
# either paste in a role_arn with or create a new one and attach
# AmazonSageMakerFullAccess
role_name = "example-sm"
role_arn = create_execution_role(role_name=role_name)["Role"]["Arn"]
iam.attach_role_policy(
RoleName=role_name,
PolicyArn="arn:aws:iam::aws:policy/AmazonSageMakerFullAccess",
)
elif "assumed-role" in caller["Arn"]: # on SageMaker infra
role_arn = caller["Arn"]
else:
print("I assume you are on an EC2 instance launched with an IAM role")
role_arn = caller["Arn"]
# some helpers
def current_time():
ct = datetime.datetime.now()
return str(ct.now()).replace(":", "-").replace(" ", "-")[:19]
def account_id():
    """Return the 12-digit AWS account id of the current caller."""
    caller = boto3.client("sts").get_caller_identity()
    return caller["Account"]
```
To make this notebook self-contained, you will create a bucket and upload some data there to pass to training container as you did in the [basic create training job notebook](https://github.com/hsl89/amazon-sagemaker-examples/blob/sagemaker-fundamentals/sagemaker-fundamentals/create-training-job/create_training_job.ipynb). But you don't have to do so, if you already have a bucket that SageMaker service can access (i.e. a bucket with bucket name containing `sagemaker`, see `AmazonSageMakerFullAccessPolicy`), then you can use that bucket as well.
```
# create a bucket for SageMaker in your region
def create_bucket():
"""Create an S3 bucket that is intended to be used for short term"""
bucket = f"sagemaker-{current_time()}"
region_name = boto3.Session().region_name
create_bucket_config = {}
if region_name != "us-east-1":
# us-east-1 is the default region for S3 bucket
# specify LocationConstraint if your VPC is not
# in us-east-1
create_bucket_config["LocationConstraint"] = region_name
boto3.client("s3").create_bucket(Bucket=bucket, CreateBucketConfiguration=create_bucket_config)
return bucket
# replace it with your own SageMaker-accessible bucket
# if you don't want to create a new one
bucket = create_bucket()
# upload some mock data to your bucket
import os
s3 = boto3.client("s3")
input_prefix = "input_data"
for fname in os.listdir("data"):
    with open(os.path.join("data", fname), "rb") as f:
        # NOTE(review): prefix and filename are concatenated without '/', producing
        # keys like 'input_datafile.csv'. The 'input_data' S3Prefix still matches
        # them, but confirm this flat naming is intended.
        key = input_prefix + fname
        s3.upload_fileobj(f, bucket, key)
```
Now, you will configure the training job.
```
sm_cli = boto3.client("sagemaker")
# name training job
training_job_name = "example-training-job-{}".format(current_time())
# S3 location of the uploaded input data
data_path = "s3://" + bucket + "/" + input_prefix
# location that SageMaker saves the model artifacts
output_prefix = "output/"
output_path = "s3://" + bucket + "/" + output_prefix
# ECR URI of your image
region = boto3.Session().region_name
account = account_id()
image_uri = "{}.dkr.ecr.{}.amazonaws.com/example-image:latest".format(account, region)
algorithm_specification = {
    "TrainingImage": image_uri,
    "TrainingInputMode": "File",
}
# Both channels point at the same prefix here -- this is mock data.
input_data_config = [
    {
        "ChannelName": "train",
        "DataSource": {
            "S3DataSource": {
                "S3DataType": "S3Prefix",
                "S3Uri": data_path,
                "S3DataDistributionType": "FullyReplicated",
            }
        },
    },
    {
        "ChannelName": "test",
        "DataSource": {
            "S3DataSource": {
                "S3DataType": "S3Prefix",
                "S3Uri": data_path,
                "S3DataDistributionType": "FullyReplicated",
            }
        },
    },
]
vpc_config = {
    # security groups need to be configured to communicate
    # with each other for distributed training job
    "SecurityGroupIds": [sg_res["GroupId"]],
    "Subnets": [sn_res["Subnet"]["SubnetId"]],
}
output_data_config = {"S3OutputPath": output_path}
resource_config = {"InstanceType": "ml.m5.large", "InstanceCount": 1, "VolumeSizeInGB": 5}
# Cap the job at 2 minutes so a wedged job cannot run up costs.
stopping_condition = {
    "MaxRuntimeInSeconds": 120,
}
# Network isolation: the training container gets no outbound network access;
# S3 data transfer is performed by the platform outside the container.
enable_network_isolation = True
ct_res = sm_cli.create_training_job(
    TrainingJobName=training_job_name,
    AlgorithmSpecification=algorithm_specification,
    RoleArn=role_arn,
    InputDataConfig=input_data_config,
    OutputDataConfig=output_data_config,
    VpcConfig=vpc_config,
    ResourceConfig=resource_config,
    StoppingCondition=stopping_condition,
    EnableNetworkIsolation=enable_network_isolation,
    EnableManagedSpotTraining=False,
)
```
The training job is expected to fail, because the subnet you created is isolated from the Internet and you have not created any mechanism for it to access the data in your S3 bucket.
```
# Poll until the job reaches a terminal state; it is expected to fail here.
while True:
    tj_state = sm_cli.describe_training_job(TrainingJobName=training_job_name)
    if tj_state["TrainingJobStatus"] in ("Completed", "Stopped", "Failed"):
        break
    print("Training in progress")
    time.sleep(30)
if tj_state["TrainingJobStatus"] == "Failed":
    print("Training job failed ")
    print("Failed Reason: {}".format(tj_state["FailureReason"]))
else:
    print("Training job completed")
```
## Add a VPC endpoint
A VPC endpoint enables you to privately connect your VPC to supported AWS services and VPC endpoint services powered by PrivateLink without requiring an internet gateway, NAT device, VPN connection, or AWS Direct Connect connection. Instances in your VPC do not require public IP addresses to communicate with resources in the service. **Traffic between your VPC and the other service does not leave the Amazon network**. For more information, see [AWS PrivateLink and VPC endpoints](https://docs.aws.amazon.com/vpc/latest/userguide/endpoint-services-overview.html).
There are three types of VPC endpoints as of March 2021.
A **Gateway** endpoint serves as a target for a route in your route table for traffic destined for the AWS service. You can specify an endpoint policy to attach to the endpoint, which will control access to the service from your VPC. You can also specify the VPC route tables that use the endpoint.
An **Interface** endpoint is a network interface in your subnet that serves as an endpoint for communicating with the specified service. You can specify the subnets in which to create an endpoint, and the security groups to associate with the endpoint network interface.
A **GatewayLoadBalancer** endpoint is a network interface in your subnet that serves an endpoint for communicating with a Gateway Load Balancer that you've configured as a VPC endpoint service.
---
Only a Gateway endpoint is a viable option for the SageMaker service, so you will add a Gateway endpoint here. A Gateway endpoint needs to be added to a route table, so you will need to create a route table and associate it with your subnet first.
```
# Create a route table
# (a Gateway endpoint must be attached to a route table, which in turn is
# associated with the subnet)
rt_res = ec2.create_route_table(
    VpcId=vpc_res["Vpc"]["VpcId"],
    TagSpecifications=[
        {"ResourceType": "route-table", "Tags": [{"Key": "Service", "Value": "SageMaker"}]}
    ],
)
pp.pprint(rt_res)
# Associate the route table with the subnet
ass_rt_res = ec2.associate_route_table(
    RouteTableId=rt_res["RouteTable"]["RouteTableId"], SubnetId=sn_res["Subnet"]["SubnetId"]
)
pp.pprint(ass_rt_res)
```
Next, let's check service name of S3 bucket.
```
# Check out service name for S3
services = ec2.describe_vpc_endpoint_services()
for s in services["ServiceNames"]:
    if "s3" in s:
        print(s)
# Create a gateway endpoint
region_name = boto3.Session().region_name
iep_res = ec2.create_vpc_endpoint(
    VpcEndpointType="Gateway",
    VpcId=vpc_res["Vpc"]["VpcId"],
    ServiceName=f"com.amazonaws.{region_name}.s3", # return of previous cell
    RouteTableIds=[rt_res["RouteTable"]["RouteTableId"]],
    # you don't need to add a tag, it is only
    # used as a convenient way to filter through your
    # endpoints in the future
    TagSpecifications=[
        {"ResourceType": "vpc-endpoint", "Tags": [{"Key": "Service", "Value": "SageMaker"}]}
    ],
)
pp.pprint(iep_res)
```
Now you have added a Gateway endpoint to the route table of the subnet. This endpoint allows the subnet to talk to your S3 bucket **privately**. The traffic between the subnet and your S3 bucket does not leave AWS network. Let's create another training job to verify that the training container can access the data in your S3 bucket.
```
# Re-run the identical job config; only the VPC now has an S3 Gateway endpoint.
training_job_name = "example-training-job-{}".format(current_time())
ct_res = sm_cli.create_training_job(
    TrainingJobName=training_job_name,
    AlgorithmSpecification=algorithm_specification,
    RoleArn=role_arn,
    InputDataConfig=input_data_config,
    OutputDataConfig=output_data_config,
    VpcConfig=vpc_config,
    ResourceConfig=resource_config,
    StoppingCondition=stopping_condition,
    EnableNetworkIsolation=enable_network_isolation,
    EnableManagedSpotTraining=False,
)
# watch it succeed
stopped = False
while not stopped:
    tj_state = sm_cli.describe_training_job(TrainingJobName=training_job_name)
    if tj_state["TrainingJobStatus"] in ["Completed", "Stopped", "Failed"]:
        stopped = True
    else:
        print("Training in progress")
        time.sleep(30)
if tj_state["TrainingJobStatus"] == "Failed":
    print("Training job failed ")
    print("Failed Reason: {}".format(tj_state["FailureReason"]))
else:
    print("Training job completed")
```
## Review
Let's review what you did in this notebook: you have created
- a VPC
- a subnet inside the VPC
- a security group inside the VPC
The VPC is isolated from the Internet, because you did not add an Internet Gateway to it.
You created a training job in the subnet. The traffic in and out the SageMaker Instance running your training container is controlled by the security group permissions. You verified that this training job failed, because SageMaker cannot download data from your S3 bucket.
Next, you added
- a route table to your subnet
- an S3 Gateway Endpoint to the route table
Then you verified that once you added the S3 Gateway Endpoint to your VPC, the same training job can go through.
## Practical considerations
If you are an ML practitioner, then most likely you will not need to touch VPC, because the network admin in your organization should have configured the VPC, subnet, security group, route table and VPC endpoints for you. The reason we discussed VPC configuration in this notebook is to get you familiar with the basic concepts of network engineering, so that when something goes wrong, you can message your network admin with more precise questions or requests.
One common situation is that your org owns a VPC that has both public and private subnets. You are configuring a SageMaker training job on an EC2 / Notebook Instance / Studio in the public subnet and you want the training job to be executed in the private subnet. In that case, all you need to do is pass the subnet id and security group id to the `CreateTrainingJob` API and set the `EnableNetworkIsolation` flag to `True`.
## Clean up
Now, let's tear down all resources you created in this notebook.
```
# delete the entire VPC and its associated resources
# adapted from https://gist.github.com/alberto-morales/b6d7719763f483185db27289d51f8ec5
def vpc_cleanup(vpcid):
    """Remove a VPC and its dependent resources from AWS.

    Deletion order matters: instances, interfaces, subnets, route tables,
    endpoints, security groups, peering connections and ACLs must go before
    the VPC itself can be deleted.
    Set your region/access-key/secret-key from env variables or boto config.
    :param vpcid: id of vpc to delete
    """
    if not vpcid:
        return
    print("Removing VPC ({}) from AWS".format(vpcid))
    ec2 = boto3.resource("ec2")
    ec2client = ec2.meta.client
    vpc = ec2.Vpc(vpcid)
    # detach default dhcp_options if associated with the vpc
    dhcp_options_default = ec2.DhcpOptions("default")
    if dhcp_options_default:
        dhcp_options_default.associate_with_vpc(VpcId=vpc.id)
    # detach and delete all gateways associated with the vpc
    for gw in vpc.internet_gateways.all():
        vpc.detach_internet_gateway(InternetGatewayId=gw.id)
        gw.delete()
    # delete any instances
    for subnet in vpc.subnets.all():
        for instance in subnet.instances.all():
            instance.terminate()
    # delete all subnets (network interfaces first, or the subnet delete fails)
    for subnet in vpc.subnets.all():
        for interface in subnet.network_interfaces.all():
            interface.delete()
        subnet.delete()
    # delete all route table associations
    for rt in vpc.route_tables.all():
        for rta in rt.associations:
            if not rta.main:
                rta.delete()
        try:
            rt.delete()
        except Exception as e:
            # Best-effort: the main route table cannot be deleted directly;
            # it is removed together with the VPC below.
            pass
    # delete our endpoints
    for ep in ec2client.describe_vpc_endpoints(Filters=[{"Name": "vpc-id", "Values": [vpcid]}])[
        "VpcEndpoints"
    ]:
        ec2client.delete_vpc_endpoints(VpcEndpointIds=[ep["VpcEndpointId"]])
    # delete our security groups (the 'default' group is owned by the VPC)
    for sg in vpc.security_groups.all():
        if sg.group_name != "default":
            sg.delete()
    # delete any vpc peering connections
    for vpcpeer in ec2client.describe_vpc_peering_connections(
        Filters=[{"Name": "requester-vpc-info.vpc-id", "Values": [vpcid]}]
    )["VpcPeeringConnections"]:
        ec2.VpcPeeringConnection(vpcpeer["VpcPeeringConnectionId"]).delete()
    # delete non-default network acls
    for netacl in vpc.network_acls.all():
        if not netacl.is_default:
            netacl.delete()
    # finally, delete the vpc
    ec2client.delete_vpc(VpcId=vpcid)
    return
vpc_cleanup(vpc_res["Vpc"]["VpcId"])
```
| github_jupyter |
```
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.dates as mdates
from datetime import date
from datetime import datetime
import numpy as np
from summer.utils import ref_times_to_dti
from autumn.tools.inputs.demography.queries import get_population_by_agegroup
from autumn.models.covid_19.detection import create_cdr_function
from autumn.tools.utils.utils import apply_moving_average
from autumn.tools.curve.scale_up import scale_up_function
from autumn.tools.project import get_project
from autumn.settings import Region, Models
from autumn.models.covid_19.constants import AGEGROUP_STRATA, BASE_DATETIME
from autumn.models.covid_19.mixing_matrix.macrodistancing import weight_mobility_data
from autumn.tools.plots.utils import REF_DATE
from autumn.tools import inputs
from autumn.tools.inputs.database import get_input_db
from autumn.tools.utils.display import pretty_print
from autumn.tools.inputs.social_mixing.build_synthetic_matrices import build_synthetic_matrices
from autumn.models.covid_19.detection import get_testing_numbers_for_region
# Age-band lower bounds as integers (used as bar-chart x positions below).
age_integers = [int(group) for group in AGEGROUP_STRATA]
# Select the SM-SIR model for the Cox's Bazar region.
model = Models.SM_SIR
region = Region.COXS_BAZAR
```
## Population
```
fig, ax = plt.subplots(1, 1, figsize=(10, 6))
project = get_project(model, region)
# Population by modelled age band; "FDMN" presumably selects the FDMN
# sub-population for this region -- confirm against the inputs database.
total_pops = inputs.get_population_by_agegroup(
    AGEGROUP_STRATA,
    project.param_set.baseline["country"]["iso3"],
    "FDMN",
    year=project.param_set.baseline["population"]["year"]
)
print(f"total modelled population of {region} is: {round(sum(total_pops) / 1e3, 3)} thousand")
# Bar per age band, positioned at the band's starting age.
ax.bar(age_integers, total_pops, width=4)
ax.set_title(region)
ax.set_ylabel("population")
ax.set_xlabel("starting age of age bracket")
fig.suptitle("population distribution by age")
```
## Mobility
### Mobility is only available at the national level, not for Cox's Bazar or FDMNs
```
print("Whether the mobility effects are actually turned on at all:")
project.param_set.baseline["is_dynamic_mixing_matrix"]
# Shared y-axis ceiling for both mobility panels.
y_upper = 2.
# Collate data together
input_db = get_input_db()
# National-level series for Bangladesh (mobility is not available sub-regionally).
mob_df = input_db.query("mobility", conditions={"iso3": "BGD"})
times = [datetime.strptime(i, "%Y-%m-%d") for i in mob_df["date"]]
# Weight the raw Google domains into the model's location-specific series.
google_mob_df = weight_mobility_data(mob_df, project.param_set.baseline["mobility"]["google_mobility_locations"])
# Get plots ready
mob_fig, mob_axes = plt.subplots(1, 2, figsize=(12, 6))
plot_left_date = date(2020, 1, 1)
plot_right_date = times[-1] # Not sure why this is necessary
# Plot raw mobility data
ax = mob_axes[0]
for mobility_domain in ["grocery_and_pharmacy", "residential", "parks", "retail_and_recreation", "transit_stations"]:
    ax.plot(times, mob_df[mobility_domain], label=mobility_domain)
ax.set_ylim((0., y_upper))
ax.tick_params(axis="x", labelrotation=45)
ax.set_title("raw Google mobility domains")
ax.legend(loc="lower right")
ax.set_xlim(left=plot_left_date, right=plot_right_date)
# Plot processed mobility data
ax = mob_axes[1]
for location in list(project.param_set.baseline["mobility"]["google_mobility_locations"].keys()):
    ax.plot(times, google_mob_df[location], label=location)
ax.tick_params(axis="x", labelrotation=45)
ax.set_ylim((0., y_upper))
ax.legend(loc="lower left")
ax.set_title("mobility as implemented in the model")
mob_fig.tight_layout(w_pad=1.5, h_pad=3.5)
ax.set_xlim(left=plot_left_date, right=plot_right_date)
```
## Mixing matrix
### Check how mixing matrix is specified for each region
```
# Summarise which country's empirical matrix is used as the proxy for this region.
print(f"Modelled country: {project.param_set.baseline['country']['iso3']}")
print(f"Modelled sub-region: {project.param_set.baseline['population']['region']}")
print(f"Proxy country: {project.param_set.baseline['ref_mixing_iso3']}")
print("Always age-adjusted under SM-SIR code")
```
### Display the matrix and the matrix components
```
# Build and display matrices for both the base and the modelled age groupings.
agegroup_types = {
    "base age groups": AGEGROUP_STRATA,
    "modelled age groups": project.param_set.baseline["age_groups"],
}
for title, agegroups in agegroup_types.items():
    mixing_matrix = build_synthetic_matrices(
        project.param_set.baseline["country"]["iso3"],
        project.param_set.baseline["ref_mixing_iso3"],
        agegroups,
        True,
        project.param_set.baseline["population"]["region"]
    )
    fig = plt.figure(figsize=(12, 8))
    # Subplot slots in a 2x3 grid; slot 4 is left empty intentionally.
    positions = [1, 2, 3, 5, 6]
    for i_loc, location in zip(positions, mixing_matrix.keys()):
        ax = fig.add_subplot(2, 3, i_loc)
        # Transpose + flip so younger ages sit at the lower-left corner.
        ax.imshow(
            np.flipud(np.transpose(mixing_matrix[location])),
            cmap=cm.hot,
            vmin=0,
            vmax=mixing_matrix[location].max(),
            origin="lower"
        )
        ax.set_title(location.replace("_", " "))
        ax.set_xticks([])
        ax.set_yticks([])
    fig.suptitle(title)
```
## Case detection
```
# Get the CDR function of tests
cdr_from_tests_func = create_cdr_function(
    project.param_set.baseline["testing_to_detection"]["assumed_tests_parameter"],
    project.param_set.baseline["testing_to_detection"]["assumed_cdr_parameter"],
)
# Get the denominator population
testing_pops = get_population_by_agegroup(
    project.param_set.baseline["age_groups"],
    project.param_set.baseline["country"]["iso3"],
    project.param_set.baseline["population"]["region"]
)
# Process the data
test_times, test_values = get_testing_numbers_for_region("BGD", "FDMN")
test_dates = ref_times_to_dti(BASE_DATETIME, [int(time) for time in test_times])
per_capita_tests = [i_tests / sum(testing_pops) for i_tests in test_values]
# Dense grid of testing rates, used to draw the tests-to-CDR curve below.
dummy_tests = np.linspace(0, max(per_capita_tests), 200)
# NOTE(review): smoothing is gated on `assumed_tests_parameter`; should this
# check `smoothing_period` instead? Confirm against the project config.
if project.param_set.baseline["testing_to_detection"]["assumed_tests_parameter"]:
    smoothed_per_capita_tests = apply_moving_average(
        per_capita_tests,
        project.param_set.baseline["testing_to_detection"]["smoothing_period"]
    )
else:
    smoothed_per_capita_tests = per_capita_tests
# Interpolate the per-time CDR values into a continuous function of time.
cdr_function_of_time = scale_up_function(
    test_times,
    [cdr_from_tests_func(test_rate) for test_rate in smoothed_per_capita_tests],
    smoothness=0.2, method=4, bound_low=0.,
)
# Plot
fig, axes = plt.subplots(2, 2, figsize=(12, 8))
fig.tight_layout(w_pad=1.5, h_pad=5)
def sort_axis_dates(ax):
    """Rotate the x tick labels and clamp the x-axis to the shared date window.

    Relies on `plot_left_date` / `plot_right_date` defined in the mobility cell.
    """
    # BUG FIX: the original body referenced the module-level `axis` variable and
    # ignored its `ax` parameter entirely; it only worked because every caller
    # happened to pass that same global. Use the parameter.
    ax.tick_params(axis="x", labelrotation=45)
    ax.set_xlim(left=plot_left_date, right=plot_right_date)
# Plot daily number of tests
axis = axes[0, 0]
axis.plot(test_dates, test_values, marker="o")
axis.set_title("daily testing numbers")
sort_axis_dates(axis)
# Plot daily per capita testing rate (raw vs smoothed)
axis = axes[0, 1]
axis.plot(test_dates, per_capita_tests, label="raw")
axis.plot(test_dates, smoothed_per_capita_tests, label="smoothed")
axis.set_title("daily per capita testing rate")
sort_axis_dates(axis)
axis.legend()
# Plot relationship of daily tests to CDR proportion
axis = axes[1, 0]
axis.plot(dummy_tests, cdr_from_tests_func(dummy_tests))
# Red dots mark the observed testing rates on the same curve.
axis.scatter(per_capita_tests, [cdr_from_tests_func(i_tests) for i_tests in per_capita_tests], color="r")
axis.set_ylabel("case detection proportion")
axis.set_xlabel("per capita testing rate")
axis.set_title("daily per capita tests to CDR relationship")
axis.set_ylim(top=1.)
# Plot CDR values: point estimates (red) against the interpolated function.
axis = axes[1, 1]
axis.scatter(test_dates, [cdr_from_tests_func(i_test_rate) for i_test_rate in smoothed_per_capita_tests], color="r")
axis.plot(test_dates, [cdr_function_of_time(time) for time in test_times])
axis.set_title("Final case detection rate")
axis.set_ylabel("proportion")
sort_axis_dates(axis)
fig.tight_layout()
```
| github_jupyter |
##### (excerpt from Python Machine Learning Essentials, Supplementary Materials)
## Sections
- [Classifying handwritten digits](#Classifying-handwritten-digits)
- [Obtaining the MNIST dataset](#Obtaining-the-MNIST-dataset)
- [Implementing a multi-layer perceptron](#Implementing-a-multi-layer-perceptron)
- [Training an artificial neural network](#Training-an-artificial-neural-network)
- [Debugging neural networks with gradient checking](#Debugging-neural-networks-with-gradient-checking)
---
# Classifying handwritten digits
## Obtaining the MNIST dataset
[[back to top](#Sections)]
The MNIST dataset is publicly available at http://yann.lecun.com/exdb/mnist/ and consists of the following four parts:
- Training set images: train-images-idx3-ubyte.gz (9.9 MB, 47 MB unzipped, 60,000 samples)
- Training set labels: train-labels-idx1-ubyte.gz (29 KB, 60 KB unzipped, 60,000 labels)
- Test set images: t10k-images-idx3-ubyte.gz (1.6 MB, 7.8 MB, 10,000 samples)
- Test set labels: t10k-labels-idx1-ubyte.gz (5 KB, 10 KB unzipped, 10,000 labels)
In this section, we will only be working with a subset of MNIST, thus, we only need to download the training set images and training set labels. After downloading the files, I recommend unzipping the files using the Unix/Linux gzip tool from the terminal for efficiency, e.g., using the command
gzip *ubyte.gz -d
in your local MNIST download directory, or, using your favorite unzipping tool if you are working with a machine running on Microsoft Windows. The images are stored in byte form, and using the following function, we will read them into NumPy arrays that we will use to train our MLP.
### Get MNIST Dataset
**Note**: The following commands will work on Linux/Unix (e.g. Mac OSX) Platforms
```
# Fetch the four MNIST IDX archives (train/test images and labels) into ../data/mnist
!mkdir -p ../data/mnist
!curl http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz --output ../data/mnist/train-images-idx3-ubyte.gz
!curl http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz --output ../data/mnist/train-labels-idx1-ubyte.gz
!curl http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz --output ../data/mnist/t10k-images-idx3-ubyte.gz
!curl http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz --output ../data/mnist/t10k-labels-idx1-ubyte.gz
```
### Load MNIST Data
```
import os
import struct
import numpy as np
def load_mnist(path, kind='train'):
    """Read MNIST images and labels from the idx files under `path`.

    `kind` selects the file-name prefix ('train' or 't10k').  Returns a
    tuple ``(images, labels)`` where ``images`` is an (n_samples, 784)
    uint8 array of flattened 28x28 pixels and ``labels`` is an
    (n_samples,) uint8 array.
    """
    labels_file = os.path.join(path, '%s-labels-idx1-ubyte' % kind)
    images_file = os.path.join(path, '%s-images-idx3-ubyte' % kind)
    with open(labels_file, 'rb') as lf:
        # idx1 header: magic number + item count, big-endian uint32s
        struct.unpack('>II', lf.read(8))
        labels = np.fromfile(lf, dtype=np.uint8)
    with open(images_file, 'rb') as imf:
        # idx3 header: magic, count, rows, cols
        struct.unpack('>IIII', imf.read(16))
        pixels = np.fromfile(imf, dtype=np.uint8)
    # one flattened 28*28 = 784 pixel row per label
    return pixels.reshape(len(labels), 784), labels
# NOTE(review): the curl cells above download into ../data/mnist, but this
# loads from data/mnist — confirm the notebook's working directory makes
# these the same location.
X_train, y_train = load_mnist('data/mnist', kind='train')
print('Rows: %d, columns: %d' % (X_train.shape[0], X_train.shape[1]))
X_test, y_test = load_mnist('data/mnist', kind='t10k')
print('Rows: %d, columns: %d' % (X_test.shape[0], X_test.shape[1]))
```
Visualize the first digit of each class:
```
import matplotlib.pyplot as plt
%matplotlib inline
# Show the first occurrence of each digit class 0-9 on a 2x5 grid
fig, ax = plt.subplots(nrows=2, ncols=5, sharex=True, sharey=True,)
ax = ax.flatten()
for i in range(10):
    # first training image labelled i, reshaped back to 28x28 pixels
    img = X_train[y_train == i][0].reshape(28, 28)
    ax[i].imshow(img, cmap='Greys', interpolation='nearest')
ax[0].set_xticks([])
ax[0].set_yticks([])
plt.tight_layout()
# plt.savefig('./figures/mnist_all.png', dpi=300)
plt.show()
```
Visualize 25 different versions of "7":
```
# Show 25 different handwritten versions of the digit 7 on a 5x5 grid
fig, ax = plt.subplots(nrows=5, ncols=5, sharex=True, sharey=True,)
ax = ax.flatten()
for i in range(25):
    img = X_train[y_train == 7][i].reshape(28, 28)
    ax[i].imshow(img, cmap='Greys', interpolation='nearest')
ax[0].set_xticks([])
ax[0].set_yticks([])
plt.tight_layout()
# plt.savefig('./figures/mnist_7.png', dpi=300)
plt.show()
```
Uncomment the following lines to optionally save the data in CSV format.
However, note that those CSV files will take up a substantial amount of storage space:
- train_img.csv 1.1 GB (gigabytes)
- train_labels.csv 1.4 MB (megabytes)
- test_img.csv 187.0 MB
- test_labels 144 KB (kilobytes)
```
# Optional CSV round-trip of the MNIST arrays.
# NOTE(review): the genfromtxt calls run unconditionally and will fail unless
# the CSVs were written on a previous run — presumably they are meant to be
# toggled together with the savetxt lines above them.
#np.savetxt('train_img.csv', X_train, fmt='%i', delimiter=',')
#np.savetxt('train_labels.csv', y_train, fmt='%i', delimiter=',')
X_train = np.genfromtxt('train_img.csv', dtype=int, delimiter=',')
y_train = np.genfromtxt('train_labels.csv', dtype=int, delimiter=',')
#np.savetxt('test_img.csv', X_test, fmt='%i', delimiter=',')
#np.savetxt('test_labels.csv', y_test, fmt='%i', delimiter=',')
X_test = np.genfromtxt('test_img.csv', dtype=int, delimiter=',')
y_test = np.genfromtxt('test_labels.csv', dtype=int, delimiter=',')
```
<br>
<br>
## Implementing a multi-layer perceptron
[[back to top](#Sections)]
```
import numpy as np
from scipy.special import expit
import sys
class NeuralNetMLP(object):
    """ Feedforward neural network / Multi-layer perceptron classifier.
    Parameters
    ------------
    n_output : int
        Number of output units, should be equal to the
        number of unique class labels.
    n_features : int
        Number of features (dimensions) in the target dataset.
        Should be equal to the number of columns in the X array.
    n_hidden : int (default: 30)
        Number of hidden units.
    l1 : float (default: 0.0)
        Lambda value for L1-regularization.
        No regularization if l1=0.0 (default)
    l2 : float (default: 0.0)
        Lambda value for L2-regularization.
        No regularization if l2=0.0 (default)
    epochs : int (default: 500)
        Number of passes over the training set.
    eta : float (default: 0.001)
        Learning rate.
    alpha : float (default: 0.0)
        Momentum constant. Factor multiplied with the
        gradient of the previous epoch t-1 to improve
        learning speed
        w(t) := w(t) - (grad(t) + alpha*grad(t-1))
    decrease_const : float (default: 0.0)
        Decrease constant. Shrinks the learning rate
        after each epoch via eta / (1 + epoch*decrease_const)
    shuffle : bool (default: True)
        Shuffles training data every epoch if True to prevent circles.
    minibatches : int (default: 1)
        Divides training data into k minibatches for efficiency.
        Normal gradient descent learning if k=1 (default).
    random_state : int (default: None)
        Set random state for shuffling and initializing the weights.
    Attributes
    -----------
    cost_ : list
        Regularized cross-entropy cost of every minibatch seen during fit
        (epochs * minibatches entries).
    """
    def __init__(self, n_output, n_features, n_hidden=30,
                 l1=0.0, l2=0.0, epochs=500, eta=0.001,
                 alpha=0.0, decrease_const=0.0, shuffle=True,
                 minibatches=1, random_state=None):
        np.random.seed(random_state)
        self.n_output = n_output
        self.n_features = n_features
        self.n_hidden = n_hidden
        self.w1, self.w2 = self._initialize_weights()
        self.l1 = l1
        self.l2 = l2
        self.epochs = epochs
        self.eta = eta
        self.alpha = alpha
        self.decrease_const = decrease_const
        self.shuffle = shuffle
        self.minibatches = minibatches

    def _encode_labels(self, y, k):
        """Encode labels into one-hot representation
        Parameters
        ------------
        y : array, shape = [n_samples]
            Target values.
        k : int
            Number of classes (rows of the one-hot matrix).
        Returns
        -----------
        onehot : array, shape = (n_labels, n_samples)
        """
        onehot = np.zeros((k, y.shape[0]))
        for idx, val in enumerate(y):
            onehot[val, idx] = 1.0
        return onehot

    def _initialize_weights(self):
        """Initialize weights with small random numbers."""
        w1 = np.random.uniform(-1.0, 1.0, size=self.n_hidden*(self.n_features + 1))
        w1 = w1.reshape(self.n_hidden, self.n_features + 1)
        w2 = np.random.uniform(-1.0, 1.0, size=self.n_output*(self.n_hidden + 1))
        w2 = w2.reshape(self.n_output, self.n_hidden + 1)
        return w1, w2

    def _sigmoid(self, z):
        """Compute logistic function (sigmoid)
        Uses scipy.special.expit to avoid overflow
        error for very small input values z.
        """
        return expit(z)

    def _sigmoid_gradient(self, z):
        """Compute gradient of the logistic function"""
        sg = self._sigmoid(z)
        return sg * (1 - sg)

    def _add_bias_unit(self, X, how='column'):
        """Add bias unit (column or row of 1s) to array at index 0"""
        if how == 'column':
            X_new = np.ones((X.shape[0], X.shape[1]+1))
            X_new[:, 1:] = X
        elif how == 'row':
            X_new = np.ones((X.shape[0]+1, X.shape[1]))
            X_new[1:, :] = X
        else:
            raise AttributeError('`how` must be `column` or `row`')
        return X_new

    def _feedforward(self, X, w1, w2):
        """Compute feedforward step
        Parameters
        -----------
        X : array, shape = [n_samples, n_features]
            Input layer with original features.
        w1 : array, shape = [n_hidden_units, n_features]
            Weight matrix for input layer -> hidden layer.
        w2 : array, shape = [n_output_units, n_hidden_units]
            Weight matrix for hidden layer -> output layer.
        Returns
        ----------
        a1 : array, shape = [n_samples, n_features+1]
            Input values with bias unit.
        z2 : array, shape = [n_hidden, n_samples]
            Net input of hidden layer.
        a2 : array, shape = [n_hidden+1, n_samples]
            Activation of hidden layer.
        z3 : array, shape = [n_output_units, n_samples]
            Net input of output layer.
        a3 : array, shape = [n_output_units, n_samples]
            Activation of output layer.
        """
        a1 = self._add_bias_unit(X, how='column')
        z2 = w1.dot(a1.T)
        a2 = self._sigmoid(z2)
        a2 = self._add_bias_unit(a2, how='row')
        z3 = w2.dot(a2)
        a3 = self._sigmoid(z3)
        return a1, z2, a2, z3, a3

    def _L2_reg(self, lambda_, w1, w2):
        """Compute L2-regularization cost (bias columns excluded)"""
        return (lambda_/2.0) * (np.sum(w1[:, 1:] ** 2) + np.sum(w2[:, 1:] ** 2))

    def _L1_reg(self, lambda_, w1, w2):
        """Compute L1-regularization cost (bias columns excluded)"""
        return (lambda_/2.0) * (np.abs(w1[:, 1:]).sum() + np.abs(w2[:, 1:]).sum())

    def _get_cost(self, y_enc, output, w1, w2):
        """Compute the regularized cross-entropy cost.
        y_enc : array, shape = (n_labels, n_samples)
            one-hot encoded class labels.
        output : array, shape = [n_output_units, n_samples]
            Activation of the output layer (feedforward)
        w1 : array, shape = [n_hidden_units, n_features]
            Weight matrix for input layer -> hidden layer.
        w2 : array, shape = [n_output_units, n_hidden_units]
            Weight matrix for hidden layer -> output layer.
        Returns
        ---------
        cost : float
            Regularized cost.
        """
        term1 = -y_enc * (np.log(output))
        term2 = (1 - y_enc) * np.log(1 - output)
        cost = np.sum(term1 - term2)
        L1_term = self._L1_reg(self.l1, w1, w2)
        L2_term = self._L2_reg(self.l2, w1, w2)
        cost = cost + L1_term + L2_term
        return cost

    def _get_gradient(self, a1, a2, a3, z2, y_enc, w1, w2):
        """ Compute gradient step using backpropagation.
        Parameters
        ------------
        a1 : array, shape = [n_samples, n_features+1]
            Input values with bias unit.
        a2 : array, shape = [n_hidden+1, n_samples]
            Activation of hidden layer.
        a3 : array, shape = [n_output_units, n_samples]
            Activation of output layer.
        z2 : array, shape = [n_hidden, n_samples]
            Net input of hidden layer.
        y_enc : array, shape = (n_labels, n_samples)
            one-hot encoded class labels.
        w1 : array, shape = [n_hidden_units, n_features]
            Weight matrix for input layer -> hidden layer.
        w2 : array, shape = [n_output_units, n_hidden_units]
            Weight matrix for hidden layer -> output layer.
        Returns
        ---------
        grad1 : array, shape = [n_hidden_units, n_features]
            Gradient of the weight matrix w1.
        grad2 : array, shape = [n_output_units, n_hidden_units]
            Gradient of the weight matrix w2.
        """
        # backpropagation
        sigma3 = a3 - y_enc
        z2 = self._add_bias_unit(z2, how='row')
        sigma2 = w2.T.dot(sigma3) * self._sigmoid_gradient(z2)
        sigma2 = sigma2[1:, :]  # drop the bias row before propagating further
        grad1 = sigma2.dot(a1)
        grad2 = sigma3.dot(a2.T)
        # regularize (bias columns excluded)
        grad1[:, 1:] += (w1[:, 1:] * (self.l1 + self.l2))
        grad2[:, 1:] += (w2[:, 1:] * (self.l1 + self.l2))
        return grad1, grad2

    def predict(self, X):
        """Predict class labels
        Parameters
        -----------
        X : array, shape = [n_samples, n_features]
            Input layer with original features.
        Returns:
        ----------
        y_pred : array, shape = [n_samples]
            Predicted class labels.
        """
        if len(X.shape) != 2:
            raise AttributeError('X must be a [n_samples, n_features] array.\n'
                                 'Use X[:,None] for 1-feature classification,'
                                 '\nor X[[i]] for 1-sample classification')
        a1, z2, a2, z3, a3 = self._feedforward(X, self.w1, self.w2)
        # argmax over z3 equals argmax over a3 because sigmoid is monotonic
        y_pred = np.argmax(z3, axis=0)
        return y_pred

    def fit(self, X, y, print_progress=False):
        """ Learn weights from training data.
        Parameters
        -----------
        X : array, shape = [n_samples, n_features]
            Input layer with original features.
        y : array, shape = [n_samples]
            Target class labels.
        print_progress : bool (default: False)
            Prints progress as the number of epochs
            to stderr.
        Returns:
        ----------
        self
        """
        self.cost_ = []
        X_data, y_data = X.copy(), y.copy()
        y_enc = self._encode_labels(y, self.n_output)
        delta_w1_prev = np.zeros(self.w1.shape)
        delta_w2_prev = np.zeros(self.w2.shape)
        for i in range(self.epochs):
            # adaptive learning rate eta / (1 + epoch*decrease_const), computed
            # from the initial eta each epoch; mutating self.eta in place would
            # compound the decay and contradict the docstring formula
            eta = self.eta / (1 + self.decrease_const*i)
            if print_progress:
                sys.stderr.write('\rEpoch: %d/%d' % (i+1, self.epochs))
                sys.stderr.flush()
            if self.shuffle:
                # permute the working copy of the features AND the one-hot
                # labels together; shuffling X_data alone while training on the
                # unshuffled X made the shuffle a no-op
                idx = np.random.permutation(y_data.shape[0])
                X_data, y_enc = X_data[idx], y_enc[:, idx]
            mini = np.array_split(range(y_data.shape[0]), self.minibatches)
            for idx in mini:
                # feedforward on the (possibly shuffled) working copy
                a1, z2, a2, z3, a3 = self._feedforward(X_data[idx],
                                                       self.w1, self.w2)
                cost = self._get_cost(y_enc=y_enc[:, idx],
                                      output=a3,
                                      w1=self.w1,
                                      w2=self.w2)
                self.cost_.append(cost)
                # compute gradient via backpropagation
                grad1, grad2 = self._get_gradient(a1=a1, a2=a2,
                                                  a3=a3, z2=z2,
                                                  y_enc=y_enc[:, idx],
                                                  w1=self.w1,
                                                  w2=self.w2)
                # update weights; [alpha * delta_w_prev] adds momentum
                delta_w1, delta_w2 = eta * grad1, eta * grad2
                self.w1 -= (delta_w1 + (self.alpha * delta_w1_prev))
                self.w2 -= (delta_w2 + (self.alpha * delta_w2_prev))
                delta_w1_prev, delta_w2_prev = delta_w1, delta_w2
        return self
```
<br>
<br>
## Training an artificial neural network
[[back to top](#Sections)]
```
nn = NeuralNetMLP(n_output=10,
n_features=X_train.shape[1],
n_hidden=50,
l2=0.1,
l1=0.0,
epochs=1000,
eta=0.001,
alpha=0.001,
decrease_const=0.00001,
minibatches=50,
random_state=1)
nn.fit(X_train, y_train, print_progress=True)
%matplotlib inline
import matplotlib.pyplot as plt
plt.plot(range(len(nn.cost_)), nn.cost_)
plt.ylim([0, 2000])
plt.ylabel('Cost')
plt.xlabel('Epochs * 50')
plt.tight_layout()
# plt.savefig('./figures/cost.png', dpi=300)
plt.show()
batches = np.array_split(range(len(nn.cost_)), 1000)
cost_ary = np.array(nn.cost_)
cost_avgs = [np.mean(cost_ary[i]) for i in batches]
plt.plot(range(len(cost_avgs)), cost_avgs, color='red')
plt.ylim([0, 2000])
plt.ylabel('Cost')
plt.xlabel('Epochs')
plt.tight_layout()
plt.savefig('./figures/cost2.png', dpi=300)
plt.show()
y_train_pred = nn.predict(X_train)
acc = np.sum(y_train == y_train_pred, axis=0) / X_train.shape[0]
print('Training accuracy: %.2f%%' % (acc * 100))
y_test_pred = nn.predict(X_test)
acc = np.sum(y_test == y_test_pred, axis=0) / X_test.shape[0]
print('Training accuracy: %.2f%%' % (acc * 100))
miscl_img = X_test[y_test != y_test_pred][:25]
correct_lab = y_test[y_test != y_test_pred][:25]
miscl_lab= y_test_pred[y_test != y_test_pred][:25]
fig, ax = plt.subplots(nrows=5, ncols=5, sharex=True, sharey=True,)
ax = ax.flatten()
for i in range(25):
img = miscl_img[i].reshape(28, 28)
ax[i].imshow(img, cmap='Greys', interpolation='nearest')
ax[i].set_title('%d) t: %d p: %d' % (i+1, correct_lab[i], miscl_lab[i]))
ax[0].set_xticks([])
ax[0].set_yticks([])
plt.tight_layout()
# plt.savefig('./figures/mnist_miscl.png', dpi=300)
plt.show()
```
<br>
<br>
# Debugging neural networks with gradient checking
[[back to top](#Sections)]
```
import numpy as np
from scipy.special import expit
import sys
class MLPGradientCheck(object):
    """ Feedforward neural network / Multi-layer perceptron classifier
    with numerical gradient checking (for debugging only — very slow).
    Parameters
    ------------
    n_output : int
        Number of output units, should be equal to the
        number of unique class labels.
    n_features : int
        Number of features (dimensions) in the target dataset.
        Should be equal to the number of columns in the X array.
    n_hidden : int (default: 30)
        Number of hidden units.
    l1 : float (default: 0.0)
        Lambda value for L1-regularization.
        No regularization if l1=0.0 (default)
    l2 : float (default: 0.0)
        Lambda value for L2-regularization.
        No regularization if l2=0.0 (default)
    epochs : int (default: 500)
        Number of passes over the training set.
    eta : float (default: 0.001)
        Learning rate.
    alpha : float (default: 0.0)
        Momentum constant. Factor multiplied with the
        gradient of the previous epoch t-1 to improve
        learning speed
        w(t) := w(t) - (grad(t) + alpha*grad(t-1))
    decrease_const : float (default: 0.0)
        Decrease constant. Shrinks the learning rate
        after each epoch via eta / (1 + epoch*decrease_const)
    shuffle : bool (default: True)
        Shuffles training data every epoch if True to prevent circles.
    minibatches : int (default: 1)
        Divides training data into k minibatches for efficiency.
        Normal gradient descent learning if k=1 (default).
    random_state : int (default: None)
        Set random state for shuffling and initializing the weights.
    Attributes
    -----------
    cost_ : list
        Regularized cross-entropy cost of every minibatch seen during fit.
    """
    def __init__(self, n_output, n_features, n_hidden=30,
                 l1=0.0, l2=0.0, epochs=500, eta=0.001,
                 alpha=0.0, decrease_const=0.0, shuffle=True,
                 minibatches=1, random_state=None):
        np.random.seed(random_state)
        self.n_output = n_output
        self.n_features = n_features
        self.n_hidden = n_hidden
        self.w1, self.w2 = self._initialize_weights()
        self.l1 = l1
        self.l2 = l2
        self.epochs = epochs
        self.eta = eta
        self.alpha = alpha
        self.decrease_const = decrease_const
        self.shuffle = shuffle
        self.minibatches = minibatches

    def _encode_labels(self, y, k):
        """Encode labels into one-hot representation
        Parameters
        ------------
        y : array, shape = [n_samples]
            Target values.
        k : int
            Number of classes (rows of the one-hot matrix).
        Returns
        -----------
        onehot : array, shape = (n_labels, n_samples)
        """
        onehot = np.zeros((k, y.shape[0]))
        for idx, val in enumerate(y):
            onehot[val, idx] = 1.0
        return onehot

    def _initialize_weights(self):
        """Initialize weights with small random numbers."""
        w1 = np.random.uniform(-1.0, 1.0, size=self.n_hidden*(self.n_features + 1))
        w1 = w1.reshape(self.n_hidden, self.n_features + 1)
        w2 = np.random.uniform(-1.0, 1.0, size=self.n_output*(self.n_hidden + 1))
        w2 = w2.reshape(self.n_output, self.n_hidden + 1)
        return w1, w2

    def _sigmoid(self, z):
        """Compute logistic function (sigmoid)
        Uses scipy.special.expit to avoid overflow
        error for very small input values z.
        """
        return expit(z)

    def _sigmoid_gradient(self, z):
        """Compute gradient of the logistic function"""
        sg = self._sigmoid(z)
        return sg * (1 - sg)

    def _add_bias_unit(self, X, how='column'):
        """Add bias unit (column or row of 1s) to array at index 0"""
        if how == 'column':
            X_new = np.ones((X.shape[0], X.shape[1]+1))
            X_new[:, 1:] = X
        elif how == 'row':
            X_new = np.ones((X.shape[0]+1, X.shape[1]))
            X_new[1:, :] = X
        else:
            raise AttributeError('`how` must be `column` or `row`')
        return X_new

    def _feedforward(self, X, w1, w2):
        """Compute feedforward step
        Parameters
        -----------
        X : array, shape = [n_samples, n_features]
            Input layer with original features.
        w1 : array, shape = [n_hidden_units, n_features]
            Weight matrix for input layer -> hidden layer.
        w2 : array, shape = [n_output_units, n_hidden_units]
            Weight matrix for hidden layer -> output layer.
        Returns
        ----------
        a1 : array, shape = [n_samples, n_features+1]
            Input values with bias unit.
        z2 : array, shape = [n_hidden, n_samples]
            Net input of hidden layer.
        a2 : array, shape = [n_hidden+1, n_samples]
            Activation of hidden layer.
        z3 : array, shape = [n_output_units, n_samples]
            Net input of output layer.
        a3 : array, shape = [n_output_units, n_samples]
            Activation of output layer.
        """
        a1 = self._add_bias_unit(X, how='column')
        z2 = w1.dot(a1.T)
        a2 = self._sigmoid(z2)
        a2 = self._add_bias_unit(a2, how='row')
        z3 = w2.dot(a2)
        a3 = self._sigmoid(z3)
        return a1, z2, a2, z3, a3

    def _L2_reg(self, lambda_, w1, w2):
        """Compute L2-regularization cost (bias columns excluded)"""
        return (lambda_/2.0) * (np.sum(w1[:, 1:] ** 2) + np.sum(w2[:, 1:] ** 2))

    def _L1_reg(self, lambda_, w1, w2):
        """Compute L1-regularization cost (bias columns excluded)"""
        return (lambda_/2.0) * (np.abs(w1[:, 1:]).sum() + np.abs(w2[:, 1:]).sum())

    def _get_cost(self, y_enc, output, w1, w2):
        """Compute the regularized cross-entropy cost.
        y_enc : array, shape = (n_labels, n_samples)
            one-hot encoded class labels.
        output : array, shape = [n_output_units, n_samples]
            Activation of the output layer (feedforward)
        w1 : array, shape = [n_hidden_units, n_features]
            Weight matrix for input layer -> hidden layer.
        w2 : array, shape = [n_output_units, n_hidden_units]
            Weight matrix for hidden layer -> output layer.
        Returns
        ---------
        cost : float
            Regularized cost.
        """
        term1 = -y_enc * (np.log(output))
        term2 = (1 - y_enc) * np.log(1 - output)
        cost = np.sum(term1 - term2)
        L1_term = self._L1_reg(self.l1, w1, w2)
        L2_term = self._L2_reg(self.l2, w1, w2)
        cost = cost + L1_term + L2_term
        return cost

    def _get_gradient(self, a1, a2, a3, z2, y_enc, w1, w2):
        """ Compute gradient step using backpropagation.
        Parameters
        ------------
        a1 : array, shape = [n_samples, n_features+1]
            Input values with bias unit.
        a2 : array, shape = [n_hidden+1, n_samples]
            Activation of hidden layer.
        a3 : array, shape = [n_output_units, n_samples]
            Activation of output layer.
        z2 : array, shape = [n_hidden, n_samples]
            Net input of hidden layer.
        y_enc : array, shape = (n_labels, n_samples)
            one-hot encoded class labels.
        w1 : array, shape = [n_hidden_units, n_features]
            Weight matrix for input layer -> hidden layer.
        w2 : array, shape = [n_output_units, n_hidden_units]
            Weight matrix for hidden layer -> output layer.
        Returns
        ---------
        grad1 : array, shape = [n_hidden_units, n_features]
            Gradient of the weight matrix w1.
        grad2 : array, shape = [n_output_units, n_hidden_units]
            Gradient of the weight matrix w2.
        """
        # backpropagation
        sigma3 = a3 - y_enc
        z2 = self._add_bias_unit(z2, how='row')
        sigma2 = w2.T.dot(sigma3) * self._sigmoid_gradient(z2)
        sigma2 = sigma2[1:, :]  # drop the bias row before propagating further
        grad1 = sigma2.dot(a1)
        grad2 = sigma3.dot(a2.T)
        # regularize (bias columns excluded)
        grad1[:, 1:] += (w1[:, 1:] * (self.l1 + self.l2))
        grad2[:, 1:] += (w2[:, 1:] * (self.l1 + self.l2))
        return grad1, grad2

    def _gradient_checking(self, X, y_enc, w1, w2, epsilon, grad1, grad2):
        """ Apply gradient checking (for debugging only)
        Numerically approximates each partial derivative with the symmetric
        difference quotient (J(w+eps) - J(w-eps)) / (2*eps) and compares the
        result against the backpropagated gradients.
        Returns
        ---------
        relative_error : float
            Relative error between the numerically
            approximated gradients and the backpropagated gradients.
        """
        num_grad1 = np.zeros(np.shape(w1))
        epsilon_ary1 = np.zeros(np.shape(w1))
        for i in range(w1.shape[0]):
            for j in range(w1.shape[1]):
                # perturb exactly one weight at a time
                epsilon_ary1[i, j] = epsilon
                a1, z2, a2, z3, a3 = self._feedforward(X, w1 - epsilon_ary1, w2)
                cost1 = self._get_cost(y_enc, a3, w1-epsilon_ary1, w2)
                a1, z2, a2, z3, a3 = self._feedforward(X, w1 + epsilon_ary1, w2)
                cost2 = self._get_cost(y_enc, a3, w1 + epsilon_ary1, w2)
                num_grad1[i, j] = (cost2 - cost1) / (2 * epsilon)
                epsilon_ary1[i, j] = 0
        num_grad2 = np.zeros(np.shape(w2))
        epsilon_ary2 = np.zeros(np.shape(w2))
        for i in range(w2.shape[0]):
            for j in range(w2.shape[1]):
                epsilon_ary2[i, j] = epsilon
                a1, z2, a2, z3, a3 = self._feedforward(X, w1, w2 - epsilon_ary2)
                cost1 = self._get_cost(y_enc, a3, w1, w2 - epsilon_ary2)
                a1, z2, a2, z3, a3 = self._feedforward(X, w1, w2 + epsilon_ary2)
                cost2 = self._get_cost(y_enc, a3, w1, w2 + epsilon_ary2)
                num_grad2[i, j] = (cost2 - cost1) / (2 * epsilon)
                epsilon_ary2[i, j] = 0
        num_grad = np.hstack((num_grad1.flatten(), num_grad2.flatten()))
        grad = np.hstack((grad1.flatten(), grad2.flatten()))
        norm1 = np.linalg.norm(num_grad - grad)
        norm2 = np.linalg.norm(num_grad)
        norm3 = np.linalg.norm(grad)
        relative_error = norm1 / (norm2 + norm3)
        return relative_error

    def predict(self, X):
        """Predict class labels
        Parameters
        -----------
        X : array, shape = [n_samples, n_features]
            Input layer with original features.
        Returns:
        ----------
        y_pred : array, shape = [n_samples]
            Predicted class labels.
        """
        if len(X.shape) != 2:
            raise AttributeError('X must be a [n_samples, n_features] array.\n'
                                 'Use X[:,None] for 1-feature classification,'
                                 '\nor X[[i]] for 1-sample classification')
        a1, z2, a2, z3, a3 = self._feedforward(X, self.w1, self.w2)
        # argmax over z3 equals argmax over a3 because sigmoid is monotonic
        y_pred = np.argmax(z3, axis=0)
        return y_pred

    def fit(self, X, y, print_progress=False):
        """ Learn weights from training data, printing the relative error of
        a numerical gradient check for every minibatch.
        Parameters
        -----------
        X : array, shape = [n_samples, n_features]
            Input layer with original features.
        y : array, shape = [n_samples]
            Target class labels.
        print_progress : bool (default: False)
            Prints progress as the number of epochs
            to stderr.
        Returns:
        ----------
        self
        """
        self.cost_ = []
        X_data, y_data = X.copy(), y.copy()
        y_enc = self._encode_labels(y, self.n_output)
        delta_w1_prev = np.zeros(self.w1.shape)
        delta_w2_prev = np.zeros(self.w2.shape)
        for i in range(self.epochs):
            # adaptive learning rate eta / (1 + epoch*decrease_const), computed
            # from the initial eta each epoch; mutating self.eta in place would
            # compound the decay and contradict the docstring formula
            eta = self.eta / (1 + self.decrease_const*i)
            if print_progress:
                sys.stderr.write('\rEpoch: %d/%d' % (i+1, self.epochs))
                sys.stderr.flush()
            if self.shuffle:
                # permute the working copy of the features AND the one-hot
                # labels together; shuffling X_data alone while training on the
                # unshuffled X made the shuffle a no-op
                idx = np.random.permutation(y_data.shape[0])
                X_data, y_enc = X_data[idx], y_enc[:, idx]
            mini = np.array_split(range(y_data.shape[0]), self.minibatches)
            for idx in mini:
                # feedforward on the (possibly shuffled) working copy
                a1, z2, a2, z3, a3 = self._feedforward(X_data[idx],
                                                       self.w1, self.w2)
                cost = self._get_cost(y_enc=y_enc[:, idx],
                                      output=a3,
                                      w1=self.w1,
                                      w2=self.w2)
                self.cost_.append(cost)
                # compute gradient via backpropagation
                grad1, grad2 = self._get_gradient(a1=a1, a2=a2,
                                                  a3=a3, z2=z2,
                                                  y_enc=y_enc[:, idx],
                                                  w1=self.w1,
                                                  w2=self.w2)
                ## start gradient checking
                grad_diff = self._gradient_checking(X=X_data[idx], y_enc=y_enc[:, idx],
                                                    w1=self.w1, w2=self.w2,
                                                    epsilon=1e-5,
                                                    grad1=grad1, grad2=grad2)
                if grad_diff <= 1e-7:
                    print('Ok: %s' % grad_diff)
                elif grad_diff <= 1e-4:
                    print('Warning: %s' % grad_diff)
                else:
                    print('PROBLEM: %s' % grad_diff)
                # update weights; [alpha * delta_w_prev] adds momentum
                delta_w1, delta_w2 = eta * grad1, eta * grad2
                self.w1 -= (delta_w1 + (self.alpha * delta_w1_prev))
                self.w2 -= (delta_w2 + (self.alpha * delta_w2_prev))
                delta_w1_prev, delta_w2_prev = delta_w1, delta_w2
        return self
# Sanity-check backprop on a tiny run: 5 samples, 10 hidden units, 10 epochs,
# full-batch, no regularization/momentum so the analytical and numerical
# gradients are directly comparable.
nn_check = MLPGradientCheck(n_output=10,
                            n_features=X_train.shape[1],
                            n_hidden=10,
                            l2=0.0,
                            l1=0.0,
                            epochs=10,
                            eta=0.001,
                            alpha=0.0,
                            decrease_const=0.0,
                            minibatches=1,
                            random_state=1)
nn_check.fit(X_train[:5], y_train[:5], print_progress=False)
```
| github_jupyter |
```
# default_exp starter
```
# Starter Code
> Utility functions for binary classification on scientific paper abstracts using GPT-2
```
#hide
from nbdev.showdoc import *
```
## GPT-2 Prompt Manipulation
The simplest approach is to feed a small number of training examples into the prompt directly for few-shot classification.
### Setup
```
#export
from transformers import GPT2Tokenizer, GPT2LMHeadModel
import torch
import json
import pandas as pd
# GPT-2 has no dedicated pad token; reuse EOS so padding-based encoding works
tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
tokenizer.pad_token = tokenizer.eos_token
model = GPT2LMHeadModel.from_pretrained('gpt2')
# inference only; move the model to GPU
model.eval().cuda()
```
### Utilities
```
#export
def generate(prompt, max_length=5, stop_token=None):
    """Greedy-decode up to `max_length` new tokens after `prompt`.

    Returns the prompt plus the generated continuation, truncated at the
    first occurrence of `stop_token` when one is given.
    """
    prompt_ids = tokenizer.encode(prompt, return_tensors="pt")
    output_ids = model.generate(input_ids=prompt_ids.cuda(),
                                max_length=max_length + len(prompt_ids[0]),
                                do_sample=False)
    decoded = tokenizer.decode(output_ids[0], clean_up_tokenization_spaces=True)
    prompt_decoded = tokenizer.decode(prompt_ids[0], clean_up_tokenization_spaces=True)
    continuation = decoded[len(prompt_decoded):]
    cut = continuation.find(stop_token) if stop_token else None
    return prompt + continuation[:cut]
# The returned logits are shifted one position left relative to `tokens`,
# since HuggingFace produces no logit for the first input token.
def get_logits_and_tokens(text):
    """Return (next-token logits, decoded tokens) for `text`."""
    ids = tokenizer.encode(text, return_tensors="pt")
    decoded_tokens = [tokenizer.decode([token_id]) for token_id in ids[0]]
    result = model(ids.cuda())
    return result.logits[0][:-1], decoded_tokens
# Tiny sentiment demo: two labelled examples in the prompt, then ask the
# model to complete the label for "Bad".
EXAMPLE_PROMPT = """Horrible: negative
Great: positive
Bad:"""
generated_text = generate(EXAMPLE_PROMPT, stop_token="\n")
generated_text
logits, tokens = get_logits_and_tokens(generated_text)
# probability distribution over the vocabulary at the final position
last_token_probs = torch.softmax(logits[-1], dim=0)
negative_prob = last_token_probs[tokenizer.encode(" negative")[0]]
positive_prob = last_token_probs[tokenizer.encode(" positive")[0]]
print(f"tokens: {tokens}\nnegative prob: {negative_prob}\npositive prob: {positive_prob}")
```
### Load Data
Define helper function to load text from `.jsonl` files.
```
#export
def load_jsonl(filename):
    """Load a .jsonl file as a list of parsed JSON objects (one per line).

    Uses a context manager so the file handle is closed even on parse
    errors (the previous version leaked it), and skips blank lines,
    which would otherwise raise on json.loads('').
    """
    with open(filename) as f:
        return [json.loads(line) for line in f if line.strip()]
# load the training split and peek at the last record to check the schema
train_examples = load_jsonl("data/train.jsonl")
train_examples[-1]
#export
def render_example(example):
    """Format a labelled example as TITLE/ABSTRACT/LABEL lines.

    The title is taken to be the first sentence of `example["text"]`;
    the label renders as "AI" only when the stored label is the string "True".
    """
    text = example["text"]
    title = text.split(".")[0].strip()
    body = text[len(title) + 1:].strip()
    tag = "AI" if example["label"] == "True" else "NOT AI"
    return f"TITLE: {title}\nABSTRACT: {body}\nLABEL: {tag}"
#export
def render_end_example(example):
    """Format an example as TITLE/ABSTRACT with a trailing empty LABEL,
    ready for the model to complete."""
    text = example["text"]
    title = text.split(".")[0].strip()
    body = text[len(title) + 1:].strip()
    return f"TITLE: {title}\nABSTRACT: {body}\nLABEL:"
#export
def make_prompt(instructions, train_examples, end_example):
    """Assemble a few-shot prompt: instructions, the rendered training
    examples separated by "--" dividers, then the unlabelled end example."""
    shots = "\n\n--\n\n".join(render_example(ex) for ex in train_examples)
    parts = [instructions, shots, "--", render_end_example(end_example)]
    return "\n".join(parts)
INSTRUCTIONS = "Classify the following examples based on whether they are AI-relevant or not:"
# four in-context examples, then the fifth as the one to classify
prompt = make_prompt(INSTRUCTIONS, train_examples[:4], train_examples[4])
print(prompt)
generated_text = generate(prompt, stop_token="\n")
print(generated_text)
```
## Extra Helper Functions
These are some extra utility functions that were not defined in the original starter code, but were still useful.
```
# reload the full training set as a DataFrame for label-based sampling
train = load_jsonl("data/train.jsonl")
df = pd.DataFrame(train)
```
Collect some random samples across classes. This should be flexible enough to generalize beyond the `'AI'` and `'Not AI'` labels.
```
df.label.unique()
samples_per_label = 2
samples = []
# take the first `samples_per_label` records of each label value
for label in df['label'].unique():
    group = df[df.label == label]
    idxs = group.index[:samples_per_label]
    samples += [train[idx] for idx in idxs]
samples
#export
def uniform_samples(json='data/train.jsonl', n_samples=2):
    """Take the first `n_samples` records of every label in a .jsonl file.

    Note: the `json` parameter is a file path (and shadows the json module
    inside this function's scope).
    """
    records = load_jsonl(json)
    frame = pd.DataFrame(records)
    picked = []
    for value in frame['label'].unique():
        matching = frame[frame.label == value].index[:n_samples]
        picked.extend(records[i] for i in matching)
    return picked
```
| github_jupyter |
# Master Pytorch 7 : RNN Basic
- RNN์ ๊ธฐ์ด์ ๋ํด ์์๋ณด์
# RNN one Cell process
```
import torch
import torch.nn as nn
# One-hot vector for each character of "hello"
h = [1, 0, 0, 0]
e = [0, 1, 0, 0]
l = [0, 0, 1, 0]
o = [0, 0, 0, 1]
# A single RNN layer: 4-dimensional one-hot input, 2-dimensional hidden state
cell = nn.RNN(input_size = 4, hidden_size = 2, batch_first = True)
cell
hidden = torch.randn(1, 1, 2) # (num_layers * num_direction) X batch_size X hidden_size
hidden
inputs = torch.Tensor([h, e, l, l, o])
inputs
for p in cell.parameters():
    print(p)
# Feed the characters one at a time, carrying the hidden state forward
for c in inputs:
    c = c.view(1, 1, -1) # input : (batch_size X seq_len X input_size) if batch_first = True
    out, hidden = cell(c, hidden)
    print(c.size(), out.size())
# The whole sequence can also be fed in a single call.
# (A stray uncommented sentence here was a SyntaxError in the previous cell.)
# input : (Batch_size X seq_len X input_size) if batch_first == True
inputs = inputs.view(1, 5, -1) # Batch X seq_len X input_size
out, hidden = cell(inputs, hidden)
print(out.size(), hidden.size())
# seq_len comes out as 5: the 5 hidden states of the sequence, chained together
hidden = torch.randn(1, 3, 2)
# one RNN layer : input_dim(4) -> output_dim(2) / sequence = 5, batch = 3
inputs = torch.Tensor([[h,e,l,l,o],
                       [e,o,l,l,l],
                       [l,l,e,e,l]])
inputs
# input : (batch, seq_len, input_size) when batch_first = True
# B X S X I
out, hidden = cell(inputs, hidden)
print(inputs.size(), out.size())
out
```
# RNN Example
```
import torch
import torch.nn as nn
torch.manual_seed(777)
idx2char = ['h', 'i', 'e', 'l', 'o']
# teach the model to map "hihell" -> "ihello"
x_data = [0, 1, 0, 2, 3, 3] # hihell
one_hot_lookup = [[1, 0, 0, 0, 0],
                  [0, 1, 0, 0, 0],
                  [0, 0, 1, 0, 0],
                  [0, 0, 0, 1, 0],
                  [0, 0, 0, 0, 1]]
y_data = [1, 0, 2, 3, 3, 4] # ihello
x_one_hot = [one_hot_lookup[x] for x in x_data]
x_one_hot # one-hot vectors for "hihell"
# prepare tensors
inputs = torch.Tensor(x_one_hot)
labels = torch.LongTensor(y_data).view(6,1)
print(inputs)
print(labels)
class_n = 5
input_size = 5 # size of each one-hot vector
hidden_size = 5 # size of the hidden layer
batch_size = 1 # one sentence
sequence_length = 1 # one character at a time
layer_n = 1 # one-layer RNN
class Model(nn.Module):
    """One-layer RNN classifier over single one-hot characters."""

    def __init__(self):
        super(Model, self).__init__()
        # batch_first=True -> expects (batch, seq_len, input_size) inputs
        self.rnn = nn.RNN(input_size = input_size, hidden_size = hidden_size, batch_first = True)

    def forward(self, hidden, x):
        # lift the flat one-hot vector to (batch, seq, feature)
        step = x.view(batch_size, sequence_length, input_size)
        out, next_hidden = self.rnn(step, hidden)
        return next_hidden, out.view(-1, class_n)

    def init_hidden(self):
        # zero initial state: (num_layers, batch, hidden_size)
        return torch.zeros(layer_n, batch_size, hidden_size)
model = Model()
print(model)
import torch.optim as optim
import sys
loss_function = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr = 0.001)
epoch_n = 100
for epoch in range(1, epoch_n+1):
    optimizer.zero_grad()
    loss = 0
    hidden = model.init_hidden() # fresh initial hidden state (h0) each epoch
    sys.stdout.write("predicted string: ")
    for input, label in zip(inputs, labels):
        # print(input.size(), label.size())
        hidden, output = model(hidden, input)
        val, idx = output.max(1) # best class value (val) and index (idx)
        sys.stdout.write(idx2char[idx.item()])
        loss += loss_function(output, label)
    # .item() extracts the Python scalar; indexing the 0-dim loss tensor with
    # loss.data[0] raises IndexError on modern PyTorch
    print(' Epoch : %d/100, Loss : %1.3f' %(epoch, loss.item()))
    loss.backward()
    optimizer.step()
```
# ์ค์ค๋ก ํด๋ณด๊ธฐ
- "hihello"์์ "hihell"๋ฅผ ์
๋ ฅ๊ฐ์ผ๋ก ํ ํ 'o'๋ฅผ ์์ธกํ์
# ๋ฐ์ดํฐ ์์ฑ
```
import torch
import torch.nn as nn
idx2char = ['h', 'i', 'e', 'l', 'o']
# one-hot encoding for each character
h = [1,0,0,0,0]
i = [0,1,0,0,0]
e = [0,0,1,0,0]
l = [0,0,0,1,0]
o = [0,0,0,0,1]
# input "hihell", target "ihello" (shifted by one character)
x_data = [h,i,h,e,l,l]
y_data = [i,h,e,l,l,o]
inputs = torch.Tensor(x_data)
labels = torch.LongTensor([1, 0, 2, 3, 3, 4]).view(6, 1) # index2char
print(inputs)
print(labels)
class_n = 5
input_size = 5
hidden_size = 5
batch_size = 1
seq_n = 1
layer_n = 1
class Model(nn.Module):
    """Single-layer RNN mapping one one-hot character to class scores."""

    def __init__(self):
        super(Model, self).__init__()
        # batch_first=True -> input shape (batch, seq_len, input_size)
        self.rnn = nn.RNN(input_size=input_size,
                          hidden_size=hidden_size,
                          batch_first=True)

    def forward(self, x, hidden):
        # reshape the flat one-hot vector into (batch, seq, feature)
        step = x.view(batch_size, seq_n, input_size)
        rnn_out, next_hidden = self.rnn(step, hidden)
        return rnn_out.view(-1, class_n), next_hidden

    def init_hidden(self):
        # zero h0 of shape (num_layers, batch, hidden_size)
        return torch.zeros(layer_n, batch_size, hidden_size)
model = Model()
print(model)
import torch.optim as optim
loss_function = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr = 0.1)
epoch_n = 100
loss_list = []
for epoch in range(1, epoch_n + 1):
    optimizer.zero_grad()
    hidden = model.init_hidden()
    loss = 0
    for input, label in zip(inputs, labels):
        output, hidden = model(input, hidden)
        val, idx = output.max(1)
        if epoch % 10 == 0:
            print(idx2char[idx.item()], end = '')
        loss += loss_function(output, label)
    loss.backward()
    optimizer.step()
    if epoch % 10 == 0:
        print(' Epoch : %d/100, Loss : %1.3f' %(epoch, loss.item()))
    # store a detached Python float; appending the live tensor would keep
    # every epoch's whole autograd graph alive in memory
    loss_list.append(loss.item())
# Loss ์๊ฐํ
```
import matplotlib.pyplot as plt
%matplotlib inline
plt.plot(loss_list)
```
# ํ๊ธ ๋ฒ์ ผ
- "๋ด๊ฐ ์ฌ๋ํ๋ ์ฌ๋"์์ ๋ง์ง๋ง '์ฌ๋' ๋ง์ถ๊ธฐ
```
from konlpy.tag import Okt
import torch
# POS-tagging function (Okt tokenizer).
# NOTE: the notebook export had wrapped the original comments onto bare
# lines, leaving stray non-comment tokens that raise NameError at run time;
# they are restored as proper comments here.
okt = Okt()
# Apply POS tagging to the sample sentence.
my_string = '๋ด๊ฐ ์ฌ๋ํ๋ ์ฌ๋'
tokens = okt.pos(my_string)
# Collect every token that appears, plus the space character.
letters = [val for val, tag in tokens] + [' ']
print(letters)
# Build a dictionary mapping each token to a one-hot tensor.
letter2tensor = {}
for i, val in enumerate(letters):
    tensor = torch.zeros(1, len(letters))
    tensor[0][i] = 1
    letter2tensor[val] = tensor
letter2tensor
x_data = ['๋ด', '๊ฐ', ' ', '์ฌ๋', 'ํ๋', ' ']  # input token sequence
y_data = [1, 5, 2, 3, 5, 4]  # targets as indices into `letters` (CrossEntropyLoss expects class indices)
labels = torch.LongTensor(y_data).view(6, 1)
inputs = torch.zeros(len(x_data), len(letters))  # x_data as one-hot rows, built from the dictionary above
for i, val in enumerate(x_data):
    inputs[i] = letter2tensor[val]
print(inputs)
print(labels)
import torch.nn as nn
# Hyper-parameters, read as module-level globals by the Model class below.
class_n = 6  # number of classes = len(letters): the model picks one of the 6 tokens
input_size = 6  # length of one one-hot input vector; inputs are (1 x 1 x 6)
hidden_size = 6  # hidden layer size
batch_size = 1
seq_n = 1
layer_n = 1
class Model(nn.Module):
    """One-layer RNN classifier over the token vocabulary.

    The hyper-parameters (input_size, hidden_size, batch_size, seq_n,
    class_n, layer_n) are module-level globals from the previous cell.
    """

    def __init__(self):
        super(Model, self).__init__()
        self.rnn = nn.RNN(input_size=input_size,
                          hidden_size=hidden_size,
                          batch_first=True)

    def forward(self, x, hidden):
        """Process one token; return (logits, updated hidden state)."""
        step_in = x.view(batch_size, seq_n, input_size)
        step_out, hidden = self.rnn(step_in, hidden)
        return step_out.view(-1, class_n), hidden

    def init_hidden(self):
        """Return the all-zero h0 tensor of shape (layer_n, batch_size, hidden_size)."""
        return torch.zeros(layer_n, batch_size, hidden_size)
model = Model()
print(model)
import torch.optim as optim
loss_function = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr = 0.1)
epoch_n = 30
# Training loop: prints the predicted token sequence each epoch.
for epoch in range(epoch_n):
    optimizer.zero_grad()
    hidden = model.init_hidden()
    loss = 0
    for input, label in zip(inputs, labels):
        output, hidden = model(input, hidden)
        loss += loss_function(output, label)
        val, idx = output.max(1)  # greedy prediction
        print(letters[idx.item()], end = '')
    loss.backward()
    optimizer.step()
    # Fixed the progress message: it previously printed "Epoch ; %d/100"
    # (wrong separator, hard-coded 100 although epoch_n is 30, 0-based count).
    print('\tEpoch : %d/%d, Loss : %1.3f' % (epoch + 1, epoch_n, loss.item()))
    # (removed a redundant trailing `loss = 0`; it is reset at the top of the loop)
```
# ์๋ก ๋ฐฐ์ด ๊ฒ
## 1. nn.rnn(batch_first = True)
- Pytorch Document๋ฅผ ๋ณด๋ฉด inputs์ shape๋ (batch_size, seq_n, input_size)๋ก ๋์ด์๋ค.
- ํ์ง๋ง batch_size์ ๋จผ์ ์ฐ๊ณ ์ถ๋ค๋ฉด batch_first = True ์ต์์ ์ถ๊ฐํด์ฃผ๋ฉด ๋๋ค.
- nn.rnn(input_size, hidden_size, batch_first = True) -> inputs์ shape = (batch_size, seq_n, input_size)
- ์ฐธ๊ณ : https://github.com/yunjey/pytorch-tutorial/issues/122
# ์ค๋ฅ
1. RuntimeError: invalid argument 2: size '[1 x 1 x 6]' is invalid for input with 4 elements at ..\aten\src\TH\THStorage.cpp:84
- output size์ hidden size์ ํฌ๊ธฐ๊ฐ ๊ฐ์์ผ ํ๋? ๊ทธ๋ฐ ๊ฒ ๊ฐ๋ค.
| github_jupyter |
# Self-Driving Car Engineer Nanodegree
## Project: **Finding Lane Lines on the Road**
***
In this project, you will use the tools you learned about in the lesson to identify lane lines on the road. You can develop your pipeline on a series of individual images, and later apply the result to a video stream (really just a series of images). Check out the video clip "raw-lines-example.mp4" (also contained in this repository) to see what the output should look like after using the helper functions below.
Once you have a result that looks roughly like "raw-lines-example.mp4", you'll need to get creative and try to average and/or extrapolate the line segments you've detected to map out the full extent of the lane lines. You can see an example of the result you're going for in the video "P1_example.mp4". Ultimately, you would like to draw just one line for the left side of the lane, and one for the right.
In addition to implementing code, there is a brief writeup to complete. The writeup should be completed in a separate file, which can be either a markdown file or a pdf document. There is a [write up template](https://github.com/udacity/CarND-LaneLines-P1/blob/master/writeup_template.md) that can be used to guide the writing process. Completing both the code in the Ipython notebook and the writeup template will cover all of the [rubric points](https://review.udacity.com/#!/rubrics/322/view) for this project.
---
Let's have a look at our first image called 'test_images/solidWhiteRight.jpg'. Run the 2 cells below (hit Shift-Enter or the "play" button above) to display the image.
**Note: If, at any point, you encounter frozen display windows or other confounding issues, you can always start again with a clean slate by going to the "Kernel" menu above and selecting "Restart & Clear Output".**
---
**The tools you have are color selection, region of interest selection, grayscaling, Gaussian smoothing, Canny Edge Detection and Hough Tranform line detection. You are also free to explore and try other techniques that were not presented in the lesson. Your goal is to piece together a pipeline to detect the line segments in the image, then average/extrapolate them and draw them onto the image for display (as below). Once you have a working pipeline, try it out on the video stream below.**
---
<figure>
<img src="examples/line-segments-example.jpg" width="380" alt="Combined Image" />
<figcaption>
<p></p>
<p style="text-align: center;"> Your output should look something like this (above) after detecting line segments using the helper functions below </p>
</figcaption>
</figure>
<p></p>
<figure>
<img src="examples/laneLines_thirdPass.jpg" width="380" alt="Combined Image" />
<figcaption>
<p></p>
<p style="text-align: center;"> Your goal is to connect/average/extrapolate line segments to get output like this</p>
</figcaption>
</figure>
**Run the cell below to import some packages. If you get an `import error` for a package you've already installed, try changing your kernel (select the Kernel menu above --> Change Kernel). Still have problems? Try relaunching Jupyter Notebook from the terminal prompt. Also, consult the forums for more troubleshooting tips.**
## Import Packages
```
#importing some useful packages
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
%matplotlib inline
```
## Read in an Image
```
# Load the sample frame; matplotlib.image reads JPEGs as RGB arrays.
#reading in an image
image = mpimg.imread('test_images/solidWhiteRight.jpg')
#printing out some stats and plotting
print('This image is:', type(image), 'with dimensions:', image.shape)
plt.imshow(image) # if you wanted to show a single color channel image called 'gray', for example, call as plt.imshow(gray, cmap='gray')
```
## Ideas for Lane Detection Pipeline
**Some OpenCV functions (beyond those introduced in the lesson) that might be useful for this project are:**
`cv2.inRange()` for color selection
`cv2.fillPoly()` for regions selection
`cv2.line()` to draw lines on an image given endpoints
`cv2.addWeighted()` to coadd / overlay two images
`cv2.cvtColor()` to grayscale or change color
`cv2.imwrite()` to output images to file
`cv2.bitwise_and()` to apply a mask to an image
**Check out the OpenCV documentation to learn about these and discover even more awesome functionality!**
## Helper Functions
Below are some helper functions to help get you started. They should look familiar from the lesson!
```
import math
def grayscale(img):
    """Applies the Grayscale transform
    This will return an image with only one color channel
    but NOTE: to see the returned image as grayscale
    (assuming your grayscaled image is called 'gray')
    you should call plt.imshow(gray, cmap='gray')"""
    # Assumes `img` is RGB (matplotlib.image.imread channel order) — TODO confirm caller.
    return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # Or use BGR2GRAY if you read an image with cv2.imread()
    # return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
def canny(img, low_threshold, high_threshold):
    """Applies the Canny transform"""
    # Thin wrapper over cv2.Canny; `low_threshold`/`high_threshold` are the
    # hysteresis thresholds. Typically fed the blurred grayscale image.
    return cv2.Canny(img, low_threshold, high_threshold)
def gaussian_blur(img, kernel_size):
    """Applies a Gaussian Noise kernel"""
    # kernel_size must be a positive odd integer (OpenCV requirement).
    return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)
def region_of_interest(img, vertices):
    """Black out everything outside the polygon given by `vertices`.

    `vertices` should be a numpy array of integer points, as accepted by
    cv2.fillPoly. Pixels inside the polygon are kept unchanged; every other
    pixel is set to black.
    """
    # Fill value must match the image depth: a tuple of 255s for
    # multi-channel images, a plain 255 for single-channel ones.
    if len(img.shape) > 2:
        fill_color = (255,) * img.shape[2]
    else:
        fill_color = 255
    # Paint the polygon onto a blank mask, then keep only the masked pixels.
    mask = np.zeros_like(img)
    cv2.fillPoly(mask, vertices, fill_color)
    return cv2.bitwise_and(img, mask)
def draw_lines(img, lines, color=[255, 0, 0], thickness=2):
    """
    NOTE: this is the function you might want to use as a starting point once you want to
    average/extrapolate the line segments you detect to map out the full
    extent of the lane (going from the result shown in raw-lines-example.mp4
    to that shown in P1_example.mp4).
    Think about things like separating line segments by their
    slope ((y2-y1)/(x2-x1)) to decide which segments are part of the left
    line vs. the right line. Then, you can average the position of each of
    the lines and extrapolate to the top and bottom of the lane.
    This function draws `lines` with `color` and `thickness`.
    Lines are drawn on the image inplace (mutates the image).
    If you want to make the lines semi-transparent, think about combining
    this function with the weighted_img() function below
    """
    # NOTE(review): the mutable default `color=[255, 0, 0]` is shared across
    # calls; safe only because this function never mutates it.
    # `lines` has the nested shape returned by cv2.HoughLinesP.
    for line in lines:
        for x1,y1,x2,y2 in line:
            cv2.line(img, (x1, y1), (x2, y2), color, thickness)
def hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap):
    """Run the probabilistic Hough transform and render the detected segments.

    `img` should be the output of a Canny transform. Returns a new black RGB
    image of the same height/width with the Hough segments drawn on it.
    """
    segments = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]),
                               minLineLength=min_line_len,
                               maxLineGap=max_line_gap)
    height, width = img.shape[0], img.shape[1]
    canvas = np.zeros((height, width, 3), dtype=np.uint8)
    draw_lines(canvas, segments)
    return canvas
# Python 3 has support for cool math symbols.
def weighted_img(img, initial_img, ฮฑ=0.8, ฮฒ=1., ฮณ=0.):
    """
    `img` is the output of the hough_lines(), An image with lines drawn on it.
    Should be a blank image (all black) with lines drawn on it.
    `initial_img` should be the image before any processing.
    The result image is computed as follows:
    initial_img * ฮฑ + img * ฮฒ + ฮณ
    NOTE: initial_img and img must be the same shape!
    """
    # cv2.addWeighted performs the per-pixel blend (saturating on uint8 inputs).
    return cv2.addWeighted(initial_img, ฮฑ, img, ฮฒ, ฮณ)
```
## Test Images
Build your pipeline to work on the images in the directory "test_images"
**You should make sure your pipeline works well on these images before you try the videos.**
```
# Notebook cell: list the sample images shipped in test_images/ (result is displayed).
import os
os.listdir("test_images/")
```
## Build a Lane Finding Pipeline
Build the pipeline and run your solution on all test_images. Make copies into the `test_images_output` directory, and you can use the images in your writeup report.
Try tuning the various parameters, especially the low and high Canny thresholds as well as the Hough lines parameters.
## Test on Videos
You know what's cooler than drawing lanes over images? Drawing lanes over video!
We can test our solution on two provided videos:
`solidWhiteRight.mp4`
`solidYellowLeft.mp4`
**Note: if you get an import error when you run the next cell, try changing your kernel (select the Kernel menu above --> Change Kernel). Still have problems? Try relaunching Jupyter Notebook from the terminal prompt. Also, consult the forums for more troubleshooting tips.**
**If you get an error that looks like this:**
```
NeedDownloadError: Need ffmpeg exe.
You can download it by calling:
imageio.plugins.ffmpeg.download()
```
**Follow the instructions in the error message and check out [this forum post](https://discussions.udacity.com/t/project-error-of-test-on-videos/274082) for more troubleshooting tips across operating systems.**
```
# TODO: Build your pipeline that will draw lane lines on the test_images
# then save them to the test_images_output directory.
# importing some useful packages
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
import math
import os
from moviepy.editor import VideoFileClip
from IPython.display import HTML
%matplotlib inline
######## Helper functions #######
def grayscale(img):
    """Convert an RGB frame to a single-channel grayscale image."""
    return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
def canny(img, low_threshold, high_threshold):
    """Apply the Canny edge detector with the given hysteresis thresholds."""
    return cv2.Canny(img, low_threshold, high_threshold)
def gaussian_blur(img, kernel_size):
    """Smooth with a kernel_size x kernel_size Gaussian; kernel_size must be odd."""
    return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)
def region_of_interest(img, vertices):
    """Mask `img`, keeping only the polygon described by `vertices` (black elsewhere)."""
    #defining a blank mask to start with
    mask = np.zeros_like(img)
    #defining a 3 channel or 1 channel color to fill the mask with depending on the input image
    if len(img.shape) > 2:
        channel_count = img.shape[2] # i.e. 3 or 4 depending on your image
        ignore_mask_color = (255,) * channel_count
    else:
        ignore_mask_color = 255
    #filling pixels inside the polygon defined by "vertices" with the fill color
    cv2.fillPoly(mask, vertices, ignore_mask_color)
    #returning the image only where mask pixels are nonzero
    masked_image = cv2.bitwise_and(img, mask)
    return masked_image
def draw_lines(img, lines, color=[255, 0, 0], thickness=2):
    '''Updated method to use cv2.addWeight() to overlay lines'''
    # Unlike the earlier draw_lines, this version does NOT mutate `img`:
    # it draws on a blank canvas and returns a weighted overlay of the two.
    img = np.copy(img)
    line_img = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)
    for line in lines:
        for x1,y1,x2,y2 in line:
            # skip the 0-valued sentinel endpoints produced by single_lines()
            if x1!=0 and x2!=0:
                cv2.line(line_img, (x1, y1), (x2, y2), color, thickness)
    img = weighted_img(line_img, img, 1, 0.9, 0)
    return img
def hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap):
    """Run cv2.HoughLinesP on a Canny edge image; return the segments drawn on black.

    Bug fix: the draw_lines() defined above no longer mutates its first
    argument (it copies and returns an overlay), so the previous
    `draw_lines(line_img, lines)` call discarded its result and this function
    always returned an all-black image. Capture the return value instead.
    """
    lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]), minLineLength=min_line_len, maxLineGap=max_line_gap)
    line_img = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)
    return draw_lines(line_img, lines)
# Python 3 has support for cool math symbols.
def weighted_img(img, initial_img, ฮฑ=0.8, ฮฒ=1., ฮณ=0.):
    """Blend the two frames as initial_img * ฮฑ + img * ฮฒ + ฮณ (shapes must match)."""
    return cv2.addWeighted(initial_img, ฮฑ, img, ฮฒ, ฮณ)
# For debugging purpose only
def show_image(img, title=""):
    """Display `img` in a fresh matplotlib figure with an optional title."""
    plt.figure()
    plt.title(title)
    plt.imshow(img)
def single_lines(initial_img, lines):
    '''Given lines returned from hough transform, return a single left line averaging
    the segments on the left side and a single right line averaging the segments on
    the right side, extrapolated between the bottom of the image and 3/5 of its height.

    Returns a HoughLinesP-shaped list:
    [[[left_x1, y_bottom, left_x2, y_top], [right_x1, y_bottom, right_x2, y_top]]].
    A side with no usable segments yields x coordinates of 0.
    '''
    left_line_x = []
    left_line_y = []
    right_line_x = []
    right_line_y = []
    # sort the segments into left (negative slope) and right (positive slope)
    for line in lines:
        for x1,y1,x2,y2 in line:
            # Bug fix: a perfectly vertical segment used to raise
            # ZeroDivisionError here; skip it explicitly.
            if x2 == x1:
                continue
            slope = (y2 - y1)/(x2 - x1)
            # near-horizontal segments are noise, not lane lines
            if math.fabs(slope) < 0.5:
                continue
            if slope <=0:
                left_line_x.extend([x1, x2])
                left_line_y.extend([y1, y2])
            else:
                right_line_x.extend([x1, x2])
                right_line_y.extend([y1, y2])
    # handle case where there is no lines.
    if not left_line_x:
        left_line_x.append(0)
    if not left_line_y:
        left_line_y.append(0)
    if not right_line_x:
        right_line_x.append(0)
    if not right_line_y:
        right_line_y.append(0)
    min_y = int(initial_img.shape[0] * (3/5)) # reasonable vision range
    max_y = int(initial_img.shape[0]) # bottom of the image
    # Fit x = f(y) (not y = f(x)) so steep lane lines stay well conditioned.
    fit_left = np.poly1d(np.polyfit(left_line_y, left_line_x, deg=1))
    left_x_start = int(fit_left(max_y)) if not math.isnan(fit_left(max_y)) else 0
    left_x_end = int(fit_left(min_y)) if not math.isnan(fit_left(min_y)) else 0
    fit_right = np.poly1d(np.polyfit(right_line_y, right_line_x, deg=1))
    right_x_start = int(fit_right(max_y)) if not math.isnan(fit_right(max_y)) else 0
    right_x_end = int(fit_right(min_y)) if not math.isnan(fit_right(min_y)) else 0
    return [[[left_x_start, max_y, left_x_end, min_y],
            [right_x_start, max_y, right_x_end, min_y]]]
###### main functions #######
def process_image(image):
    """Full lane-detection pipeline for one RGB frame; returns the frame with both lane lines drawn."""
    # 1) grayscale + Gaussian blur
    gray_image = grayscale(image)
    kernel_size = 5
    blur_gray_image = gaussian_blur(gray_image, kernel_size)
    # 2) Canny edge detection
    low_threshold = 100
    high_threshold = 200
    edges = canny(blur_gray_image, low_threshold, high_threshold)
    # 3) keep only a triangle: both bottom corners up to the image centre
    imshape = image.shape
    vertices = np.array([[(0,imshape[0]),(imshape[1]/2,imshape[0]/2),(imshape[1],imshape[0])]], dtype=np.int32)
    masked_edges = region_of_interest(edges, vertices)
    # 4) probabilistic Hough transform parameters (hand-tuned for the project videos)
    rho = 6
    theta = np.pi/60
    threshold = 160
    min_line_length = 40
    max_line_gap = 25
    lines = cv2.HoughLinesP(masked_edges, rho, theta, threshold, np.array([]), min_line_length, max_line_gap)
    # 5) collapse the segments into one left and one right lane line, then overlay
    final_single_lines = single_lines(image, lines)
    combo = draw_lines(image, final_single_lines, thickness=8)
    return combo
def process_all_test_images():
    """Run process_image over every file in test_images/ and save the results."""
    path = "test_images/"
    output_path_name = "test_images_output/"
    os.makedirs(output_path_name, exist_ok=True)
    for name in os.listdir(path):
        print("processing:", name)
        result = process_image(mpimg.imread(os.path.join(path, name)))
        show_image(result, name)
        mpimg.imsave(output_path_name + name, result)
# Apply the pipeline to the three project videos; moviepy's fl_image maps
# process_image over every (RGB) frame. (%time is an IPython magic.)
white_output = 'test_videos_output/solidWhiteRight.mp4'
## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
## To do so add .subclip(start_second,end_second) to the end of the line below
## Where start_second and end_second are integer values representing the start and end of the subclip
## You may also uncomment the following line for a subclip of the first 5 seconds
# clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4").subclip(0,5)
clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4")
white_clip = clip1.fl_image(process_image) #NOTE: this function expects color images!!
%time white_clip.write_videofile(white_output, audio=False)
yellow_output = 'test_videos_output/solidYellowLeft.mp4'
## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
## To do so add .subclip(start_second,end_second) to the end of the line below
## Where start_second and end_second are integer values representing the start and end of the subclip
## You may also uncomment the following line for a subclip of the first 5 seconds
##clip2 = VideoFileClip('test_videos/solidYellowLeft.mp4').subclip(0,5)
clip2 = VideoFileClip('test_videos/solidYellowLeft.mp4')
yellow_clip = clip2.fl_image(process_image)
%time yellow_clip.write_videofile(yellow_output, audio=False)
challenge_output = 'test_videos_output/challenge.mp4'
## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
## To do so add .subclip(start_second,end_second) to the end of the line below
## Where start_second and end_second are integer values representing the start and end of the subclip
## You may also uncomment the following line for a subclip of the first 5 seconds
##clip3 = VideoFileClip('test_videos/challenge.mp4').subclip(0,5)
clip3 = VideoFileClip('test_videos/challenge.mp4')
challenge_clip = clip3.fl_image(process_image)
%time challenge_clip.write_videofile(challenge_output, audio=False)
# Import everything needed to edit/save/watch video clips
from moviepy.editor import VideoFileClip
from IPython.display import HTML
def process_image(image):
    # NOTE: The output you return should be a color image (3 channel) for processing video below
    # TODO: put your pipeline here,
    # you should return the final output (image where lines are drawn on lanes)
    # NOTE(review): this template stub shadows the working process_image defined
    # earlier in the notebook, and `result` is undefined here (NameError if
    # called as-is) — do not run this cell after the real pipeline.
    return result
```
Let's try the one with the solid white lane on the right first ...
```
# Template cell: process the solid-white-lane video (duplicate of the batch cell above).
white_output = 'test_videos_output/solidWhiteRight.mp4'
## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
## To do so add .subclip(start_second,end_second) to the end of the line below
## Where start_second and end_second are integer values representing the start and end of the subclip
## You may also uncomment the following line for a subclip of the first 5 seconds
##clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4").subclip(0,5)
clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4")
white_clip = clip1.fl_image(process_image) #NOTE: this function expects color images!!
%time white_clip.write_videofile(white_output, audio=False)
```
Play the video inline, or if you prefer find the video in your filesystem (should be in the same directory) and play it in your video player of choice.
```
# Embed the rendered video inline in the notebook.
HTML("""
<video width="960" height="540" controls>
<source src="{0}">
</video>
""".format(white_output))
```
## Improve the draw_lines() function
**At this point, if you were successful with making the pipeline and tuning parameters, you probably have the Hough line segments drawn onto the road, but what about identifying the full extent of the lane and marking it clearly as in the example video (P1_example.mp4)? Think about defining a line to run the full length of the visible lane based on the line segments you identified with the Hough Transform. As mentioned previously, try to average and/or extrapolate the line segments you've detected to map out the full extent of the lane lines. You can see an example of the result you're going for in the video "P1_example.mp4".**
**Go back and modify your draw_lines function accordingly and try re-running your pipeline. The new output should draw a single, solid line over the left lane line and a single, solid line over the right lane line. The lines should start from the bottom of the image and extend out to the top of the region of interest.**
Now for the one with the solid yellow lane on the left. This one's more tricky!
```
# Process the solid-yellow-lane video and embed the result inline.
yellow_output = 'test_videos_output/solidYellowLeft.mp4'
## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
## To do so add .subclip(start_second,end_second) to the end of the line below
## Where start_second and end_second are integer values representing the start and end of the subclip
## You may also uncomment the following line for a subclip of the first 5 seconds
##clip2 = VideoFileClip('test_videos/solidYellowLeft.mp4').subclip(0,5)
clip2 = VideoFileClip('test_videos/solidYellowLeft.mp4')
yellow_clip = clip2.fl_image(process_image)
%time yellow_clip.write_videofile(yellow_output, audio=False)
HTML("""
<video width="960" height="540" controls>
<source src="{0}">
</video>
""".format(yellow_output))
```
## Writeup and Submission
If you're satisfied with your video outputs, it's time to make the report writeup in a pdf or markdown file. Once you have this Ipython notebook ready along with the writeup, it's time to submit for review! Here is a [link](https://github.com/udacity/CarND-LaneLines-P1/blob/master/writeup_template.md) to the writeup template file.
## Optional Challenge
Try your lane finding pipeline on the video below. Does it still work? Can you figure out a way to make it more robust? If you're up for the challenge, modify your pipeline so it works with this video and submit it along with the rest of your project!
```
# Optional challenge video: process and embed the result inline.
challenge_output = 'test_videos_output/challenge.mp4'
## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
## To do so add .subclip(start_second,end_second) to the end of the line below
## Where start_second and end_second are integer values representing the start and end of the subclip
## You may also uncomment the following line for a subclip of the first 5 seconds
##clip3 = VideoFileClip('test_videos/challenge.mp4').subclip(0,5)
clip3 = VideoFileClip('test_videos/challenge.mp4')
challenge_clip = clip3.fl_image(process_image)
%time challenge_clip.write_videofile(challenge_output, audio=False)
HTML("""
<video width="960" height="540" controls>
<source src="{0}">
</video>
""".format(challenge_output))
```
| github_jupyter |
# Algorithm used :

```
%matplotlib inline
import gym
import itertools
import matplotlib
import numpy as np
import pandas as pd
import sys
# Make the project-local `lib` package importable when running from a subfolder.
if "../" not in sys.path:
    sys.path.append("../")
from collections import defaultdict
from lib.envs.windy_gridworld import WindyGridworldEnv
from lib import plotting
matplotlib.style.use('ggplot')
env = WindyGridworldEnv()
def make_epsilon_greedy_policy(Q, epsilon, nA):
    """
    Creates an epsilon-greedy policy based on a given Q-function and epsilon.
    Args:
        Q: A dictionary that maps from state -> action-values.
            Each value is a numpy array of length nA (see below)
        epsilon: The probability of selecting a random action; float between 0 and 1.
        nA: Number of actions in the environment.
    Returns:
        A function that takes an observation and returns the selection
        probability of each action as a numpy array of length nA.
    """
    def policy_fn(observation):
        # Spread epsilon uniformly over all actions, then give the greedy
        # action the remaining (1 - epsilon) probability mass.
        probs = np.full(nA, epsilon / nA, dtype=float)
        greedy_action = np.argmax(Q[observation])
        probs[greedy_action] += 1.0 - epsilon
        return probs
    return policy_fn
def sarsa(env, num_episodes, discount_factor=1.0, alpha=0.5, epsilon=0.1):
    """
    SARSA algorithm: On-policy TD control. Finds the optimal epsilon-greedy policy.
    Args:
        env: OpenAI environment.
        num_episodes: Number of episodes to run for.
        discount_factor: Gamma discount factor.
        alpha: TD learning rate.
        epsilon: Probability of sampling a random action. Float between 0 and 1.
    Returns:
        A tuple (Q, stats).
        Q is the optimal action-value function, a dictionary mapping state -> action values.
        stats is an EpisodeStats object with two numpy arrays for episode_lengths and episode_rewards.
    """
    # The final action-value function.
    # A nested dictionary that maps state -> (action -> action-value).
    Q = defaultdict(lambda: np.zeros(env.action_space.n))
    # Keeps track of useful statistics
    stats = plotting.EpisodeStats(
        episode_lengths=np.zeros(num_episodes),
        episode_rewards=np.zeros(num_episodes))
    # The policy we're following
    policy = make_epsilon_greedy_policy(Q, epsilon, env.action_space.n)
    for i_episode in range(num_episodes):
        # Print out which episode we're on, useful for debugging.
        if (i_episode + 1) % 100 == 0:
            print("\rEpisode {}/{}.".format(i_episode + 1, num_episodes), end="")
            sys.stdout.flush()
        # Reset the environment and pick the first action
        state = env.reset()
        # Each action is encoded as an integer; sample the first one using
        # the epsilon-greedy probabilities for the initial state.
        action_probs = policy(state)
        action = np.random.choice(np.arange(len(action_probs)), p=action_probs)
        # One step in the environment
        for t in itertools.count():
            # Take a step
            next_state, reward, done, _ = env.step(action)
            # Pick the next action (on-policy: same epsilon-greedy policy)
            next_action_probs = policy(next_state)
            next_action = np.random.choice(np.arange(len(next_action_probs)), p=next_action_probs)
            # Update statistics
            stats.episode_rewards[i_episode] += reward
            stats.episode_lengths[i_episode] = t
            # TD Update: bootstrap from the action actually chosen next (SARSA)
            td_target = reward + discount_factor * Q[next_state][next_action]
            td_delta = td_target - Q[state][action]
            Q[state][action] += alpha * td_delta
            if done:
                break
            action = next_action
            state = next_state
    return Q, stats
# Train for 200 episodes on the windy gridworld, then plot the learning curves.
Q, stats = sarsa(env, 200)
plotting.plot_episode_stats(stats)
```
| github_jupyter |
# References and ports
GDS allows defining the component once in memory and reference to that structure in other components.
As you build complex components you can include references to other simpler components. Adding a reference is like having a pointer to a component.
The GDSII specification allows the use of references, and similarly gdsfactory uses them (with the `add_ref()` function). So what is a reference? Simply put: **A reference does not contain any geometry. It only *points* to an existing geometry**.
Say you have a ridiculously large polygon with 100 billion vertices that you call BigPolygon. It's huge, and you need to use it in your design 250 times. Well, a single copy of BigPolygon takes up 1MB of memory, so you don't want to make 250 copies of it. You can instead *references* the polygon 250 times. Each reference only uses a few bytes of memory -- it only needs to know the memory address of BigPolygon and a few other things. This way, you can keep one copy of BigPolygon and use it again and again.
Let's start by making a blank geometry (`Component`) then adding a single polygon to it.
```
import numpy as np
import gdsfactory as gf
gf.config.set_plot_options(show_subports=False)
# Create a blank Component (the GDS cell we will draw into)
p = gf.Component("component_with_polygon")
# Add a polygon on layer (2, 0); points are given as separate x and y lists
xpts = [0, 0, 5, 6, 9, 12]
ypts = [0, 1, 1, 2, 2, 0]
p.add_polygon([xpts, ypts], layer=(2, 0))
# plot the Component with the polygon in it (last expression renders in the notebook)
p
```
Now, you want to reuse this polygon repeatedly without creating multiple copies of it.
To do so, you need to make a second blank `Component`, this time called `c`.
In this new Component you *reference* our Component `p` which contains our polygon.
```
c = gf.Component("Component_with_references") # Create a new blank Component
poly_ref = c.add_ref(p) # Reference the Component "p" that has the polygon in it
c  # display: the geometry is stored once in "p", only a pointer lives here
```
you just made a copy of your polygon -- but remember, you didn't actually
make a second polygon, you just made a reference (aka pointer) to the original
polygon. Let's add two more references to `c`:
```
poly_ref2 = c.add_ref(p) # Reference the Component "p" that has the polygon in it
poly_ref3 = c.add_ref(p) # Reference the Component "p" that has the polygon in it
c  # three stacked references to the same underlying geometry
```
Now you have 3x polygons all on top of each other. Again, this would appear
useless, except that you can manipulate each reference indepedently. Notice that
when you called `c.add_ref(p)` above, we saved the result to a new variable each
time (`poly_ref`, `poly_ref2`, and `poly_ref3`)? You can use those variables to
reposition the references.
```
poly_ref2.rotate(15) # Rotate the 2nd reference we made 15 degrees
poly_ref3.rotate(30) # Rotate the 3rd reference we made 30 degrees
c  # each reference transforms independently of the original geometry
```
Now you're getting somewhere! You've only had to make the polygon once, but you're
able to reuse it as many times as you want.
## Modifying the referenced geometry
What happens when you change the original geometry that the reference points to? In your case, your references in
`c` all point to the Component `p` that with the original polygon. Let's try
adding a second polygon to `p`.
First you add the second polygon and make sure `P` looks like you expect:
```
# Add a 2nd polygon to "p", this time on layer (1, 0)
xpts = [14, 14, 16, 16]
ypts = [0, 2, 2, 0]
p.add_polygon([xpts, ypts], layer=(1, 0))
p  # display the updated source Component
```
That looks good. Now let's find out what happened to `c` that contains the
three references. Keep in mind that you have not modified `c` or executed any
functions/operations on `c` -- all you have done is modify `p`.
```
c
```
**When you modify the original geometry, all of the
references automatically reflect the modifications.** This is very powerful,
because you can use this to make very complicated designs from relatively simple
elements in a computation- and memory-efficienct way.
Let's try making references a level deeper by referencing `c`. Note here we use
the `<<` operator to add the references -- this is just shorthand, and is
exactly equivalent to using `add_ref()`
```
c2 = gf.Component() # Create a new blank Component
d_ref1 = c2.add_ref(c) # Reference the Component "c" that 3 references in it
d_ref2 = c2 << c # Use the "<<" operator to create a 2nd reference to c
d_ref3 = c2 << c # Use the "<<" operator to create a 3rd reference to c
# Spread two of the copies horizontally (d_ref3 stays at the origin)
d_ref1.move([20, 0])
d_ref2.move([40, 0])
c2
```
As you've seen you have two ways to add a reference to our component:
1. create the reference and add it to the component
```
c = gf.Component("reference_sample")
w = gf.components.straight(width=0.6)
wr = w.ref()  # way 1: create the reference explicitly...
c.add(wr)     # ...then add it to the Component
c
```
2. or do it in a single line
```
c = gf.Component("reference_sample_shorter_syntax")
wr = c << gf.components.straight(width=0.6)  # way 2: create and add the reference in one step
c
```
in both cases you can move the reference `wr` after created
```
import gdsfactory as gf
c = gf.Component("two_references")
wr1 = c << gf.components.straight(width=0.6)
wr2 = c << gf.components.straight(width=0.6)
wr2.movey(10)
c.add_ports(wr1.get_ports_list(), prefix="top_")
c.add_ports(wr2.get_ports_list(), prefix="bot_")
c.ports
```
You can also auto_rename ports using gdsfactory default convention, where ports are numbered clockwise starting from the bottom left
```
c.auto_rename_ports()
c.ports
c
```
## Arrays of references
In GDS, there's a type of structure called a "CellArray" which takes a cell and repeats it NxM times on a fixed grid spacing. For convenience, `Component` includes this functionality with the add_array() function.
Note that CellArrays are not compatible with ports (since there is no way to access/modify individual elements in a GDS cellarray)
gdsfactory also provides with more flexible arrangement options if desired, see for example `grid()` and `packer()`.
As well as `gf.components.array`
Let's make a new Component and put a big array of our Component `c` in it:
```
c3 = gf.Component() # Create a new blank Component
aref = c3.add_array(
c, columns=6, rows=3, spacing=[20, 15]
) # Reference the Component "c" 3 references in it with a 3 rows, 6 columns array
c3
```
CellArrays don't have ports and there is no way to access/modify individual elements in a GDS cellarray.
gdsfactory provides you with similar functions in `gf.components.array` and `gf.components.array_2d`
```
c4 = gf.Component() # Create a new blank Component
aref = c4 << gf.components.array(component=c, columns=3, rows=2)
c4.add_ports(aref.get_ports_list())
c4
gf.components.array?
```
You can also create an array of references for periodic structures. Lets create a [Distributed Bragg Reflector](https://picwriter.readthedocs.io/en/latest/components/dbr.html)
```
import gdsfactory as gf
@gf.cell
def dbr_period(w1=0.5, w2=0.6, l1=0.2, l2=0.4, straight=gf.components.straight):
"""Return one DBR period."""
c = gf.Component()
r1 = c << straight(length=l1, width=w1)
r2 = c << straight(length=l2, width=w2)
r2.connect(port="o1", destination=r1.ports["o2"])
c.add_port("o1", port=r1.ports["o1"])
c.add_port("o2", port=r2.ports["o2"])
return c
l1 = 0.2
l2 = 0.4
n = 3
period = dbr_period(l1=l1, l2=l2)
period
dbr = gf.Component("DBR")
dbr.add_array(period, columns=n, rows=1, spacing=(l1 + l2, 100))
dbr
```
Finally we need to add ports to the new component
```
p0 = dbr.add_port("o1", port=period.ports["o1"])
p1 = dbr.add_port("o2", port=period.ports["o2"])
p1.midpoint = [(l1 + l2) * n, 0]
dbr
```
## Connect references
We have seen that once you create a reference you can manipulate the reference to move it to a location. Here we are going to connect that reference to a port. Remember that a certain reference `source` connects to a `destination` port
```
bend = gf.components.bend_circular()
bend
c = gf.Component("sample_reference_connect")
mmi = c << gf.components.mmi1x2()
b = c << gf.components.bend_circular()
b.connect("o1", destination=mmi.ports["o2"])
c.add_port("o1", port=mmi.ports["o1"])
c.add_port("o2", port=b.ports["o2"])
c.add_port("o3", port=mmi.ports["o3"])
c
```
## Port naming
You have the freedom to name the ports as you want, and you can use `gf.port.auto_rename_ports(prefix='o')` to rename them later on.
Here is the default naming convention.
Ports are numbered clock-wise starting from the bottom left corner
Optical ports have `o` prefix and Electrical ports `e` prefix
The port naming comes in most cases from the `gdsfactory.cross_section`. For example
- `gdsfactory.cross_section.strip` has ports `o1` for input and `o2` for output
- `gdsfactory.cross_section.metal1` has ports `e1` for input and `e2` for output
```
import gdsfactory as gf
size = 4
c = gf.components.nxn(west=2, south=2, north=2, east=2, xsize=size, ysize=size)
c
c = gf.components.straight_heater_metal(length=30)
c
c.ports
```
You can get the optical ports by `layer`
```
c.get_ports_dict(layer=(1, 0))
```
or by `width`
```
c.get_ports_dict(width=0.5)
c0 = gf.components.straight_heater_metal()
c0.ports
c1 = c0.copy()
c1.auto_rename_ports_layer_orientation()
c1.ports
c2 = c0.copy()
c2.auto_rename_ports()
c2.ports
```
You can also rename them with a different port naming convention
- prefix: add `e` for electrical `o` for optical
- clockwise
- counter-clockwise
- orientation `E` East, `W` West, `N` North, `S` South
Here is the default one we use (clockwise starting from bottom left west facing port)
```
3 4
|___|_
2 -| |- 5
| |
1 -|______|- 6
| |
8 7
```
```
import gdsfactory as gf
c = gf.Component("demo_ports")
nxn = gf.components.nxn(west=2, north=2, east=2, south=2, xsize=4, ysize=4)
ref = c.add_ref(nxn)
c.add_ports(ref.ports)
c
ref.get_ports_list() # by default returns ports clockwise starting from bottom left west facing port
c.auto_rename_ports()
c
```
You can also get the ports counter-clockwise
```
4 3
|___|_
5 -| |- 2
| |
6 -|______|- 1
| |
7 8
```
```
c.auto_rename_ports_counter_clockwise()
c
c.get_ports_list(clockwise=False)
c.ports_layer
c.port_by_orientation_cw("W0")
c.port_by_orientation_ccw("W1")
```
Lets extend the East facing ports (orientation = 0 deg)
```
import gdsfactory as gf
nxn = gf.components.nxn(
west=2,
north=2,
east=2,
south=2,
cross_section=gf.cross_section.strip,
xsize=4,
ysize=4,
)
c = gf.components.extension.extend_ports(component=nxn, orientation=0)
c
c.ports
```
## pins
You can add pins (port markers) to each port. Each foundry PDK does this differently, so gdsfactory supports all of them.
- square with port inside the component
- square centered (half inside, half outside component)
- triangular
- path (SiEPIC)
by default Component.show() will add triangular pins, so you can see the direction of the port in Klayout.
```
gf.components.mmi1x2(decorator=gf.add_pins.add_pins)
gf.components.mmi1x2(decorator=gf.add_pins.add_pins_triangle)
```
## component_sequence
When you have repetitive connections you can describe the connectivity as an ASCII map
```
import gdsfactory as gf
bend180 = gf.components.bend_circular180()
wg_pin = gf.components.straight_pin(length=40)
wg = gf.components.straight()
# Define a map between symbols and (component, input port, output port)
symbol_to_component = {
"D": (bend180, "o1", "o2"),
"C": (bend180, "o2", "o1"),
"P": (wg_pin, "o1", "o2"),
"-": (wg, "o1", "o2"),
}
# Generate a sequence
# This is simply a chain of characters. Each of them represents a component
# with a given input and a given output
sequence = "DC-P-P-P-P-CD"
component = gf.components.component_sequence(
sequence=sequence, symbol_to_component=symbol_to_component
)
component.name = "component_sequence"
component
```
As the sequence is defined as a string you can use the string operations to easily build complex sequences
| github_jupyter |
# Processing training data from raw files
# Notebook nยฐ1
```
basepath = '/data/conda/recnn/data'
### Importing usefull packages ###
%load_ext cython
import sys
import copy
import numpy as np
import multiprocessing as mp
from functools import partial
from rootpy.vector import LorentzVector
sys.path.append("..")
### Importing preprocessing functions ###
from recnn.preprocessing import _pt
from recnn.preprocessing import randomize
from recnn.preprocessing import multithreadmap
from recnn.preprocessing import sequentialize_by_pt
%%cython -f -+ -I/usr/local/include --link-args=-Wl,-rpath,/usr/local/lib -lm -L/usr/local/lib -lfastjettools -lfastjet -lfastjetplugins -lsiscone_spherical -lsiscone
import numpy as np
cimport numpy as np
np.import_array()
from libcpp.pair cimport pair
from libcpp.vector cimport vector
cdef extern from "/home/yohann/Desktop/stage/recnn/notebooks/fj.cc":
void fj(vector[double]& a,
vector[vector[int]]& trees,
vector[vector[double]]& contents,
vector[double]& masses,
vector[double]& pts,
double R, int jet_algorithm)
cpdef cluster(np.ndarray[np.double_t, ndim=2, mode="c"] a,
              R=1.0, jet_algorithm=0):
    """Cluster the four-vectors in `a` with fastjet and return the jets.

    : a: (n, 4) C-contiguous array of particle four-momenta (px, py, pz, E)
    : R: jet radius parameter handed to fastjet
    : jet_algorithm: fastjet algorithm id (0=kt, 1=anti-kt, 2=cambridge,
      as used by preprocess() below)
    : return: list of (tree, content, mass, pt) tuples, one per jet
    """
    cdef vector[double] v
    cdef vector[vector[int]] trees
    cdef vector[vector[double]] contents
    cdef vector[double] masses
    cdef vector[double] pts
    # flatten the input row-major into a std::vector for the C++ helper
    for value in a.ravel():
        v.push_back(value)
    fj(v, trees, contents, masses, pts, R=R, jet_algorithm=jet_algorithm)
    jets = []
    # fj fills flat buffers; reshape into (n, 2) trees and (n, 4) p4 tables
    for tree, content, mass, pt in zip(trees, contents, masses, pts):
        tree = np.array(tree).reshape(-1, 2)
        content = np.array(content).reshape(-1, 4)
        jets.append((tree, content, mass, pt))
    return jets
def cast(event, soft=0):
    """
    Convert an event into an array of p4 four-vectors usable by fastjet.

    : event: iterable of particles, each indexable as (E, px, py, pz)
    : soft: number of extra soft particles to sprinkle in (robustness check)
    : return: (len(event) + soft, 4) array with rows ordered (px, py, pz, E)
    """
    n = len(event)
    a = np.zeros((n + soft, 4))
    for i, p in enumerate(event):
        # reorder (E, px, py, pz) -> (px, py, pz, E)
        a[i] = (p[1], p[2], p[3], p[0])
    ### Robustness check : sprinkling soft particles ###
    for i in range(n, n + soft):
        v = LorentzVector()
        # tiny pt, random eta in [-5, 5), random phi in [0, 2*pi), massless
        v.set_pt_eta_phi_m(10e-5, np.random.rand() * 10 - 5,
                           np.random.rand() * 2 * np.pi, 0.0)
        a[i] = (v.px, v.py, v.pz, v.e)
    return a
def ff(e):
    """
    Create the Jet dictionary structure from fastjet.

    : e: pair whose first element is the event and last element is the
         gen-level pt label
    : return: dict describing the highest-pt jet of the event
    """
    event, genpt = e[0], e[-1]
    four_vectors = cast(event, soft=0)
    # dump highest pt jet only
    tree, content, mass, pt = cluster(four_vectors, jet_algorithm=1)[0]
    px = content[0, 0]
    py = content[0, 1]
    pz = content[0, 2]
    momentum = (content[0, 0:3] ** 2).sum() ** 0.5
    # pseudorapidity and azimuth of the jet axis
    eta = 0.5 * (np.log(momentum + pz) - np.log(momentum - pz))
    phi = np.arctan2(py, px)
    return {
        "root_id": 0,
        "tree": tree,        # tree[i] contains [left son, right son] of subjet i
        "content": content,  # every p4 used to create the full jet
        "mass": mass,
        "pt": pt,
        "energy": content[0, 3],
        "eta": eta,
        "phi": phi,
        "genpt": genpt,
    }
### Loading and "jetting" data with ff ###
signallist = ['/BackgroundJEC.npy']
signal = []
for path_file in signallist:
events = np.array(np.load(basepath+path_file))
signal = signal + multithreadmap(ff, events)
signal[0]
```
# Notebook nยฐ2
## W vs QCD
```
### creating files to be preprocessed ###
def extractgenpt(e):
    """Return the gen-level pt stored in a jet dictionary."""
    return e["genpt"]
print(len(signal))
X = np.array(signal)
y = np.array(multithreadmap(extractgenpt,X))
```
# Notebook nยฐ3
### preprocessing function
```
%%cython -f -+ -I/usr/local/include --link-args=-Wl,-rpath,/usr/local/lib -lm -L/usr/local/lib -lfastjettools -lfastjet -lfastjetplugins -lsiscone_spherical -lsiscone
import numpy as np
cimport numpy as np
np.import_array()
from libcpp.pair cimport pair
from libcpp.vector cimport vector
cdef extern from "/home/yohann/Desktop/stage/recnn/notebooks/fj.cc":
void fj(vector[double]& a,
vector[vector[int]]& trees,
vector[vector[double]]& contents,
vector[double]& masses,
vector[double]& pts,
double R, int jet_algorithm)
cpdef cluster(np.ndarray[np.double_t, ndim=2, mode="c"] a,
              R=0.3, jet_algorithm=0):
    """Cluster the four-vectors in `a` with fastjet and return the jets.

    Same as the earlier cell, but defaulting to the small radius R=0.3 used
    for subjet finding in preprocess().

    : a: (n, 4) C-contiguous array of particle four-momenta (px, py, pz, E)
    : R: jet radius parameter handed to fastjet
    : jet_algorithm: fastjet algorithm id (0=kt, 1=anti-kt, 2=cambridge)
    : return: list of (tree, content, mass, pt) tuples, one per jet
    """
    cdef vector[double] v
    cdef vector[vector[int]] trees
    cdef vector[vector[double]] contents
    cdef vector[double] masses
    cdef vector[double] pts
    # flatten the input row-major into a std::vector for the C++ helper
    for value in a.ravel():
        v.push_back(value)
    fj(v, trees, contents, masses, pts, R=R, jet_algorithm=jet_algorithm)
    jets = []
    # fj fills flat buffers; reshape into (n, 2) trees and (n, 4) p4 tables
    for tree, content, mass, pt in zip(trees, contents, masses, pts):
        tree = np.array(tree).reshape(-1, 2)
        content = np.array(content).reshape(-1, 4)
        jets.append((tree, content, mass, pt))
    return jets
def preprocess(jet, output="kt", colinear_splits=0, trimming=0.0):
    """
    Preprocess a jet dictionary to make it usable by the RecNN.

    : jet: jet dictionary as produced by ff() (tree/content/pt/genpt, ...)
    : output: algorithm used to recluster the final tree -- "kt",
      "anti-kt" or "cambridge"
    : colinear_splits: number of times the hardest constituent is split
      into two colinear pieces (robustness check)
    : trimming: drop subjets with pt <= trimming * jet pt (0.0 disables)
    : return: a new jet dictionary (the input is not modified)
    : raises ValueError: if `output` is not one of the supported algorithms

    Preprocessing algorithm:
    1. j = the highest pt anti-kt jet (R=1)
    2. run kt (R=0.3) on the constituents c of j, resulting in subjets sj1, sj2, ..., sjN
    3. phi = sj1.phi(); for all c, do c.rotate_z(-phi)
    4. bv = sj1.boost_vector(); bv.set_perp(0); for all c, do c.boost(-bv)
    5. deltaz = sj1.pz - sj2.pz; deltay = sj1.py - sj2.py; alpha = -atan2(deltaz, deltay); for all c, do c.rotate_x(alpha)
    6. if sj3.pz < 0: for all c, do c.set_pz(-c.pz)
    7. finally recluster all transformed constituents c into a single jet
    """
    # work on a copy so the caller's jet is left untouched
    jet = copy.deepcopy(jet)
    # leaves of the clustering tree (left son == -1) are the constituents
    constituents = jet["content"][jet["tree"][:, 0] == -1]
    genpt = jet["genpt"]
    ### Robustness check : Colinear splits ###
    for i in range(colinear_splits):
        # split the highest-pt constituent into two colinear pieces p1/p2
        j = np.argmax([_pt(c) for c in constituents])
        v = LorentzVector(constituents[j])
        eps = np.random.rand()
        p1 = LorentzVector()
        p2 = LorentzVector()
        p1.set_pt_eta_phi_m(v.pt() * eps, v.eta(), v.phi(), v.m() * eps ** 0.5)
        p2.set_pt_eta_phi_m(v.pt() * (1. - eps), v.eta(), v.phi(), 0.0)
        constituents[j][0] = p1.px
        constituents[j][1] = p1.py
        constituents[j][2] = p1.pz
        constituents[j][3] = p1.e
        constituents = np.vstack([constituents,
                                  np.array([[p2.px, p2.py, p2.pz, p2.e]])])
    ### run kt (R=0.3) on the constituents c of j, resulting in subjets sj1, sj2, ..., sjN ###
    subjets = cluster(constituents, R=0.3, jet_algorithm=0)
    ### trimming ###
    if trimming > 0.0:
        subjets = [(tree, content, mass, pt) for tree, content, mass, pt in subjets if pt > trimming * jet["pt"]]
    else:
        subjets = [(tree, content, mass, pt) for tree, content, mass, pt in subjets]
    ### Rot phi ###
    # phi = sj1.phi()
    # for all c, do c.rotate_z(-phi)
    v = subjets[0][1][0]
    v = LorentzVector(v)
    phi = v.phi()
    for _, content, _, _ in subjets:
        for i in range(len(content)):
            v = LorentzVector(content[i])
            v.rotate_z(-phi)
            content[i, 0] = v[0]
            content[i, 1] = v[1]
            content[i, 2] = v[2]
            content[i, 3] = v[3]
    ### boost ###
    # bv = sj1.boost_vector()
    # bv.set_perp(0)
    # for all c, do c.boost(-bv)
    v = subjets[0][1][0]
    v = LorentzVector(v)
    bv = v.boost_vector()
    bv.set_perp(0)
    for _, content, _, _ in subjets:
        for i in range(len(content)):
            v = LorentzVector(content[i])
            v.boost(-bv)
            content[i, 0] = v[0]
            content[i, 1] = v[1]
            content[i, 2] = v[2]
            content[i, 3] = v[3]
    ### Rot alpha ###
    # deltaz = sj1.pz - sj2.pz
    # deltay = sj1.py - sj2.py
    # alpha = -atan2(deltaz, deltay)
    # for all c, do c.rotate_x(alpha)
    if len(subjets) >= 2:
        deltaz = subjets[0][1][0, 2] - subjets[1][1][0, 2]
        deltay = subjets[0][1][0, 1] - subjets[1][1][0, 1]
        alpha = -np.arctan2(deltaz, deltay)
        for _, content, _, _ in subjets:
            for i in range(len(content)):
                v = LorentzVector(content[i])
                v.rotate_x(alpha)
                content[i, 0] = v[0]
                content[i, 1] = v[1]
                content[i, 2] = v[2]
                content[i, 3] = v[3]
    ### flip if necessary ###
    # if sj3.pz < 0: for all c, do c.set_pz(-c.pz)
    if len(subjets) >= 3 and subjets[2][1][0, 2] < 0:
        for _, content, _, _ in subjets:
            for i in range(len(content)):
                content[i, 2] *= -1.0
    ### finally recluster all transformed constituents c into a single jet ###
    constituents = []
    for tree, content, _, _ in subjets:
        constituents.append(content[tree[:, 0] == -1])
    constituents = np.vstack(constituents)
    # R=100 guarantees all constituents end up in a single jet
    if output == "anti-kt":
        subjets = cluster(constituents, R=100., jet_algorithm=1)
    elif output == "kt":
        subjets = cluster(constituents, R=100., jet_algorithm=0)
    elif output == "cambridge":
        subjets = cluster(constituents, R=100., jet_algorithm=2)
    else:
        # a bare `raise` here would only produce a confusing
        # "RuntimeError: No active exception to re-raise"
        raise ValueError("unknown output algorithm: %r" % (output,))
    jet["tree"] = subjets[0][0]
    jet["content"] = subjets[0][1]
    v = LorentzVector(jet["content"][0])
    jet["phi"] = v.phi()
    jet["eta"] = v.eta()
    jet["energy"] = v.E()
    jet["mass"] = v.m()
    jet["pt"] = v.pt()
    jet["root_id"] = 0
    jet["genpt"] = genpt
    return jet
```
### Convert data
```
f = basepath + '/npyfilesregression/subjet_oriented_'
### eliminate single particles ###
# A jet whose clustering tree is a single (1, 2) node has only one
# constituent; drop those entries.  A boolean mask does this in one O(n)
# pass, whereas the repeated np.delete calls of the original loop copied
# the whole array on every removal (O(n^2)).
keep = np.array([jet['tree'].shape != (1, 2) for jet in X])
X, y = X[keep], y[keep]
### Save all versions of the dataset ###
### anti-kt ###
#random permutation
flush = np.random.permutation(len(X))
X_,y_ = np.copy(X[flush]),np.copy(y[flush])
#preprocess
X_ = multithreadmap(preprocess,X_,output='anti-kt')
#separate training and testing data
#saving
np.save(f+"anti-kt_train.npy",np.array([X_, y_]))
### kt ###
flush = np.random.permutation(len(X))
X_,y_ = np.copy(X[flush]),np.copy(y[flush])
X_ = multithreadmap(preprocess,X_,output='kt')
np.save(f+"kt_train.npy", np.array([X_, y_]))
### cambridge ###
flush = np.random.permutation(len(X))
X_,y_ = np.copy(X[flush]),np.copy(y[flush])
X_ = multithreadmap(preprocess,X_,output='cambridge')
np.save(f+"cambridge_train.npy", np.array([X_, y_]))
### random tree ###
flush = np.random.permutation(len(X))
X_,y_ = np.copy(X[flush]),np.copy(y[flush])
X_=multithreadmap(randomize,multithreadmap(preprocess,X_,output="anti-kt"))
np.save(f+"random_train.npy", np.array([X_, y_]))
### seq by pt ###
flush = np.random.permutation(len(X))
X_,y_ = np.copy(X[flush]),np.copy(y[flush])
X_=multithreadmap(sequentialize_by_pt,multithreadmap(preprocess,X_,output="anti-kt"),reverse=False)
np.save(f+"seqpt_train.npy", np.array([X_, y_]))
### seq by pt reversed ###
flush = np.random.permutation(len(X))
X_,y_ = np.copy(X[flush]),np.copy(y[flush])
X_=multithreadmap(sequentialize_by_pt,multithreadmap(preprocess,X_,output="anti-kt"),reverse=True)
np.save(f+"seqpt_reversed_train.npy", np.array([X_, y_]))
```
# Verification of the formating
```
### Load data to check ###
fd = f+"anti-kt_test.npy"
X, y = np.load(fd)
### import plt and set options ###
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
plt.rcParams["figure.figsize"] = (7,6)
### Check for signal ###
a1 = []
w1=[]
for i,j in enumerate(X):
constituents = j["content"][j["tree"][:, 0] == -1]
# if len(constituents)>1:
# constituents = np.delete(constituents,0,0)
if y[i]==1:
a1.append(np.array([[LorentzVector(c).eta(),
LorentzVector(c).phi()] for c in constituents]))
w1.append([LorentzVector(c).pt() for c in constituents])
w1 = [item for sublist in w1 for item in sublist]
w1=100*np.array(w1)/sum(w1)
a1 = np.vstack(a1)
plt.close()
t=plt.hist2d(a1[:, 0], a1[:, 1], range=[(-0.5,0.5), (-0.5,0.5)],
bins=200, cmap=plt.cm.jet,weights=w1,norm=LogNorm())
cbar = plt.colorbar()
plt.xlabel(r'$\eta$')
plt.ylabel(r'$\varphi$')
cbar.set_label(r'% of p$_t$')
#plt.savefig('tau_pfd_log_bis.png',dpi=600, transparent=True)
plt.show()
### For background ###
a = []
w=[]
for i,j in enumerate(X):
constituents = j["content"][j["tree"][:, 0] == -1]
# if len(constituents)>1:
# constituents = np.delete(constituents,0,0)
if y[i]==0:
a.append(np.array([[LorentzVector(c).eta(),
LorentzVector(c).phi()] for c in constituents]))
w.append([LorentzVector(c).pt() for c in constituents])
w = [item for sublist in w for item in sublist]
w=100*np.array(w)/sum(w)
a = np.vstack(a)
plt.close()
t=plt.hist2d(a[:, 0], a[:, 1], range=[(-0.5,0.5), (-0.5,0.5)],
bins=200, cmap=plt.cm.jet, weights=w,norm=LogNorm())
cbar = plt.colorbar()
plt.xlabel(r'$\eta$')
plt.ylabel(r'$\varphi$')
cbar.set_label(r'% of p$_t$')
#plt.savefig('non_tau_pfd_log_bis.png',dpi=600, transparent=True)
plt.show()
### few taus plotting ###
a = []
w=[]
njets = 10
i0=2000
i1=i0+njets
for i,j in enumerate(X[i0:i1]):
constituents = j["content"][j["tree"][:, 0] == -1]
if y[i+i0]==1:
a.append(np.array([[LorentzVector(c).eta(),
LorentzVector(c).phi()] for c in constituents]))
w.append([LorentzVector(c).pt() for c in constituents])
for i in range(len(a)):
plt.scatter(a[i][:,0],a[i][:,1],s=w[i]*100)
plt.show()
```
| github_jupyter |
# Plagiarism Text Data
In this project, you will be tasked with building a plagiarism detector that examines a text file and performs binary classification; labeling that file as either plagiarized or not, depending on how similar the text file is when compared to a provided source text.
The first step in working with any dataset is loading the data in and noting what information is included in the dataset. This is an important step in eventually working with this data, and knowing what kinds of features you have to work with as you transform and group the data!
So, this notebook is all about exploring the data and noting patterns about the features you are given and the distribution of data.
> There are not any exercises or questions in this notebook, it is only meant for exploration. This notebook will not be required in your final project submission.
---
## Read in the Data
The cell below will download the necessary data and extract the files into the folder `data/`.
This data is a slightly modified version of a dataset created by Paul Clough (Information Studies) and Mark Stevenson (Computer Science), at the University of Sheffield. You can read all about the data collection and corpus, at [their university webpage](https://ir.shef.ac.uk/cloughie/resources/plagiarism_corpus.html).
> **Citation for data**: Clough, P. and Stevenson, M. Developing A Corpus of Plagiarised Short Answers, Language Resources and Evaluation: Special Issue on Plagiarism and Authorship Analysis, In Press. [Download]
```
!wget https://s3.amazonaws.com/video.udacity-data.com/topher/2019/January/5c4147f9_data/data.zip
!unzip data
# import libraries
import pandas as pd
import numpy as np
import os
```
This plagiarism dataset is made of multiple text files; each of these files has characteristics that are summarized in a `.csv` file named `file_information.csv`, which we can read in using `pandas`.
```
csv_file = 'data/file_information.csv'
plagiarism_df = pd.read_csv(csv_file)
# print out the first few rows of data info
plagiarism_df.head(10)
```
## Types of Plagiarism
Each text file is associated with one **Task** (task A-E) and one **Category** of plagiarism, which you can see in the above DataFrame.
### Five task types, A-E
Each text file contains an answer to one short question; these questions are labeled as tasks A-E.
* Each task, A-E, is about a topic that might be included in the Computer Science curriculum that was created by the authors of this dataset.
* For example, Task A asks the question: "What is inheritance in object oriented programming?"
### Four categories of plagiarism
Each text file has an associated plagiarism label/category:
1. `cut`: An answer is plagiarized; it is copy-pasted directly from the relevant Wikipedia source text.
2. `light`: An answer is plagiarized; it is based on the Wikipedia source text and includes some copying and paraphrasing.
3. `heavy`: An answer is plagiarized; it is based on the Wikipedia source text but expressed using different words and structure. Since this doesn't copy directly from a source text, this will likely be the most challenging kind of plagiarism to detect.
4. `non`: An answer is not plagiarized; the Wikipedia source text is not used to create this answer.
5. `orig`: This is a specific category for the original, Wikipedia source text. We will use these files only for comparison purposes.
> So, out of the submitted files, the only category that does not contain any plagiarism is `non`.
In the next cell, print out some statistics about the data.
```
# print out some stats about the data
print('Number of files: ', plagiarism_df.shape[0]) # .shape[0] gives the rows
# .unique() gives unique items in a specified column
print('Number of unique tasks/question types (A-E): ', (len(plagiarism_df['Task'].unique())))
print('Unique plagiarism categories: ', (plagiarism_df['Category'].unique()))
```
You should see the number of text files in the dataset as well as some characteristics about the `Task` and `Category` columns. **Note that the file count of 100 *includes* the 5 _original_ wikipedia files for tasks A-E.** If you take a look at the files in the `data` directory, you'll notice that the original, source texts start with the filename `orig_` as opposed to `g` for "group."
> So, in total there are 100 files, 95 of which are answers (submitted by people) and 5 of which are the original, Wikipedia source texts.
Your end goal will be to use this information to classify any given answer text into one of two categories, plagiarized or not-plagiarized.
### Distribution of Data
Next, let's look at the distribution of data. In this course, we've talked about traits like class imbalance that can inform how you develop an algorithm. So, here, we'll ask: **How evenly is our data distributed among different tasks and plagiarism levels?**
Below, you should notice two things:
* Our dataset is quite small, especially with respect to examples of varying plagiarism levels.
* The data is distributed fairly evenly across task and plagiarism types.
```
# Show counts by different tasks and amounts of plagiarism
# group and count by task
counts_per_task=plagiarism_df.groupby(['Task']).size().reset_index(name="Counts")
print("\nTask:")
display(counts_per_task)
# group by plagiarism level
counts_per_category=plagiarism_df.groupby(['Category']).size().reset_index(name="Counts")
print("\nPlagiarism Levels:")
display(counts_per_category)
# group by task AND plagiarism level
counts_task_and_plagiarism=plagiarism_df.groupby(['Task', 'Category']).size().reset_index(name="Counts")
print("\nTask & Plagiarism Level Combos :")
display(counts_task_and_plagiarism)
```
It may also be helpful to look at this last DataFrame, graphically.
Below, you can see that the counts follow a pattern broken down by task. Each task has one source text (original) and the highest number on `non` plagiarized cases.
```
import matplotlib.pyplot as plt
% matplotlib inline
# counts
group = ['Task', 'Category']
counts = plagiarism_df.groupby(group).size().reset_index(name="Counts")
plt.figure(figsize=(8,5))
plt.bar(range(len(counts)), counts['Counts'], color = 'blue')
```
## Up Next
This notebook is just about data loading and exploration, and you do not need to include it in your final project submission.
In the next few notebooks, you'll use this data to train a complete plagiarism classifier. You'll be tasked with extracting meaningful features from the text data, reading in answers to different tasks and comparing them to the original Wikipedia source text. You'll engineer similarity features that will help identify cases of plagiarism. Then, you'll use these features to train and deploy a classification model in a SageMaker notebook instance.
| github_jupyter |
# ๅพๅๅ็ฑป
ๅจๆญค้กน็ฎไธญ๏ผไฝ ๅฐๅฏนย [CIFAR-10 ๆฐๆฎ้](https://www.cs.toronto.edu/~kriz/cifar.html) ไธญ็ๅพ็่ฟ่กๅ็ฑปใ่ฏฅๆฐๆฎ้ๅ
ๅซ้ฃๆบใ็ซ็ๅๅ
ถไป็ฉไฝใไฝ ้่ฆ้ขๅค็่ฟไบๅพ็๏ผ็ถๅ็จๆๆๆ ทๆฌ่ฎญ็ปไธไธชๅท็งฏ็ฅ็ป็ฝ็ปใๅพ็้่ฆๆ ๅๅ๏ผnormalized๏ผ๏ผๆ ็ญพ้่ฆ้็จ one-hot ็ผ็ ใไฝ ้่ฆๅบ็จๆๅญฆ็็ฅ่ฏๆๅปบๅท็งฏ็ใๆๅคงๆฑ ๅ๏ผmax pooling๏ผใไธขๅผ๏ผdropout๏ผๅๅฎๅ
จ่ฟๆฅ๏ผfully connected๏ผ็ๅฑใๆๅ๏ผไฝ ้่ฆๅจๆ ทๆฌๅพ็ไธ็ๅฐ็ฅ็ป็ฝ็ป็้ขๆต็ปๆใ
## ่ทๅๆฐๆฎ
่ฏท่ฟ่กไปฅไธๅๅ
๏ผไปฅไธ่ฝฝย [CIFAR-10 ๆฐๆฎ้๏ผPython็๏ผ](https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz)ใ
```
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
from urllib.request import urlretrieve
from os.path import isfile, isdir
from tqdm import tqdm
import problem_unittests as tests
import tarfile
cifar10_dataset_folder_path = 'cifar-10-batches-py'
# Use Floyd's cifar-10 dataset if present
floyd_cifar10_location = '/input/cifar-10/python.tar.gz'
if isfile(floyd_cifar10_location):
tar_gz_path = floyd_cifar10_location
else:
tar_gz_path = 'cifar-10-python.tar.gz'
class DLProgress(tqdm):
    """tqdm progress bar adapter for urlretrieve's reporthook protocol."""

    # number of blocks already accounted for by the previous hook call
    last_block = 0

    def hook(self, block_num=1, block_size=1, total_size=None):
        """Advance the bar by the bytes received since the last call."""
        self.total = total_size
        new_blocks = block_num - self.last_block
        self.update(new_blocks * block_size)
        self.last_block = block_num
if not isfile(tar_gz_path):
with DLProgress(unit='B', unit_scale=True, miniters=1, desc='CIFAR-10 Dataset') as pbar:
urlretrieve(
'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz',
tar_gz_path,
pbar.hook)
if not isdir(cifar10_dataset_folder_path):
with tarfile.open(tar_gz_path) as tar:
tar.extractall()
tar.close()
tests.test_folder_path(cifar10_dataset_folder_path)
```
## ๆข็ดขๆฐๆฎ
่ฏฅๆฐๆฎ้ๅๆไบๅ ้จๅ๏ผๆนๆฌก๏ผbatches๏ผ๏ผไปฅๅ
ไฝ ็ๆบๅจๅจ่ฎก็ฎๆถๅ
ๅญไธ่ถณใCIFAR-10 ๆฐๆฎ้ๅ
ๅซ 5 ไธช้จๅ๏ผๅ็งฐๅๅซไธบย `data_batch_1`ใ`data_batch_2`๏ผไปฅๆญค็ฑปๆจใๆฏไธช้จๅ้ฝๅ
ๅซไปฅไธๆไธช็ฑปๅซ็ๆ ็ญพๅๅพ็๏ผ
* ้ฃๆบ
* ๆฑฝ่ฝฆ
* ้ธ็ฑป
* ็ซ
* ้นฟ
* ็
* ้่
* ้ฉฌ
* ่นๅช
* ๅก่ฝฆ
ไบ่งฃๆฐๆฎ้ไนๆฏๅฏนๆฐๆฎ่ฟ่ก้ขๆต็ๅฟ
็ปๆญฅ้ชคใไฝ ๅฏไปฅ้่ฟๆดๆนย `batch_id` ๅ `sample_id` ๆข็ดขไธ้ข็ไปฃ็ ๅๅ
ใ`batch_id`ย ๆฏๆฐๆฎ้ไธไธช้จๅ็ ID๏ผ1 ๅฐ 5๏ผใ`sample_id` ๆฏ่ฏฅ้จๅไธญๅพ็ๅๆ ็ญพๅฏน๏ผlabel pair๏ผ็ IDใ
้ฎ้ฎไฝ ่ชๅทฑ๏ผโๅฏ่ฝ็ๆ ็ญพๆๅชไบ๏ผโใโๅพ็ๆฐๆฎ็ๅผ่ๅดๆฏๅคๅฐ๏ผโใโๆ ็ญพๆฏๆ้กบๅบๆๅ๏ผ่ฟๆฏ้ๆบๆๅ็๏ผโใๆ่็ฑปไผผ็้ฎ้ข๏ผๆๅฉไบไฝ ้ขๅค็ๆฐๆฎ๏ผๅนถไฝฟ้ขๆต็ปๆๆดๅ็กฎใ
```
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import helper
import numpy as np
# Explore the dataset
batch_id = 1
sample_id = 50
helper.display_stats(cifar10_dataset_folder_path, batch_id, sample_id)
```
## ๅฎ็ฐ้ขๅค็ๅฝๆฐ
### ๆ ๅๅ
ๅจไธ้ข็ๅๅ
ไธญ๏ผๅฎ็ฐ `normalize`ย ๅฝๆฐ๏ผไผ ๅ
ฅๅพ็ๆฐๆฎ `x`๏ผๅนถ่ฟๅๆ ๅๅ Numpy ๆฐ็ปใๅผๅบ่ฏฅๅจ 0 ๅฐ 1 ็่ๅดๅ
๏ผๅซ 0 ๅ 1๏ผใ่ฟๅๅฏน่ฑกๅบ่ฏฅๅ `x` ็ๅฝข็ถไธๆ ทใ
```
def normalize(x):
    """
    Normalize a list of sample image data in the range of 0 to 1.

    : x: List or array of image data. The image shape is (32, 32, 3)
         with 8-bit pixel values in [0, 255].
    : return: Numpy array of normalized data in [0, 1]
    """
    # np.asarray makes the docstring's "list" input actually work
    # (a bare `x / 255` raises TypeError on a plain list) and true
    # division guarantees a float result for integer pixel data.
    return np.asarray(x) / 255.0
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_normalize(normalize)
```
### One-hot ็ผ็
ๅไนๅ็ไปฃ็ ๅๅ
ไธๆ ท๏ผไฝ ๅฐไธบ้ขๅค็ๅฎ็ฐไธไธชๅฝๆฐใ่ฟๆฌก๏ผไฝ ๅฐๅฎ็ฐย `one_hot_encode`ย ๅฝๆฐใ่พๅ
ฅ๏ผไนๅฐฑๆฏ `x`๏ผๆฏไธไธชๆ ็ญพๅ่กจใๅฎ็ฐ่ฏฅๅฝๆฐ๏ผไปฅ่ฟๅไธบ one_hot ็ผ็ ็ Numpy ๆฐ็ป็ๆ ็ญพๅ่กจใๆ ็ญพ็ๅฏ่ฝๅผไธบ 0 ๅฐ 9ใๆฏๆฌก่ฐ็จย `one_hot_encode` ๆถ๏ผๅฏนไบๆฏไธชๅผ๏ผone_hot ็ผ็ ๅฝๆฐๅบ่ฏฅ่ฟๅ็ธๅ็็ผ็ ใ็กฎไฟๅฐ็ผ็ ๆ ๅฐไฟๅญๅฐ่ฏฅๅฝๆฐๅค้ขใ
ๆ็คบ๏ผไธ่ฆ้ๅคๅๆ่ฝฎๅญใ
```
import numpy as np
from sklearn import preprocessing
def one_hot_encode(x):
    """
    One-hot encode a list of sample labels.

    : x: List of sample labels, each in 0..9
    : return: Numpy array with one one-hot row (length 10) per label
    """
    # row i of the identity matrix is exactly the one-hot vector for i,
    # so fancy-indexing it with the labels encodes the whole batch at once
    identity = np.identity(10)
    return identity[x]
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_one_hot_encode(one_hot_encode)
```
### ้ๆบๅๆฐๆฎ
ไนๅๆข็ดขๆฐๆฎๆถ๏ผไฝ ๅทฒ็ปไบ่งฃๅฐ๏ผๆ ทๆฌ็้กบๅบๆฏ้ๆบ็ใๅ้ๆบๅไธๆฌกไนไธไผๆไปไนๅ
ณ็ณป๏ผไฝๆฏๅฏนไบ่ฟไธชๆฐๆฎ้ๆฒกๆๅฟ
่ฆใ
## ้ขๅค็ๆๆๆฐๆฎๅนถไฟๅญ
่ฟ่กไธๆน็ไปฃ็ ๅๅ
๏ผๅฐ้ขๅค็ๆๆ CIFAR-10 ๆฐๆฎ๏ผๅนถไฟๅญๅฐๆไปถไธญใไธ้ข็ไปฃ็ ่ฟไฝฟ็จไบ 10% ็่ฎญ็ปๆฐๆฎ๏ผ็จๆฅ้ช่ฏใ
```
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
# Preprocess Training, Validation, and Testing Data
helper.preprocess_and_save_data(cifar10_dataset_folder_path, normalize, one_hot_encode)
```
# ๆฃๆฅ็น
่ฟๆฏไฝ ็็ฌฌไธไธชๆฃๆฅ็นใๅฆๆไฝ ไปไนๆถๅๅณๅฎๅๅๅฐ่ฏฅ่ฎฐไบๆฌ๏ผๆ้่ฆ้ๆฐๅฏๅจ่ฏฅ่ฎฐไบๆฌ๏ผไฝ ๅฏไปฅไป่ฟ้ๅผๅงใ้ขๅค็็ๆฐๆฎๅทฒไฟๅญๅฐๆฌๅฐใ
```
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import pickle
import problem_unittests as tests
import helper
# Load the Preprocessed Validation data
valid_features, valid_labels = pickle.load(open('preprocess_validation.p', mode='rb'))
```
## ๆๅปบ็ฝ็ป
ๅฏนไบ่ฏฅ็ฅ็ป็ฝ็ป๏ผไฝ ้่ฆๅฐๆฏๅฑ้ฝๆๅปบไธบไธไธชๅฝๆฐใไฝ ็ๅฐ็ๅคง้จๅไปฃ็ ้ฝไฝไบๅฝๆฐๅค้ขใ่ฆๆดๅ
จ้ขๅฐๆต่ฏไฝ ็ไปฃ็ ๏ผๆไปฌ้่ฆไฝ ๅฐๆฏๅฑๆพๅ
ฅไธไธชๅฝๆฐไธญใ่ฟๆ ทไฝฟๆไปฌ่ฝๅคๆไพๆดๅฅฝ็ๅ้ฆ๏ผๅนถไฝฟ็จๆไปฌ็็ปไธๆต่ฏๆฃๆต็ฎๅ็้่ฏฏ๏ผ็ถๅๅๆไบค้กน็ฎใ
>**ๆณจๆ**๏ผๅฆๆไฝ ่งๅพๆฏๅจๅพ้พๆฝๅบ่ถณๅค็ๆถ้ดๅญฆไน ่ฟ้จ่ฏพ็จ๏ผๆไปฌไธบๆญค้กน็ฎๆไพไบไธไธชๅฐๆทๅพใๅฏนไบๆฅไธๆฅ็ๅ ไธช้ฎ้ข๏ผไฝ ๅฏไปฅไฝฟ็จย [TensorFlow Layers](https://www.tensorflow.org/api_docs/python/tf/layers) ๆ [TensorFlow Layers (contrib)](https://www.tensorflow.org/api_guides/python/contrib.layers)ย ็จๅบๅ
ไธญ็็ฑปๆฅๆๅปบๆฏไธชๅฑ็บง๏ผไฝๆฏโๅท็งฏๅๆๅคงๆฑ ๅๅฑ็บงโ้จๅ็ๅฑ็บง้คๅคใTF Layers ๅ Keras ๅ TFLearn ๅฑ็บง็ฑปไผผ๏ผๅ ๆญคๅพๅฎนๆๅญฆไผใ
>ไฝๆฏ๏ผๅฆๆไฝ ๆณๅ
ๅๅฉ็จ่ฟ้จ่ฏพ็จ๏ผ่ฏทๅฐ่ฏ่ชๅทฑ่งฃๅณๆๆ้ฎ้ข๏ผไธไฝฟ็จ TF Layers ็จๅบๅ
ไธญ็ไปปไฝ็ฑปใไฝ ไพ็ถๅฏไปฅไฝฟ็จๅ
ถไป็จๅบๅ
ไธญ็็ฑป๏ผ่ฟไบ็ฑปๅไฝ ๅจ TF Layers ไธญ็็ฑปๅ็งฐๆฏไธๆ ท็๏ผไพๅฆ๏ผไฝ ๅฏไปฅไฝฟ็จ TF Neural Network ็ๆฌ็ `conv2d`ย ็ฑป [tf.nn.conv2d](https://www.tensorflow.org/api_docs/python/tf/nn/conv2d)๏ผ่ไธๆฏ TF Layers ็ๆฌ็ `conv2d`ย ็ฑป [tf.layers.conv2d](https://www.tensorflow.org/api_docs/python/tf/layers/conv2d)ใ
ๆไปฌๅผๅงๅง๏ผ
### ่พๅ
ฅ
็ฅ็ป็ฝ็ป้่ฆ่ฏปๅๅพ็ๆฐๆฎใone-hot ็ผ็ ๆ ็ญพๅไธขๅผไฟ็ๆฆ็๏ผdropout keep probability๏ผใ่ฏทๅฎ็ฐไปฅไธๅฝๆฐ๏ผ
* ๅฎ็ฐ `neural_net_image_input`
* ่ฟๅ [TF Placeholder](https://www.tensorflow.org/api_docs/python/tf/placeholder)
* ไฝฟ็จ `image_shape` ่ฎพ็ฝฎๅฝข็ถ๏ผ้จๅๅคงๅฐ่ฎพไธบ `None`
* ไฝฟ็จ [TF Placeholder](https://www.tensorflow.org/api_docs/python/tf/placeholder) ไธญ็ TensorFlowย `name`ย ๅๆฐๅฏน TensorFlow ๅ ไฝ็ฌฆ "x" ๅฝๅ
* ๅฎ็ฐ `neural_net_label_input`
* ่ฟๅ [TF Placeholder](https://www.tensorflow.org/api_docs/python/tf/placeholder)
* ไฝฟ็จ `n_classes` ่ฎพ็ฝฎๅฝข็ถ๏ผ้จๅๅคงๅฐ่ฎพไธบ `None`
* ไฝฟ็จ [TF Placeholder](https://www.tensorflow.org/api_docs/python/tf/placeholder) ไธญ็ TensorFlowย `name`ย ๅๆฐๅฏน TensorFlow ๅ ไฝ็ฌฆ "y" ๅฝๅ
* ๅฎ็ฐ `neural_net_keep_prob_input`
* ่ฟๅ [TF Placeholder](https://www.tensorflow.org/api_docs/python/tf/placeholder)๏ผ็จไบไธขๅผไฟ็ๆฆ็
* ไฝฟ็จ [TF Placeholder](https://www.tensorflow.org/api_docs/python/tf/placeholder) ไธญ็ TensorFlowย `name`ย ๅๆฐๅฏน TensorFlow ๅ ไฝ็ฌฆ "keep_prob" ๅฝๅ
่ฟไบๅ็งฐๅฐๅจ้กน็ฎ็ปๆๆถ๏ผ็จไบๅ ่ฝฝไฟๅญ็ๆจกๅใ
ๆณจๆ๏ผTensorFlow ไธญ็ `None`ย ่กจ็คบๅฝข็ถๅฏไปฅๆฏๅจๆๅคงๅฐใ
```
import tensorflow as tf
def neural_net_image_input(image_shape):
"""
Return a Tensor for a batch of image input
: image_shape: Shape of the images
: return: Tensor for image input.
"""
# TODO: Implement Function
return tf.placeholder(tf.float32, shape = (None, *image_shape), name = "x")
def neural_net_label_input(n_classes):
"""
Return a Tensor for a batch of label input
: n_classes: Number of classes
: return: Tensor for label input.
"""
# TODO: Implement Function
return tf.placeholder(tf.int8, shape = (None, n_classes), name = "y")
def neural_net_keep_prob_input():
"""
Return a Tensor for keep probability
: return: Tensor for keep probability.
"""
# TODO: Implement Function
return tf.placeholder(tf.float32, shape = None, name = "keep_prob")
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tf.reset_default_graph()
tests.test_nn_image_inputs(neural_net_image_input)
tests.test_nn_label_inputs(neural_net_label_input)
tests.test_nn_keep_prob_inputs(neural_net_keep_prob_input)
```
### ๅท็งฏๅๆๅคงๆฑ ๅๅฑ
ๅท็งฏๅฑ็บง้ๅๅค็ๅพ็ใๅฏนไบๆญคไปฃ็ ๅๅ
๏ผไฝ ๅบ่ฏฅๅฎ็ฐๅฝๆฐย `conv2d_maxpool`ย ไปฅไพฟๅบ็จๅท็งฏ็ถๅ่ฟ่กๆๅคงๆฑ ๅ๏ผ
* ไฝฟ็จ `conv_ksize`ใ`conv_num_outputs`ย ๅ `x_tensor` ็ๅฝข็ถๅๅปบๆ้๏ผweight๏ผๅๅ็ฝฎ๏ผbias๏ผใ
* ไฝฟ็จๆ้ๅ `conv_strides` ๅฏนย `x_tensor` ๅบ็จๅท็งฏใ
* ๅปบ่ฎฎไฝฟ็จๆไปฌๅปบ่ฎฎ็้ด่ท๏ผpadding๏ผ๏ผๅฝ็ถไนๅฏไปฅไฝฟ็จไปปไฝๅ
ถไป้ด่ทใ
* ๆทปๅ ๅ็ฝฎ
* ๅๅท็งฏไธญๆทปๅ ้็บฟๆงๆฟๆดป๏ผnonlinear activation๏ผ
* ไฝฟ็จ `pool_ksize`ย ๅ `pool_strides` ๅบ็จๆๅคงๆฑ ๅ
* ๅปบ่ฎฎไฝฟ็จๆไปฌๅปบ่ฎฎ็้ด่ท๏ผpadding๏ผ๏ผๅฝ็ถไนๅฏไปฅไฝฟ็จไปปไฝๅ
ถไป้ด่ทใ
**ๆณจๆ**๏ผๅฏนไบ**ๆญคๅฑ**๏ผ**่ฏทๅฟไฝฟ็จ**ย [TensorFlow Layers](https://www.tensorflow.org/api_docs/python/tf/layers) ๆ [TensorFlow Layers (contrib)](https://www.tensorflow.org/api_guides/python/contrib.layers)๏ผไฝๆฏไป็ถๅฏไปฅไฝฟ็จ TensorFlow ็ [Neural Network](https://www.tensorflow.org/api_docs/python/tf/nn)ย ๅ
ใๅฏนไบๆๆ**ๅ
ถไปๅฑ**๏ผไฝ ไพ็ถๅฏไปฅไฝฟ็จๅฟซๆทๆนๆณใ
```
def conv2d_maxpool(x_tensor, conv_num_outputs, conv_ksize, conv_strides, pool_ksize, pool_strides):
"""
Apply convolution then max pooling to x_tensor
:param x_tensor: TensorFlow Tensor
:param conv_num_outputs: Number of outputs for the convolutional layer
:param conv_ksize: kernal size 2-D Tuple for the convolutional layer
:param conv_strides: Stride 2-D Tuple for convolution
:param pool_ksize: kernal size 2-D Tuple for pool
:param pool_strides: Stride 2-D Tuple for pool
: return: A tensor that represents convolution and max pooling of x_tensor
"""
# TODO: Implement Function
input_chanel = int(x_tensor.shape[3])
output_chanel = conv_num_outputs
weight_shape = (*conv_ksize,input_chanel,output_chanel) # *
weight = tf.Variable(tf.random_normal(weight_shape, stddev = 0.1)) #ๆ้
bias = tf.Variable(tf.zeros(output_chanel)) #่ฎพ็ฝฎๅ็ฝฎ้กน
l_active = tf.nn.conv2d(x_tensor, weight, (1, *conv_strides, 1), 'SAME')
l_active = tf.nn.bias_add(l_active,bias)
#active_layers = tf.nn.relu(tf.add(tf.matmul(features,label),bias)) #ReLu
mx_layer = tf.nn.relu(l_active)
return tf.nn.max_pool(mx_layer, (1, *pool_ksize, 1), (1, *pool_strides, 1), 'VALID')
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_con_pool(conv2d_maxpool)
```
### ๆๅนณๅๅฑ
ๅฎ็ฐ `flatten`ย ๅฝๆฐ๏ผๅฐ `x_tensor`ย ็็ปดๅบฆไปๅ็ปดๅผ ้๏ผ4-D tensor๏ผๅๆไบ็ปดๅผ ้ใ่พๅบๅบ่ฏฅๆฏๅฝข็ถ๏ผ*้จๅๅคงๅฐ๏ผBatch Size๏ผ*๏ผ*ๆๅนณๅๅพ็ๅคงๅฐ๏ผFlattened Image Size๏ผ*๏ผใๅฟซๆทๆนๆณ๏ผๅฏนไบๆญคๅฑ๏ผไฝ ๅฏไปฅไฝฟ็จ [TensorFlow Layers](https://www.tensorflow.org/api_docs/python/tf/layers) ๆ [TensorFlow Layers (contrib)](https://www.tensorflow.org/api_guides/python/contrib.layers) ๅ
ไธญ็็ฑปใๅฆๆไฝ ๆณ่ฆๆดๅคงๆๆ๏ผๅฏไปฅไป
ไฝฟ็จๅ
ถไป TensorFlow ็จๅบๅ
ใ
```
from functools import reduce
from operator import mul
def flatten(x_tensor):
"""
Flatten x_tensor to (Batch Size, Flattened Image Size)
: x_tensor: A tensor of size (Batch Size, ...), where ... are the image dimensions.
: return: A tensor of size (Batch Size, Flattened Image Size).
"""
# TODO: Implement Function
_, *image_size = x_tensor.get_shape().as_list()
#print(*image_size)
return tf.reshape(x_tensor, (-1, reduce(mul, image_size)))
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_flatten(flatten)
```
### ๅฎๅ
จ่ฟๆฅ็ๅฑ
ๅฎ็ฐ `fully_conn`ย ๅฝๆฐ๏ผไปฅๅ `x_tensor`ย ๅบ็จๅฎๅ
จ่ฟๆฅ็ๅฑ็บง๏ผๅฝข็ถไธบ๏ผ*้จๅๅคงๅฐ๏ผBatch Size๏ผ*๏ผ*num_outputs*๏ผใๅฟซๆทๆนๆณ๏ผๅฏนไบๆญคๅฑ๏ผไฝ ๅฏไปฅไฝฟ็จ [TensorFlow Layers](https://www.tensorflow.org/api_docs/python/tf/layers) ๆ [TensorFlow Layers (contrib)](https://www.tensorflow.org/api_guides/python/contrib.layers) ๅ
ไธญ็็ฑปใๅฆๆไฝ ๆณ่ฆๆดๅคงๆๆ๏ผๅฏไปฅไป
ไฝฟ็จๅ
ถไป TensorFlow ็จๅบๅ
ใ
```
def fully_conn(x_tensor, num_outputs):
"""
Apply a fully connected layer to x_tensor using weight and bias
: x_tensor: A 2-D tensor where the first dimension is batch size.
: num_outputs: The number of output that the new tensor should be.
: return: A 2-D tensor where the second dimension is num_outputs.
"""
# TODO: Implement Function
num_input = x_tensor.get_shape().as_list()[1]
weight_shape = (num_input, num_outputs)
#print(weight_shape)
weight = tf.Variable(tf.truncated_normal(weight_shape, stddev = 0.1))
bias = tf.Variable(tf.zeros(num_outputs))
activation = tf.nn.bias_add(tf.matmul(x_tensor, weight), bias)
return tf.nn.relu(activation)
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_fully_conn(fully_conn)
```
### ่พๅบๅฑ
ๅฎ็ฐ `output`ย ๅฝๆฐ๏ผๅ x_tensorย ๅบ็จๅฎๅ
จ่ฟๆฅ็ๅฑ็บง๏ผๅฝข็ถไธบ๏ผ*้จๅๅคงๅฐ๏ผBatch Size๏ผ*๏ผ*num_outputs*๏ผใๅฟซๆทๆนๆณ๏ผๅฏนไบๆญคๅฑ๏ผไฝ ๅฏไปฅไฝฟ็จ [TensorFlow Layers](https://www.tensorflow.org/api_docs/python/tf/layers) ๆ [TensorFlow Layers (contrib)](https://www.tensorflow.org/api_guides/python/contrib.layers) ๅ
ไธญ็็ฑปใๅฆๆไฝ ๆณ่ฆๆดๅคงๆๆ๏ผๅฏไปฅไป
ไฝฟ็จๅ
ถไป TensorFlow ็จๅบๅ
ใ
**ๆณจๆ**๏ผ่ฏฅๅฑ็บงไธๅบๅบ็จ Activationใsoftmax ๆไบคๅ็ต๏ผcross entropy๏ผใ
```
def output(x_tensor, num_outputs):
"""
Apply a output layer to x_tensor using weight and bias
: x_tensor: A 2-D tensor where the first dimension is batch size.
: num_outputs: The number of output that the new tensor should be.
: return: A 2-D tensor where the second dimension is num_outputs.
"""
# TODO: Implement Function
num_input = x_tensor.get_shape().as_list()[1] #not 0
weight_shape = (num_input, num_outputs)
weight = tf.Variable(tf.truncated_normal(weight_shape, stddev = 0.1))
bias = tf.Variable(tf.zeros(num_outputs))
return tf.nn.bias_add(tf.matmul(x_tensor,weight),bias)
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_output(output)
```
### ๅๅปบๅท็งฏๆจกๅ
ๅฎ็ฐๅฝๆฐ `conv_net`๏ผย ๅๅปบๅท็งฏ็ฅ็ป็ฝ็ปๆจกๅใ่ฏฅๅฝๆฐไผ ๅ
ฅไธๆนๅพ็ `x`๏ผๅนถ่พๅบๅฏนๆฐ๏ผlogits๏ผใไฝฟ็จไฝ ๅจไธๆนๅๅปบ็ๅฑๅๅปบๆญคๆจกๅ๏ผ
* ๅบ็จ 1ใ2 ๆ 3 ไธชๅท็งฏๅๆๅคงๆฑ ๅๅฑ๏ผConvolution and Max Pool layers๏ผ
* ๅบ็จไธไธชๆๅนณๅฑ๏ผFlatten Layer๏ผ
* ๅบ็จ 1ใ2 ๆ 3 ไธชๅฎๅ
จ่ฟๆฅๅฑ๏ผFully Connected Layers๏ผ
* ๅบ็จไธไธช่พๅบๅฑ๏ผOutput Layer๏ผ
* ่ฟๅ่พๅบ
* ไฝฟ็จย `keep_prob` ๅๆจกๅไธญ็ไธไธชๆๅคไธชๅฑๅบ็จ [TensorFlow ็ Dropout](https://www.tensorflow.org/api_docs/python/tf/nn/dropout)
```
def conv_net(x, keep_prob):
"""
Create a convolutional neural network model
: x: Placeholder tensor that holds image data.
: keep_prob: Placeholder tensor that hold dropout keep probability.
: return: Tensor that represents logits
"""
# TODO: Apply 1, 2, or 3 Convolution and Max Pool layers
# Play around with different number of outputs, kernel size and stride
# Function Definition from Above:
# conv2d_maxpool(x_tensor, conv_num_outputs, conv_ksize, conv_strides, pool_ksize, pool_strides)
x = conv2d_maxpool(x, 64, (3, 3), (1, 1), (2, 2), (2, 2))
x = tf.nn.dropout(x, keep_prob)
x = conv2d_maxpool(x, 128, (3, 3), (1, 1), (2, 2), (2, 2))
x = tf.nn.dropout(x, keep_prob)
# x has shape (batch, 8, 8, 128)
x = conv2d_maxpool(x, 256, (3, 3), (1, 1), (2, 2), (2, 2))
x = tf.nn.dropout(x, keep_prob)
# TODO: Apply a Flatten Layer
# Function Definition from Above:
# flatten(x_tensor)
x = flatten(x)
# TODO: Apply 1, 2, or 3 Fully Connected Layers
# Play around with different number of outputs
# Function Definition from Above:
# fully_conn(x_tensor, num_outputs)
x = fully_conn(x, 512)
x = tf.nn.dropout(x, keep_prob)
# TODO: Apply an Output Layer
# Set this to the number of classes
# Function Definition from Above:
# output(x_tensor, num_outputs)
# TODO: return output
return output(x, 10)
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
##############################
## Build the Neural Network ##
##############################
# Remove previous weights, bias, inputs, etc..
tf.reset_default_graph()
# Inputs
x = neural_net_image_input((32, 32, 3))
y = neural_net_label_input(10)
keep_prob = neural_net_keep_prob_input()
# Model
logits = conv_net(x, keep_prob)
# Name logits Tensor, so that is can be loaded from disk after training
logits = tf.identity(logits, name='logits')
# Loss and Optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y))
optimizer = tf.train.AdamOptimizer().minimize(cost)
# Accuracy
correct_pred = tf.equal(tf.argmax(logits, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32), name='accuracy')
tests.test_conv_net(conv_net)
```
## ่ฎญ็ป็ฅ็ป็ฝ็ป
### ๅๆฌกไผๅ
ๅฎ็ฐๅฝๆฐ `train_neural_network`ย ไปฅ่ฟ่กๅๆฌกไผๅ๏ผsingle optimization๏ผใ่ฏฅไผๅๅบ่ฏฅไฝฟ็จ `optimizer`ย ไผๅ `session`๏ผๅ
ถไธญ `feed_dict`ย ๅ
ทๆไปฅไธๅๆฐ๏ผ
* `x` ่กจ็คบๅพ็่พๅ
ฅ
* `y` ่กจ็คบๆ ็ญพ
* `keep_prob` ่กจ็คบไธขๅผ็ไฟ็็
ๆฏไธช้จๅ้ฝไผ่ฐ็จ่ฏฅๅฝๆฐ๏ผๆไปฅ `tf.global_variables_initializer()`ย ๅทฒ็ป่ขซ่ฐ็จใ
ๆณจๆ๏ผไธ้่ฆ่ฟๅไปปไฝๅ
ๅฎนใ่ฏฅๅฝๆฐๅชๆฏ็จๆฅไผๅ็ฅ็ป็ฝ็ปใ
```
def train_neural_network(session, optimizer, keep_probability, feature_batch, label_batch):
"""
Optimize the session on a batch of images and labels
: session: Current TensorFlow session
: optimizer: TensorFlow optimizer function
: keep_probability: keep probability
: feature_batch: Batch of Numpy image data
: label_batch: Batch of Numpy label data
"""
# TODO: Implement Function
session.run(optimizer, feed_dict={x: feature_batch, y: label_batch, keep_prob: keep_probability})
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_train_nn(train_neural_network)
```
### ๆพ็คบๆฐๆฎ
ๅฎ็ฐๅฝๆฐย `print_stats`ย ไปฅ่พๅบๆๅคฑๅ้ช่ฏๅ็กฎ็ใไฝฟ็จๅ
จๅฑๅ้ `valid_features`ย ๅ `valid_labels`ย ่ฎก็ฎ้ช่ฏๅ็กฎ็ใไฝฟ็จไฟ็็ `1.0`ย ่ฎก็ฎๆๅคฑๅ้ช่ฏๅ็กฎ็๏ผloss and validation accuracy๏ผใ
```
def print_stats(session, feature_batch, label_batch, cost, accuracy):
"""
Print information about loss and validation accuracy
: session: Current TensorFlow session
: feature_batch: Batch of Numpy image data
: label_batch: Batch of Numpy label data
: cost: TensorFlow cost function
: accuracy: TensorFlow accuracy function
"""
# TODO: Implement Function
global valid_features, valid_labels
validation_accuracy = session.run(accuracy, feed_dict={x: valid_features, y: valid_labels, keep_prob: 1.0})
loss = session.run( cost, feed_dict={x: feature_batch, y: label_batch, keep_prob: 1.0})
prt = 'Loss: {:.4f} Accuracy: {:.4f}'
print(prt.format(loss, validation_accuracy, prec=3))
```
### ่ถ
ๅๆฐ
่ฐ่ฏไปฅไธ่ถ
ๅๆฐ๏ผ
* ่ฎพ็ฝฎ `epochs` ่กจ็คบ็ฅ็ป็ฝ็ปๅๆญขๅญฆไน ๆๅผๅง่ฟๆๅ็่ฟญไปฃๆฌกๆฐ
* ่ฎพ็ฝฎ `batch_size`๏ผ่กจ็คบๆบๅจๅ
ๅญๅ
่ฎธ็้จๅๆๅคงไฝ็งฏใๅคง้จๅไบบ่ฎพไธบไปฅไธๅธธ่งๅ
ๅญๅคงๅฐ๏ผ
* 64
* 128
* 256
* ...
* ่ฎพ็ฝฎ `keep_probability` ่กจ็คบไฝฟ็จไธขๅผๆถไฟ็่็น็ๆฆ็
```
# TODO: Tune Parameters
epochs = 200
batch_size = 128
keep_probability = 0.5
```
### ๅจๅไธช CIFAR-10 ้จๅไธ่ฎญ็ป
ๆไปฌๅ
็จๅไธช้จๅ๏ผ่ไธๆฏ็จๆๆ็ CIFAR-10 ๆนๆฌก่ฎญ็ป็ฅ็ป็ฝ็ปใ่ฟๆ ทๅฏไปฅ่็ๆถ้ด๏ผๅนถๅฏนๆจกๅ่ฟ่ก่ฟญไปฃ๏ผไปฅๆ้ซๅ็กฎ็ใๆ็ป้ช่ฏๅ็กฎ็่พพๅฐ 50% ๆไปฅไธไนๅ๏ผๅจไธไธ้จๅๅฏนๆๆๆฐๆฎ่ฟ่กๆจกๅใ
```
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
print('Checking the Training on a Single Batch...')
with tf.Session() as sess:
# Initializing the variables
sess.run(tf.global_variables_initializer())
# Training cycle
for epoch in range(epochs):
batch_i = 1
for batch_features, batch_labels in helper.load_preprocess_training_batch(batch_i, batch_size):
train_neural_network(sess, optimizer, keep_probability, batch_features, batch_labels)
print('Epoch {:>2}, CIFAR-10 Batch {}: '.format(epoch + 1, batch_i), end='')
print_stats(sess, batch_features, batch_labels, cost, accuracy)
```
### ๅฎๅ
จ่ฎญ็ปๆจกๅ
็ฐๅจ๏ผๅไธช CIFAR-10 ้จๅ็ๅ็กฎ็ๅทฒ็ปไธ้ไบ๏ผ่ฏ่ฏๆๆไบไธช้จๅๅงใ
```
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
save_model_path = './image_classification'
print('Training...')
with tf.Session() as sess:
# Initializing the variables
sess.run(tf.global_variables_initializer())
# Training cycle
for epoch in range(epochs):
# Loop over all batches
n_batches = 5
for batch_i in range(1, n_batches + 1):
for batch_features, batch_labels in helper.load_preprocess_training_batch(batch_i, batch_size):
train_neural_network(sess, optimizer, keep_probability, batch_features, batch_labels)
print('Epoch {:>2}, CIFAR-10 Batch {}: '.format(epoch + 1, batch_i), end='')
print_stats(sess, batch_features, batch_labels, cost, accuracy)
# Save Model
saver = tf.train.Saver()
save_path = saver.save(sess, save_model_path)
```
# ๆฃๆฅ็น
ๆจกๅๅทฒไฟๅญๅฐๆฌๅฐใ
## ๆต่ฏๆจกๅ
ๅฉ็จๆต่ฏๆฐๆฎ้ๆต่ฏไฝ ็ๆจกๅใ่ฟๅฐๆฏๆ็ป็ๅ็กฎ็ใไฝ ็ๅ็กฎ็ๅบ่ฏฅ้ซไบ 50%ใๅฆๆๆฒก่พพๅฐ๏ผ่ฏท็ปง็ปญ่ฐๆดๆจกๅ็ปๆๅๅๆฐใ
```
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import tensorflow as tf
import pickle
import helper
import random
# Set batch size if not already set
try:
if batch_size:
pass
except NameError:
batch_size = 64
save_model_path = './image_classification'
n_samples = 4
top_n_predictions = 3
def test_model():
"""
Test the saved model against the test dataset
"""
test_features, test_labels = pickle.load(open('preprocess_test.p', mode='rb'))
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
# Load model
loader = tf.train.import_meta_graph(save_model_path + '.meta')
loader.restore(sess, save_model_path)
# Get Tensors from loaded model
loaded_x = loaded_graph.get_tensor_by_name('x:0')
loaded_y = loaded_graph.get_tensor_by_name('y:0')
loaded_keep_prob = loaded_graph.get_tensor_by_name('keep_prob:0')
loaded_logits = loaded_graph.get_tensor_by_name('logits:0')
loaded_acc = loaded_graph.get_tensor_by_name('accuracy:0')
# Get accuracy in batches for memory limitations
test_batch_acc_total = 0
test_batch_count = 0
for test_feature_batch, test_label_batch in helper.batch_features_labels(test_features, test_labels, batch_size):
test_batch_acc_total += sess.run(
loaded_acc,
feed_dict={loaded_x: test_feature_batch, loaded_y: test_label_batch, loaded_keep_prob: 1.0})
test_batch_count += 1
print('Testing Accuracy: {}\n'.format(test_batch_acc_total/test_batch_count))
# Print Random Samples
random_test_features, random_test_labels = tuple(zip(*random.sample(list(zip(test_features, test_labels)), n_samples)))
random_test_predictions = sess.run(
tf.nn.top_k(tf.nn.softmax(loaded_logits), top_n_predictions),
feed_dict={loaded_x: random_test_features, loaded_y: random_test_labels, loaded_keep_prob: 1.0})
helper.display_image_predictions(random_test_features, random_test_labels, random_test_predictions)
test_model()
```
## ไธบไฝๅ็กฎ็ๅชๆ50-80%๏ผ
ไฝ ๅฏ่ฝๆณ้ฎ๏ผไธบไฝๅ็กฎ็ไธ่ฝๆด้ซไบ๏ผ้ฆๅ
๏ผๅฏนไบ็ฎๅ็ CNN ็ฝ็ปๆฅ่ฏด๏ผ50% ๅทฒ็ปไธไฝไบใ็บฏ็ฒน็ๆต็ๅ็กฎ็ไธบ10%ใไฝๆฏ๏ผไฝ ๅฏ่ฝๆณจๆๅฐๆไบบ็ๅ็กฎ็[่ฟ่ฟ่ถ
่ฟ 80%](http://rodrigob.github.io/are_we_there_yet/build/classification_datasets_results.html#43494641522d3130)ใ่ฟๆฏๅ ไธบๆไปฌ่ฟๆฒกๆไป็ปๆๆ็็ฅ็ป็ฝ็ป็ฅ่ฏใๆไปฌ่ฟ้่ฆๆๆกไธไบๅ
ถไปๆๅทงใ
## ๆไบค้กน็ฎ
ๆไบค้กน็ฎๆถ๏ผ็กฎไฟๅ
่ฟ่กๆๆๅๅ
๏ผ็ถๅๅไฟๅญ่ฎฐไบๆฌใๅฐ notebook ๆไปถๅฆๅญไธบโdlnd_image_classification.ipynbโ๏ผๅๅจ็ฎๅฝ "File" -> "Download as" ๅฆๅญไธบ HTML ๆ ผๅผใ่ฏทๅจๆไบค็้กน็ฎไธญๅ
ๅซ โhelper.pyโ ๅ โproblem_unittests.pyโ ๆไปถใ
| github_jupyter |
```
# Deprecated
# packages: random
import random
# packages: data structure
import numpy as np
import pandas as pd
import astropy.io as io
# packages: image generation and plot generation
from matplotlib import pyplot as plt
# pandas
# https://pandas.pydata.org/pandas-docs/stable/tutorials.html
# https://pandas.pydata.org/pandas-docs/stable/10min.html
# ascii:io
# http://docs.astropy.org/en/stable/io/ascii/
# matplotlib
# https://nickcharlton.net/posts/drawing-animating-shapes-matplotlib.html
# numpy: empty canvas
def empty_canvas(image_side_length=100):
return np.indices((image_side_length, image_side_length))
# scikit learn: circle
def circle_sk(canvas, x_center=50, y_center=50, radius=30):
y, x = canvas
circle = (x - x_center)**2 + (y - y_center)**2 < radius**2
img = circle.astype(float)
return img
# scikit learn: rectangle
def rect_sk(canvas, x_center=50, y_center=50, radius=30):
y, x = canvas
rect = (x < x_center + radius) & (x > x_center - radius) & (y < y_center + radius) & (y > y_center - radius)
img = rect.astype(float)
return img
# scikit learn: rectangle
#def triangle_sk(canvas, x_center=50, y_center=50, radius=30):
# y, x = canvas
# rect = (x < x_center + radius) & (x > x_center - radius) & (y < y_center + radius) & (y > y_center - radius)
# img = rect.astype(float)
# return img
# plot for SPI package
def plot_spi(img):
plt.axes()
plt.imshow(img)
plt.clf()
# matplotlib pyplot
def circle_plt(x_center=0, y_center=0, radius=0.75, fc='r', show=False):
plt.axes()
circle = plt.Circle((x_center, y_center), radius=radius, fc=fc)
plt.gca().add_patch(circle)
plt.axis('scaled')
imgplot = plt.imshow(img)
imgplot = plt.savefig("test3.png", dpi = (200))
#imgplot = plt.imshow()
if show:
plt.show()
# test each individual function
def test_individual():
#circle()
img = circle_sk()
plot_spi(img)
#star()
return
# generate one image data set
def generate_dataset(nb_obj,
image_side_length=100,
index_start=0,
shape='rect',
x_min=32,
x_max=32,
y_min=32,
y_max=32,
radius_min=10,
radius_max=10,
show_plot=False,
verbose=False):
# initiate image values
fac = -1.0
#x_center_list = np.random.uniform(0 + fac* radius_max, image_side_length + fac* radius_max, nb_obj)
#y_center_list = np.random.uniform(0 + fac* radius_max, image_side_length + fac* radius_max, nb_obj)
x_center_list = np.random.uniform(x_min, x_max, nb_obj)
y_center_list = np.random.uniform(y_min, y_max, nb_obj)
radius_list = np.random.uniform(radius_min, radius_max, nb_obj)
print('x ranges', min(x_center_list), max(x_center_list))
print('y ranges', min(y_center_list), max(y_center_list))
column_names = ['ident', 'x_center', 'y_center', 'radius', 'shape']
# create empty data structures
tab_list = np.empty((nb_obj, len(column_names)))
img_list = np.empty((nb_obj, image_side_length, image_side_length))
# create empty canvas for a single image
canvas = empty_canvas(image_side_length=image_side_length)
# loop over objects
icount = 0
for i_obj in np.arange(nb_obj):
# draw object properties from list
x_center = x_center_list[i_obj]
y_center = y_center_list[i_obj]
radius = radius_list[i_obj]
# identification value
ident = int(index_start + i_obj)
# create object
if shape == 'rect':
img = rect_sk(canvas, x_center=x_center, y_center=y_center, radius=radius)
shape_num = 0
elif shape == 'circ':
img = circle_sk(canvas, x_center=x_center, y_center=y_center, radius=radius)
shape_num = 1
# add tabular data to data list structure
tab_list[i_obj] = [ident, x_center, y_center, radius, int(shape_num)]
# add image data to image list structure
img_list[i_obj] = img
# plot image
if show_plot and icount <20:
icount+=1
plt.figure()
plt.axes()
plt.imshow(img)
# Data Frame: Tabular Data for Objects
tab_list = pd.DataFrame(tab_list,columns=column_names)
# verbose
if verbose:
print(tab_list[0:10])
print(img_list[0:10])
return tab_list, img_list
# save data
def save_data(f_data_list, f_img_list, data_list, img_list, verbose=False):
# Pandas Data Frame for tabular data: save to file
data_list.to_csv(f_data_list)
# Numpy Array for image data: save to file
np.save(f_img_list, img_list)
# verbose
if verbose:
print(f_data_list_pd)
print(f_img_list)
return
# combine data sets
def combine_data(frames, data_type='tab'):
if data_type=='tab':
data = pd.concat(frames)
elif data_type=='img':
data = np.concatenate(frames)
return data
# randomize data
def randomize_data(tab, img, seed=5, verbose=False):
if verbose:
print('Before:', tab)
# create randomized indices
random.seed(seed)
nb_tab = len(tab)
ind_random = np.arange(nb_tab)
random.shuffle(ind_random)
# re-order data based on randomized indices
tab = tab.iloc[ind_random]
img = img[ind_random]
if verbose:
print('After:', tab)
return tab, img
# split data
def split_data(nb_train, nb_valid, nb_test, tab, img, printcheck=0):
ind_start_train = 0
ind_end_train = ind_start_valid = ind_start_train + nb_train
ind_end_valid = ind_start_test = ind_start_valid + nb_valid
ind_end_test = ind_start_test + nb_test
if printcheck > 0:
print(tab[0:printcheck])
print(ind_start_train, ind_end_train)
# good place for unit test
# split data in train, valid, test
tab_train = tab[ind_start_train: ind_end_train]
img_train = img[ind_start_train: ind_end_train]
tab_valid = tab[ind_start_valid: ind_end_valid]
img_valid = img[ind_start_valid: ind_end_valid]
tab_test = tab[ind_start_test: ind_end_test]
img_test = img[ind_start_test: ind_end_test]
return tab_train, tab_valid, tab_test, img_train, img_valid, img_test
# Generate Data Parameters
nb_obj = 5000
#seed = 47283
image_side_length = 64
x_min, x_max = 10, 54
y_min, y_max = 10, 54
radius_min, radius_max = 4,30
show_plot = True
# Generate Data
tab_a, img_a = generate_dataset(nb_obj, image_side_length=image_side_length, radius_min=radius_min, radius_max=radius_max, x_min=x_min, x_max=x_max, y_min=y_min, y_max=y_max, shape='rect', show_plot=show_plot)
tab_b, img_b = generate_dataset(nb_obj, image_side_length=image_side_length, radius_min=radius_min, radius_max=radius_max, x_min=x_min, x_max=x_max, y_min=y_min, y_max=y_max, shape='circ', show_plot=show_plot, index_start=nb_obj)
# combine data
tab = combine_data([tab_a, tab_b])
img = combine_data([img_a, img_b], data_type='img')
# randomize data
tab, img = randomize_data(tab, img, verbose=True)
print('range', np.min(img), np.max(img))
# save data
f_tab = 'test_generate_pipeline_circle_data.csv'
f_img = 'test_generate_pipeline_circle_image.npy'
save_data(f_tab, f_img, tab, img, verbose=False )
```
# Example: read data file and prepare data for network
```
# read data from file
data_list = pd.read_csv(f_tab)
img_list = np.load(f_img)
print('range', np.min(img), np.max(img))
# Training parameters
batch_size = 20
num_classes = 2
epochs = 5
train_me = True
nb_train = 1000
nb_valid = 100
nb_test = 1000
img_rows = img_cols = img_list.shape[1]
# Prepare data
# ... split data
output = split_data(nb_train, nb_valid, nb_test, tab, img, printcheck=0)
y_train_temp, y_valid_temp, y_test_temp, x_train, x_valid, x_test = output
print(np.min(x_train), np.max(x_train))
# ... identify value to train on
y_train = y_train_temp['shape'].values
y_valid = y_valid_temp['shape'].values
y_test = y_test_temp['shape'].values
print("X train, valid, test shapes:", "\n", x_train.shape,"\n", x_valid.shape,"\n", x_test.shape)
print("y train, valid, test shapes:", "\n", y_train.shape,"\n", y_valid.shape,"\n", y_test.shape)
''' MY DATA
'''
from __future__ import print_function
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K
if K.image_data_format() == 'channels_first':
x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
x_valid = x_valid.reshape(x_valid.shape[0], 1, img_rows, img_cols)
x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
input_shape = (1, img_rows, img_cols)
else:
x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
x_valid = x_valid.reshape(x_valid.shape[0], img_rows, img_cols, 1)
x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
x_train = x_train.astype('float32')
x_valid = x_valid.astype('float32')
x_test = x_test.astype('float32')
print('range', np.min(x_train), np.max(x_train))
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_valid.shape[0], 'valid samples')
print(x_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_valid = keras.utils.to_categorical(y_valid, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
# create model
model = Sequential()
model.add(Conv2D(32, kernel_size=(2, 2), activation='relu', input_shape=input_shape))
#model.add(Conv2D(64, kernel_size=(2, 2), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.3))
model.add(Flatten())
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(num_classes, activation='softmax'))
# compile model
model.compile(loss=keras.losses.categorical_crossentropy, optimizer=keras.optimizers.Adadelta(), metrics=['accuracy'])
if train_me:
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
validation_data=(x_valid, y_valid))
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
a = np.array([1.])
b = a.astype('float32')
print(a, b)
import PIL.ImageDraw as ImageDraw,PIL.Image as Image, PIL.ImageShow as ImageShow
im = Image.new("RGB", (400,300))
draw = ImageDraw.Draw(im)
draw.arc((100,100,300,200),0,270,fill=255)
im.show()
```
| github_jupyter |
# <img style="float: left; padding-right: 10px; width: 45px" src="https://raw.githubusercontent.com/Harvard-IACS/2018-CS109A/master/content/styles/iacs.png"> CS-109B Introduction to Data Science
## Lab 6: Convolutional Neural Networks 2
**Harvard University**<br>
**Spring 2020**<br>
**Instructors:** Mark Glickman, Pavlos Protopapas, and Chris Tanner<br>
**Lab Instructors:** Chris Tanner and Eleni Angelaki Kaxiras<br>
**Content:** Eleni Angelaki Kaxiras, Cedric Flamant, Pavlos Protopapas
---
```
# RUN THIS CELL TO PROPERLY HIGHLIGHT THE EXERCISES
import requests
from IPython.core.display import HTML
styles = requests.get("https://raw.githubusercontent.com/Harvard-IACS/2019-CS109B/master/content/styles/cs109.css").text
HTML(styles)
```
## Learning Goals
In this lab we will continue with Convolutional Neural Networks (CNNs), will look into the `tf.data` interface which enables us to build complex input pipelines for our data. We will also touch upon visualization techniques to peak into our CNN's hidden layers.
By the end of this lab, you should be able to:
- know how a CNN works from start to finish
- use `tf.data.Dataset` to import and, if needed, transform, your data for feeding into the network. Transformations might include normalization, scaling, tilting, resizing, or applying other data augmentation techniques.
- understand how `saliency maps` are implemented with code.
<a id=top></a>
## Table of Contents
1. **Part 1**: [Beginning-to-end Convolutional Neural Networks](#part1).
2. **Part 2**: [Image Pipelines with `tf.data.Dataset`](#part2).
3. **Part 3**: [Hidden Layer Visualization, Saliency Maps](#part3).
```
import numpy as np
from scipy.optimize import minimize
from sklearn.utils import shuffle
import matplotlib.pyplot as plt
plt.rcParams["figure.figsize"] = (5,5)
%matplotlib inline
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Dense, Conv2D, Conv1D, MaxPooling2D, MaxPooling1D,\
Dropout, Flatten, Activation, Input
from tensorflow.keras.optimizers import Adam, SGD, RMSprop
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.metrics import AUC, Precision, Recall, FalsePositives, \
FalseNegatives, TruePositives, TrueNegatives
from tensorflow.keras.preprocessing import image
from tensorflow.keras.regularizers import l2
from __future__ import absolute_import, division, print_function, unicode_literals
tf.keras.backend.clear_session() # For easy reset of notebook state.
print(tf.__version__) # You should see a > 2.0.0 here!
from tf_keras_vis.utils import print_gpus
print_gpus()
## Additional Packages required if you don't already have them
# While in your conda environment,
# imageio
# Install using "conda install imageio"
# pillow
# Install using "conda install pillow"
# tensorflow-datasets
# Install using "conda install tensorflow-datasets"
# tf-keras-vis
# Install using "pip install tf-keras-vis"
# tensorflow-addons
# Install using "pip install tensorflow-addons"
from tf_keras_vis.saliency import Saliency
from tf_keras_vis.utils import normalize
import tf_keras_vis.utils as utils
from matplotlib import cm
from tf_keras_vis.gradcam import Gradcam
np.random.seed(109)
tf.random.set_seed(109)
```
## Part 0: Running on SEAS JupyterHub
**PLEASE READ**: [Instructions for Using SEAS JupyterHub](https://canvas.harvard.edu/courses/65462/pages/instructions-for-using-seas-jupyterhub?module_item_id=638544)
SEAS and FAS are providing you with a platform in AWS to use for the class (accessible from the 'Jupyter' menu link in Canvas). These are AWS p2 instances with a GPU, 10GB of disk space, and 61 GB of RAM, for faster training for your networks. Most of the libraries such as keras, tensorflow, pandas, etc. are pre-installed. If a library is missing you may install it via the Terminal.
**NOTE: The AWS platform is funded by SEAS and FAS for the purposes of the class. It is FREE for you - not running against your personal AWS credit. For this reason you are only allowed to use it for purposes related to this course, and with prudence.**
**Help us keep this service: Make sure you stop your instance as soon as you do not need it. Your instance will terminate after 30 min of inactivity.**

*source: CS231n Stanford, Google Cloud Tutorial*
<a id=part1></a>
## Part 1: Beginning-to-end Convolutional Neural Networks

*image [source](http://www.wildml.com/2015/11/understanding-convolutional-neural-networks-for-nlp/)*
<BR><BR>
We will go through the various steps of training a CNN, including:
- difference between cross-validation and validation
- specifying a loss, metrics, and an optimizer,
- performing validation,
- using callbacks, specifically `EarlyStopping`, which stops the training when training is no longer improving the validation metrics,
- learning rate significance
<BR><BR>
<div class="exercise" style="background-color:#b3e6ff"><b>Table Exercise</b>: Use the whiteboard next to your table to draw a CNN from start to finish as per the instructions. We will then draw it together in class.</div>
<a id=part2></a> [Back to Table of Contents](#top)
## Part 2: Image Preprocessing: Using `tf.data.Dataset`
```
import tensorflow_addons as tfa
import tensorflow_datasets as tfds
```
`tf.data` API in `tensorflow` enables you to build complex **input pipelines** from simple, reusable pieces. For example, the pipeline for an image model might aggregate data from files in a distributed file system, apply random perturbations to each image, and merge randomly selected images into a batch for training.
The pipeline for a text model might involve extracting symbols from raw text data, converting them to embedding identifiers with a lookup table, and batching together sequences of different lengths. The `tf.data API` makes it possible to handle large amounts of data, read from different data formats, and perform complex transformations.
The `tf.data API` introduces a `tf.data.Dataset` that represents a sequence of **elements**, consisting of one or more **components**. For example, in an image pipeline, an element might be a single training example, with a pair of tensor components representing the image and its label.
To create an input pipeline, you must start with a data **source**. For example, to construct a Dataset from data in memory, you can use `tf.data.Dataset.from_tensors()` or `tf.data.Dataset.from_tensor_slices()`. Alternatively, if your input data is stored in a file in the recommended TFRecord format, you can use `tf.data.TFRecordDataset()`.
The Dataset object is a Python iterable. You may view its elements using a for loop:
```
dataset = tf.data.Dataset.from_tensor_slices(tf.random.uniform([4, 10], minval=1, maxval=10, dtype=tf.int32))
for elem in dataset:
print(elem.numpy())
```
Once you have a Dataset object, you can **transform** it into a new Dataset by chaining method calls on the `tf.data.Dataset` object. For example, you can apply per-element transformations such as `Dataset.map()`, and multi-element transformations such as `Dataset.batch()`. See the [documentation](https://www.tensorflow.org/api_docs/python/tf/data/Dataset) for `tf.data.Dataset` for a complete list of transformations.
The `map` function takes a function and returns a new and augmented dataset.
```
dataset = dataset.map(lambda x: x*2)
for elem in dataset:
print(elem.numpy())
```
Datasets are powerful objects because they are effectively dictionaries that can store tensors and other data such as the response variable. We can also construct them by passing small sized `numpy` arrays, such as in the following example.
Tensorflow has a plethora of them:
```
# uncomment to see available datasets
#tfds.list_builders()
```
#### `mnist` dataset
```
# load mnist
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
x_train.shape, y_train.shape
# take only 10 images for simplicity
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
test_dataset = tf.data.Dataset.from_tensor_slices((x_test, y_test))
# In case you want to retrieve the images/numpy arrays
for element in iter(train_dataset.take(1)):
image = element[0].numpy()
print(image.shape)
print(image.shape)
plt.figure()
plt.imshow(image, cmap='gray')
plt.show()
```
Once you have your Model, you may pass a Dataset instance directly to the methods `fit()`, `evaluate()`, and `predict()`. The difference with the way we have been previously using these methods is that we are not passing the images and labels separately. They are now both in the Dataset object.
```
model.fit(train_dataset, epochs=3)
model.evaluate(test_dataset)
```
#### Data Augmentation
```
fig, axes = plt.subplots(1,6, figsize=(10,3))
for i, (image, label) in enumerate(train_dataset.take(4)):
axes[i].imshow(image)
axes[i].set_title(f'{label:.2f}')
image_flip_up = tf.image.flip_up_down(np.expand_dims(image, axis=2)).numpy()
image_rot_90 = tf.image.rot90(np.expand_dims(image, axis=2), k=1).numpy()
axes[4].imshow(image_flip_up.reshape(28,-1))
axes[4].set_title(f'{label:.2f}-flip')
axes[5].imshow(image_rot_90.reshape(28,-1))
axes[5].set_title(f'{label:.2f}-rot90')
plt.show();
```
#### Note:
The tf.data API is a set of utilities in TensorFlow 2.0 for loading and preprocessing data in a way that's fast and scalable. You also have the option to use the `keras` [`ImageDataGenerator`](https://www.tensorflow.org/api_docs/python/tf/keras/preprocessing/image/ImageDataGenerator), that accepts `numpy` arrays, instead of the Dataset. We think it's good for you to learn to use Datasets.
As a general rule, for input to NNs, Tensorflow recommends that you use `numpy` arrays if your data is small and fit in memory, and `tf.data.Datasets` otherwise.
#### References:
1. `tf.data.Dataset` [Documentation](https://www.tensorflow.org/api_docs/python/tf/data/Dataset).
2. Import [`numpy` arrays in Tensorflow](https://www.tensorflow.org/tutorials/load_data/numpy)
### The Street View House Numbers (SVHN) Dataset
We will play with the SVHN real-world image dataset. It can be seen as similar in flavor to MNIST (e.g., the images are of small cropped digits), but incorporates an order of magnitude more labeled data (over 600,000 digit images) and comes from a significantly harder, unsolved, real world problem (recognizing digits and numbers in natural scene images). SVHN is obtained from house numbers in Google Street View images.
All digits have been resized to a fixed resolution of 32-by-32 pixels. The original character bounding boxes are extended in the appropriate dimension to become square windows, so that resizing them to 32-by-32 pixels does not introduce aspect ratio distortions. Nevertheless this preprocessing introduces some distracting digits to the sides of the digit of interest. Loading the .mat files creates 2 variables: X which is a 4-D matrix containing the images, and y which is a vector of class labels. To access the images, $X(:,:,:,i)$ gives the i-th 32-by-32 RGB image, with class label $y(i)$.

*Yuval Netzer, Tao Wang, Adam Coates, Alessandro Bissacco, Bo Wu, Andrew Y. Ng Reading Digits in Natural Images with Unsupervised Feature Learning NIPS Workshop on Deep Learning and Unsupervised Feature Learning 2011.*
```
# Will take some time but will only load once
train_svhn_cropped, test_svhn_cropped = tfds.load('svhn_cropped', split=['train', 'test'], shuffle_files=False)
isinstance(train_svhn_cropped, tf.data.Dataset)
# # convert to numpy if needed
features = next(iter(train_svhn_cropped))
images = features['image'].numpy()
labels = features['label'].numpy()
images.shape, labels.shape
for i, element in enumerate(train_svhn_cropped):
if i==1: break;
image = element['image']
label = element['label']
print(label)
# batch_size indicates that the dataset should be divided in batches
# each consisting of 4 elements (a.k.a images and their labels)
# take_size chooses a number of these batches, e.g. 3 of them for display
batch_size = 4
take_size = 3
# Plot
fig, axes = plt.subplots(take_size,batch_size, figsize=(10,10))
for i, element in enumerate(train_svhn_cropped.batch(batch_size).take(take_size)):
for j in range(4):
image = element['image'][j]
label = element['label'][j]
axes[i][j].imshow(image)
axes[i][j].set_title(f'true label={label:d}')
```
Here we convert from a collection of dictionaries to a collection of tuples. We will still have a `tf.data.Dataset`
```
def normalize_image(img):
    """Scale an integer image tensor from [0, 255] down to float32 [0, 1]."""
    as_float = tf.cast(img, tf.float32)
    return as_float / 255.
def normalize_dataset(element):
    """Convert a dataset element dict {'image': ..., 'label': ...} into a
    (normalized image, label) tuple, as expected by `model.fit`."""
    return normalize_image(element['image']), element['label']
train_svhn = train_svhn_cropped.map(normalize_dataset)
test_svhn = test_svhn_cropped.map(normalize_dataset)
isinstance(train_svhn, tf.data.Dataset)
```
#### Define our CNN model
```
n_filters = 16
input_shape = (32, 32, 3)
svhn_model = Sequential()
svhn_model.add(Conv2D(n_filters, (3, 3), activation='relu', input_shape=input_shape))
svhn_model.add(MaxPooling2D((2, 2)))
svhn_model.add(Conv2D(n_filters*2, (3, 3), activation='relu'))
svhn_model.add(MaxPooling2D((2, 2)))
svhn_model.add(Conv2D(n_filters*4, (3, 3), activation='relu'))
svhn_model.add(Flatten())
svhn_model.add(Dense(n_filters*2, activation='relu'))
svhn_model.add(Dense(10, activation='softmax'))
svhn_model.summary()
loss = keras.losses.sparse_categorical_crossentropy # we use this because we did not 1-hot encode the labels
optimizer = Adam(lr=0.001)
metrics = ['accuracy']
# Compile model
svhn_model.compile(optimizer=optimizer,
loss=loss,
metrics=metrics)
```
#### With Early Stopping
```
%%time
batch_size = 64
epochs=15
callbacks = [
keras.callbacks.EarlyStopping(
# Stop training when `val_accuracy` is no longer improving
monitor='val_accuracy',
# "no longer improving" being further defined as "for at least 2 epochs"
patience=2,
verbose=1)
]
history = svhn_model.fit(train_svhn.batch(batch_size), #.take(50), # change 50 only
epochs=epochs,
callbacks=callbacks,
validation_data=test_svhn.batch(batch_size)) #.take(50))
def print_history(history):
    """Plot training vs. validation curves from a Keras History object.

    Draws two separate figures: one for accuracy and one for loss, each
    with the training curve in blue and the validation curve in green.
    """
    for metric, ylabel in (('accuracy', 'Accuracy'), ('loss', 'Loss')):
        fig, ax = plt.subplots(1, 1, figsize=(8, 4))
        ax.plot(history.history[metric], 'b', label='train')
        ax.plot(history.history['val_' + metric], 'g', label='val')
        ax.set_xlabel(r'Epoch', fontsize=20)
        ax.set_ylabel(ylabel, fontsize=20)
        ax.legend()
        ax.tick_params(labelsize=20)
    plt.show();
print_history(history)
svhn_model.save('svhn_good.h5')
```
#### Too High Learning Rate
```
loss = keras.losses.sparse_categorical_crossentropy
optimizer = Adam(lr=0.5) # really big learning rate
metrics = ['accuracy']
# Compile model
svhn_model.compile(optimizer=optimizer,
loss=loss,
metrics=metrics)
%%time
batch_size = 64
epochs=10
history = svhn_model.fit(train_svhn.batch(batch_size), #.take(50), # change 50 to see the difference
epochs=epochs,
validation_data=test_svhn.batch(batch_size)) #.take(50))
print_history(history)
fig.savefig('../images/train_high_lr.png')
```
#### Too Low Learning Rate
Experiment with the learning rate using a small sample of the training set by using .take(num) which takes only `num` number of samples.
```
history = svhn_model.fit(train_svhn.batch(batch_size).take(50))
```
```
#loss = keras.losses.categorical_crossentropy
loss = keras.losses.sparse_categorical_crossentropy # we use this because we did not 1-hot encode the labels
optimizer = Adam(lr=1e-5) # very low learning rate
metrics = ['accuracy']
# Compile model
svhn_model.compile(optimizer=optimizer,
loss=loss,
metrics=metrics)
%%time
batch_size = 32
epochs=10
history = svhn_model.fit(train_svhn.batch(batch_size).take(50),
epochs=epochs,
validation_data=test_svhn.batch(batch_size)) #.take(50))
print_history(history)
fig.savefig('../images/train_50.png')
```
#### Changing the batch size
```
#loss = keras.losses.categorical_crossentropy
loss = keras.losses.sparse_categorical_crossentropy # we use this because we did not 1-hot encode the labels
optimizer = Adam(lr=0.001)
metrics = ['accuracy']
# Compile model
svhn_model.compile(optimizer=optimizer,
loss=loss,
metrics=metrics)
%%time
batch_size = 2
epochs=5
history = svhn_model.fit(train_svhn.batch(batch_size),
epochs=epochs,
validation_data=test_svhn.batch(batch_size))
print_history(history)
```
<a id=part3></a> [Back to Table of Contents](#top)
## Part 3: Hidden Layer Visualization, Saliency Maps
[Deep Inside Convolutional Networks: Visualising Image Classification Models and Saliency Maps](https://arxiv.org/pdf/1312.6034.pdf)
It is often said that Deep Learning Models are black boxes. But we can peak into these boxes.
#### Let's train a small model on MNIST
```
from tensorflow.keras.datasets import mnist
# load MNIST data
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train.min(), x_train.max()
x_train = x_train.reshape((60000, 28, 28, 1)) # Reshape to get third dimension
x_test = x_test.reshape((10000, 28, 28, 1))
x_train = x_train.astype('float32') / 255 # Normalize between 0 and 1
x_test = x_test.astype('float32') / 255
# Convert labels to categorical data
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
x_train.min(), x_train.max()
# (train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.mnist.load_data(
# path='mnist.npz')
x_train.shape
class_idx = 0
indices = np.where(y_test[:, class_idx] == 1.)[0]
# pick some random input from here.
idx = indices[0]
img = x_test[idx]
# pick some random input from here.
idx = indices[0]
# Lets sanity check the picked image.
from matplotlib import pyplot as plt
%matplotlib inline
plt.rcParams['figure.figsize'] = (18, 6)
#plt.imshow(test_images[idx][..., 0])
img = x_test[idx] * 255
img = img.astype('float32')
img = np.squeeze(img) # trick to reduce img from (28,28,1) to (28,28)
plt.imshow(img, cmap='gray');
input_shape=(28, 28, 1)
num_classes = 10
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3),
activation='relu',
input_shape=input_shape))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax', name='preds'))
model.summary()
model.compile(loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.Adam(),
metrics=['accuracy'])
num_samples = x_train.shape[0]
num_samples
%%time
batch_size = 32
epochs = 10
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
validation_split=0.2,
shuffle=True)
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
```
### Let's look at the layers with `tf.keras.viz`
https://pypi.org/project/tf-keras-vis/
And an example: https://github.com/keisen/tf-keras-vis/blob/master/examples/visualize_conv_filters.ipynb
We can identify layers by their layer id:
```
# Alternatively we can specify layer_id as -1 since it corresponds to the last layer.
layer_id = 0
model.layers[layer_id].name, model.layers[-2].name
```
Or you may look at their output
```
output = [model.layers[layer_id].output]
output
# # You may also replace part of your NN with other parts,
# # e.g. replace the activation function of the last layer
# # with a linear one
# model.layers[-1].activation = tf.keras.activations.linear
```
Generate Feature Maps
```
def get_feature_maps(model, layer_id, input_image):
    """Return the activation maps one layer produces for a single image.

    Parameters:
        model (tf.keras.Model): Model to examine
        layer_id (int): Which layer's (from zero) output to return
        input_image (ndarray): The input image (without a batch dimension)
    Returns:
        maps (ndarray): Feature maps emitted by the requested layer,
        transposed so channels come first.
    """
    # Build a probe model that stops at the requested layer.
    probe = Model(inputs=[model.input], outputs=[model.layers[layer_id].output])
    # Add a batch axis, predict, strip the batch axis back off.
    batch = np.expand_dims(input_image, axis=0)
    activations = probe.predict(batch)[0, :, :, :]
    # (H, W, C) -> (C, H, W) so maps[i] is one channel's activation image.
    return activations.transpose((2, 0, 1))
# Choose an arbitrary image
image_id = 67
img = x_test[image_id,:,:,:]
img.shape
img_to_show = np.squeeze(img)
plt.imshow(img_to_show, cmap='gray')
# Was this successfully predicted?
img_batch = (np.expand_dims(img,0))
print(img_batch.shape)
predictions_single = model.predict(img_batch)
print(f'Prediction is: {np.argmax(predictions_single[0])}')
# layer id should be for a Conv layer, a Flatten will not do
maps = get_feature_maps(model, layer_id, img)# [0:10]
maps.shape
# Plot just a subset
maps = get_feature_maps(model, layer_id, img)[0:10]
fig, ax = plt.subplots()
img = np.squeeze(img)
ax.imshow(img + 0.5)
label = y_test[image_id,:]
label = int(np.where(label == 1.)[0])
ax.set_title(f'true label = {label}')
f, ax = plt.subplots(3,3, figsize=(8,8))
for i, axis in enumerate(ax.ravel()):
axis.imshow(maps[i], cmap='gray')
```
### `tf_keras_vis.gradcam.Gradcam`
[Grad-CAM: Visual Explanations from Deep Networks via Gradient-based Localization](https://arxiv.org/pdf/1610.02391.pdf)
```
# The same `model_modifier` was previously defined twice back-to-back; the
# second definition silently rebound the name. One definition suffices.
def model_modifier(m):
    """Replace the last layer's softmax activation with a linear one, in place.

    Saliency/Grad-CAM gradients are then taken with respect to raw class
    scores rather than softmax probabilities (the setup recommended by
    tf-keras-vis).
    """
    m.layers[-1].activation = tf.keras.activations.linear
# Create Saliency object
saliency = Saliency(model, model_modifier)
# Define loss function. Pass it the correct class label.
loss = lambda output: tf.keras.backend.mean(output[:, tf.argmax(y_test[image_id])])
# Generate saliency map
print(img_batch.shape)
saliency_map = saliency(loss, img_batch)
saliency_map = normalize(saliency_map)
f, ax = plt.subplots(nrows=1, ncols=2, figsize=(10, 5)) #, subplot_kw={'xticks': [], 'yticks': []})
ax[0].imshow(saliency_map[0], cmap='jet')
ax[1].imshow(img);
# from matplotlib import cm
# from tf_keras_vis.gradcam import Gradcam
# Create Gradcam object
gradcam = Gradcam(model, model_modifier)
# Generate heatmap with GradCAM
cam = gradcam(loss, img_batch)
cam = normalize(cam)
f, ax = plt.subplots(nrows=1, ncols=1, figsize=(10, 5),
subplot_kw={'xticks': [], 'yticks': []})
for i in range(len(cam)):
heatmap = np.uint8(cm.jet(cam[i])[..., :3] * 255)
ax.imshow(img)
ax.imshow(heatmap, cmap='jet', alpha=0.5)
```
| github_jupyter |
# Logical Operators
This notebook demonstrates use of the `ge` infix function for the comparison of two tensors. In this particular example, the probability density `f` at two-dimensional location `(x, y)` is compared against a value `u` drawn uniformly from the unit interval `[0, 1)`. If `f(x, y) >= u`, or `f(x, y) ge u`, the point `(x, y)` [is accepted, otherwise rejected](#Accept-or-Reject-Samples). Infix operators `eq` (`==`), `ge` (`>=`), `le` (`<=`), `gt` (`>`), `lt` (`<`) are implemented for these containers/tensors:
```
RandomAccessible
RandomAccessibleInterval
RealRandomAccessible
RealRandomAccessibleRealInterval
```
and for scalars with any of these tensors.
## Dependencies and Imports
```
// set up dependencies
// use local maven repository; not yet deployed to remote maven repositories.
@file:Repository("*mavenLocal")
@file:Repository("https://maven.scijava.org/content/groups/public")
@file:Repository("https://jitpack.io")
// uncomment to search in your local maven repo
// requires installation into local maven repository (./gradlew build publishToMavenLocal)
@file:DependsOn("org.ntakt:ntakt:0.1.0-SNAPSHOT")
// uncomment to search in jitpack (TODO)
// @file:DependsOn("com.github.saalfeldlab:ntakt:<tbd>")
%use lets-plot
import kotlin.math.PI
import kotlin.math.pow
import kotlin.random.Random
import net.imglib2.RandomAccessibleInterval as RAI
import org.ntakt.*
import net.imglib2.type.numeric.real.DoubleType
import net.imglib2.view.Views
```
## Set up Data
```
// Fixed seed so the uniform field (and hence the accepted samples) is reproducible.
val rng = Random(100)
// Grid size: 600 x 400 points.
val dims = longArrayOf(600, 400)
// Centre the Gaussian in the middle of the grid.
val mean = dims.map { it / 2.0 }.toDoubleArray()
// Per-axis variances: (extent / 10) squared.
val sigma = dims.map { it / 10.0 }.map{ it * it }.toDoubleArray()
val sigmaInverse = sigma.map { 1.0 / it }.toDoubleArray()
// NOTE(review): for a diagonal covariance the determinant is the *product*
// of the variances; this sums their squares — confirm intent (it only
// rescales the constant normalizationFactor below).
val sigmaDeterminant = sigma.map { it * it }.sum()
val twoPiPow = (2*PI).pow(2)
val normalizationFactor = (twoPiPow * sigmaDeterminant).pow(-0.5)
// Gaussian exponent as a lazily evaluated real-valued function of position.
val exponent = ntakt.function(2, { 0.0.asType() }) { p, t ->
    val dx = p.getDoublePosition(0) - mean[0];
    val dy = p.getDoublePosition(1) - mean[1];
    t.set(-0.5 * (dx * dx * sigmaInverse[0] + dy * dy * sigmaInverse[1]))
}
val gaussianInfinite = exponent.exp() * normalizationFactor
// Rasterize the continuous function onto the finite grid.
val gaussian = gaussianInfinite.rastered.interval(*dims)
// Independent U[0,1) draw per grid point, for the accept/reject step.
val uniform = ntakt.doubles(*dims) { rng.nextDouble() }
```
### Visualize Conditional Distributions
```
val bellCurvesAtY =
Views.hyperSlice(gaussian, 1, 100L).flatIterable.map { it.realDouble } +
Views.hyperSlice(gaussian, 1, 200L).flatIterable.map { it.realDouble } +
Views.hyperSlice(gaussian, 1, 250L).flatIterable.map { it.realDouble }
val dY = mapOf<String, Any>(
"X" to DoubleArray(dims[0].toInt()) { it.toDouble() }.let { it + it + it },
"bell curve at y" to bellCurvesAtY,
"y" to Array(dims[0].toInt()) { "100" } + Array(dims[0].toInt()) { "200" } + Array(dims[0].toInt()) { "250" }
)
val p = lets_plot(dY) { x = "X"; color = "y" }
p +
geom_line { y = "bell curve at y" } +
ggsize(800, 500)
val bellCurvesAtX =
Views.hyperSlice(gaussian, 0, 200L).flatIterable.map { it.realDouble } +
Views.hyperSlice(gaussian, 0, 300L).flatIterable.map { it.realDouble } +
Views.hyperSlice(gaussian, 0, 350L).flatIterable.map { it.realDouble }
val dX = mapOf<String, Any>(
"X" to DoubleArray(dims[1].toInt()) { it.toDouble() }.let { it + it + it },
"bell curve at x" to bellCurvesAtX,
"x" to Array(dims[1].toInt()) { "200" } + Array(dims[1].toInt()) { "300" } + Array(dims[1].toInt()) { "350" }
)
val p = lets_plot(dX) { x = "X"; color = "x" }
p +
geom_line { y = "bell curve at x" } +
ggsize(800, 500)
```
## Accept or Reject Samples
```
// Accept each grid point where the density is >= the scaled uniform draw;
// `ge` yields a boolean mask over the whole grid.
val sampled = gaussian ge uniform * normalizationFactor
// Coordinates of all accepted (true) grid points.
val points = sampled.where()
```
### Visualize samples
```
val scatterData = mapOf<String, Any>(
"X" to points.map { it.getDoublePosition(0) } + listOf(mean[0]),
"Y" to points.map { it.getDoublePosition(1) } + listOf(mean[1]),
"label" to Array(points.size) { "sample" } + arrayOf("mean"),
"alpha" to DoubleArray(points.size) { 0.1 } + doubleArrayOf(1.0)
)
lets_plot(scatterData) { x = "X"; y = "Y"; color = "label"; alpha = "alpha" } +
geom_point(size = 3.0) +
geom_density2d(color="black") +
ggsize(900, 600)
val sampleMean = Pair(
points.map{ it.getDoublePosition(0) }.sum() / points.size,
points.map{ it.getDoublePosition(1) }.sum() / points.size
)
val (meanX, meanY) = sampleMean
sampleMean
val sampleVariance = points.fold(DoubleArray(3)) { m, p ->
val dX = p.getDoublePosition(0) - meanX
val dY = p.getDoublePosition(1) - meanY
m[0] += dX*dX
m[1] += dX*dY
m[2] += dY*dY
m
}.map { it / (points.size - 1) }
sampleVariance
sigma[0] to sigma[1]
```
| github_jupyter |
# Global And Local Alignment
```
import sys
import numpy as np
from collections import Counter
import re
from collections import defaultdict
from functools import partial
```
The score_function returns a score for a pair of characters. These may be the same, different, or one may be missing. The code supports three scoring frameworks: one where all match/mismatch/indel scores have fixed values, one based on BLOSUM62, and one based on PAM250. Other scoring matrices can be incorporated.
```
def score_function(score_name="BASIC", A=1, B=0, C=1):
    """Build and return a pairwise scoring function score(a, b) -> int.

    Parameters:
        score_name (str): one of 'BASIC', 'BLOSUM62', 'PAM250'.
            'BASIC' scores A for a match, -B for a mismatch, -C for an
            indel. The matrix schemes look the pair up in the named
            substitution matrix and use -C as the indel penalty
            (A and B are ignored for them).
        A (int): match reward (BASIC only).
        B (int): mismatch penalty, applied negated (BASIC only).
        C (int): indel penalty, applied negated (all schemes).

    Raises:
        ValueError: if score_name is not a supported scheme name.
    """
    def score(a, b, indel, s):
        """Return the match/mismatch/indel score for the symbols a, and b."""
        # '-' marks a gap: any gap is an indel; otherwise consult the matrix
        return indel if '-' in [a,b] else s[a][b]
    if "BLOSUM62" == score_name:
        inputString = """
A C D E F G H I K L M N P Q R S T V W Y
A 4 0 -2 -1 -2 0 -2 -1 -1 -1 -1 -2 -1 -1 -1 1 0 0 -3 -2
C 0 9 -3 -4 -2 -3 -3 -1 -3 -1 -1 -3 -3 -3 -3 -1 -1 -1 -2 -2
D -2 -3 6 2 -3 -1 -1 -3 -1 -4 -3 1 -1 0 -2 0 -1 -3 -4 -3
E -1 -4 2 5 -3 -2 0 -3 1 -3 -2 0 -1 2 0 0 -1 -2 -3 -2
F -2 -2 -3 -3 6 -3 -1 0 -3 0 0 -3 -4 -3 -3 -2 -2 -1 1 3
G 0 -3 -1 -2 -3 6 -2 -4 -2 -4 -3 0 -2 -2 -2 0 -2 -3 -2 -3
H -2 -3 -1 0 -1 -2 8 -3 -1 -3 -2 1 -2 0 0 -1 -2 -3 -2 2
I -1 -1 -3 -3 0 -4 -3 4 -3 2 1 -3 -3 -3 -3 -2 -1 3 -3 -1
K -1 -3 -1 1 -3 -2 -1 -3 5 -2 -1 0 -1 1 2 0 -1 -2 -3 -2
L -1 -1 -4 -3 0 -4 -3 2 -2 4 2 -3 -3 -2 -2 -2 -1 1 -2 -1
M -1 -1 -3 -2 0 -3 -2 1 -1 2 5 -2 -2 0 -1 -1 -1 1 -1 -1
N -2 -3 1 0 -3 0 1 -3 0 -3 -2 6 -2 0 0 1 0 -3 -4 -2
P -1 -3 -1 -1 -4 -2 -2 -3 -1 -3 -2 -2 7 -1 -2 -1 -1 -2 -4 -3
Q -1 -3 0 2 -3 -2 0 -3 1 -2 0 0 -1 5 1 0 -1 -2 -2 -1
R -1 -3 -2 0 -3 -2 0 -3 2 -2 -1 0 -2 1 5 -1 -1 -3 -3 -2
S 1 -1 0 0 -2 0 -1 -2 0 -2 -1 1 -1 0 -1 4 1 -2 -3 -2
T 0 -1 -1 -1 -2 -2 -2 -1 -1 -1 -1 0 -1 -1 -1 1 5 0 -2 -2
V 0 -1 -3 -2 -1 -3 -3 3 -2 1 1 -3 -2 -2 -3 -2 0 4 -3 -1
W -3 -2 -4 -3 1 -2 -2 -3 -3 -2 -1 -4 -4 -2 -3 -3 -2 -3 11 2
Y -2 -2 -3 -2 3 -3 2 -1 -2 -1 -1 -2 -3 -1 -2 -2 -2 -1 2 7
"""
    elif "PAM250" == score_name:
        inputString = """
A C D E F G H I K L M N P Q R S T V W Y
A 2 -2 0 0 -3 1 -1 -1 -1 -2 -1 0 1 0 -2 1 1 0 -6 -3
C -2 12 -5 -5 -4 -3 -3 -2 -5 -6 -5 -4 -3 -5 -4 0 -2 -2 -8 0
D 0 -5 4 3 -6 1 1 -2 0 -4 -3 2 -1 2 -1 0 0 -2 -7 -4
E 0 -5 3 4 -5 0 1 -2 0 -3 -2 1 -1 2 -1 0 0 -2 -7 -4
F -3 -4 -6 -5 9 -5 -2 1 -5 2 0 -3 -5 -5 -4 -3 -3 -1 0 7
G 1 -3 1 0 -5 5 -2 -3 -2 -4 -3 0 0 -1 -3 1 0 -1 -7 -5
H -1 -3 1 1 -2 -2 6 -2 0 -2 -2 2 0 3 2 -1 -1 -2 -3 0
I -1 -2 -2 -2 1 -3 -2 5 -2 2 2 -2 -2 -2 -2 -1 0 4 -5 -1
K -1 -5 0 0 -5 -2 0 -2 5 -3 0 1 -1 1 3 0 0 -2 -3 -4
L -2 -6 -4 -3 2 -4 -2 2 -3 6 4 -3 -3 -2 -3 -3 -2 2 -2 -1
M -1 -5 -3 -2 0 -3 -2 2 0 4 6 -2 -2 -1 0 -2 -1 2 -4 -2
N 0 -4 2 1 -3 0 2 -2 1 -3 -2 2 0 1 0 1 0 -2 -4 -2
P 1 -3 -1 -1 -5 0 0 -2 -1 -3 -2 0 6 0 0 1 0 -1 -6 -5
Q 0 -5 2 2 -5 -1 3 -2 1 -2 -1 1 0 4 1 -1 -1 -2 -5 -4
R -2 -4 -1 -1 -4 -3 2 -2 3 -3 0 0 0 1 6 0 -1 -2 2 -4
S 1 0 0 0 -3 1 -1 -1 0 -3 -2 1 1 -1 0 2 1 -1 -2 -3
T 1 -2 0 0 -3 0 -1 0 0 -2 -1 0 0 -1 -1 1 3 0 -5 -3
V 0 -2 -2 -2 -1 -1 -2 4 -2 2 2 -2 -1 -2 -2 -1 0 4 -6 -2
W -6 -8 -7 -7 0 -7 -3 -5 -3 -2 -4 -4 -6 -5 2 -2 -5 -6 17 0
Y -3 0 -4 -4 7 -5 0 -1 -4 -1 -2 -2 -5 -4 -4 -3 -3 -2 0 10
"""
    elif "BASIC" == score_name:
        # fixed-value scheme: no matrix to parse, return the closure directly
        return lambda a, b: -C if '-' in [a,b] else A if a == b else -B
    else:
        raise ValueError("score_name constrained to ['BASIC', 'BLOSUM62', 'PAM250']")
    # divide the inputString into lines where each line is a row from the tables above
    lines = inputString.strip().splitlines()
    # determine the order of the amino acids represented in the table (header row)
    amino_acids = "".join(re.split(" +", lines[0].strip()))
    # setup a dictionary of dictionaries to encode the matrix
    target = defaultdict(dict)
    # step through the remaining lines and add them to the target matrix
    for row in lines[1:]:
        # get the amino acid associated with this row and then the row values
        rowId, *values = re.split(" +", row.strip())
        # put the data into the dictionary of dictionaries
        for colId, value in zip(amino_acids, values):
            target[rowId][colId] = int(value)
    # return a function that returns a score for match/mismatch/indel based on data from the matrix
    # and the passed in indel score, C.
    return partial(score, indel=-C, s=target)
```
An internal function _sequence_similarity, computes the sequence similarity between sequences X and Y. It will compute either a global alignment or a local alignment.
```
def _sequence_similarity(X, Y, s, global_seq):
    """
    Compute the similarity between sequences X and Y with dynamic programming.

    Implements both the Needleman-Wunsch (global) algorithm and the local
    alignment variant, selected by `global_seq`.

    Arguments
    ---------
    X: sequence to align (columns of the matrix)
    Y: sequence to align (rows of the matrix)
    s: scoring function taking the two characters to compare
    global_seq: True for global alignment, False for local (cells floored at 0)

    Returns
    -------
    (mat, maxval, index): the filled DP matrix of (directions, value) tuples,
    the optimal score, and the cell where that score occurs.
    """
    def best(*candidates):
        """Collapse (letter, value) pairs into (letters-tied-at-max, max).
        e.g. best(('a',5),('b',3),('c',6),('d',3),('e',6)) == ('ce', 6)."""
        top = max(v for _, v in candidates)
        letters = "".join(d for d, v in candidates if v == top)
        return (letters, top)

    rows, cols = len(Y) + 1, len(X) + 1
    # Local alignment never lets a cell drop below 0; global alignment
    # effectively disables the floor.
    floor = -sys.maxsize if global_seq else 0
    best_value, best_index = -sys.maxsize, (-1, -1)
    mat = np.empty((rows, cols), dtype=tuple)

    # Border column ('v' = gap in X) and border row ('h' = gap in Y).
    for i in range(rows):
        mat[i, 0] = best(("", floor), ("v", i * s('-', '-')))
        if mat[i, 0][1] > best_value:
            best_value, best_index = mat[i, 0][1], (i, 0)
    for j in range(cols):
        mat[0, j] = best(("", floor), ("h", j * s('-', '-')))
        if mat[0, j][1] > best_value:
            best_value, best_index = mat[0, j][1], (0, j)

    # Fill the interior: each cell keeps every direction tied at the maximum.
    for i in range(1, rows):
        for j in range(1, cols):
            gap_in_Y = mat[i, j-1][1] + s('-', X[j-1])        # horizontal move
            pairing = mat[i-1, j-1][1] + s(Y[i-1], X[j-1])    # diagonal move
            gap_in_X = mat[i-1, j][1] + s(Y[i-1], '-')        # vertical move
            mat[i, j] = best(("", floor), ("h", gap_in_Y), ("d", pairing), ("v", gap_in_X))
            if mat[i, j][1] > best_value:
                best_value, best_index = mat[i, j][1], (i, j)

    # Global alignment always ends at the bottom-right corner.
    if global_seq:
        best_index = rows - 1, cols - 1
        best_value = mat[rows - 1, cols - 1][1]
    return (mat, best_value, best_index)
def global_sequence_similarity():
    """Return a similarity function configured for global (Needleman-Wunsch) alignment."""
    return partial(_sequence_similarity, global_seq=True)
def local_sequence_similarity():
    """Return a similarity function configured for local alignment."""
    return partial(_sequence_similarity, global_seq=False)
```
Traceback returns one of potentially many optimal alignments. At each point where a cell has more than one maximal value, we have a bi- or trifurcation, leading to a doubling or tripling in the number of equivalent alignments observed along a particular path.
```
def global_traceback(mat, X, Y):
    """
    Recover one optimal global alignment by following the direction letters
    stored in `mat` from the bottom-right corner back toward the origin.
    At cells holding several maximal directions the 'h' branch is preferred,
    then 'd', then 'v', so one of the equivalent alignments is returned.
    """
    row, col = mat.shape[0] - 1, mat.shape[1] - 1
    top, bottom = "", ""
    while row > 0 or col > 0:
        moves = mat[row, col][0]
        if 'h' in moves:
            # horizontal: consume a symbol of X against a gap
            pair = (X[col - 1], "-")
            col -= 1
        elif 'd' in moves:
            # diagonal: pair up one symbol from each sequence
            pair = (X[col - 1], Y[row - 1])
            row, col = row - 1, col - 1
        elif 'v' in moves:
            # vertical: consume a symbol of Y against a gap
            pair = ("-", Y[row - 1])
            row -= 1
        else:
            break
        top, bottom = pair[0] + top, pair[1] + bottom
    return (top, bottom)
def local_traceback(mat, X, Y):
    """
    Recover one optimal local alignment by following the direction letters
    stored in `mat` backwards from the bottom-right corner, stopping as soon
    as a cell with a non-positive score is reached (the local-alignment
    boundary) or the origin is hit.
    """
    row, col = mat.shape[0] - 1, mat.shape[1] - 1
    top, bottom = "", ""
    while (row > 0 or col > 0) and mat[row, col][1] > 0:
        moves = mat[row, col][0]
        if 'h' in moves:
            # horizontal: symbol of X against a gap
            pair = (X[col - 1], "-")
            col -= 1
        elif 'd' in moves:
            # diagonal: pair one symbol from each sequence
            pair = (X[col - 1], Y[row - 1])
            row, col = row - 1, col - 1
        elif 'v' in moves:
            # vertical: symbol of Y against a gap
            pair = ("-", Y[row - 1])
            row -= 1
        else:
            break
        top, bottom = pair[0] + top, pair[1] + bottom
    return (top, bottom)
```
Print a formatted result wraps sequences at <width> characters and identifies offsets
```
def print_formatted(traceback_results, width=100):
    """
    Print a formatted result: wraps each aligned pair at `width` characters
    and labels every printed row with its start/end offsets. The middle line
    repeats only the characters that match in both sequences.
    """
    def wrap(seq, w):
        """Split `seq` into consecutive chunks of at most `w` characters."""
        return [seq[i:i + w] for i in range(0, len(seq), w)]

    for alignedX, alignedY in traceback_results:
        offset_top = 0
        offset_bot = 0
        print()
        for top, bot in zip(wrap(alignedX, width), wrap(alignedY, width)):
            match_marks = "".join(a if a == b else ' ' for a, b in zip(top, bot))
            print(f'{offset_top:>5} {top}{" " * (width - len(top))} {(offset_top + len(top) - 1):<5}')
            print(f"      {match_marks}{' ' * (width - len(top))}")
            print(f'{offset_bot:>5} {bot}{" " * (width - len(bot))} {(offset_bot + len(bot) - 1):<5}')
            print()
            offset_top += len(top)
            offset_bot += len(bot)
```
Try data from the class notes. Don't use BLOSUM62; instead set indel=-1, match=1, mismatch=0.
```
def trial_1():
    """Global alignment of the class-notes example using the BASIC scheme
    (match=1, mismatch=0, indel=-1)."""
    X, Y = "ACGC", "GACTAC"
    similarity = global_sequence_similarity()
    mat,maxval,index = similarity(X, Y, s=score_function("BASIC", C=1))
    print(maxval,index)
    # crop the matrix at the optimum and print one optimal alignment
    print_formatted([global_traceback(mat[0:index[0]+1, 0:index[1]+1], X, Y)])
trial_1()
```
Try data from Rosalind Question.
```
def trial_2():
    """Global alignment of the Rosalind example using BLOSUM62 with an
    indel penalty of 5."""
    X, Y = "MEANLY", "PLEASANTLY"
    similarity = global_sequence_similarity()
    mat,maxval,index = similarity(X, Y, s=score_function("BLOSUM62", C=5))
    print(maxval,index)
    # crop the matrix at the optimum and print one optimal alignment
    print_formatted([global_traceback(mat[0:index[0]+1, 0:index[1]+1], X, Y)])
trial_2()
```
Try another data set from Rosalind Question.
```
def trial_3():
    """Another Rosalind global-alignment example on two long protein strings.

    The sequences are written as backslash-continued triple-quoted literals,
    so they contain no embedded newlines.
    """
    X, Y = """\
ILYPRQSMICMSFCFWDMWKKDVPVVLMMFLERRQMQSVFSWLVTVKTDCGKGIYNHRKYLGLPTMTAGDWHWIKKQNDPHEW\
FQGRLETAWLHSTFLYWKYFECDAVKVCMDTFGLFGHCDWDQQIHTCTHENEPAIAFLDLYCRHSPMCDKLYPVWDMACQTCH\
FHHSWFCRNQEMWMKGDVDDWQWGYHYHTINSAQCNQWFKEICKDMGWDSVFPPRHNCQRHKKCMPALYAGIWMATDHACTFM\
VRLIYTENIAEWHQVYCYRSMNMFTCGNVCLRCKSWIFVKNYMMAPVVNDPMIEAFYKRCCILGKAWYDMWGICPVERKSHWE\
IYAKDLLSFESCCSQKKQNCYTDNWGLEYRLFFQSIQMNTDPHYCQTHVCWISAMFPIYSPFYTSGPKEFYMWLQARIDQNMH\
GHANHYVTSGNWDSVYTPEKRAGVFPVVVPVWYPPQMCNDYIKLTYECERFHVEGTFGCNRWDLGCRRYIIFQCPYCDTMKIC\
YVDQWRSIKEGQFRMSGYPNHGYWFVHDDHTNEWCNQPVLAKFVRSKIVAICKKSQTVFHYAYTPGYNATWPQTNVCERMYGP\
HDNLLNNQQNVTFWWKMVPNCGMQILISCHNKMKWPTSHYVFMRLKCMHVLMQMEYLDHFTGPGEGDFCRNMQPYMHQDLHWE\
GSMRAILEYQAEHHRRAFRAELCAQYDQEIILWSGGWGVQDCGFHANYDGSLQVVSGEPCSMWCTTVMQYYADCWEKCMFA""", """\
ILIPRQQMGCFPFPWHFDFCFWSAHHSLVVPLNPQMQTVFQNRGLDRVTVKTDCHDHRWKWIYNLGLPTMTAGDWHFIKKHVV\
RANNPHQWFQGRLTTAWLHSTFLYKKTEYCLVRHSNCCHCDWDQIIHTCAFIAFLDLYQRHWPMCDKLYCHFHHSWFCRNQEM\
SMDWNQWFPWDSVPRANCLEEGALIALYAGIWANSMKRDMKTDHACTVRLIYVCELHAWLKYCYTSINMLCGNVCLRCKSWIF\
VKLFYMYAPVVNTIEANSPHYYKRCCILGQGICPVERKSHCEIYAKDLLSFESCCSQKQNCYTDNWGLEYRLFFQHIQMECTD\
PHANRGWTSCQTAKYWHFNLDDRPPKEFYMWLQATPTDLCMYQHCLMFKIVKQNFRKQHGHANPAASTSGNWDSVYTPEKMAY\
KDWYVSHPPVDMRRNGSKMVPVWYPPGIWHWKQSYKLTYECFFTVPGRFHVEGTFGCNRWDHQPGTRRDRQANHQFQCPYSDT\
MAIWEHAYTYVDQWRSIKEGQMPMSGYPNHGQWNVHDDHTNEQERSPICNQPVLAKFVRSKNVSNHEICKKSQTVFHWACEAQ\
TNVCERMLNNQHVAVKRNVTFWWQMVPNCLWSCHNKMTWPTRPEQHRLFFVKMRLKCMHEYLDVAPSDFCRNMQAYMHSMRAI\
LEYQADFDLKRRLRAIAPMDLCAQYDQEIILWSGGYIYDQSLQVVSCEGCSYYADCYVKCINVKEKCMFA"""
    similarity = global_sequence_similarity()
    # BLOSUM62 scoring with C=5, as in trial_2
    mat,maxval,index = similarity(X, Y, s=score_function("BLOSUM62", C=5))
    print(maxval,index)  # best score and the cell where it occurs
    # trace back only through the sub-matrix up to the maximum cell
    print_formatted([global_traceback(mat[0:index[0]+1, 0:index[1]+1], X, Y)])
trial_3()
```
#### Process a BIG dataset - warning can take some time to execute
```
def trial_4():
    """Align the two sequences stored in rosalind_ba5e.txt (large input)."""
    seq_a, seq_b, *_ = open("rosalind_ba5e.txt").read().split("\n")
    score = global_sequence_similarity()
    grid, best, where = score(seq_a, seq_b, s=score_function("BLOSUM62", C=5))
    print(best)
    sub = grid[0:where[0] + 1, 0:where[1] + 1]
    print_formatted([global_traceback(sub, seq_a, seq_b)])
trial_4()
def printmat(mat, X, Y):
    """Pretty-print the numeric part of a similarity matrix.

    Cells are subscripted with [1], so each cell is assumed to be a
    (directions, value) pair as built by the similarity functions — only
    the value is shown.  Y labels the columns (header row); X labels the
    row separators.
    """
    # note the unpacking order: axis 0 of mat runs along Y, axis 1 along X
    # (matches the mat[m, n] indexing used by the traceback functions)
    ylen, xlen = mat.shape
    print(" "+" ".join(Y))  # column header built from Y's characters
    for x in range(xlen):
        # one printed row per X-axis index, walking across the Y axis
        print(" | "+" | ".join(f"{mat[y, x][1]:3}" for y in range(ylen))+" |")
        # horizontal rule labelled with the X character; skipped after the last row
        if x+1 < xlen: print(f"{X[x]:1} -","-"*(ylen*6),sep="")
def trial_5():
    """Small local-alignment demo with the BASIC scheme (A=4, B=1, C=2)."""
    seq_a, seq_b = "ACGC", "GATTGA"
    finder = local_sequence_similarity()
    grid, best, where = finder(seq_a, seq_b, s=score_function("BASIC",A=4, B=1, C=2))
    printmat(grid, seq_a, seq_b)
    print("Value is", best, "at", where)
    sub = grid[0:where[0] + 1, 0:where[1] + 1]
    printmat(sub, seq_a, seq_b)
    print()
    print_formatted([local_traceback(sub, seq_a, seq_b)])
trial_5()
def trial_6():
    """Local alignment of MEANLY vs PENALTY under PAM250 with C=5."""
    seq_a, seq_b = "MEANLY", "PENALTY"
    finder = local_sequence_similarity()
    grid, best, where = finder(seq_a, seq_b, s=score_function("PAM250", C=5))
    printmat(grid, seq_a, seq_b)
    print("Value is", best, "at", where)
    sub = grid[0:where[0] + 1, 0:where[1] + 1]
    printmat(sub, seq_a, seq_b)
    print()
    print_formatted([local_traceback(sub, seq_a, seq_b)])
trial_6()
def trial_7():
    """Local alignment of the two sequences in local_alignment.txt."""
    seq_a, seq_b, *_ = open("local_alignment.txt").read().split("\n")
    finder = local_sequence_similarity()
    grid, best, where = finder(seq_a, seq_b, s=score_function("PAM250", C=5))
    print("Value is", best, "at", where)
    sub = grid[0:where[0] + 1, 0:where[1] + 1]
    print_formatted([local_traceback(sub, seq_a, seq_b)])
trial_7()
```
```
1062
YQAGIIRQPPRGD-RGVSDRNYSQCGKQ-NQ-AQLDNNPTWTKYEIEWRVQI-LPPGAGVFEGDNGQNQCLCPNW--A-W-EQPCQW----GALHS-NEQYPNRIHLWAPMSKLHIKIEKSSYN-RNAQ-FPNRCMYECE-FPSY-REQVDSCHYENVQIAF-TIFSGAEQKRKFCSCHFWSNFIDQAVFSTGLI-PWCYRRDDHSAFFMPNWNKQ--YKHPQLQFRVAGEGTQCRPFYTREMFTKVSAWRIAGRFAGPYERHHDAHLELWY-QHHKVRT-GQQLGIIWNNRDKTRNPCPFSAY-Y-NK--LP-WWK-I-NQ-N-AFYNCLQNIAHSTHDETHEFNPVKCIDWLQGTMV-P------TECKKGFVHEKCECYRNPGPPLHDMYHQMEDIFGVRFDCLTGWKHLS------D---YNPC-QERRNINDFYIFAYEIAPAVKNLVLSPQPLADATKKCAFNYTPLDQSPVVIACK---WYIHQPI-CMLL----IVLIC-AMDKYNAHMIVIRTTEGQQPMHACRMTEGPGMCMKEPLVTFTLPAQWQWPNHEFKYVYMYVLNYHLSQYTYTDEGHAGGQHYSFNVAVDVGMAWGHNRCYCQPACYSQQETQTRTIDYEKWQYMKHQAFKWGLWFCEQER-HA--WFKGQNRCEMFTAKMTRMGADSNLDQYKLMLAQNYEEQWEQPIMECGMSEIIEIDPPYRSELIFTFWPFCTYSPWQNLIKCRCNNVIEEMDQCVP-LTF-IGFGVKQAGGIQA-WAFYKE--EWTSTYYLMCQCMKSDKAQYPYEIILFWMQ--P-MDTGE--QEPPQQNMWIFLPHSWFFDWCCNAPWSEICSSRHD--H---GQ-CQDAFYPCELFTVF
Y-P-MSRKTAKSQFIEWCDW-F--CFNHWTNWAPLSIVRTSVAFAV-W-GHCWYPCG-GVCKTNRCKDD-FCGRWRKALFAEGPRDWKCCKNDLQNWNPQYSQGTR--NTK-RMVATTNQTMIEWKQSHIFETW-LF-CHVIIEYNWSAF-W-MWMNRNEAFNSIIKSGYPKLLL-T-QY-P-L-SQG--STPIVKPL-IRRD-QGKFW-A-WAQMWWFREPT-NIPTA-D-Y-CHSW--WQ--SR-ADLQ-NDRDMGP-EADASFYVEFWYWVRCAARTYGQQLGIIWNNRLKTRNPCPYSADGIQNKENYVFWWKNMCTKSHIAFYYCLQNVAHYTHDVTAEFNPVKCIDWLQGHMVLSSWFKYNTECKKLFVHEKCECYRM----FCGV---VEDIFGVRFH--TGWKHLSTAKPVPHVCVYNPSVQERRNINDFYIF-YEIAPAVKNLVLSAQPLHDYTKKCAFNYTPITITRIISTRNQIIW-AHVVIACQFYSPHQMLLIELAMDKYCADMNVRRSTEGHQPMHACRSTFGPGMAAKEPLVTFTLVAFWQWPNHEFQYVYMYTED-KIIQIG-PHLSN-GCEMVEYCVDC-YAK-RPCYRAYSAEAQYWRMITEAEDYSYKTRNAIAATATVRGQ-YCHPFRWLGIVWM-AHHDC-FFANECGTICI-PQMAEMRPPETTPYEI--DIIFMMF-WKE--HMSTTIL-DVVGMYRP-ATFSHWHDAHH-QCEPYLTPL-MCQSKLVFDAAFT--QVG-VKGVW-YHTEKLELMAGFNHM-K-FKKEEAQ---QSCFYWFQDCPDYDPPDAVRKTDEKHIRAHGEIWWLMRYYCMYHILHI-ASRHEWMHLRWDQACTNPGY--ELFE-F
```
```
# Expected Number of High-Scoring Alignments
from math import exp, log
# E = K * m * n * exp(-lambda * S)  (the Karlin-Altschul E-value form)
K = 0.050          # K parameter of the score distribution
lamda = 0.25       # lambda parameter ("lamda" avoids the Python keyword)
S = 35             # raw alignment score threshold
m = 250            # query length
n = 1000000000     # database length
E = K*m*n*exp(-lamda*S)
print(f"E value associated with S>={S} =",E)
# The Number of high-scoring alignments is poisson distributed with expected value E.
# probability of finding 0 alignments with score >= S is e^-E
p_no_alignments = exp(-E)
print(f"probability of no alignments with score >= {S} is",p_no_alignments)
print("probability of finding at least one alignment (p-value) is",1-p_no_alignments)
# Normalized Scores
s_prime = (lamda*S - log(K)) / log(2)  # bit score S' = (lambda*S - ln K) / ln 2
print("Normalized score S' =",s_prime)
N = n*m            # total search-space size
E = N/2**s_prime   # E-value recomputed from the normalized (bit) score
print("E-value",E)
```
# Question 3
Assume the background frequencies for all four nucleotides are equal, and consider the DNA substitution matrix which give all matches a score +1 and all mismatches a score -1.
What is the expected score for this matrix? Is this a valid matrix for local alignment? Why?
```
from genomics import *
# NOTE(review): X and Y are immediately overwritten two lines below, so
# these add_codon() results are discarded — presumably leftover scratch code.
X=add_codon(10)
Y=add_codon(1000)
X, Y = "MEANLY", "PENALTY"
similarity = local_sequence_similarity()
# PAM250 substitution scores with C=5, as in trial_6
mat,maxval,index = similarity(X, Y, s=score_function("PAM250", C=5))
# printmat(mat, X, Y)
print("Value is",maxval,"at",index)
# printmat(mat[0:index[0]+1,0:index[1]+1], X, Y)
print(X)
print(Y)
print_formatted([local_traceback(mat[0:index[0]+1, 0:index[1]+1], X, Y)])
print(Y)
print(mat)
```
| github_jupyter |
<a href="https://colab.research.google.com/github/Bertha-ding/20MA573-yuning-ding/blob/master/hw/hw3-3.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
- Prove the following facts: Suppose $f$ is a function satisfying
- $f(0) = f_{min},$ and $\lim_{x\to \infty}f(x) = f_{max}$
- $f$ is continuous
- $f$ is strictly increasing
then, for any $p\in (f_{min}, f_{max})$,
- there exists unique $\hat \sigma$, such that $f(\hat \sigma) = p$ and
$$\hat \sigma = \arg\min_{\sigma\in (0,\infty)} | f(\sigma) - p|.$$
- Now we denote by $f(\sigma)$ the BSM put price with the following parameters:
- vol_ratio = $\sigma$; spot_price = 100.; drift_ratio = .0475; strike = 110.; maturity = 1.
Answer the following questions:
- What is $f_{min}$ and $f_{max}$?
- Is $f$ strictly increasing on $(0,\infty)$? Justify your answer.
- If the market put price is $10$, then what's the implied volatility?
- Find its implied volatility with the following parameters:
- BSM call price is 10.; spot_price = 100.; drift_ratio = .0475; strike = 110.; maturity = 1.
```
import numpy as np
import scipy.stats as ss
class VanillaOption:
    """A European vanilla option (call or put).

    Attributes:
        otype: option type, 1 for a call, -1 for a put.
        strike: strike price K.
        maturity: time to maturity (years).
        market_price: observed market price, used later for calibration.
    """
    def __init__(
        self,
        otype = 1, # 1: 'call'
                   # -1: 'put'
        strike = 110.,
        maturity = 1.,
        market_price = 10.):
        self.otype = otype
        self.strike = strike
        self.maturity = maturity
        self.market_price = market_price #this will be used for calibration

    def payoff(self, s): #s: exercise price
        """Return the intrinsic payoff at price s: max(0, (s - K) * otype)."""
        otype = self.otype
        k = self.strike
        # (s - k) * otype is s - k for a call, k - s for a put; floor at 0.
        # Fix: max(0, x) instead of max([0, x]) — no throwaway list; the
        # unused local `maturity` was removed (payoff is intrinsic only).
        return max(0, (s - k)*otype)
class Gbm:
    """Geometric Brownian motion model for the underlying asset.

    Attributes:
        init_state: spot price S0.
        drift_ratio: drift r (used as the risk-free rate by bsm_price).
        vol_ratio: volatility sigma.

    Pricing is attached later, notebook-style, via `Gbm.bsm_price = bsm_price`.
    """
    def __init__(self,
                 init_state = 100.,
                 drift_ratio = .0475,
                 vol_ratio = .2
                 ):
        self.init_state = init_state
        self.drift_ratio = drift_ratio
        self.vol_ratio = vol_ratio
def bsm_price(self, vanilla_option):
    """Black-Scholes-Merton price of `vanilla_option` under this GBM.

    Uses the closed-form price with flag = +1 (call) / -1 (put) folded
    into the cdf arguments, so one expression covers both option types.
    """
    spot = self.init_state
    vol = self.vol_ratio
    rate = self.drift_ratio
    flag = vanilla_option.otype
    strike = vanilla_option.strike
    ttm = vanilla_option.maturity
    vol_sqrt_t = vol * np.sqrt(ttm)
    d1 = (np.log(spot / strike) + (rate + 0.5 * vol ** 2) * ttm) / vol_sqrt_t
    d2 = d1 - vol_sqrt_t
    # parenthesised so the line break is legal
    return (flag * spot * ss.norm.cdf(flag * d1)
            - flag * np.exp(-rate * ttm) * strike * ss.norm.cdf(flag * d2))
# attach as a method (notebook-style incremental class building)
Gbm.bsm_price = bsm_price
# GBM model and a put option (otype = -1) for the implied-volatility example
gbm1 = Gbm(
    init_state = 100.,
    drift_ratio = .0475,
    vol_ratio = .2)
option1 = VanillaOption(
    otype = -1,
    strike = 110.,
    maturity = 1.
    )
def error_function(vol, gbm, option):
    """Absolute pricing error at volatility `vol`.

    Side effect: overwrites gbm.vol_ratio so the model prices with `vol`.
    Shaped for scipy minimizers: first argument is the decision variable.
    """
    gbm.vol_ratio = vol
    model_price = gbm.bsm_price(option)
    return abs(option.market_price - model_price)
import scipy.optimize as so
def implied_volatility(gbm, option):
    """Calibrate sigma by minimizing |market - model| price with Nelder-Mead."""
    initial_guess = .1
    best = so.fmin(error_function, initial_guess, args=(gbm, option), disp=0)
    return best[0]  # fmin returns an array of optimal parameters
option1.market_price = 10  # observed put price to invert for sigma
print('>>>>>>>>implied volatility is ' +
      str(implied_volatility(gbm1, option1)))
```
Find its implied volatility of call option with same parameters.
```
# same parameters, but now a call option (otype = 1);
# market_price keeps its default of 10
gbm1 = Gbm(
    init_state = 100.,
    drift_ratio = .0475,
    vol_ratio = .2)
option1 = VanillaOption(
    otype = 1,
    strike = 110.,
    maturity = 1.
    )
print('>>>>>>>>implied volatility is ' +
      str(implied_volatility(gbm1, option1)))
```
The first proof and answers of questions are attached in hw3-4.
| github_jupyter |
# Chapter 2: Conditional probability
----
```
import numpy as np
```
## Simulating the frequentist interpretation
Recall that the frequentist interpretation of conditional probability based on a large number `n` of repetitions of an experiment is $P(A|B) \approx n_{AB}/n_{B}$, where $n_{AB}$ is the number of times that $A \cap B$ occurs and $n_{B}$ is the number of times that $B$ occurs. Let's try this out by simulation, and verify the results of Example 2.2.5. So let's use [`numpy.random.choice`](https://docs.scipy.org/doc/numpy-1.15.0/reference/generated/numpy.random.choice.html) to simulate `n` families, each with two children.
```
np.random.seed(34)
n = 10**5  # number of simulated two-child families
# 1 = girl, 2 = boy; child1 is the elder child, child2 the younger
child1 = np.random.choice([1,2], n, replace=True)
child2 = np.random.choice([1,2], n, replace=True)
print('child1:\n{}\n'.format(child1))
print('child2:\n{}\n'.format(child2))
```
Here `child1` is a NumPy `array` of length `n`, where each element is a 1 or a 2. Letting 1 stand for "girl" and 2 stand for "boy", this `array` represents the gender of the elder child in each of the `n` families. Similarly, `child2` represents the gender of the younger child in each family.
Alternatively, we could have used
```
np.random.choice(["girl", "boy"], n, replace=True)
```
but it is more convenient working with numerical values.
Let $A$ be the event that both children are girls and $B$ the event that the elder is a girl. Following the frequentist interpretation, we count the number of repetitions where $B$ occurred and name it `n_b`, and we also count the number of repetitions where $A \cap B$ occurred and name it `n_ab`. Finally, we divide `n_ab` by ` n_b` to approximate $P(A|B)$.
```
# frequentist estimate of P(A|B): B = elder child is a girl,
# A ∩ B = both children are girls
n_b = np.sum(child1==1)
n_ab = np.sum((child1==1) & (child2==1))
print('P(both girls | elder is girl) = {:0.2F}'.format(n_ab / n_b))
```
The ampersand `&` is an elementwise $AND$, so `n_ab` is the number of families where both the first child and the second child are girls. When we ran this code, we got 0.50, confirming our answer $P(\text{both girls | elder is a girl}) = 1/2$.
Now let $A$ be the event that both children are girls and $B$ the event that at least one of the children is a girl. Then $A \cap B$ is the same, but `n_b` needs to count the number of families where at least one child is a girl. This is accomplished with the elementwise $OR$ operator `|` (this is not a conditioning bar; it is an inclusive $OR$, returning `True` if at least one element is `True`).
```
# B = at least one child is a girl.
# Fix: the original tested (child1==1) | (child2==2), i.e. "elder is a girl
# OR the younger is a BOY" — the wrong event, which coincidentally also
# evaluates to ~1/3 so the error was invisible in the printed result.
# With 1 = girl, the correct event is child1==1 or child2==1.
n_b = np.sum((child1==1) | (child2==1))
n_ab = np.sum((child1==1) & (child2==1))
print('P(both girls | at least one girl) = {:0.2F}'.format(n_ab / n_b))
```
For us, the result was 0.33, confirming that $P(\text{both girls | at least one girl}) = 1/3$.
## Monty Hall simulation
Many long, bitter debates about the Monty Hall problem could have been averted by trying it out with a simulation. To study how well the never-switch strategy performs, let's generate 10<sup>5</sup> runs of the Monty Hall game. To simplify notation, assume the contestant always chooses door 1. Then we can generate a vector specifying which door has the car for each repetition:
```
np.random.seed(55)
n = 10**5  # number of simulated games
# door hiding the car in each game; the contestant always picks door 1,
# so the never-switch strategy wins exactly when cardoor == 1
cardoor = np.random.choice([1,2,3] , n, replace=True)
print('The never-switch strategy has success rate {:.3F}'.format(np.sum(cardoor==1) / n))
```
At this point we could generate the vector specifying which doors Monty opens, but that's unnecessary since the never-switch strategy succeeds if and only if door 1 has the car! So the fraction of times when the never-switch strategy succeeds is `numpy.sum(cardoor==1)/n`, which was 0.331 in our simulation. This is very close to 1/3.
What if we want to play the Monty Hall game interactively? We can do this by programming a Python class that would let us play interactively or let us run a simulation across many trials.
```
class Monty():
    """State machine for the Monty Hall game.

    States: 0 = waiting for the player's first pick, 1 = player picked and
    Monty opened a goat door, 2 = round finished (win/loss recorded).
    num_wins / num_plays accumulate across rounds until reset() is called.

    Changes vs. the original: docstring typos fixed ("randonly", "holdling")
    and the dated `1.0 * a / b` float-coercion replaced with Python 3 true
    division; behavior is unchanged.
    """
    def __init__(self):
        """ Object creation function. """
        self.state = 0
        self.doors = np.array([1, 2, 3])
        self.prepare_game()

    def get_success_rate(self):
        """ Return the rate of success in this series of plays: num. wins / num. plays. """
        if self.num_plays > 0:
            return self.num_wins / self.num_plays  # true division in Python 3
        else:
            return 0.0

    def prepare_game(self):
        """ Prepare initial values for game play, and randomly choose the door with the car. """
        self.num_plays = 0
        self.num_wins = 0
        self.cardoor = np.random.choice(self.doors)
        self.players_choice = None
        self.montys_choice = None

    def choose_door(self, door):
        """ Player chooses a door at state 0. Monty will choose a remaining door to reveal a goat. """
        self.state = 1
        self.players_choice = door
        # Monty opens a door that is neither the player's pick nor the car
        self.montys_choice = np.random.choice(self.doors[(self.doors!=self.players_choice) & (self.doors!=self.cardoor)])

    def switch_door(self, do_switch):
        """ Player has the option to switch from the door she has chosen to the remaining unopened door.
            If the door the player has selected is the same as the cardoor, then num. of wins is incremented.
            Finally, number of plays will be incremented.
        """
        self.state = 2
        if do_switch:
            # the single door that is neither the current pick nor Monty's open door
            self.players_choice = self.doors[(self.doors!=self.players_choice) & (self.doors!=self.montys_choice)][0]
        if self.players_choice == self.cardoor:
            self.num_wins += 1
        self.num_plays += 1

    def continue_play(self):
        """ Player opts to continue playing in this series.
            The game is returned to state 0, but the counters for num. wins and num. plays
            will be kept intact and running.
            A new cardoor is randomly chosen.
        """
        self.state = 0
        self.cardoor = np.random.choice(self.doors)
        self.players_choice = None
        self.montys_choice = None

    def reset(self):
        """ The entire game state is returned to its initial state.
            All counters and variables holding state are re-initialized.
        """
        self.state = 0
        self.prepare_game()
```
In brief:
* The `Monty` class represents a simple state model for the game.
* When an instance of the `Monty` game is created, game state-holding variables are initialized and a `cardoor` randomly chosen.
* After the player initially picks a door, `Monty` will choose a remaining door that does not have car behind it.
* The player can then choose to switch to the other, remaining unopened door, or stick with her initial choice.
* `Monty` will then see if the player wins or not, and updates the state-holding variables for num. wins and num. plays.
* The player can continue playing, or stop and reset the game to its original state.
### As a short simulation program
Here is an example showing how to use the `Monty` class above to run a simulation to see how often the switching strategy succeeds.
```
np.random.seed(89)
trials = 10**5
game = Monty()
# always-switch strategy: pick a random door, switch, then start a new
# round while keeping the win/play counters running
for _ in range(trials):
    game.choose_door(np.random.choice([1,2,3]))
    game.switch_door(True)
    game.continue_play()
print('In {} trials, the switching strategy won {} times.'.format(game.num_plays, game.num_wins))
print('Success rate is {:.3f}'.format(game.get_success_rate()))
```
### As an interactive widget in this Jupyter notebook
Optionally, the `Monty` Python class above can also be used as an engine to power an interactive widget that lets you play the three-door game _in the browser_ using [`ipywidgets` ](https://ipywidgets.readthedocs.io/en/stable/user_guide.html).
To run the interactive widget, make sure you have the `ipywidgets` package installed (v7.4.2 or greater).
To install with the `conda` package manager, execute the following command:
conda install ipywidgets
To install with the `pip` package manager, execute the following command:
pip install ipywidgets
```
from ipywidgets import Box, Button, ButtonStyle, FloatText, GridBox, IntText, Label, Layout, HBox
from IPython.display import display
```
The doors in the game are represented by [`ipywidgets.Button`](https://ipywidgets.readthedocs.io/en/stable/examples/Widget%20List.html#Button).
```
# The three doors are equally-sized Buttons; door2/door3 reuse door1's Layout.
door1 = Button(description='Door 1', layout=Layout(flex='1 1 auto', width='auto'))
door2 = Button(description='Door 2', layout=door1.layout)
door3 = Button(description='Door 3', layout=door1.layout)
doors_arr = [door1, door2, door3]  # index i-1 corresponds to door number i
doors = Box(doors_arr, layout=Layout(width='auto', grid_area='doors'))
```
State-holding variables in the `Monty` object are displayed using [`ipywidgets.IntText`](https://ipywidgets.readthedocs.io/en/stable/examples/Widget%20List.html#IntText) (for the `num_wins` and `num_plays`); and [`ipywidgets.FloatText`](https://ipywidgets.readthedocs.io/en/stable/examples/Widget%20List.html#FloatText) (for the success rate).
```
# Read-only display fields: one (label, text) pair per game statistic.
label1 = Label(value='number of plays', layout=Layout(width='auto', grid_area='label1'))
text1 = IntText(disabled=True, layout=Layout(width='auto', grid_area='text1'))
label2 = Label(value='number of wins', layout=Layout(width='auto', grid_area='label2'))
text2 = IntText(disabled=True, layout=Layout(width='auto', grid_area='text2'))
label3 = Label(value='success rate', layout=Layout(width='auto', grid_area='label3'))
text3 = FloatText(disabled=True, layout=Layout(width='auto', grid_area='text3'))
```
[`ipywidgets.Label`](https://ipywidgets.readthedocs.io/en/stable/examples/Widget%20List.html#Label) is used to display the title and descriptive text in the game widget.
```
# Centered title banner and a one-line status label used to prompt the player.
banner = Box([Label(value='Interactive widget: Monty Hall problem',
                    layout=Layout(width='50%'))],
             layout=Layout(width='auto', justify_content='center', grid_area='banner'))
status = Label(value='Pick a door...', layout=Layout(width='auto', grid_area='status'))
```
Buttons allowing for further user actions are located at the bottom of the widget.
* The `reveal` button is used to show what's behind all of the doors after the player makes her final choice.
* After the player completes a round of play, she can click the `continue` button to keep counting game state (num. wins and num. plays)
* The `reset` button lets the player return the game to its original state after completing a round of play.
```
# Action buttons; all start disabled until a round is underway.
button_layout = Layout(flex='1 1 auto', width='auto')
reveal = Button(description='reveal', tooltip='open selected door', layout=button_layout, disabled=True)
contin = Button(description='continue', tooltip='continue play', layout=button_layout, disabled=True)
reset = Button(description='reset', tooltip='reset game', layout=button_layout, disabled=True)
actions = Box([reveal, contin, reset], layout=Layout(width='auto', grid_area='actions'))
```
[`ipywidgets.GridBox`](https://ipywidgets.readthedocs.io/en/stable/examples/Widget%20Styling.html#The-Grid-layout) helps us lay out the user interface elements for the `Monty` game widget.
```
ui = GridBox(children=[banner, doors, label1, text1, label2, text2, label3, text3, status, actions],
layout=Layout(
width='50%',
grid_template_rows='auto auto auto auto auto auto auto',
grid_template_columns='25% 25% 25% 25%',
grid_template_areas='''
"banner banner banner banner"
"doors doors doors doors"
"label1 label1 text1 text1"
"label2 label2 text2 text2"
"label3 label3 text3 text3"
"status status status status"
". . actions actions"
'''
)
)
```
We lastly create some functions to connect the widget to the `Monty` game object. These functions adapt player action [events](https://ipywidgets.readthedocs.io/en/stable/examples/Widget%20Events.html#Example) to state changes in the `Monty` object, and then update the widget user interface accordingly.
```
# single shared game-engine instance driven by the widget callbacks below
uigame = Monty()
def reset_ui(disable_reset=True):
    """ Return widget elements to their initial state.
        Do not disable the reset button in the case of continue.
    """
    # restore the three door buttons to their unopened, unstyled look
    for i,d in enumerate(doors_arr):
        d.description = 'Door {}'.format(i+1)
        d.disabled = False
        d.icon = ''
        d.button_style = ''
    # a fresh round starts with only door clicks available
    reveal.disabled = True
    contin.disabled = True
    reset.disabled = disable_reset  # stays False when called from 'continue'
def update_status(new_status):
    """ Update the widget text fields for displaying present game status. """
    text1.value = uigame.num_plays           # number of plays counter
    text2.value = uigame.num_wins            # number of wins counter
    text3.value = uigame.get_success_rate()  # wins / plays
    status.value = new_status                # prompt / result message
def update_ui_reveal():
    """ Helper function to update the widget after the player clicks the reveal button. """
    if uigame.players_choice == uigame.cardoor:
        new_status = 'You win! Continue playing?'
    else:
        new_status = 'Sorry, you lose. Continue playing?'
    # open every door: show car/goat and colour the player's door green
    # (win, check icon) or red (loss, times icon)
    for i,d in enumerate(doors_arr):
        d.disabled = True
        if uigame.cardoor == i+1:
            d.description = 'car'
        else:
            d.description = 'goat'
        if uigame.players_choice == i+1:
            if uigame.players_choice == uigame.cardoor:
                d.button_style = 'success'
                d.icon = 'check'
            else:
                d.button_style = 'danger'
                d.icon = 'times'
    update_status(new_status)
    # round is over: only 'continue' and 'reset' remain clickable
    reveal.disabled = True
    contin.disabled = False
    reset.disabled = False
def on_button_clicked(b):
    """ Event-handling function that maps button click events in the widget
        to corresponding functions in Monty, and updates the user interface
        according to the present game state.
    """
    if uigame.state == 0:
        # state 0: waiting for the player's first pick
        if b.description in ['Door 1', 'Door 2', 'Door 3']:
            c = int(b.description.split()[1])  # door number parsed from the label
            uigame.choose_door(c)
            b.disabled = True
            b.button_style = 'info'
            # show the goat behind the door Monty opened
            m = doors_arr[uigame.montys_choice-1]
            m.disabled = True
            m.description = 'goat'
            # the one door that is neither picked nor opened
            unopened = uigame.doors[(uigame.doors != uigame.players_choice) &
                                    (uigame.doors != uigame.montys_choice)][0]
            status.value = 'Monty reveals a goat behind Door {}. Click Door {} to switch, or \'reveal\' Door {}.' \
                .format(uigame.montys_choice, unopened, uigame.players_choice)
            reveal.disabled = False
            reset.disabled = False
        elif b.description == 'reset':
            uigame.reset()
            reset_ui()
            update_status('Pick a door...')
    elif uigame.state == 1:
        # state 1: player has picked; may switch doors, reveal, or reset
        if b.description in ['Door 1', 'Door 2', 'Door 3']:
            prev_choice = uigame.players_choice
            uigame.switch_door(True)
            # un-highlight the previously selected door, highlight the new one
            pb = doors_arr[prev_choice-1]
            pb.icon = ''
            pb.button_style = ''
            b.disabled = True
            b.button_style = 'info'
            status.value = 'Now click \'reveal\' to see what\'s behind Door {}.'.format(uigame.players_choice)
        elif b.description == 'reset':
            uigame.reset()
            reset_ui()
            update_status('Pick a door...')
        elif b.description == 'reveal':
            uigame.switch_door(False)  # stick with the current door
            update_ui_reveal()
    elif uigame.state == 2:
        # state 2: round finished — reveal (after a switch), continue, or reset
        if b.description == 'reveal':
            update_ui_reveal()
        else:
            if b.description == 'continue':
                uigame.continue_play()
                reset_ui(False)  # keep the reset button enabled
                update_status('Pick a door once more...')
            elif b.description == 'reset':
                uigame.reset()
                reset_ui()
                update_status('Pick a door...')
# hook up all buttons to our event-handling function
door1.on_click(on_button_clicked)
door2.on_click(on_button_clicked)
door3.on_click(on_button_clicked)
reveal.on_click(on_button_clicked)
contin.on_click(on_button_clicked)
reset.on_click(on_button_clicked)
# render the assembled widget in the notebook output area
display(ui)
```
How to play:
* Click a door to select.
* Monty will select a remaining door and open to reveal a goat.
* Click the `reveal` button to open your selected door.
* Or click the remaining unopened Door button to switch your door choice, and then click `reveal`.
* Click the `continue` button to keep playing.
* You may click the `reset` button at any time to return the game back to its initial state.
| github_jupyter |
# Building your Deep Neural Network: Step by Step
Welcome to your week 4 assignment (part 1 of 2)! You have previously trained a 2-layer Neural Network (with a single hidden layer). This week, you will build a deep neural network, with as many layers as you want!
- In this notebook, you will implement all the functions required to build a deep neural network.
- In the next assignment, you will use these functions to build a deep neural network for image classification.
**After this assignment you will be able to:**
- Use non-linear units like ReLU to improve your model
- Build a deeper neural network (with more than 1 hidden layer)
- Implement an easy-to-use neural network class
**Notation**:
- Superscript $[l]$ denotes a quantity associated with the $l^{th}$ layer.
- Example: $a^{[L]}$ is the $L^{th}$ layer activation. $W^{[L]}$ and $b^{[L]}$ are the $L^{th}$ layer parameters.
- Superscript $(i)$ denotes a quantity associated with the $i^{th}$ example.
- Example: $x^{(i)}$ is the $i^{th}$ training example.
- Lowerscript $i$ denotes the $i^{th}$ entry of a vector.
- Example: $a^{[l]}_i$ denotes the $i^{th}$ entry of the $l^{th}$ layer's activations).
Let's get started!
## 1 - Packages
Let's first import all the packages that you will need during this assignment.
- [numpy](www.numpy.org) is the main package for scientific computing with Python.
- [matplotlib](http://matplotlib.org) is a library to plot graphs in Python.
- dnn_utils provides some necessary functions for this notebook.
- testCases provides some test cases to assess the correctness of your functions
- np.random.seed(1) is used to keep all the random function calls consistent. It will help us grade your work. Please don't change the seed.
```
import numpy as np
import h5py
import matplotlib.pyplot as plt
from testCases import *
from dnn_utils import sigmoid, sigmoid_backward, relu, relu_backward
%matplotlib inline
plt.rcParams['figure.figsize'] = (5.0, 4.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'  # no smoothing when showing images
plt.rcParams['image.cmap'] = 'gray'  # grayscale colormap by default
# auto-reload edited helper modules without restarting the kernel
%load_ext autoreload
%autoreload 2
np.random.seed(1)  # fixed seed to keep random calls consistent for grading
```
## 2 - Outline of the Assignment
To build your neural network, you will be implementing several "helper functions". These helper functions will be used in the next assignment to build a two-layer neural network and an L-layer neural network. Each small helper function you will implement will have detailed instructions that will walk you through the necessary steps. Here is an outline of this assignment, you will:
- Initialize the parameters for a two-layer network and for an $L$-layer neural network.
- Implement the forward propagation module (shown in purple in the figure below).
- Complete the LINEAR part of a layer's forward propagation step (resulting in $Z^{[l]}$).
- We give you the ACTIVATION function (relu/sigmoid).
- Combine the previous two steps into a new [LINEAR->ACTIVATION] forward function.
- Stack the [LINEAR->RELU] forward function L-1 times (for layers 1 through L-1) and add a [LINEAR->SIGMOID] at the end (for the final layer $L$). This gives you a new L_model_forward function.
- Compute the loss.
- Implement the backward propagation module (denoted in red in the figure below).
- Complete the LINEAR part of a layer's backward propagation step.
- We give you the gradient of the ACTIVATE function (relu_backward/sigmoid_backward)
- Combine the previous two steps into a new [LINEAR->ACTIVATION] backward function.
- Stack [LINEAR->RELU] backward L-1 times and add [LINEAR->SIGMOID] backward in a new L_model_backward function
- Finally update the parameters.
<img src="images/final outline.png" style="width:800px;height:500px;">
<caption><center> **Figure 1**</center></caption><br>
**Note** that for every forward function, there is a corresponding backward function. That is why at every step of your forward module you will be storing some values in a cache. The cached values are useful for computing gradients. In the backpropagation module you will then use the cache to calculate the gradients. This assignment will show you exactly how to carry out each of these steps.
## 3 - Initialization
You will write two helper functions that will initialize the parameters for your model. The first function will be used to initialize parameters for a two layer model. The second one will generalize this initialization process to $L$ layers.
### 3.1 - 2-layer Neural Network
**Exercise**: Create and initialize the parameters of the 2-layer neural network.
**Instructions**:
- The model's structure is: *LINEAR -> RELU -> LINEAR -> SIGMOID*.
- Use random initialization for the weight matrices. Use `np.random.randn(shape)*0.01` with the correct shape.
- Use zero initialization for the biases. Use `np.zeros(shape)`.
```
# GRADED FUNCTION: initialize_parameters
def initialize_parameters(n_x, n_h, n_y):
    """
    Create and initialize the parameters of a 2-layer network.

    Argument:
    n_x -- size of the input layer
    n_h -- size of the hidden layer
    n_y -- size of the output layer

    Returns:
    parameters -- python dictionary containing your parameters:
                    W1 -- weight matrix of shape (n_h, n_x)
                    b1 -- bias vector of shape (n_h, 1)
                    W2 -- weight matrix of shape (n_y, n_h)
                    b2 -- bias vector of shape (n_y, 1)
    """
    np.random.seed(1)  # fixed seed so the graded output is reproducible

    ### START CODE HERE ### (~ 4 lines of code)
    parameters = {
        "W1": np.random.randn(n_h, n_x) * 0.01,  # small random weights
        "b1": np.zeros((n_h, 1)),                # zero biases
        "W2": np.random.randn(n_y, n_h) * 0.01,
        "b2": np.zeros((n_y, 1)),
    }
    ### END CODE HERE ###

    # sanity-check every shape before returning
    assert(parameters["W1"].shape == (n_h, n_x))
    assert(parameters["b1"].shape == (n_h, 1))
    assert(parameters["W2"].shape == (n_y, n_h))
    assert(parameters["b2"].shape == (n_y, 1))

    return parameters
# smoke test on a tiny 2-2-1 network; compare with the expected output below
parameters = initialize_parameters(2,2,1)
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
```
**Expected output**:
<table style="width:80%">
<tr>
<td> **W1** </td>
<td> [[ 0.01624345 -0.00611756]
[-0.00528172 -0.01072969]] </td>
</tr>
<tr>
<td> **b1**</td>
<td>[[ 0.]
[ 0.]]</td>
</tr>
<tr>
<td>**W2**</td>
<td> [[ 0.00865408 -0.02301539]]</td>
</tr>
<tr>
<td> **b2** </td>
<td> [[ 0.]] </td>
</tr>
</table>
### 3.2 - L-layer Neural Network
The initialization for a deeper L-layer neural network is more complicated because there are many more weight matrices and bias vectors. When completing the `initialize_parameters_deep`, you should make sure that your dimensions match between each layer. Recall that $n^{[l]}$ is the number of units in layer $l$. Thus for example if the size of our input $X$ is $(12288, 209)$ (with $m=209$ examples) then:
<table style="width:100%">
<tr>
<td> </td>
<td> **Shape of W** </td>
<td> **Shape of b** </td>
<td> **Activation** </td>
<td> **Shape of Activation** </td>
<tr>
<tr>
<td> **Layer 1** </td>
<td> $(n^{[1]},12288)$ </td>
<td> $(n^{[1]},1)$ </td>
<td> $Z^{[1]} = W^{[1]} X + b^{[1]} $ </td>
<td> $(n^{[1]},209)$ </td>
<tr>
<tr>
<td> **Layer 2** </td>
<td> $(n^{[2]}, n^{[1]})$ </td>
<td> $(n^{[2]},1)$ </td>
<td>$Z^{[2]} = W^{[2]} A^{[1]} + b^{[2]}$ </td>
<td> $(n^{[2]}, 209)$ </td>
<tr>
<tr>
<td> $\vdots$ </td>
<td> $\vdots$ </td>
<td> $\vdots$ </td>
<td> $\vdots$</td>
<td> $\vdots$ </td>
<tr>
<tr>
<td> **Layer L-1** </td>
<td> $(n^{[L-1]}, n^{[L-2]})$ </td>
<td> $(n^{[L-1]}, 1)$ </td>
<td>$Z^{[L-1]} = W^{[L-1]} A^{[L-2]} + b^{[L-1]}$ </td>
<td> $(n^{[L-1]}, 209)$ </td>
<tr>
<tr>
<td> **Layer L** </td>
<td> $(n^{[L]}, n^{[L-1]})$ </td>
<td> $(n^{[L]}, 1)$ </td>
<td> $Z^{[L]} = W^{[L]} A^{[L-1]} + b^{[L]}$</td>
<td> $(n^{[L]}, 209)$ </td>
<tr>
</table>
Remember that when we compute $W X + b$ in python, it carries out broadcasting. For example, if:
$$ W = \begin{bmatrix}
j & k & l\\
m & n & o \\
p & q & r
\end{bmatrix}\;\;\; X = \begin{bmatrix}
a & b & c\\
d & e & f \\
g & h & i
\end{bmatrix} \;\;\; b =\begin{bmatrix}
s \\
t \\
u
\end{bmatrix}\tag{2}$$
Then $WX + b$ will be:
$$ WX + b = \begin{bmatrix}
(ja + kd + lg) + s & (jb + ke + lh) + s & (jc + kf + li)+ s\\
(ma + nd + og) + t & (mb + ne + oh) + t & (mc + nf + oi) + t\\
(pa + qd + rg) + u & (pb + qe + rh) + u & (pc + qf + ri)+ u
\end{bmatrix}\tag{3} $$
**Exercise**: Implement initialization for an L-layer Neural Network.
**Instructions**:
- The model's structure is *[LINEAR -> RELU] $ \times$ (L-1) -> LINEAR -> SIGMOID*. I.e., it has $L-1$ layers using a ReLU activation function followed by an output layer with a sigmoid activation function.
- Use random initialization for the weight matrices. Use `np.random.randn(shape) * 0.01`.
- Use zeros initialization for the biases. Use `np.zeros(shape)`.
- We will store $n^{[l]}$, the number of units in different layers, in a variable `layer_dims`. For example, the `layer_dims` for the "Planar Data classification model" from last week would have been [2,4,1]: There were two inputs, one hidden layer with 4 hidden units, and an output layer with 1 output unit. Thus means `W1`'s shape was (4,2), `b1` was (4,1), `W2` was (1,4) and `b2` was (1,1). Now you will generalize this to $L$ layers!
- Here is the implementation for $L=1$ (one layer neural network). It should inspire you to implement the general case (L-layer neural network).
```python
if L == 1:
parameters["W" + str(L)] = np.random.randn(layer_dims[1], layer_dims[0]) * 0.01
parameters["b" + str(L)] = np.zeros((layer_dims[1], 1))
```
```
# GRADED FUNCTION: initialize_parameters_deep
def initialize_parameters_deep(layer_dims):
    """
    Randomly initialize weights (and zero biases) for an L-layer network.

    Arguments:
    layer_dims -- python array (list) containing the dimensions of each layer in our network

    Returns:
    parameters -- python dictionary containing your parameters "W1", "b1", ..., "WL", "bL":
                    Wl -- weight matrix of shape (layer_dims[l], layer_dims[l-1])
                    bl -- bias vector of shape (layer_dims[l], 1)
    """
    np.random.seed(3)  # fixed seed so the graded output is reproducible
    parameters = {}
    total_layers = len(layer_dims)  # number of layers in the network

    for layer in range(1, total_layers):
        fan_in, fan_out = layer_dims[layer - 1], layer_dims[layer]
        ### START CODE HERE ### (~ 2 lines of code)
        parameters[f"W{layer}"] = np.random.randn(fan_out, fan_in) * 0.01
        parameters[f"b{layer}"] = np.zeros((fan_out, 1))
        ### END CODE HERE ###

        # verify the shapes layer by layer
        assert(parameters[f"W{layer}"].shape == (fan_out, fan_in))
        assert(parameters[f"b{layer}"].shape == (fan_out, 1))

    return parameters
parameters = initialize_parameters_deep([5,4,3])
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
```
**Expected output**:
<table style="width:80%">
<tr>
<td> **W1** </td>
<td>[[ 0.01788628 0.0043651 0.00096497 -0.01863493 -0.00277388]
[-0.00354759 -0.00082741 -0.00627001 -0.00043818 -0.00477218]
[-0.01313865 0.00884622 0.00881318 0.01709573 0.00050034]
[-0.00404677 -0.0054536 -0.01546477 0.00982367 -0.01101068]]</td>
</tr>
<tr>
<td>**b1** </td>
<td>[[ 0.]
[ 0.]
[ 0.]
[ 0.]]</td>
</tr>
<tr>
<td>**W2** </td>
<td>[[-0.01185047 -0.0020565 0.01486148 0.00236716]
[-0.01023785 -0.00712993 0.00625245 -0.00160513]
[-0.00768836 -0.00230031 0.00745056 0.01976111]]</td>
</tr>
<tr>
<td>**b2** </td>
<td>[[ 0.]
[ 0.]
[ 0.]]</td>
</tr>
</table>
## 4 - Forward propagation module
### 4.1 - Linear Forward
Now that you have initialized your parameters, you will do the forward propagation module. You will start by implementing some basic functions that you will use later when implementing the model. You will complete three functions in this order:
- LINEAR
- LINEAR -> ACTIVATION where ACTIVATION will be either ReLU or Sigmoid.
- [LINEAR -> RELU] $\times$ (L-1) -> LINEAR -> SIGMOID (whole model)
The linear forward module (vectorized over all the examples) computes the following equations:
$$Z^{[l]} = W^{[l]}A^{[l-1]} +b^{[l]}\tag{4}$$
where $A^{[0]} = X$.
**Exercise**: Build the linear part of forward propagation.
**Reminder**:
The mathematical representation of this unit is $Z^{[l]} = W^{[l]}A^{[l-1]} +b^{[l]}$. You may also find `np.dot()` useful. If your dimensions don't match, printing `W.shape` may help.
```
# GRADED FUNCTION: linear_forward
def linear_forward(A, W, b):
    """
    Implement the linear part of a layer's forward propagation.

    Arguments:
    A -- activations from previous layer (or input data): (size of previous layer, number of examples)
    W -- weights matrix: numpy array of shape (size of current layer, size of previous layer)
    b -- bias vector, numpy array of shape (size of the current layer, 1)

    Returns:
    Z -- the input of the activation function, also called pre-activation parameter
    cache -- a python tuple containing "A", "W" and "b" ; stored for computing the backward pass efficiently
    """
    # Z = W.A + b, vectorized over all m examples; b broadcasts across the columns.
    Z = W @ A + b

    assert(Z.shape == (W.shape[0], A.shape[1]))
    cache = (A, W, b)

    return Z, cache
# Smoke test for linear_forward.
# NOTE(review): linear_forward_test_case is a course-provided helper defined
# elsewhere (not in this file) — presumably in testCases_v2; verify the import.
A, W, b = linear_forward_test_case()
Z, linear_cache = linear_forward(A, W, b)
print("Z = " + str(Z))
```
**Expected output**:
<table style="width:35%">
<tr>
<td> **Z** </td>
<td> [[ 3.1980455 7.85763489]] </td>
</tr>
</table>
### 4.2 - Linear-Activation Forward
In this notebook, you will use two activation functions:
- **Sigmoid**: $\sigma(Z) = \sigma(W A + b) = \frac{1}{ 1 + e^{-(W A + b)}}$. We have provided you with the `sigmoid` function. This function returns **two** items: the activation value "`a`" and a "`cache`" that contains "`Z`" (it's what we will feed in to the corresponding backward function). To use it you could just call:
``` python
A, activation_cache = sigmoid(Z)
```
- **ReLU**: The mathematical formula for ReLu is $A = RELU(Z) = max(0, Z)$. We have provided you with the `relu` function. This function returns **two** items: the activation value "`A`" and a "`cache`" that contains "`Z`" (it's what we will feed in to the corresponding backward function). To use it you could just call:
``` python
A, activation_cache = relu(Z)
```
For more convenience, you are going to group two functions (Linear and Activation) into one function (LINEAR->ACTIVATION). Hence, you will implement a function that does the LINEAR forward step followed by an ACTIVATION forward step.
**Exercise**: Implement the forward propagation of the *LINEAR->ACTIVATION* layer. Mathematical relation is: $A^{[l]} = g(Z^{[l]}) = g(W^{[l]}A^{[l-1]} +b^{[l]})$ where the activation "g" can be sigmoid() or relu(). Use linear_forward() and the correct activation function.
```
# GRADED FUNCTION: linear_activation_forward
def linear_activation_forward(A_prev, W, b, activation):
    """
    Implement the forward propagation for the LINEAR->ACTIVATION layer

    Arguments:
    A_prev -- activations from previous layer (or input data): (size of previous layer, number of examples)
    W -- weights matrix: numpy array of shape (size of current layer, size of previous layer)
    b -- bias vector, numpy array of shape (size of the current layer, 1)
    activation -- the activation to be used in this layer, stored as a text string: "sigmoid" or "relu"

    Returns:
    A -- the output of the activation function, also called the post-activation value
    cache -- a python tuple containing "linear_cache" and "activation_cache";
             stored for computing the backward pass efficiently

    Raises:
    ValueError -- if `activation` is not "sigmoid" or "relu"
    """
    # Fail fast on a bad activation name. Previously an unknown string fell
    # through both branches and crashed later with a NameError on `A`.
    if activation not in ("sigmoid", "relu"):
        raise ValueError('activation must be "sigmoid" or "relu", got: ' + str(activation))

    # The linear step Z = W.A_prev + b is identical for both activations; only g(.) differs.
    Z, linear_cache = linear_forward(A_prev, W, b)
    if activation == "sigmoid":
        A, activation_cache = sigmoid(Z)
    else:  # "relu"
        A, activation_cache = relu(Z)

    assert (A.shape == (W.shape[0], A_prev.shape[1]))
    cache = (linear_cache, activation_cache)

    return A, cache
# Smoke test exercising both activation branches.
# NOTE(review): linear_activation_forward_test_case is a course-provided helper
# defined elsewhere (not in this file).
A_prev, W, b = linear_activation_forward_test_case()
A, linear_activation_cache = linear_activation_forward(A_prev, W, b, activation = "sigmoid")
print("With sigmoid: A = " + str(A))
A, linear_activation_cache = linear_activation_forward(A_prev, W, b, activation = "relu")
print("With ReLU: A = " + str(A))
```
**Expected output**:
<table style="width:35%">
<tr>
<td> **With sigmoid: A ** </td>
<td > [[ 0.96076066 0.99961336]]</td>
</tr>
<tr>
<td> **With ReLU: A ** </td>
<td > [[ 3.1980455 7.85763489]]</td>
</tr>
</table>
**Note**: In deep learning, the "[LINEAR->ACTIVATION]" computation is counted as a single layer in the neural network, not two layers.
### d) L-Layer Model
For even more convenience when implementing the $L$-layer Neural Net, you will need a function that replicates the previous one (`linear_activation_forward` with RELU) $L-1$ times, then follows that with one `linear_activation_forward` with SIGMOID.
<img src="images/model_architecture_kiank.png" style="width:600px;height:300px;">
<caption><center> **Figure 2** : *[LINEAR -> RELU] $\times$ (L-1) -> LINEAR -> SIGMOID* model</center></caption><br>
**Exercise**: Implement the forward propagation of the above model.
**Instruction**: In the code below, the variable `AL` will denote $A^{[L]} = \sigma(Z^{[L]}) = \sigma(W^{[L]} A^{[L-1]} + b^{[L]})$. (This is sometimes also called `Yhat`, i.e., this is $\hat{Y}$.)
**Tips**:
- Use the functions you had previously written
- Use a for loop to replicate [LINEAR->RELU] (L-1) times
- Don't forget to keep track of the caches in the "caches" list. To add a new value `c` to a `list`, you can use `list.append(c)`.
```
# GRADED FUNCTION: L_model_forward
def L_model_forward(X, parameters):
    """
    Implement forward propagation for the [LINEAR->RELU]*(L-1)->LINEAR->SIGMOID computation

    Arguments:
    X -- data, numpy array of shape (input size, number of examples)
    parameters -- output of initialize_parameters_deep()

    Returns:
    AL -- last post-activation value
    caches -- list of caches containing:
              every cache of linear_activation_forward() with relu (there are L-1 of them, indexed from 0 to L-2)
              the cache of linear_activation_forward() with sigmoid (there is one, indexed L-1)
    """
    caches = []
    A = X
    # Each layer owns exactly one W and one b, hence the // 2.
    L = len(parameters) // 2

    # Hidden layers: [LINEAR -> RELU] repeated L-1 times, recording each cache.
    for layer in range(1, L):
        A, layer_cache = linear_activation_forward(A,
                                                   parameters['W' + str(layer)],
                                                   parameters['b' + str(layer)],
                                                   activation='relu')
        caches.append(layer_cache)

    # Output layer: LINEAR -> SIGMOID, producing the prediction row vector.
    AL, layer_cache = linear_activation_forward(A,
                                                parameters['W' + str(L)],
                                                parameters['b' + str(L)],
                                                activation='sigmoid')
    caches.append(layer_cache)

    assert(AL.shape == (1, X.shape[1]))
    return AL, caches
# Smoke test for the full forward pass.
# NOTE(review): L_model_forward_test_case is a course-provided helper defined
# elsewhere (not in this file).
X, parameters = L_model_forward_test_case()
AL, caches = L_model_forward(X, parameters)
print("AL = " + str(AL))
print("Length of caches list = " + str(len(caches)))
```
<table style="width:40%">
<tr>
<td> **AL** </td>
<td > [[ 0.0844367 0.92356858]]</td>
</tr>
<tr>
<td> **Length of caches list ** </td>
<td > 2</td>
</tr>
</table>
Great! Now you have a full forward propagation that takes the input X and outputs a row vector $A^{[L]}$ containing your predictions. It also records all intermediate values in "caches". Using $A^{[L]}$, you can compute the cost of your predictions.
## 5 - Cost function
Now you will implement forward and backward propagation. You need to compute the cost, because you want to check if your model is actually learning.
**Exercise**: Compute the cross-entropy cost $J$, using the following formula: $$-\frac{1}{m} \sum\limits_{i = 1}^{m} (y^{(i)}\log\left(a^{[L] (i)}\right) + (1-y^{(i)})\log\left(1- a^{[L](i)}\right)) \tag{7}$$
```
# GRADED FUNCTION: compute_cost
def compute_cost(AL, Y):
    """
    Implement the cost function defined by equation (7).

    Arguments:
    AL -- probability vector corresponding to your label predictions, shape (1, number of examples)
    Y -- true "label" vector (for example: containing 0 if non-cat, 1 if cat), shape (1, number of examples)

    Returns:
    cost -- cross-entropy cost
    """
    m = Y.shape[1]

    # Per-example cross-entropy terms, summed over the m columns then averaged.
    log_probs = Y * np.log(AL) + (1 - Y) * np.log(1 - AL)
    cost = -np.sum(log_probs) / m

    # np.squeeze guarantees a 0-d result (e.g. turns [[17]] into 17).
    cost = np.squeeze(cost)
    assert(cost.shape == ())

    return cost
# Smoke test for compute_cost.
# NOTE(review): compute_cost_test_case is a course-provided helper defined
# elsewhere (not in this file).
Y, AL = compute_cost_test_case()
print("cost = " + str(compute_cost(AL, Y)))
```
**Expected Output**:
<table>
<tr>
<td>**cost** </td>
<td> 0.41493159961539694</td>
</tr>
</table>
## 6 - Backward propagation module
Just like with forward propagation, you will implement helper functions for backpropagation. Remember that back propagation is used to calculate the gradient of the loss function with respect to the parameters.
**Reminder**:
<img src="images/backprop_kiank.png" style="width:650px;height:250px;">
<caption><center> **Figure 3** : Forward and Backward propagation for *LINEAR->RELU->LINEAR->SIGMOID* <br> *The purple blocks represent the forward propagation, and the red blocks represent the backward propagation.* </center></caption>
<!--
For those of you who are expert in calculus (you don't need to be to do this assignment), the chain rule of calculus can be used to derive the derivative of the loss $\mathcal{L}$ with respect to $z^{[1]}$ in a 2-layer network as follows:
$$\frac{d \mathcal{L}(a^{[2]},y)}{{dz^{[1]}}} = \frac{d\mathcal{L}(a^{[2]},y)}{{da^{[2]}}}\frac{{da^{[2]}}}{{dz^{[2]}}}\frac{{dz^{[2]}}}{{da^{[1]}}}\frac{{da^{[1]}}}{{dz^{[1]}}} \tag{8} $$
In order to calculate the gradient $dW^{[1]} = \frac{\partial L}{\partial W^{[1]}}$, you use the previous chain rule and you do $dW^{[1]} = dz^{[1]} \times \frac{\partial z^{[1]} }{\partial W^{[1]}}$. During the backpropagation, at each step you multiply your current gradient by the gradient corresponding to the specific layer to get the gradient you wanted.
Equivalently, in order to calculate the gradient $db^{[1]} = \frac{\partial L}{\partial b^{[1]}}$, you use the previous chain rule and you do $db^{[1]} = dz^{[1]} \times \frac{\partial z^{[1]} }{\partial b^{[1]}}$.
This is why we talk about **backpropagation**.
-->
Now, similar to forward propagation, you are going to build the backward propagation in three steps:
- LINEAR backward
- LINEAR -> ACTIVATION backward where ACTIVATION computes the derivative of either the ReLU or sigmoid activation
- [LINEAR -> RELU] $\times$ (L-1) -> LINEAR -> SIGMOID backward (whole model)
### 6.1 - Linear backward
For layer $l$, the linear part is: $Z^{[l]} = W^{[l]} A^{[l-1]} + b^{[l]}$ (followed by an activation).
Suppose you have already calculated the derivative $dZ^{[l]} = \frac{\partial \mathcal{L} }{\partial Z^{[l]}}$. You want to get $(dW^{[l]}, db^{[l]}, dA^{[l-1]})$.
<img src="images/linearback_kiank.png" style="width:250px;height:300px;">
<caption><center> **Figure 4** </center></caption>
The three outputs $(dW^{[l]}, db^{[l]}, dA^{[l-1]})$ are computed using the input $dZ^{[l]}$. Here are the formulas you need:
$$ dW^{[l]} = \frac{\partial \mathcal{L} }{\partial W^{[l]}} = \frac{1}{m} dZ^{[l]} A^{[l-1] T} \tag{8}$$
$$ db^{[l]} = \frac{\partial \mathcal{L} }{\partial b^{[l]}} = \frac{1}{m} \sum_{i = 1}^{m} dZ^{[l](i)}\tag{9}$$
$$ dA^{[l-1]} = \frac{\partial \mathcal{L} }{\partial A^{[l-1]}} = W^{[l] T} dZ^{[l]} \tag{10}$$
**Exercise**: Use the 3 formulas above to implement linear_backward().
```
# GRADED FUNCTION: linear_backward
def linear_backward(dZ, cache):
    """
    Implement the linear portion of backward propagation for a single layer (layer l)

    Arguments:
    dZ -- Gradient of the cost with respect to the linear output (of current layer l)
    cache -- tuple of values (A_prev, W, b) coming from the forward propagation in the current layer

    Returns:
    dA_prev -- Gradient of the cost with respect to the activation (of the previous layer l-1), same shape as A_prev
    dW -- Gradient of the cost with respect to W (current layer l), same shape as W
    db -- Gradient of the cost with respect to b (current layer l), same shape as b
    """
    A_prev, W, b = cache
    m = A_prev.shape[1]

    # Formulas (8)-(10): dW = dZ . A_prev^T / m, db = row-sum of dZ / m, dA_prev = W^T . dZ
    dW = np.dot(dZ, A_prev.T) / m
    # keepdims=True preserves the (n, 1) column shape so db matches b, as the
    # docstring promises. The previous np.squeeze collapsed db to a scalar —
    # and to a 1-D array for layers with more than one unit, which then made
    # the old `isinstance(db, float)` assert raise.
    db = np.sum(dZ, axis=1, keepdims=True) / m
    dA_prev = np.dot(W.T, dZ)

    assert (dA_prev.shape == A_prev.shape)
    assert (dW.shape == W.shape)
    assert (db.shape == b.shape)

    return dA_prev, dW, db
# Set up some test inputs
# NOTE(review): linear_backward_test_case is a course-provided helper defined
# elsewhere (not in this file).
dZ, linear_cache = linear_backward_test_case()
dA_prev, dW, db = linear_backward(dZ, linear_cache)
print ("dA_prev = "+ str(dA_prev))
print ("dW = " + str(dW))
print ("db = " + str(db))
```
**Expected Output**:
<table style="width:90%">
<tr>
<td> **dA_prev** </td>
<td > [[ 2.38272385 5.85438014]
[ 6.31969219 15.52755701]
[ -3.97876302 -9.77586689]] </td>
</tr>
<tr>
<td> **dW** </td>
<td > [[ 2.77870358 -0.05500058 -5.13144969]] </td>
</tr>
<tr>
<td> **db** </td>
<td> 5.527840195 </td>
</tr>
</table>
### 6.2 - Linear-Activation backward
Next, you will create a function that merges the two helper functions: **`linear_backward`** and the backward step for the activation **`linear_activation_backward`**.
To help you implement `linear_activation_backward`, we provided two backward functions:
- **`sigmoid_backward`**: Implements the backward propagation for SIGMOID unit. You can call it as follows:
```python
dZ = sigmoid_backward(dA, activation_cache)
```
- **`relu_backward`**: Implements the backward propagation for RELU unit. You can call it as follows:
```python
dZ = relu_backward(dA, activation_cache)
```
If $g(.)$ is the activation function,
`sigmoid_backward` and `relu_backward` compute $$dZ^{[l]} = dA^{[l]} * g'(Z^{[l]}) \tag{11}$$.
**Exercise**: Implement the backpropagation for the *LINEAR->ACTIVATION* layer.
```
# GRADED FUNCTION: linear_activation_backward
def linear_activation_backward(dA, cache, activation):
    """
    Implement the backward propagation for the LINEAR->ACTIVATION layer.

    Arguments:
    dA -- post-activation gradient for current layer l
    cache -- tuple of values (linear_cache, activation_cache) we store for computing backward propagation efficiently
    activation -- the activation to be used in this layer, stored as a text string: "sigmoid" or "relu"

    Returns:
    dA_prev -- Gradient of the cost with respect to the activation (of the previous layer l-1), same shape as A_prev
    dW -- Gradient of the cost with respect to W (current layer l), same shape as W
    db -- Gradient of the cost with respect to b (current layer l), same shape as b

    Raises:
    ValueError -- if `activation` is not "sigmoid" or "relu"
    """
    # Fail fast on a bad activation name. Previously an unknown string fell
    # through both branches and crashed later with a NameError on `dZ`.
    if activation not in ("relu", "sigmoid"):
        raise ValueError('activation must be "sigmoid" or "relu", got: ' + str(activation))

    linear_cache, activation_cache = cache

    # Undo the activation first (dZ = dA * g'(Z)), then undo the linear step.
    if activation == "relu":
        dZ = relu_backward(dA, activation_cache)
    else:  # "sigmoid"
        dZ = sigmoid_backward(dA, activation_cache)

    dA_prev, dW, db = linear_backward(dZ, linear_cache)

    return dA_prev, dW, db
# Smoke test exercising both backward branches.
# NOTE(review): linear_activation_backward_test_case is a course-provided helper
# defined elsewhere (not in this file).
AL, linear_activation_cache = linear_activation_backward_test_case()
dA_prev, dW, db = linear_activation_backward(AL, linear_activation_cache, activation = "sigmoid")
print ("sigmoid:")
print ("dA_prev = "+ str(dA_prev))
print ("dW = " + str(dW))
print ("db = " + str(db) + "\n")
dA_prev, dW, db = linear_activation_backward(AL, linear_activation_cache, activation = "relu")
print ("relu:")
print ("dA_prev = "+ str(dA_prev))
print ("dW = " + str(dW))
print ("db = " + str(db))
```
**Expected output with sigmoid:**
<table style="width:100%">
<tr>
<td > dA_prev </td>
<td >[[ 0.08982777 0.00226265]
[ 0.23824996 0.00600122]
[-0.14999783 -0.00377826]] </td>
</tr>
<tr>
<td > dW </td>
<td > [[-0.06001514 -0.09687383 -0.10598695]] </td>
</tr>
<tr>
<td > db </td>
<td > 0.061800984273 </td>
</tr>
</table>
**Expected output with relu**
<table style="width:100%">
<tr>
<td > dA_prev </td>
<td > [[ 2.38272385 5.85438014]
[ 6.31969219 15.52755701]
[ -3.97876302 -9.77586689]] </td>
</tr>
<tr>
<td > dW </td>
<td > [[ 2.77870358 -0.05500058 -5.13144969]] </td>
</tr>
<tr>
<td > db </td>
<td > 5.527840195 </td>
</tr>
</table>
### 6.3 - L-Model Backward
Now you will implement the backward function for the whole network. Recall that when you implemented the `L_model_forward` function, at each iteration, you stored a cache which contains (X,W,b, and z). In the back propagation module, you will use those variables to compute the gradients. Therefore, in the `L_model_backward` function, you will iterate through all the hidden layers backward, starting from layer $L$. On each step, you will use the cached values for layer $l$ to backpropagate through layer $l$. Figure 5 below shows the backward pass.
<img src="images/mn_backward.png" style="width:450px;height:300px;">
<caption><center> **Figure 5** : Backward pass </center></caption>
**Initializing backpropagation**:
To backpropagate through this network, we know that the output is,
$A^{[L]} = \sigma(Z^{[L]})$. Your code thus needs to compute `dAL` $= \frac{\partial \mathcal{L}}{\partial A^{[L]}}$.
To do so, use this formula (derived using calculus which you don't need in-depth knowledge of):
```python
dAL = - (np.divide(Y, AL) - np.divide(1 - Y, 1 - AL)) # derivative of cost with respect to AL
```
You can then use this post-activation gradient `dAL` to keep going backward. As seen in Figure 5, you can now feed in `dAL` into the LINEAR->SIGMOID backward function you implemented (which will use the cached values stored by the L_model_forward function). After that, you will have to use a `for` loop to iterate through all the other layers using the LINEAR->RELU backward function. You should store each dA, dW, and db in the grads dictionary. To do so, use this formula :
$$grads["dW" + str(l)] = dW^{[l]}\tag{15} $$
For example, for $l=3$ this would store $dW^{[l]}$ in `grads["dW3"]`.
**Exercise**: Implement backpropagation for the *[LINEAR->RELU] $\times$ (L-1) -> LINEAR -> SIGMOID* model.
```
# GRADED FUNCTION: L_model_backward
def L_model_backward(AL, Y, caches):
    """
    Implement the backward propagation for the [LINEAR->RELU] * (L-1) -> LINEAR -> SIGMOID group

    Arguments:
    AL -- probability vector, output of the forward propagation (L_model_forward())
    Y -- true "label" vector (containing 0 if non-cat, 1 if cat)
    caches -- list of caches containing:
              every cache of linear_activation_forward() with "relu" (it's caches[l], for l in range(L-1) i.e l = 0...L-2)
              the cache of linear_activation_forward() with "sigmoid" (it's caches[L-1])

    Returns:
    grads -- A dictionary with the gradients
             grads["dA" + str(l)] = ...
             grads["dW" + str(l)] = ...
             grads["db" + str(l)] = ...
    """
    grads = {}
    L = len(caches)  # the number of layers
    m = AL.shape[1]
    Y = Y.reshape(AL.shape)  # after this line, Y is the same shape as AL

    # Initializing the backpropagation: derivative of the cross-entropy cost
    # w.r.t. AL. (Fixed: was the doubled statement `dAL = dAL = -(...)`.)
    dAL = - (np.divide(Y, AL) - np.divide(1 - Y, 1 - AL))

    # Lth layer (SIGMOID -> LINEAR) gradients.
    linear_cache, activation_cache = caches[-1]
    grads["dA" + str(L)], grads["dW" + str(L)], grads["db" + str(L)] = linear_backward(
        sigmoid_backward(dAL, activation_cache), linear_cache)

    # Hidden layers (RELU -> LINEAR) gradients, walking backward from layer L-1 down to 1.
    for l in reversed(range(L - 1)):
        # Fixed: the original re-used sigmoid_backward, the stale output gradient
        # `dAL`, and the fixed caches[0]/caches[1] on every iteration. Each hidden
        # layer must apply relu_backward to the gradient propagated from layer
        # l+2, using this layer's own cache (caches[l]).
        linear_cache, activation_cache = caches[l]
        dA_prev_temp, dW_temp, db_temp = linear_backward(
            relu_backward(grads["dA" + str(l + 2)], activation_cache), linear_cache)
        grads["dA" + str(l + 1)] = dA_prev_temp
        grads["dW" + str(l + 1)] = dW_temp
        grads["db" + str(l + 1)] = db_temp

    return grads
# Smoke test for the full backward pass.
# NOTE(review): L_model_backward_test_case is a course-provided helper defined
# elsewhere (not in this file).
X_assess, Y_assess, AL, caches = L_model_backward_test_case()
grads = L_model_backward(AL, Y_assess, caches)
print ("dW1 = "+ str(grads["dW1"]))
print ("db1 = "+ str(grads["db1"]))
print ("dA1 = "+ str(grads["dA1"]))
```
**Expected Output**
<table style="width:60%">
<tr>
<td > dW1 </td>
<td > [[-0.09686122 -0.04840482 -0.11864308]] </td>
</tr>
<tr>
<td > db1 </td>
<td > -0.262594998379 </td>
</tr>
<tr>
<td > dA1 </td>
<td > [[-0.71011462 -0.22925516]
[-0.17330152 -0.05594909]
[-0.03831107 -0.01236844]] </td>
</tr>
</table>
### 6.4 - Update Parameters
In this section you will update the parameters of the model, using gradient descent:
$$ W^{[l]} = W^{[l]} - \alpha \text{ } dW^{[l]} \tag{16}$$
$$ b^{[l]} = b^{[l]} - \alpha \text{ } db^{[l]} \tag{17}$$
where $\alpha$ is the learning rate. After computing the updated parameters, store them in the parameters dictionary.
**Exercise**: Implement `update_parameters()` to update your parameters using gradient descent.
**Instructions**:
Update parameters using gradient descent on every $W^{[l]}$ and $b^{[l]}$ for $l = 1, 2, ..., L$.
```
# GRADED FUNCTION: update_parameters
def update_parameters(parameters, grads, learning_rate):
    """
    Update parameters using one step of gradient descent

    Arguments:
    parameters -- python dictionary containing your parameters
    grads -- python dictionary containing your gradients, output of L_model_backward
    learning_rate -- step size alpha used in the update rule

    Returns:
    parameters -- python dictionary containing your updated parameters
                  parameters["W" + str(l)] = ...
                  parameters["b" + str(l)] = ...
    """
    L = len(parameters) // 2  # number of layers in the neural network

    # Rules (16)-(17): theta <- theta - alpha * d_theta, applied to every W and b.
    for layer in range(1, L + 1):
        parameters["W" + str(layer)] = parameters["W" + str(layer)] - learning_rate * grads["dW" + str(layer)]
        parameters["b" + str(layer)] = parameters["b" + str(layer)] - learning_rate * grads["db" + str(layer)]

    return parameters
# Smoke test for the gradient-descent update.
# NOTE(review): update_parameters_test_case is a course-provided helper defined
# elsewhere (not in this file).
parameters, grads = update_parameters_test_case()
parameters = update_parameters(parameters, grads, 0.1)
print ("W1 = " + str(parameters["W1"]))
print ("b1 = " + str(parameters["b1"]))
print ("W2 = " + str(parameters["W2"]))
print ("b2 = " + str(parameters["b2"]))
print ("W3 = " + str(parameters["W3"]))
print ("b3 = " + str(parameters["b3"]))
```
**Expected Output**:
<table style="width:100%">
<tr>
<td > W1 </td>
<td > [[ 1.72555789 0.3700272 0.07818896]
[-1.8634927 -0.2773882 -0.35475898]
[-0.08274148 -0.62700068 -0.04381817]
[-0.47721803 -1.31386475 0.88462238]] </td>
</tr>
<tr>
<td > b1 </td>
<td > [[-0.07593768]
[-0.07593768]
[-0.07593768]
[-0.07593768]] </td>
</tr>
<tr>
<td > W2 </td>
<td > [[ 0.71838378 1.70957306 0.05003364 -0.40467741]
[-0.54535995 -1.54647732 0.98236743 -1.10106763]
[-1.18504653 -0.2056499 1.48614836 0.23671627]] </td>
</tr>
<tr>
<td > b2 </td>
<td > [[-0.08616376]
[-0.08616376]
[-0.08616376]] </td>
</tr>
<tr>
<td > W3 </td>
<td > [[-0.88352436 -0.7129932 0.62524497]
[-0.02025258 -0.76883635 -0.23003072]] </td>
</tr>
<tr>
<td > b3 </td>
<td > [[ 0.08416196]
[ 0.08416196]] </td>
</tr>
</table>
## 7 - Conclusion
Congrats on implementing all the functions required for building a deep neural network!
We know it was a long assignment but going forward it will only get better. The next part of the assignment is easier.
In the next assignment you will put all these together to build two models:
- A two-layer neural network
- An L-layer neural network
You will in fact use these models to classify cat vs non-cat images!
| github_jupyter |
# Part 1: Packaging and Uploading your Algorithm for use with Amazon SageMaker
### An overview of Docker
If you're familiar with Docker already, you can skip ahead to the next section.
For many data scientists, Docker containers are a new technology. But they are not difficult and can significantly simplify the deployment of your software packages.
Docker provides a simple way to package arbitrary code into an _image_ that is totally self-contained. Once you have an image, you can use Docker to run a _container_ based on that image. Running a container is just like running a program on the machine except that the container creates a fully self-contained environment for the program to run. Containers are isolated from each other and from the host environment, so the way your program is set up is the way it runs, no matter where you run it.
Docker is more powerful than environment managers like conda or virtualenv because (a) it is completely language independent and (b) it comprises your whole operating environment, including startup commands, and environment variable.
A Docker container is like a virtual machine, but it is much lighter weight. For example, a program running in a container can start in less than a second and many containers can run simultaneously on the same physical or virtual machine instance.
Docker uses a simple file called a `Dockerfile` to specify how the image is assembled. An example is provided below. You can build your Docker images based on Docker images built by yourself or by others, which can simplify things quite a bit.
Docker has become very popular in programming and devops communities due to its flexibility and its well-defined specification of how code can be run in its containers. It is the underpinning of many services built in the past few years, such as [Amazon ECS].
Amazon SageMaker uses Docker to allow users to train and deploy arbitrary algorithms.
In Amazon SageMaker, Docker containers are invoked in one way for training and another, slightly different, way for hosting. The following sections outline how to build containers for the SageMaker environment.
Some helpful links:
* [Docker home page](http://www.docker.com)
* [Getting started with Docker](https://docs.docker.com/get-started/)
* [Dockerfile reference](https://docs.docker.com/engine/reference/builder/)
* [`docker run` reference](https://docs.docker.com/engine/reference/run/)
[Amazon ECS]: https://aws.amazon.com/ecs/
### How Amazon SageMaker runs your Docker container
Because you can run the same image in training or hosting, Amazon SageMaker runs your container with the argument `train` or `serve`. How your container processes this argument depends on the container. All SageMaker deep learning framework containers already cover this requirement and will trigger your defined training algorithm and inference code.
* If you specify a program as an `ENTRYPOINT` in the Dockerfile, that program will be run at startup and its first argument will be `train` or `serve`. The program can then look at that argument and decide what to do. The original `ENTRYPOINT` specified within the SageMaker PyTorch is [here](https://github.com/aws/sagemaker-pytorch-container/blob/master/docker/0.4.0/final/Dockerfile.cpu#L18).
#### Running your container during training
Currently, our SageMaker PyTorch container utilizes [console_scripts](http://python-packaging.readthedocs.io/en/latest/command-line-scripts.html#the-console-scripts-entry-point) to make use of the `train` command issued at training time. The line that gets invoked during `train` is defined within the setup.py file inside [SageMaker Containers](https://github.com/aws/sagemaker-containers/blob/master/setup.py#L48), our common SageMaker deep learning container framework. When this command is run, it will invoke the [trainer class](https://github.com/aws/sagemaker-containers/blob/master/src/sagemaker_containers/cli/train.py) to run, which will finally invoke our [PyTorch container code](https://github.com/aws/sagemaker-pytorch-container/blob/master/src/sagemaker_pytorch_container/training.py) to run your Python file.
A number of files are laid out for your use, under the `/opt/ml` directory:
/opt/ml
|-- input
| |-- config
| | |-- hyperparameters.json
| | `-- resourceConfig.json
| `-- data
| `-- <channel_name>
| `-- <input data>
|-- model
| `-- <model files>
`-- output
`-- failure
##### The input
* `/opt/ml/input/config` contains information to control how your program runs. `hyperparameters.json` is a JSON-formatted dictionary of hyperparameter names to values. These values are always strings, so you may need to convert them. `resourceConfig.json` is a JSON-formatted file that describes the network layout used for distributed training.
* `/opt/ml/input/data/<channel_name>/` (for File mode) contains the input data for that channel. The channels are created based on the call to CreateTrainingJob but it's generally important that channels match algorithm expectations. The files for each channel are copied from S3 to this directory, preserving the tree structure indicated by the S3 key structure.
* `/opt/ml/input/data/<channel_name>_<epoch_number>` (for Pipe mode) is the pipe for a given epoch. Epochs start at zero and go up by one each time you read them. There is no limit to the number of epochs that you can run, but you must close each pipe before reading the next epoch.
##### The output
* `/opt/ml/model/` is the directory where you write the model that your algorithm generates. Your model can be in any format that you want. It can be a single file or a whole directory tree. SageMaker packages any files in this directory into a compressed tar archive file. This file is made available at the S3 location returned in the `DescribeTrainingJob` result.
* `/opt/ml/output` is a directory where the algorithm can write a file `failure` that describes why the job failed. The contents of this file are returned in the `FailureReason` field of the `DescribeTrainingJob` result. For jobs that succeed, there is no reason to write this file as it is ignored.
### The parts of the sample container
The `container` directory has all the components you need to extend the SageMaker PyTorch CPU or GPU container to use as a sample algorithm.
.
|-- build_and_push.sh
|-- Dockerfile-cpu
|-- Dockerfile-gpu
|-- requirements.txt
`-- src
`-- train.py
-- [Python Modules]
Let's discuss each of these in turn:
* __`build_and_push.sh`__ is a script that uses the Dockerfile to build your container images and then pushes it to ECR. We invoke the commands directly later in this notebook, but you can just copy and run the script for your own algorithms.
* __`src`__ is the directory which contains our user code to be invoked.
* __`train.py`__ is the interface to SageMaker.
* __`Dockerfile-cpu`__ describes how to build your Docker container image. More details are provided below.
* __`Dockerfile-gpu`__ builds the GPU image.
### The Dockerfile
The Dockerfile describes the image that we want to build. You can think of it as describing the complete operating system installation of the system that you want to run. A Docker container running is quite a bit lighter than a full operating system, however, because it takes advantage of Linux on the host machine for the basic operations.
We start from the SageMaker PyTorch image as the base. The base image is an ECR image, so it will have the following pattern.
* {account}.dkr.ecr.{region}.amazonaws.com/sagemaker-{framework}:{framework_version}-{processor_type}-{python_version}
Here is an explanation of each field.
1. account - AWS account ID the ECR image belongs to. Our public deep learning framework images are all under the 520713654638 account.
2. region - The region the ECR image belongs to. [Available regions](https://aws.amazon.com/about-aws/global-infrastructure/regional-product-services/).
3. framework - The deep learning framework.
4. framework_version - The version of the deep learning framework.
5. processor_type - CPU or GPU.
6. python_version - The supported version of Python.
So the SageMaker PyTorch ECR image would be:
520713654638.dkr.ecr.us-west-2.amazonaws.com/sagemaker-pytorch:0.4.0-cpu-py3
Information on supported frameworks and versions can be found in this [README](https://github.com/aws/sagemaker-python-sdk).
Next, we add the code that implements our specific algorithm to the container and set up the right environment for it to run under.
Finally, we need to specify two environment variables.
1. SAGEMAKER_SUBMIT_DIRECTORY - the directory within the container containing our Python script for training and inference.
2. SAGEMAKER_PROGRAM - the Python script that should be invoked for training and inference.
Let's look at the Dockerfile for this example.
### Building and registering the container
The `build-and-push.sh` script builds the container image using `docker build` and pushes it to ECR using `docker push`.
If the `gpu` argument is passed to `build-and-push.sh` the GPU Docker file is used to create the GPU instance. Otherwise the CPU instance is created.
This code looks for an ECR repository in the account you're using and the current default region (if you're using a SageMaker notebook instance, this is the region where the notebook instance was created). If the repository doesn't exist, the script will create it. In addition, since we are using the SageMaker PyTorch image as the base, we will need to retrieve ECR credentials to pull this public image.
```
!./container/build_and_push.sh
```
## Testing your algorithm on your local machine
When you're packaging your first algorithm to use with Amazon SageMaker, you probably want to test it yourself to make sure it's working correctly. We use the [SageMaker Python SDK](https://github.com/aws/sagemaker-python-sdk) to test both locally and on SageMaker. For more examples with the SageMaker Python SDK, see [Amazon SageMaker Examples](https://github.com/awslabs/amazon-sagemaker-examples/tree/master/sagemaker-python-sdk). In order to test our algorithm, we need our dataset.
## SageMaker Python SDK Local Training
To represent our training, we use the Estimator class, which needs to be configured in five steps.
1. IAM role - our AWS execution role
2. train_instance_count - number of instances to use for training.
3. train_instance_type - type of instance to use for training. For training locally, we specify `local`.
4. image_name - our custom PyTorch Docker image we created.
5. hyperparameters - hyperparameters we want to pass.
Let's start with setting up our IAM role. We make use of a helper function within the Python SDK. This function throws an exception when run outside of a SageMaker notebook instance, as it gets metadata from the notebook instance.
### Setup Notebook for local execution
```
!/bin/bash ./utils/setup.sh
```
### Training the Reinforcement Learning Model Locally
Note we are only training for 200 iterations, which is too few to see any increase in the average score. We are purely checking for mechanical errors.
```
from sagemaker.estimator import Estimator
from sagemaker import get_execution_role
role = get_execution_role()
estimator = Estimator(role=role,
train_instance_count=1,
train_instance_type='local',
image_name='sagemaker-tennis-cpu:latest',
hyperparameters={'epochs': 200})
estimator.fit()
```
## Training on SageMaker
Training a model on SageMaker with the Python SDK is done in a way that is similar to the way we trained it locally. This is done by changing our train_instance_type from `local` to one of the [supported EC2 instance types](https://aws.amazon.com/sagemaker/pricing/instance-types/).
### Locate the ECR image just built and pushed
```
import boto3
client = boto3.client('sts')
account = client.get_caller_identity()['Account']
region = boto3.Session().region_name
ecr_image = '{}.dkr.ecr.{}.amazonaws.com/sagemaker-tennis-cpu:latest'.format(account, region)
print(ecr_image)
```
### Submit the training job
```
from sagemaker.estimator import Estimator
estimator = Estimator(role=role,
train_instance_count=1,
train_instance_type='ml.m4.xlarge',
image_name=ecr_image,
hyperparameters={'epochs': 200})
estimator.fit()
```
### Get the results
#### Get the bucket name
```
from sagemaker.session import Session
sagemaker_session = Session()
bucket = sagemaker_session.default_bucket()
job_name = estimator._current_job_name
print(bucket)
print(job_name)
```
#### Copy and unpack the result archive
```
import shutil
s3 = boto3.resource('s3')
key = '{}/output/output.tar.gz'.format(estimator._current_job_name)
print(key)
s3.Bucket(bucket).download_file(key, 'output.tar.gz')
shutil.unpack_archive('output.tar.gz')
from IPython.display import Image
Image(filename='scores.png')
```
As expected the above image isn't very interesting since we only ran for 200 iterations. Here is the same result when we ran for 2000.

# Reference
- [SageMaker Example: Extending PyTorch Container](https://github.com/awslabs/amazon-sagemaker-examples/tree/master/advanced_functionality/pytorch_extending_our_containers)
- [How Amazon SageMaker interacts with your Docker container for training](https://docs.aws.amazon.com/sagemaker/latest/dg/your-algorithms-training-algo.html)
- [SageMaker Python SDK](https://github.com/aws/sagemaker-python-sdk)
- [Dockerfile](https://docs.docker.com/engine/reference/builder/)
- [scikit-bring-your-own](https://github.com/awslabs/amazon-sagemaker-examples/blob/master/advanced_functionality/scikit_bring_your_own/scikit_bring_your_own.ipynb)
- [SageMaker PyTorch container](https://github.com/aws/sagemaker-pytorch-container)
- [SageMaker Instance types](https://aws.amazon.com/sagemaker/pricing/instance-types/)
- [SageMaker Instance prices](https://aws.amazon.com/sagemaker/pricing/)
| github_jupyter |
```
# !pip3 install bert-tensorflow --user
# !wget https://storage.googleapis.com/bert_models/2018_11_23/multi_cased_L-12_H-768_A-12.zip
# !unzip multi_cased_L-12_H-768_A-12.zip
import bert
from bert import run_classifier
from bert import optimization
from bert import tokenization
from bert import modeling
import numpy as np
import tensorflow as tf
from tqdm import tqdm
import json
# Load the pre-selected topic dataset: X = raw texts, Y = per-text labels.
with open('selected-topics.json') as fopen:
    x = json.load(fopen)
texts = x['X']
labels = x['Y']
MAX_SEQ_LENGTH = 100
# Paths into the unzipped multilingual cased BERT-Base checkpoint
# (downloaded/unzipped by the commented-out commands above).
BERT_VOCAB = 'multi_cased_L-12_H-768_A-12/vocab.txt'
BERT_INIT_CHKPNT = 'multi_cased_L-12_H-768_A-12/bert_model.ckpt'
BERT_CONFIG = 'multi_cased_L-12_H-768_A-12/bert_config.json'
tokenization.validate_case_matches_checkpoint(False, '')
# do_lower_case=False because this is the *cased* BERT model.
tokenizer = tokenization.FullTokenizer(
    vocab_file=BERT_VOCAB, do_lower_case=False)
tokenizer.tokenize(texts[1])
# Convert every raw text into fixed-length BERT inputs: token ids,
# attention mask and segment ids, each padded to MAX_SEQ_LENGTH.
input_ids, input_masks, segment_ids = [], [], []
for text in tqdm(texts):
    tokens_a = tokenizer.tokenize(text)
    # Reserve two positions for the special [CLS] and [SEP] tokens.
    if len(tokens_a) > MAX_SEQ_LENGTH - 2:
        tokens_a = tokens_a[:(MAX_SEQ_LENGTH - 2)]
    tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
    # Single-sentence task: every token belongs to segment 0.
    segment_id = [0] * len(tokens)
    input_id = tokenizer.convert_tokens_to_ids(tokens)
    # 1 marks a real token; the zero padding appended below is masked out.
    input_mask = [1] * len(input_id)
    padding = [0] * (MAX_SEQ_LENGTH - len(input_id))
    input_id += padding
    input_mask += padding
    segment_id += padding
    input_ids.append(input_id)
    input_masks.append(input_mask)
    segment_ids.append(segment_id)
bert_config = modeling.BertConfig.from_json_file(BERT_CONFIG)
epoch = 10
batch_size = 60
# Warm the learning rate up over the first 10% of all optimisation steps.
warmup_proportion = 0.1
num_train_steps = int(len(texts) / batch_size * epoch)
num_warmup_steps = int(num_train_steps * warmup_proportion)
class Model:
    """TF1 static graph for BERT sequence classification.

    Feeds token ids / masks / segment ids into a pre-trained BertModel,
    classifies from the pooled sentence output, and builds the training op
    via BERT's `optimization.create_optimizer`.  Reads the module-level
    `bert_config`, `num_train_steps` and `num_warmup_steps`.
    """

    def __init__(
        self,
        dimension_output,
        learning_rate = 2e-5,
    ):
        # Placeholders: [batch, seq_len] int inputs, [batch] int class labels.
        self.X = tf.placeholder(tf.int32, [None, None])
        self.segment_ids = tf.placeholder(tf.int32, [None, None])
        self.input_masks = tf.placeholder(tf.int32, [None, None])
        self.Y = tf.placeholder(tf.int32, [None])
        # is_training=True keeps dropout active in BERT: this graph is built
        # for fine-tuning (it is also reused for evaluation below).
        model = modeling.BertModel(
            config=bert_config,
            is_training=True,
            input_ids=self.X,
            input_mask=self.input_masks,
            token_type_ids=self.segment_ids,
            use_one_hot_embeddings=False)
        # One pooled vector per example, classified by a fresh dense head.
        output_layer = model.get_pooled_output()
        self.logits = tf.layers.dense(output_layer, dimension_output)
        # Named identity so the logits tensor can be located after export.
        self.logits = tf.identity(self.logits, name = 'logits')
        self.cost = tf.reduce_mean(
            tf.nn.sparse_softmax_cross_entropy_with_logits(
                logits = self.logits, labels = self.Y
            )
        )
        # Final False argument disables TPU-specific optimiser behaviour.
        self.optimizer = optimization.create_optimizer(self.cost, learning_rate,
                                                       num_train_steps, num_warmup_steps, False)
        correct_pred = tf.equal(
            tf.argmax(self.logits, 1, output_type = tf.int32), self.Y
        )
        self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# One output unit per distinct class in the dataset.
unique_labels = np.unique(labels)
dimension_output = len(unique_labels)
learning_rate = 1e-5
tf.reset_default_graph()
sess = tf.InteractiveSession()
model = Model(
    dimension_output,
    learning_rate
)
sess.run(tf.global_variables_initializer())
# Restore only the pre-trained weights under the 'bert' scope; the new
# classifier head keeps its fresh random initialisation.
var_lists = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope = 'bert')
saver = tf.train.Saver(var_list = var_lists)
saver.restore(sess, BERT_INIT_CHKPNT)
from sklearn.model_selection import train_test_split
# 80/20 train/test split over all three input arrays plus the labels.
train_input_ids, test_input_ids, train_input_masks, test_input_masks, train_segment_ids, test_segment_ids, train_Y, test_Y = train_test_split(
    input_ids, input_masks, segment_ids, labels, test_size = 0.2
)
from tqdm import tqdm
import time
# Early stopping: quit once validation accuracy has failed to improve for
# EARLY_STOPPING consecutive epochs.
EARLY_STOPPING, CURRENT_CHECKPOINT, CURRENT_ACC, EPOCH = 3, 0, 0, 0
while True:
    lasttime = time.time()
    if CURRENT_CHECKPOINT == EARLY_STOPPING:
        print('break epoch:%d\n' % (EPOCH))
        break
    train_acc, train_loss, test_acc, test_loss = 0, 0, 0, 0
    # --- training pass over mini-batches ---
    pbar = tqdm(
        range(0, len(train_input_ids), batch_size), desc = 'train minibatch loop'
    )
    for i in pbar:
        # Clamp the final (possibly short) batch to the end of the data.
        index = min(i + batch_size, len(train_input_ids))
        batch_x = train_input_ids[i: index]
        batch_masks = train_input_masks[i: index]
        batch_segment = train_segment_ids[i: index]
        batch_y = train_Y[i: index]
        # Running model.optimizer performs one gradient update.
        acc, cost, _ = sess.run(
            [model.accuracy, model.cost, model.optimizer],
            feed_dict = {
                model.Y: batch_y,
                model.X: batch_x,
                model.segment_ids: batch_segment,
                model.input_masks: batch_masks
            },
        )
        # Abort immediately if the loss diverges.
        assert not np.isnan(cost)
        train_loss += cost
        train_acc += acc
        pbar.set_postfix(cost = cost, accuracy = acc)
    # --- evaluation pass: no optimizer op, so no weight updates ---
    pbar = tqdm(range(0, len(test_input_ids), batch_size), desc = 'test minibatch loop')
    for i in pbar:
        index = min(i + batch_size, len(test_input_ids))
        batch_x = test_input_ids[i: index]
        batch_masks = test_input_masks[i: index]
        batch_segment = test_segment_ids[i: index]
        batch_y = test_Y[i: index]
        acc, cost = sess.run(
            [model.accuracy, model.cost],
            feed_dict = {
                model.Y: batch_y,
                model.X: batch_x,
                model.segment_ids: batch_segment,
                model.input_masks: batch_masks
            },
        )
        test_loss += cost
        test_acc += acc
        pbar.set_postfix(cost = cost, accuracy = acc)
    # Average the per-batch sums into per-epoch means.
    train_loss /= len(train_input_ids) / batch_size
    train_acc /= len(train_input_ids) / batch_size
    test_loss /= len(test_input_ids) / batch_size
    test_acc /= len(test_input_ids) / batch_size
    if test_acc > CURRENT_ACC:
        print(
            'epoch: %d, pass acc: %f, current acc: %f'
            % (EPOCH, CURRENT_ACC, test_acc)
        )
        CURRENT_ACC = test_acc
        # Reset the patience counter on every improvement.
        CURRENT_CHECKPOINT = 0
    else:
        CURRENT_CHECKPOINT += 1
    print('time taken:', time.time() - lasttime)
    print(
        'epoch: %d, training loss: %f, training acc: %f, valid loss: %f, valid acc: %f\n'
        % (EPOCH, train_loss, train_acc, test_loss, test_acc)
    )
    EPOCH += 1
# Final validation pass: collect predicted and true class indices for the
# classification report below.
real_Y, predict_Y = [], []
pbar = tqdm(
    range(0, len(test_input_ids), batch_size), desc = 'validation minibatch loop'
)
for i in pbar:
    index = min(i + batch_size, len(test_input_ids))
    batch_x = test_input_ids[i: index]
    batch_masks = test_input_masks[i: index]
    batch_segment = test_segment_ids[i: index]
    batch_y = test_Y[i: index]
    # argmax over the logits gives the predicted class id per example.
    predict_Y += np.argmax(sess.run(model.logits,
                                    feed_dict = {
                                        model.Y: batch_y,
                                        model.X: batch_x,
                                        model.segment_ids: batch_segment,
                                        model.input_masks: batch_masks
                                    },
                                    ), 1, ).tolist()
    real_Y += batch_y
# Human-readable class names for the report below.
# NOTE(review): this rebinds `labels`, which previously held the per-example
# targets.  The order here is assumed to match np.unique(labels) used when
# sizing the output layer -- verify before trusting the per-class rows.
labels = ['kesihatan',
          'kes lemas',
          'kes pecah rumah',
          'kes tangkap basah',
          'kewangan dan perniagaan',
          'kos sara hidup',
          'suruhanjaya pilihan raya malaysia',
          'tentera malaysia',
          'nilai ringgit jatuh',
          'kes buang bayi',
          'isu kemiskinan',
          'infrastruktur',
          'harga minyak']
from sklearn import metrics
print(
    metrics.classification_report(
        real_Y, predict_Y, target_names = labels, digits=5
    )
)
```
| github_jupyter |
```
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
```
# Text Classification of Movie Reviews
```
from helpers import Timer
from sklearn.datasets import load_files
reviews_train = load_files("aclImdb/train/")
text_train, y_train = reviews_train.data, reviews_train.target
print("Number of documents in training data: %d" % len(text_train))
print(np.bincount(y_train))
reviews_test = load_files("aclImdb/test/")
text_test, y_test = reviews_test.data, reviews_test.target
print("Number of documents in test data: %d" % len(text_test))
print(np.bincount(y_test))
print(text_train[1])
print(y_train[1])
```
### Bag of words reminder:
<img src="bag_of_words.svg" width=80%>
```
from sklearn.feature_extraction.text import CountVectorizer
cv = CountVectorizer()
cv.fit(text_train)
len(cv.vocabulary_)
print(cv.get_feature_names()[:50])
print(cv.get_feature_names()[50000:50050])
X_train = cv.transform(text_train)
X_train
print(text_train[19726])
X_train[19726].nonzero()[1]
X_test = cv.transform(text_test)
from sklearn.svm import LinearSVC
svm = LinearSVC()
with Timer():
svm.fit(X_train, y_train)
svm.score(X_train, y_train)
svm.score(X_test, y_test)
def visualize_coefficients(classifier, feature_names, n_top_features=25):
    """Bar-plot the most negative and most positive linear-model coefficients.

    Shows `n_top_features` bars on each side: red for negative weights,
    blue for positive, labelled with the corresponding feature names.
    """
    weights = classifier.coef_.ravel()
    order = np.argsort(weights)
    # Most negative indices first, then the most positive ones.
    selected = np.hstack([order[:n_top_features], order[-n_top_features:]])
    plt.figure(figsize=(15, 5))
    bar_colors = ["red" if w < 0 else "blue" for w in weights[selected]]
    plt.bar(np.arange(2 * n_top_features), weights[selected], color=bar_colors)
    tick_labels = np.array(feature_names)[selected]
    plt.xticks(np.arange(1, 1 + 2 * n_top_features), tick_labels, rotation=60, ha="right");
visualize_coefficients(svm, cv.get_feature_names())
from sklearn.pipeline import make_pipeline
# Pipeline = vectorise then classify, so a grid search can tune both stages.
text_pipe = make_pipeline(CountVectorizer(), LinearSVC())
with Timer():
    text_pipe.fit(text_train, y_train)
text_pipe.score(text_test, y_test)
# NOTE(review): sklearn.grid_search was deprecated in scikit-learn 0.18 and
# removed in 0.20; on modern versions import GridSearchCV from
# sklearn.model_selection (plot_grid_1d below also relies on the old API).
from sklearn.grid_search import GridSearchCV
# Tune the SVM regularisation strength C over 6 log-spaced values.
param_grid = {'linearsvc__C': np.logspace(-5, 0, 6)}
grid = GridSearchCV(text_pipe, param_grid, cv=5)
with Timer():
    grid.fit(text_train, y_train);
from figures import plot_grid_1d
plot_grid_1d(grid)
grid.best_params_
# Inspect the coefficients of the best cross-validated model.
visualize_coefficients(grid.best_estimator_.named_steps['linearsvc'],
                       grid.best_estimator_.named_steps['countvectorizer'].get_feature_names())
grid.best_score_
grid.score(text_test, y_test)
```
# Text Classification continuation.
## TfidfVectorizer
```
from sklearn.feature_extraction.text import TfidfVectorizer
tfidf_pipe = make_pipeline(TfidfVectorizer(), LinearSVC())
param_grid = {'linearsvc__C': np.logspace(-3, 2, 6)}
grid = GridSearchCV(tfidf_pipe, param_grid, cv=5)
with Timer():
grid.fit(text_train, y_train)
plot_grid_1d(grid)
visualize_coefficients(grid.best_estimator_.named_steps['linearsvc'],
grid.best_estimator_.named_steps['tfidfvectorizer'].get_feature_names())
grid.best_score_
grid.score(text_test, y_test)
```
# N-Grams
```
text_pipe = make_pipeline(CountVectorizer(), LinearSVC())
param_grid = {'linearsvc__C': np.logspace(-3, 2, 6),
"countvectorizer__ngram_range": [(1, 1), (1, 2), (1, 3)]}
grid = GridSearchCV(text_pipe, param_grid, cv=5)
with Timer():
grid.fit(text_train, y_train)
scores = np.array([score.mean_validation_score for score in grid.grid_scores_]).reshape(3, -1)
plt.matshow(scores)
plt.ylabel("n-gram range")
plt.yticks(range(3), param_grid["countvectorizer__ngram_range"])
plt.xlabel("C")
plt.xticks(range(6), param_grid["linearsvc__C"]);
plt.colorbar()
grid.best_params_
visualize_coefficients(grid.best_estimator_.named_steps['linearsvc'],
grid.best_estimator_.named_steps['countvectorizer'].get_feature_names())
grid.score(text_test, y_test)
```
## Look at the Natural Language Toolkit (NLTK)
| github_jupyter |
<p style="font-family: Arial; font-size:3.75vw;color:purple; font-style:bold"><br>
matplotlib Exercise Notebook
</p><br>
# Exercise Notebook Instructions
### 1. Important: Only modify the cells which instruct you to modify them - leave "do not modify" cells alone.
The code which tests your responses assumes you have run the startup/read-only code exactly.
### 2. Work through the notebook in order.
Some of the steps depend on previous ones, so you'll want to move through the notebook in order.
### 3. It is okay to use numpy libraries.
You may find some of these questions are fairly straightforward to answer using built-in numpy functions. That's totally okay - part of the point of these exercises is to familiarize you with the commonly used numpy functions.
### 4. Seek help if stuck
If you get stuck, don't worry! You can either review the videos/notebooks from this week, ask in the course forums, or look to the solutions for the correct answer. BUT, be careful about looking to the solutions too quickly. Struggling to get the right answer is an important part of the learning process.
```
# DO NOT MODIFY
# import appropriate libraries
import matplotlib.pyplot as plt
import numpy as np
import sqlite3
import pandas as pd
%matplotlib inline
# DO NOT MODIFY
# we will use this dataset for some portions of this exercise.
# source: https://www.kaggle.com/hugomathien/soccer
def get_data():
    """Load the full Player_Attributes table from the local SQLite database.

    Returns:
        pandas.DataFrame: one row per player-attributes record in
        `database.sqlite` (created from the Kaggle soccer dataset).
    """
    cnx = sqlite3.connect('database.sqlite')
    try:
        return pd.read_sql_query("SELECT * FROM Player_Attributes", cnx)
    finally:
        # Always release the connection; the original version leaked it.
        cnx.close()
df = get_data()
#DO NOT MODIFY
# Let's see what is in our dataset
df.describe()
```
<p style="font-family: Arial; font-size:2.75vw;color:purple; font-style:bold"><br>
Exercise 1: Line Plot<br><br></p>
In the cell below, modify the function to plot x vs y, where x and y
are column names of dataframe (df) which is also entered as input to the function. The function should
- First sort the dataframe by the column 'x'
- Take the first 50 rows for plotting (discard the remaining)
- Provide a title
- Label x and y axes
```
# modify this cell
def line_plot(df, x, y):
    """Line-plot column *y* against column *x* of *df*.

    Per the exercise spec: sort by *x*, keep only the first 50 rows,
    and add a title plus axis labels.
    """
    ### BEGIN SOLUTION
    data = df.sort_values(by=x).head(50)
    plt.plot(data[x], data[y])
    plt.title('{} vs {}'.format(y, x))
    plt.xlabel(x)
    plt.ylabel(y)
    ### END SOLUTION
# DO NOT MODIFY
# your function should give a plot similar to the following:
line_plot(df, 'potential', 'overall_rating')
```
Your solution to Exercise 1 should look like this:

<p style="font-family: Arial; font-size:2.75vw;color:purple; font-style:bold"><br>
Exercise 2: Histogram <br><br></p>
In the cell below, modify the function to plot a histogram. The function should take an input parameter X which is a column name of the dataframe df, also passed to the function. Be sure to drop NULL values before you plot the histogram.
```
# modify this cell
def plot_histogram(df, X):
    """Plot a histogram of column *X* of *df*, dropping NULL values first.

    Note: the original stub's body contained only comments, which is a
    SyntaxError in Python; this implements the documented exercise spec.
    """
    ### BEGIN SOLUTION
    values = df[X].dropna()
    plt.hist(values)
    plt.title('Histogram of {}'.format(X))
    plt.xlabel(X)
    plt.ylabel('Frequency')
    ### END SOLUTION
# DO NOT MODIFY
# your plot should look similar to the following:
plot_histogram(df, 'overall_rating')
```
Your solution for Exercise 2 should look like this:

<p style="font-family: Arial; font-size:2.75vw;color:purple; font-style:bold"><br>
Exercise 3: Scatter Plot<br><br></p>
In the cell below, modify the function to plot...
```
# modify this cell
def plot_scatter(df, x, y):
    """Scatter-plot column *y* against column *x* of *df*.

    Rows where either value is NULL are dropped; a title and axis labels
    are added.  (The original stub's comment-only body was a SyntaxError.)
    """
    ### BEGIN SOLUTION
    data = df[[x, y]].dropna()
    plt.scatter(data[x], data[y])
    plt.title('{} vs {}'.format(y, x))
    plt.xlabel(x)
    plt.ylabel(y)
    ### END SOLUTION
# DO NOT MODIFY
# your plot should look similar to the following:
plot_scatter(df, 'gk_diving', 'gk_handling')
```
Your solution to Excercise 3 should look like this:

| github_jupyter |
```
%matplotlib inline
import numpy as np
import pandas as pd
import matplotlib
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn.feature_selection import SelectFromModel
from sklearn.linear_model import Ridge, Lasso, ElasticNet
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
#Reading in data with the suppression flags
df_suppress = pd.read_csv('Data/Final_Suppress_Features.txt', sep='\t')
df_suppress.head()
df_suppress.columns
#Dropping all columns that are not possible features or the outcome or which are categorical
df_suppress_features = df_suppress.drop(columns=['npi', 'EXCLYear', 'REINYear', 'excl_type', 'specialty_description', 'state', 'nppes_credentials'])
df_suppress_features.shape
```
## Feature Selection
There is the possibility of 40 features. Most likely all of these features are not strong predictors of the outcome (exclusion from Medicare) so will most likely want to filter these features down before running our final model. Will start looking at all together by using Radviz.
```
#Selecting target column separate from features
features_2 = df_suppress_features.drop(columns = ['exclusion_flag']).columns
X = df_suppress_features[features_2].values
y = df_suppress_features['exclusion_flag'].values
from yellowbrick.features import RadViz
# Specify the target classes
classes = ['0', '1']
# Instantiate the visualizer
visualizer = RadViz(classes=classes, features=features_2, size=(1080, 720))
visualizer.fit(X, y) # Fit the data to the visualizer
visualizer.transform(X) # Transform the data
visualizer.show() # Draw the data
```
## Regularization
Looks like too much noise is included so will look at Regularization techniques to look at the overall importance of these features in a couple different ways
```
features_3 = df_suppress_features[features_2]
labels = df_suppress_features['exclusion_flag']
#Using Lasso Regularization
model = Lasso(tol = 0.001)
model.fit(features_3, labels)
print(list(zip(features_3, model.coef_.tolist())))
#Applying Ridge Regression
model = Ridge()
model.fit(features_3, labels)
print(list(zip(features_3, model.coef_.tolist())))
#Applying ElasticNet
model = ElasticNet(tol = 0.1)
model.fit(features_3, labels)
print(list(zip(features_3, model.coef_.tolist())))
```
## Transformer Methods
```
model = Lasso(tol=.001)
sfm = SelectFromModel(model)
sfm.fit(features_3, labels)
print(list(features_3.iloc[:, sfm.get_support(indices=True)]))
model = Ridge()
sfm = SelectFromModel(model)
sfm.fit(features_3, labels)
print(list(features_3.iloc[:, sfm.get_support(indices=True)]))
model = ElasticNet(tol=.1)
sfm = SelectFromModel(model)
sfm.fit(features_3, labels)
print(list(features_3.iloc[:, sfm.get_support(indices=True)]))
```
## Correlation Matrix
To ensure we don't miss any strongly correlated features with our outcome, will also include any features with a correlations with our outcome that is greater than 0.1
```
#Looking at correlations with outcome variable using spearman
correlations_spearman = df_suppress_features.corr(method='spearman')
corr_spear = correlations_spearman['exclusion_flag']
corr_spear.sort_values()
#Selecting the features from all three of the transformers, the categorical features (as seen in previous visualizations)
#there seem to be some correlations within them), any additional features with a spearman correlation > absolute value
#of 0.1 and features flagged in earlier analysis where they may be differences when looking at their distributions
keep = ['specialty_description', 'state', 'nppes_credentials', 'nppes_provider_gender',
'medicare_prvdr_enroll_status', 'other_suppress_flag', 'antipsych_bene_ge65_suppress_flg',
'total_30_day_per_claim', 'total_claim_count', 'total_30_day_fill_count', 'bene_count', 'opioid_day_supply',
'la_opioid_day_supply', 'drug_cost_per_claim', 'average_age_of_beneficiaries', 'day_supply_per_claim', 'exclusion_flag']
#Saving as new dataset
Model_suppress = df_suppress[keep]
Model_suppress.shape
Model_suppress.to_csv('Data/Model_suppress.txt', sep='\t', index=False)
```
| github_jupyter |
# Variational Inference and Learning in the Big Data Regime
Many real-world modelling solutions require fitting models with large numbers of data-points and parameters, which is made convenient recently through software implementing automatic differentiation, but also require uncertainty quantification. Variational inference is a generic family of tools that reformulates (Bayesian) model inference into an optimisation problem, thereby making use of modern software tools but also having the ability to give model uncertainty. This talk will motivate how variational inference works and what the state-of-the-art methods are. We will also accompany the theory with implementations on some simple probabilistic models, such as variational autoencoders (VAE). If time permits, we will briefly talk about some of the recent frontiers of variational inference, namely normalising flows and Stein Variational Gradient Descent.
๐ป Content covered:
Current inference methods: maximum likelihood and Markov chain Monte Carlo
Information theory and KL divergence
Mean field variational inference
Bayesian linear regression
Monte Carlo variational inference (MCVI), reparameterisation trick and law of the unconscious statistician (LOTUS)
Example software implementations: VAE
๐พ This lecture will be held online on Microsoft Teams.
๐ดThe event will be recorded and will be publicly available.
๐ Attendance is FREE for members! Whether you are a student at Imperial College or not, sign up to be a member at www.icdss.club/joinus
โญ๏ธ We encourage participants of this workshop to have looked at our previous sessions on YouTube. Prerequisites: basic understanding of Bayesian statistics
๐ A schedule of our lecture series is currently available
## Background
- Variational Inference: A Review for Statisticians: https://www.tandfonline.com/doi/full/10.1080/01621459.2017.1285773
- Auto-Encoding Variational Bayes: https://arxiv.org/pdf/1312.6114.pdf
- http://yingzhenli.net/home/en/approximateinference
- https://github.com/ethanluoyc/pytorch-vae
Consider crop yields $y$ and we have a likelihood $p(y|z)$ where $z$ are latent parameters. Suppose $z$ has some prior distribution $p(z)$, then the posterior distribution is
$$
p(z|y) \propto p(y|z)p(z) := \tilde{p}(z|y).
$$
We then want to be able to compute quantities $\mathbb{E}_{z\sim p(z|y)}[h(Z)]$, for certain functions $h$ e.g. $h(z)=z$ for the posterior mean of $Z$.
We could compute $p(z|y$) analytically if we have nice priors (conjugate priors), but this is usually not the case for most models e.g. Autoencoders with latent parameters or certain Gaussian mixture models.
Markov chain Monte Carlo (MCMC) allows us to obtain samples from $z\sim p(z|y)$ using samplers (e.g. Hamiltonian Monte Carlo (HMC) or Metropolis-Hastings), but it could be very expensive and prohibits it from being used for the big data setting.
### Variational Inference
Variational Inference (VI)/Variational Bayes/Variational Approximation turns this problem into an optimisation problem. We now seek $q(z)$ in a space of functions $\mathcal{Q}$, instead of computing the exact $p(z|y)$, in which
$$KL(q(z) || p(z|y)) = \int \log\frac{q(z)}{p(z|y)} q(z) dq$$
is minimised. This KL denotes the KL-divergence, which is a divergence measure that looks at how close 2 distributions are to one-another. It is:
- Non-negative
- Is equal to 0 if and only if $q(z) = p(z|y)$
- Note: $KL(q(z)||p(z|y)) \neq KL(p(z|y) || q(z))$. Minimising $KL(p(z|y) || q(z))$ is the objective of Expectation Propagation, which is another method for approximating posterior distributions.
Note that maximum likelihood estimation (MLE) is done by maximising the log-likelihood, which is the same as minimising the KL divergence:
$$
\text{argmin}_{\theta} KL(\hat{p}(y|\theta^*) || p(y|\theta)) = \text{argmin}_{\theta} \frac{1}{n}\sum_{i=1}^n \log \frac{p(y_i|\hat{\theta})}{p(y_i|\theta)} = \text{argmin}_{\theta} \frac{1}{n}\sum_{i=1}^n \log \frac{1}{p(y_i|\theta)} = \text{argmax}_{\theta} \frac{1}{n}\sum_{i=1}^n \log p(y_i|\theta).
$$
**Evidence Lower-Bound**
Suppose I pose a family of posteriors $q(z)$, then
\begin{align*}
KL(q(z) || p(z|y)) = \int \log\frac{q(z)}{p(z|y)} q(z) dq &= \mathbb{E}_{z\sim q(z)}[\log q(z)] - \mathbb{E}_{z\sim q(z)}[\log p(z|y)] \\
&= \mathbb{E}_{z\sim q(z)}[\log q(z)] - \mathbb{E}_{z\sim q(z)}[\log p(z,y)] + \log p(y) \\
&= \mathbb{E}_{z\sim q(z)}[\log q(z)] - \mathbb{E}_{z\sim q(z)}[\log p(y|z)] - \mathbb{E}_{z\sim q(z)}[p(z)] + \log p(y) \\
&=\log p(y) + \mathbb{E}_{z\sim q(z)}[\log \frac{q(z)}{p(z)}] - \mathbb{E}_{z\sim q(z)}[\log p(y|z)] \\
&= \log p(y) + KL(q(z) || p(z)) - \mathbb{E}_{z\sim q(z)}[\log p(y|z)].
\end{align*}
Since the left term is positive and $\log p(y)$ is fixed, it is sufficient to minimise:
$$
KL(q(z) || p(z)) - \mathbb{E}_{z\sim q(z)}[\log p(y|z)].
$$
The evidence lower-bound is $ELBO(q) = \mathbb{E}_{z\sim q(z)}[\log p(y|z)] - KL(q(z) || p(z))$, which is maximised.
### Mean-Field Variational Inference
As fancy as it sounds, it just means specifying a family of posteriors $\mathcal{Q}$ such that
$$
q(z) = \prod_{j=1}^m q_j(z_j),
$$
where $m$ is the number of parameters.
**Coordinate Ascent Variational Inference (CAVI)**
Blei et al. (2017)

Let's look at an example (Li (2021)):
$$
y|x \sim \mathcal{N}(y; x^\intercal\theta, \sigma^2),\qquad \theta\sim\mathcal{N}(\theta; \mu_0, \Gamma_0^{-1}).
$$
This has an analytical solution
$$
p(\theta|\mathcal{D}) = \mathcal{N}(\theta; \mu,\Gamma^{-1})
$$
with
\begin{align*}
\Gamma &= \Gamma_0 + \frac{1}{\sigma^2}X^\intercal X \\
\mu &= \frac{1}{\sigma^2}(X^\intercal X + \Gamma_0)^{-1}X^Ty,
\end{align*}
where $X=(x_1,\ldots,x_n)^\intercal$ and $y=(y_1,\ldots,y_n)^\intercal$. **Let's try CAVI**:
\begin{align*}
\log q_1(\theta_1) =& \int q_2(\theta_2) \log \tilde{p}(\theta_1, \theta_2) d\theta_2\\
=& \int -\frac{1}{2}\left[(\theta_1-\mu_1)^2\Gamma_{11} + 2(\theta_1-\mu_1)\Gamma_{12}(\theta_2-\mu_2) \right]q_2(\theta_2) d\theta_2 + const \\
=& -\frac{1}{2}\left[(\theta_1-\mu_1)^2\Gamma_{11} + 2(\theta_1-\mu_1)\Gamma_{12}(\mathbb{E}_{\theta_2\sim q_2}[\theta_2]-\mu_2) \right] + const,
\end{align*}
which is Gaussian with mean and variance
$$
\tilde{\mu}_1 = \mu_1 - \Gamma_{11}^{-1}\Gamma_{12}(\mathbb{E}_{q_2}[\theta_2] - \mu_2),\qquad \tilde{\gamma}_2^{-1} = \Gamma_{11}.
$$
Similarly, you can obtain an analogous expression for $q_2(\theta_2)$. When CAVI converges, it can be shown that $(\tilde{\mu}_1, \tilde{\mu}_2)^\intercal = \mu$, giving
$$
\tilde{\mu}_1 = \mu_1, \qquad \tilde{\mu}_2 = \mu_2.
$$
In this case, CAVI gives a Gaussian posteriors.
### Monte Carlo Variational Inference (MCVI)
For big data situations, the variational expectation term can be (1) very expensive and (2) is not available in closed form. We can also add some more complexity to the posterior instead of just having a mean-field approximation. Recall the bound:
$$
\mathcal{L}(q; p) = KL(q(z) || p(z)) - \mathbb{E}_{z\sim q(z)}[\log p(y|z)].
$$
MCVI calculates the variational expectation using Monte Carlo integration
$$
\mathbb{E}_{z\sim q(z)}[\log p(y_i|z)] \approx \frac{1}{M}\sum_{j=1}^M \log p(y_i|z^j),\qquad z^j\sim q(z).
$$
Even better, we can calculate this using mini-batches:
$$
\sum_{i=1}^n\mathbb{E}_{z\sim q(z)}[\log p(y_i|z)] = \mathbb{E}_{S\sim \{1,\ldots,n\}}\left[\frac{n}{|S|}\sum_{i\in S} \mathbb{E}_q[\log p(y_i|z)] \right],
$$
where the inner expectation can be calculated as before. Now, to minimise $\mathcal{L}(q; p)$, we differentiate with respect to the parameters, let's call it $\theta$. Therefore, we need
\begin{align*}
\nabla_\theta \mathcal{L}(q; p) =& \nabla_\theta\left[KL(q(z) || p(z)) - \mathbb{E}_{z\sim q(z)}[\log p(y|z)] \right] \\
=& \nabla_\theta \left[ \frac{1}{M}\sum_{j=1}^M \log\frac{q(z^j)}{p(z^j)} \right] - \nabla_\theta\left[\mathbb{E}_{S\sim \{1,\ldots,n\}}\left[\frac{n}{|S|}\sum_{i\in S} \frac{1}{M}\sum_{j=1}^M \log p(y_i|z^j)\right] \right],
\end{align*}
where $z^j\sim q(z)$. We can get rid of the expectation with respect to the mini-batches and get a nice approximation for the bound for each batch $S$.
**Reparameterisation Trick/Law of the Unconscious Statistician (LOTUS)**
LOTUS basically refers to the identity:
$$
E_X[f(X)] = \int f(x) p(x) dx = \int f(g(\epsilon)) p(\epsilon) d\epsilon = E_\epsilon[f(g(\epsilon))]
$$
for $x=g(\epsilon)$, via the inverse function theorem and the change of variable theorem. The reparameterisation trick thus makes it easier to compute the bound by allowing us to sample from a simpler distribution $p(\epsilon)$ to get $q(z)$:
\begin{align*}
\nabla_\theta \mathcal{L}(q; p) =& \nabla_\theta\left[KL(q(z) || p(z)) - \mathbb{E}_{z\sim q(z)}[\log p(y|z)] \right] \\
=& \nabla_\theta\left[KL(q(z) || p(z)) - \mathbb{E}_{\epsilon}[\log p(y|g_\theta(\epsilon))] \right]\\
=& \nabla_\theta KL(q(z) || p(z)) - \mathbb{E}_{\epsilon}[\nabla_g \log p(y|g_\theta(\epsilon)) \times \nabla_\theta g_\theta(\epsilon)].
\end{align*}
Then repeat using the same MCVI integration method to approximate the variational expectation. In practice, we can also use automatic differentiation to calculate the gradients.
**Example: Variational Autoencoders (VAEs)**
Model (Taken from https://lilianweng.github.io/lil-log/2018/08/12/from-autoencoder-to-beta-vae.html)

**(1)**
The decoder represents the likelihood $p(y|z)$, where $y$ is an image. In the upcoming example, we have
$$
\log p(y|z) = \log N(y; f_\theta(z), I) \equiv ||y - f_\theta(z)||_2^2,
$$
the MSE loss.
**(2)**
The prior is $z\sim \mathcal{N}(0, I)$.
**(3)**
As you will see in many applications, people only use 1 sample to calculate the variational expectation, i.e. taking $M=1$.
**(4)**
The variational distribution that we are going for is $$q(z|y) = N(g_\phi(y)[0], g_\phi(y)[1] I),$$
where the variational distribution is parameterised by the encoder network.
**(5)**
We note that we can actually analytically compute the KL divergence as they are 2 Gaussians (proceed to Wikipedia for the formula...)
## Experiments
```
# from https://github.com/ethanluoyc/pytorch-vae/blob/master/vae.py
import torch
from torch.autograd import Variable
import numpy as np
import torch.nn.functional as F
import torchvision
from torchvision import transforms
import torch.optim as optim
from torch import nn
import matplotlib.pyplot as plt
from six.moves import urllib
# Some dataset mirrors reject the default Python urllib User-Agent
# (presumably returning HTTP 403 — verify), so install a browser-like
# opener before torchvision downloads MNIST below.
opener = urllib.request.build_opener()
opener.addheaders = [('User-agent', 'Mozilla/5.0')]
urllib.request.install_opener(opener)
class Normal(object):
    """Container for the parameters of a (diagonal) Gaussian distribution.

    Holds mean, stdev and log-stdev; `v` and `r` are auxiliary tensors
    allocated with the same shape as `mu` when not supplied.
    """

    def __init__(self, mu, sigma, log_sigma, v=None, r=None):
        self.mu = mu
        self.sigma = sigma  # either stdev diagonal itself, or stdev diagonal from decomposition
        self.logsigma = log_sigma
        # Bug fix: `get_shape()` is TensorFlow API; torch tensors expose `.size()`.
        dim = mu.size()
        if v is None:
            v = torch.FloatTensor(*dim)
        if r is None:
            r = torch.FloatTensor(*dim)
        self.v = v
        self.r = r
class Encoder(torch.nn.Module):
    """Two fully connected layers, each followed by a ReLU."""

    def __init__(self, D_in, H, D_out):
        super(Encoder, self).__init__()
        self.linear1 = torch.nn.Linear(D_in, H)
        self.linear2 = torch.nn.Linear(H, D_out)

    def forward(self, x):
        # Apply each affine layer followed by the ReLU nonlinearity.
        for layer in (self.linear1, self.linear2):
            x = F.relu(layer(x))
        return x
class Decoder(torch.nn.Module):
    """Mirror of the encoder: two ReLU-activated linear layers back to data space."""

    def __init__(self, D_in, H, D_out):
        super(Decoder, self).__init__()
        self.linear1 = torch.nn.Linear(D_in, H)
        self.linear2 = torch.nn.Linear(H, D_out)

    def forward(self, x):
        hidden = F.relu(self.linear1(x))
        return F.relu(self.linear2(hidden))
class VAE(torch.nn.Module):
    """Variational autoencoder: encoder -> reparameterised latent -> decoder."""

    latent_dim = 8

    def __init__(self, encoder, decoder):
        super(VAE, self).__init__()
        self.encoder = encoder
        self.decoder = decoder
        # Heads mapping the 100-dim encoder output to latent mean / log-stdev.
        self._enc_mu = torch.nn.Linear(100, 8)
        self._enc_log_sigma = torch.nn.Linear(100, 8)

    def _sample_latent(self, h_enc):
        """
        Return the latent normal sample z ~ N(mu, sigma^2)
        """
        mu = self._enc_mu(h_enc)
        sigma = torch.exp(self._enc_log_sigma(h_enc))
        eps = torch.from_numpy(np.random.normal(0, 1, size=sigma.size())).float()
        # Stash the posterior parameters so latent_loss can read them later.
        self.z_mean = mu
        self.z_sigma = sigma
        # Reparameterisation trick: z = mu + sigma * eps with eps ~ N(0, I).
        return mu + sigma * Variable(eps, requires_grad=False)

    def forward(self, state):
        return self.decoder(self._sample_latent(self.encoder(state)))
def latent_loss(z_mean, z_stddev):
    """KL( N(z_mean, z_stddev^2) || N(0, I) ), averaged over all elements."""
    mu_sq = z_mean * z_mean
    var = z_stddev * z_stddev
    kl = mu_sq + var - torch.log(var) - 1
    return 0.5 * torch.mean(kl)
# Train the VAE on MNIST for 5 epochs, then display a reconstruction
# next to the original input.
input_dim = 28 * 28
batch_size = 32

transform = transforms.Compose(
    [transforms.ToTensor()])
mnist = torchvision.datasets.MNIST('./', download=True, transform=transform)

dataloader = torch.utils.data.DataLoader(mnist, batch_size=batch_size,
                                         shuffle=True, num_workers=2)

print('Number of samples: ', len(mnist))

encoder = Encoder(input_dim, 100, 100)
decoder = Decoder(8, 100, input_dim)
vae = VAE(encoder, decoder)

criterion = nn.MSELoss()

optimizer = optim.Adam(vae.parameters(), lr=0.001)
l = None
for epoch in range(5):
    for i, data in enumerate(dataloader, 0):
        inputs, classes = data
        # Bug fix: the final batch can be smaller than `batch_size`, so the
        # original `inputs.resize_(batch_size, input_dim)` would read
        # uninitialised memory there. Flatten using the real batch dimension.
        inputs = Variable(inputs.view(inputs.size(0), input_dim))
        classes = Variable(classes)
        optimizer.zero_grad()
        dec = vae(inputs)
        # ELBO = reconstruction (MSE) term + KL term from the stashed posterior.
        ll = latent_loss(vae.z_mean, vae.z_sigma)
        loss = criterion(dec, inputs) + ll
        loss.backward()
        optimizer.step()
        l = loss.item()
    print(epoch, l)

plt.imshow(vae(inputs).data[0].numpy().reshape(28, 28), cmap='gray')
plt.show(block=True)

plt.imshow(inputs[0].numpy().reshape(28, 28), cmap='gray')
```
### Normalising Flows
Using a "nice" class of diffeomorphisms whose Jacobians are easy to compute (e.g. diagonal), we apply the change of variables formula:
\begin{align*}
q(z_L) = q(z) \prod_{l=1}^L |\det(\nabla_{z_{l-1}} T_l(z_{l-1}))|^{-1}
\end{align*}
| github_jupyter |
# Face Mask Detection with PaddlePaddle (飛槳) + DJL
ๅจ้ๅๆๅญธไธญๆๅๅฐๆๅฑ็คบๅฉ็จ PaddleHub ไธ่ผ้ ่จ็ทดๅฅฝ็ PaddlePaddle ๆจกๅไธฆ้ๅฐ็ฏไพ็
ง็ๅไบบ่ๅฃ็ฝฉ่พจ่ญใ้ๅ็ฏไพ็ธฝๅ
ฑๆๅๆๅ
ฉๅๆญฅ้ฉ:
- ็จ่้จๆชขๆธฌๆจกๅ่ญๅฅๅ็ไธญ็ไบบ่(็ก่ซๆฏๅฆๆๆดๅฃ็ฝฉ)
- ็ขบ่ชๅ็ไธญ็่ๆฏๅฆๆๆดๅฃ็ฝฉ
้ๅ
ฉๅๆญฅ้ฉๆๅ
ๅซไฝฟ็จๅ
ฉๅ Paddle ๆจกๅ๏ผๆๅๆๅจๆฅไธไพ็ๅ
งๅฎนไป็ดนๅ
ฉๅๆจกๅๅฐๆ้่ฆๅ็ๅๅพ่็้่ผฏ
## ๅฐๅ
ฅ็ธ้็ฐๅขไพ่ณดๅๅญ้กๅฅ
ๅจ้ๅไพๅญไธญ็ๅ่็้ฃๆงณๆทฑๅบฆๅญธ็ฟๅผๆ้่ฆๆญ้
DJL ๆททๅๆจกๅผ้ฒ่กๆทฑๅบฆๅญธ็ฟๆจ็๏ผๅๅ ๆฏๅผๆๆฌ่บซๆฒๆๅ
ๅซ NDArray ๆไฝ๏ผๅ ๆญค้่ฆ่็จๅ
ถไปๅผๆ็ NDArray ๆไฝ่ฝๅไพๅฎๆใ้้ๆๅๅฐๅ
ฅ PyTorch ไพๅๅๅ็ๅ่็ๅทฅไฝ:
```
// %mavenRepo snapshots https://oss.sonatype.org/content/repositories/snapshots/
%maven ai.djl:api:0.17.0
%maven ai.djl.paddlepaddle:paddlepaddle-model-zoo:0.17.0
%maven org.slf4j:slf4j-simple:1.7.32
// second engine to do preprocessing and postprocessing
%maven ai.djl.pytorch:pytorch-engine:0.17.0
import ai.djl.*;
import ai.djl.inference.*;
import ai.djl.modality.*;
import ai.djl.modality.cv.*;
import ai.djl.modality.cv.output.*;
import ai.djl.modality.cv.transform.*;
import ai.djl.modality.cv.translator.*;
import ai.djl.modality.cv.util.*;
import ai.djl.ndarray.*;
import ai.djl.ndarray.types.Shape;
import ai.djl.repository.zoo.*;
import ai.djl.translate.*;
import java.io.*;
import java.nio.file.*;
import java.util.*;
```
## ่้จๅตๆธฌๆจกๅ
็พๅจๆๅๅฏไปฅ้ๅง่็็ฌฌไธๅๆจกๅ๏ผๅจๅฐๅ็่ผธๅ
ฅ่้จๆชขๆธฌๆจกๅๅๆๅๅฟ
้ ๅ
ๅไธไบ้ ่็:
โข ่ชฟๆดๅ็ๅฐบๅฏธ: ไปฅ็นๅฎๆฏไพ็ธฎๅฐๅ็
โข ็จไธๅๆธๅผๅฐ็ธฎๅฐๅพๅ็ๆญฃ่ฆๅ
ๅฐ้็ผ่
ไพ่ชชๅฅฝๆถๆฏๆฏ๏ผDJL ๆไพไบ Translator ไป้ขไพๅนซๅฉ้็ผๅ้ๆจฃ็้ ่็. ไธๅๆฏ่ผ็ฒ็ฅ็ Translator ๆถๆงๅฆไธ:

ๅจๆฅไธไพ็ๆฎต่ฝ๏ผๆๅๆๅฉ็จไธๅ FaceTranslator ๅญ้กๅฅๅฏฆไฝไพๅฎๆๅทฅไฝ
### ้ ่็
ๅจ้ๅ้ๆฎตๆๅๆ่ฎๅไธๅผตๅ็ไธฆไธๅฐๅ
ถๅไธไบไบๅ
็้ ่็๏ผ่ฎๆๅๅ
็คบ็ฏ่ฎๅไธๅผตๅ็:
```
// Download a sample photo from the PaddleHub mask-detection demo assets.
String url = "https://raw.githubusercontent.com/PaddlePaddle/PaddleHub/release/v1.5/demo/mask_detection/python/images/mask.jpg";
Image img = ImageFactory.getInstance().fromUrl(url);
// Returning the wrapped image renders it inline in the notebook.
img.getWrappedImage();
```
ๆฅ่๏ผ่ฎๆๅ่ฉฆ่ๅฐๅ็ๅไธไบ้ ่็็่ฝๆ:
```
// Preprocess an image into the NDList layout the face-detection model expects:
// shrink, HWC -> CHW with channel flip, mean-subtract, scale, add batch dim.
NDList processImageInput(NDManager manager, Image input, float shrink) {
    NDArray pixels = input.toNDArray(manager);
    Shape original = pixels.getShape();
    int scaledW = (int) (original.get(1) * shrink);
    int scaledH = (int) (original.get(0) * shrink);
    pixels = NDImageUtils.resize(pixels, scaledW, scaledH);
    pixels = pixels.transpose(2, 0, 1).flip(0); // HWC -> CHW, flip channel order
    NDArray mean = manager.create(new float[] {104f, 117f, 123f}, new Shape(3, 1, 1));
    // Subtract per-channel means, then scale by ~1/127.5.
    pixels = pixels.sub(mean).mul(0.007843f);
    // Prepend the batch dimension before wrapping in an NDList.
    return new NDList(pixels.expandDims(0));
}

processImageInput(NDManager.newBaseManager(), img, 0.5f);
```
ๅฆไธ่ฟฐๆ่ฆ๏ผๆๅๅทฒ็ถๆๅ็่ฝๆๅฆไธๅฐบๅฏธ็ NDArray: (ๆซ้, ้้(RGB), ้ซๅบฆ, ๅฏฌๅบฆ). ้ๆฏ็ฉไปถๆชขๆธฌๆจกๅ่ผธๅ
ฅ็ๆ ผๅผ
### ๅพ่็
็ถๆๅๅๅพ่็ๆ, ๆจกๅ่ผธๅบ็ๆ ผๅผๆฏ (number_of_boxes, (class_id, probability, xmin, ymin, xmax, ymax)). ๆๅๅฏไปฅๅฐๅ
ถๅญๅ
ฅ้ ๅ
ๅปบ็ซๅฅฝ็ DJL ๅญ้กๅฅ DetectedObjects ไปฅไพฟๅๅพ็บๆไฝ. ๆๅๅ่จญๆไธ็ตๆจ่ซๅพ็่ผธๅบๆฏ ((1, 0.99, 0.2, 0.4, 0.5, 0.8)) ไธฆไธ่ฉฆ่ๆไบบๅๆก้กฏ็คบๅจๅ็ไธ
```
// Convert raw model output rows (class_id, probability, xmin, ymin, xmax, ymax)
// into a DJL DetectedObjects, keeping only detections above `threshold`.
DetectedObjects processImageOutput(NDList list, List<String> className, float threshold) {
    NDArray raw = list.singletonOrThrow();
    float[] scores = raw.get(":,1").toFloatArray();
    List<String> labels = new ArrayList<>();
    List<Double> confidences = new ArrayList<>();
    List<BoundingBox> rectangles = new ArrayList<>();
    for (int row = 0; row < scores.length; row++) {
        if (scores[row] >= threshold) {
            float[] fields = raw.get(row).toFloatArray();
            labels.add(className.get((int) fields[0]));
            confidences.add((double) scores[row]);
            // Convert (xmin, ymin, xmax, ymax) into (x, y, width, height).
            rectangles.add(
                    new Rectangle(
                            fields[2], fields[3], fields[4] - fields[2], fields[5] - fields[3]));
        }
    }
    return new DetectedObjects(labels, confidences, rectangles);
}

// Smoke-test the post-processing with one synthetic detection row.
NDArray tempOutput = NDManager.newBaseManager().create(new float[]{1f, 0.99f, 0.1f, 0.1f, 0.2f, 0.2f}, new Shape(1, 6));
DetectedObjects testBox = processImageOutput(new NDList(tempOutput), Arrays.asList("Not Face", "Face"), 0.7f);
Image newImage = img.duplicate();
newImage.drawBoundingBoxes(testBox);
newImage.getWrappedImage();
```
### ็ๆไธๅ็ฟป่ญฏๅจไธฆๅท่กๆจ็ไปปๅ
้้้ๅๆญฅ้ฉ๏ผไฝ ๆ็่งฃ DJL ไธญ็ๅๅพ่็ๅฆไฝ้ไฝ๏ผ็พๅจ่ฎๆๅๆๅๆธ็ๅนพๅๆญฅ้ฉไธฒๅจไธ่ตทไธฆๅฐ็ๅฏฆๅ็้ฒ่กๆไฝ:
```
// Translator wiring the standalone pre/post-processing helpers into DJL's
// inference pipeline for the face-detection model.
class FaceTranslator implements NoBatchifyTranslator<Image, DetectedObjects> {

    private float shrink;     // resize factor applied before inference
    private float threshold;  // minimum score for a detection to be kept
    private List<String> className;

    FaceTranslator(float shrink, float threshold) {
        this.shrink = shrink;
        this.threshold = threshold;
        className = Arrays.asList("Not Face", "Face");
    }

    @Override
    public NDList processInput(TranslatorContext ctx, Image input) {
        return processImageInput(ctx.getNDManager(), input, shrink);
    }

    @Override
    public DetectedObjects processOutput(TranslatorContext ctx, NDList list) {
        return processImageOutput(list, className, threshold);
    }
}
```
่ฆๅท่ก้ๅไบบ่ๆชขๆธฌๆจ็๏ผๆๅๅฟ
้ ๅ
ๅพ DJL ็ Paddle Model Zoo ่ฎๅๆจกๅ๏ผๅจ่ฎๅๆจกๅไนๅๆๅๅฟ
้ ๆๅฎๅฅฝ `Crieteria` . `Crieteria` ๆฏ็จไพ็ขบ่ช่ฆๅพๅช้่ฎๅๆจกๅ่ๅพๅท่ก `Translator` ไพ้ฒ่กๆจกๅๅฐๅ
ฅ. ๆฅ่๏ผๆๅๅช่ฆๅฉ็จ `Predictor` ๅฐฑๅฏไปฅ้ๅง้ฒ่กๆจ่ซ
```
// Criteria describes which model to fetch from the DJL model zoo and which
// Translator handles its pre/post-processing.
Criteria<Image, DetectedObjects> criteria = Criteria.builder()
.setTypes(Image.class, DetectedObjects.class)
.optModelUrls("djl://ai.djl.paddlepaddle/face_detection/0.0.1/mask_detection")
.optFilter("flavor", "server")
.optTranslator(new FaceTranslator(0.5f, 0.7f))
.build();
// Load the model (downloads on first use) and run face detection on `img`.
var model = criteria.loadModel();
var predictor = model.newPredictor();
DetectedObjects inferenceResult = predictor.predict(img);
// Draw the detected boxes on a copy of the input and render it.
newImage = img.duplicate();
newImage.drawBoundingBoxes(inferenceResult);
newImage.getWrappedImage();
```
ๅฆๅ็ๆ็คบ๏ผ้ๅๆจ่ซๆๅๅทฒ็ถๅฏไปฅๆญฃ็ขบ็่พจ่ญๅบๅ็ไธญ็ไธๅผตไบบ่
## ๅฃ็ฝฉๅ้กๆจกๅ
ไธๆฆๆไบๅ็็ๅบงๆจ๏ผๆๅๅฐฑๅฏไปฅๅฐๅ็่ฃๅชๅฐ้ฉ็ถๅคงๅฐไธฆไธๅฐๅ
ถๅณ็ตฆๅฃ็ฝฉๅ้กๆจกๅๅๅพ็บ็ๆจ่ซ
### ๅ็่ฃๅช
ๅไธญๆนๆกไฝ็ฝฎ็ๆธๅผ็ฏๅๅพ0ๅฐ1, ๅช่ฆๅฐ้ๅๆธๅผไนไธๅ็็้ทๅฏฌๆๅๅฐฑๅฏไปฅๅฐๆนๆกๅฐๆๅฐๅ็ไธญ็ๆบ็ขบไฝ็ฝฎ. ็บไบไฝฟ่ฃๅชๅพ็ๅ็ๆๆดๅฅฝ็็ฒพ็ขบๅบฆ๏ผๆๅๅฐๅ็่ฃๅชๆๆนๅฝข๏ผ่ฎๆๅ็คบ็ฏไธไธ:
```
// Grow a box by `percentage` and make it square around the same centre.
// Returns {x, y, side} of the enlarged square.
int[] extendSquare(
        double xmin, double ymin, double width, double height, double percentage) {
    double cx = xmin + width / 2;
    double cy = ymin + height / 2;
    double half = Math.max(width / 2, height / 2) * (1 + percentage);
    return new int[] {(int) (cx - half), (int) (cy - half), (int) (2 * half)};
}
// Crop a square patch around a detected face, enlarged by 18% for context.
Image getSubImage(Image img, BoundingBox box) {
    Rectangle rect = box.getBounds();
    // Bounding-box coordinates are fractions of the image, so scale to pixels.
    int w = img.getWidth();
    int h = img.getHeight();
    int[] square =
            extendSquare(
                    rect.getX() * w,
                    rect.getY() * h,
                    rect.getWidth() * w,
                    rect.getHeight() * h,
                    0.18);
    return img.getSubImage(square[0], square[1], square[2], square[2]);
}

// Show one cropped face from the earlier detection result.
List<DetectedObjects.DetectedObject> faces = inferenceResult.items();
getSubImage(img, faces.get(2).getBoundingBox()).getWrappedImage();
```
### ไบๅ
ๆบๅ Translator ไธฆ่ฎๅๆจกๅ
ๅจไฝฟ็จ่้จๆชขๆธฌๆจกๅ็ๆๅ๏ผๆๅๅฏไปฅๅฉ็จ DJL ้ ๅ
ๅปบๅฅฝ็ `ImageClassificationTranslator` ไธฆไธๅ ไธไธไบ่ฝๆใ้ๅ Translator ๆไพไบไธไบๅบ็ค็ๅ็็ฟป่ญฏ่็ไธฆไธๅๆๅ
ๅซไธไบ้ฒ้็ๆจๆบๅๅ็่็ใไปฅ้ๅไพๅญไพ่ชช, ๆๅไธ้่ฆ้กๅคๅปบ็ซๆฐ็ `Translator` ่ไฝฟ็จ้ ๅ
ๅปบ็ซ็ๅฐฑๅฏไปฅ
```
// Load the mask classifier, reusing DJL's built-in ImageClassificationTranslator
// instead of writing a custom Translator.
var criteria = Criteria.builder()
.setTypes(Image.class, Classifications.class)
.optModelUrls("djl://ai.djl.paddlepaddle/mask_classification/0.0.1/mask_classification")
.optFilter("flavor", "server")
.optTranslator(
ImageClassificationTranslator.builder()
.addTransform(new Resize(128, 128))
.addTransform(new ToTensor()) // HWC -> CHW div(255)
.addTransform(
new Normalize(
new float[] {0.5f, 0.5f, 0.5f},
new float[] {1.0f, 1.0f, 1.0f}))
.addTransform(nd -> nd.flip(0)) // RGB -> GBR
.build())
.build();
var classifyModel = criteria.loadModel();
var classifier = classifyModel.newPredictor();
```
### ๅท่กๆจ่ซไปปๅ
ๆๅพ๏ผ่ฆๅฎๆไธๅๅฃ็ฝฉ่ญๅฅ็ไปปๅ๏ผๆๅๅช้่ฆๅฐไธ่ฟฐ็ๆญฅ้ฉๅๅจไธ่ตทๅณๅฏใๆๅๅ
ๅฐๅ็ๅ่ฃๅชๅพไธฆๅฐๅ
ถๅไธ่ฟฐ็ๆจ่ซๆไฝ๏ผ็ตๆไนๅพๅ็ๆไธๅๆฐ็ๅ้กๅญ้กๅฅ `DetectedObjects`:
```
// Classify each detected face crop and rebuild a DetectedObjects whose labels
// carry the mask-classifier output, then draw the annotated boxes.
List<String> maskLabels = new ArrayList<>();
List<Double> faceProbs = new ArrayList<>();
List<BoundingBox> faceBoxes = new ArrayList<>();
for (DetectedObjects.DetectedObject face : faces) {
    Image crop = getSubImage(img, face.getBoundingBox());
    Classifications result = classifier.predict(crop);
    maskLabels.add(result.best().getClassName());
    faceProbs.add(face.getProbability());
    faceBoxes.add(face.getBoundingBox());
}

newImage = img.duplicate();
newImage.drawBoundingBoxes(new DetectedObjects(maskLabels, faceProbs, faceBoxes));
newImage.getWrappedImage();
```
| github_jupyter |
# "Understanding Residual Networks"
> "Probably about residuals, right?"
- comments: true
- categories: [vision]
```
#hide
!pip install -Uqq fastai>=2.0.0 graphviz ipywidgets matplotlib nbdev>=0.2.12 pandas scikit_learn azure-cognitiveservices-search-imagesearch sentencepiece
#hide
from google.colab import drive
# Mount Google Drive so notebook artifacts persist across Colab sessions.
drive.mount('/content/gdrive/', force_remount=True)
```
## Introduction
We've covered the basics of CNNs with the MNIST data set where we trained a model to recognize handwritten digits. Today, we'll be moving towards residual networks that allow us to train CNNs with more layers.
Since we've already got a good result with the MNIST data set, we'll move onto the Imagenette data set, which is a smaller version of the ImageNet data set.
We train with a smaller version so that we can make small changes without having to wait long periods of time for the model to train.
```
#hide
from fastai.vision.all import *
def get_dls(url, presize, resize):
    """Imagenette dataloaders: download, presize items, then augment to `resize`."""
    path = untar_data(url)
    datablock = DataBlock(
        (ImageBlock, CategoryBlock),
        get_items=get_image_files,
        splitter=GrandparentSplitter(valid_name='val'),
        get_y=parent_label,
        item_tfms=Resize(presize),
        batch_tfms=[*aug_transforms(min_scale=0.5, size=resize),
                    Normalize.from_stats(*imagenet_stats)],
    )
    return datablock.dataloaders(path, bs=128)
#hide_output
# Imagenette at 160px, presized to 160 and batch-resized down to 128.
dls = get_dls(URLs.IMAGENETTE_160, 160, 128)
dls.show_batch(max_n=4)
```
## Adding flexibility through fully convolutional neural networks
First, we'll first change how our model works. With the MNIST data set, we have images of shape 28 $\times$ 28. If we added a few more layers with a stride of 1, then we'd get more layers. But, how do we do classification for images with sizes that aren't 28 $\times$ 28? And, what do we do if we want to have additional layers of different strides?
In reducing the last two dimensions of our output from each layer (the height and width) through strides, we get two problems:
- with larger images, we need a lot more stride 2 layers; and
- the model won't work for images of different shape than our training images.
The latter can be solved through resizing, but do we really want to resize images to 28 $\times$ 28? We might be losing a lot of information for more complex tasks.
So, we solve it through *fully convolutional networks*, which uses a trick of taking the average of activations along a convolutional grid. In other words, we take the average of the last two axes from the final layer like so:
```
def avg_pool(x):
    """Global average pool: mean over the spatial (height, width) axes."""
    return x.mean(dim=(2, 3))
```
Remember that our final layer has a shape of `n_batches * n_channels * height * width`. So, taking the average of the last two axes gives us a new tensor of shape `n_batches * n_channels * 1` from which we flatten to get `n_batches * n_channels`.
Unlike our last approach of striding until our last two axes are 1 $\times$ 1, we can have a final layer with any value for our last two axes and still end up with 1 $\times$ 1 after average pooling. In other words, through average pooling, we can have a CNN that can have as many layers as we want, with images of any shape during inference.
Overall, a fully convolutional network has a number of convolutional layers that can be of any stride, an adaptive average pooling layer, a flatten layer, and then a linear layer. An adaptive average pooling layer allows us to specify the shape of the output, but we want one value so we pass in `1`:
```
def block(ni, nf):
    """One stride-2 ConvLayer (conv + activation + batchnorm) from ni to nf channels."""
    return ConvLayer(ni, nf, stride=2)
def get_model():
    """Fully convolutional net: conv blocks, global average pool, flatten, linear head."""
    widths = [3, 16, 32, 64, 128, 256]
    convs = [block(ni, nf) for ni, nf in zip(widths, widths[1:])]
    return nn.Sequential(
        *convs,
        nn.AdaptiveAvgPool2d(1),
        Flatten(),
        nn.Linear(256, dls.c),
    )
```
The [`ConvLayer`](https://docs.fast.ai/layers.html#ConvLayer) is fastai's version of our `conv` layer from the [last blog](https://geon-youn.github.io/DunGeon/vision/2022/04/30/Convolutional-Neural-Networks.html), which includes the convolutional layer, the activation function, and batch normalization, but also adds more functionalities.
---
*Activation function or nonlinearity?* In the last blog, I defined ReLU as a nonlinearity, because it is a nonlinearity. However, it can also be called an activation function since it's taking the activations from the convolutional layer to output new activations. An activation function is just that: a function between two linear layers.
---
In retrospect, our simple CNN for the MNIST data set looked something like this:
```
def get_simple_cnn():
    """The earlier MNIST-style CNN: stride-2 blocks straight down to 10 channels."""
    widths = [3, 16, 32, 64, 128, 10]
    layers = [block(ni, nf) for ni, nf in zip(widths, widths[1:])]
    return nn.Sequential(*layers, Flatten())
```
In a fully convolutional network we have what we had before in a CNN, except that we pool the last two axes from our final convolutional layer into a unit axis, flatten the output to get rid of the unit axis, then use a linear layer to get `dls.c` output channels. `dls.c` returns how many unique labels there are for our data set.
So, why didn't we just use fully convolutional networks from the beginning? Well, fully convolutional networks take an image, cut it into pieces, shake them all about, do the hokey pokey, and decide, on average, what the image should be classified as. However, we were dealing with an optical character recognition (OCR) problem. With OCR, it doesn't make sense to cut a character into pieces and decide, on average, what character it is.
That doesn't mean fully convolutional networks are useless; they're bad for OCR problems, but they're good for problems where the objects-to-be-classified don't have a specific orientation or size.
Let's try training a fully convolutional network on the Imagenette data set:
```
def get_learner(model):
    """Wrap `model` in a Learner with cross-entropy loss, accuracy, and fp16 training."""
    learner = Learner(dls, model, loss_func=nn.CrossEntropyLoss(), metrics=accuracy)
    return learner.to_fp16()
# Train the fully convolutional baseline for 5 epochs with one-cycle scheduling.
learn = get_learner(get_model())
learn.fit_one_cycle(5, 3e-3)
```
## How resnet came about
Now we're ready to add more layers through a fully convolutional network. But, how does it turn out if we just add more layers? Not that promising.
<figure>
<img src='https://inotgo.com/imagesLocal/202103/24/20210324084503876z_0.png' alt='Comparisons of small and large layer CNNs'>
<figcaption>Comparison of small and large layer CNNs through error rate on training and test sets.</figcaption>
</figure>
You'd expect a model with more layers to have an easier time predicting the correct label. However, [the founders](https://arxiv.org/abs/1512.03385) of resnet found different results when training and comparing the results of a 20- and 56-layer CNN: the 56-layer model was doing worse than the 20-layer model in both training and test sets. Interestingly, the lower performance isn't caused by overfitting because the same pattern persists between the training and test errors.
However, shouldn't we be able to add 36 identity layers (that output the same activations) to the 20-layer model to achieve a 56-layer model that has the same results as the 20-layer model? For some reason, SGD isn't able to find that kind of model.
So, here comes *residuals*. Instead of each layer being the output of `F(x)` where `F` is a layer and `x` is the input, what if we had `x + F(x)`? In essense, we want each layer to learn well. We could go straight to `H(x)`, but we can have it learn `H(x)` by learning `F(x) = H(x) - x`, which turns out to `H(x) = x + F(x)`.
Returning to the idea of identity layers, `F` contains a batchnorm which performs $\gamma y + \beta$ with the output `y`. We could have $\gamma$ equal to 0, which turns `x + F(x)` to `x + 0`, which is equivalent to `x`.
Therefore, we could start with a good 20-layer model, initialize 36 layers on top of it with $\gamma$ initialized to 0, then fine-tune the entire model.
However, instead of starting with a trained model, we have something like this:
<figure>
<img src='https://www.researchgate.net/publication/331195671/figure/fig1/AS:727865238753280@1550548004386/ResNet-module-adapted-from-1.ppm' alt='Diagram of a resnet block'>
<figcaption>The "resnet" layer</figcaption>
</figure>
In a residual network layer, we have an input `x` that passes through two convolutional layers `f` and `g`, where `F(x) = g(f(x))`. The right arrow is the *identity branch* or *skip connection* that gives us the identity part of the equation: `x + F(x)`, whereby `F(x)` is the residual.
So instead of adding on these resnet layers to a 20-layer model, we just initialize a model with these layers. Then, we initialize them randomly per usual and train them with SGD. Skip connections enable SGD to optimize the model even though there's more layers.
Why resnet works so well compared to just adding more layers is like how weight decay works so well: we're training to minimize residuals.
At each layer of a residual network, we're training the residuals given by the function `F` since that's the only part with trainable parameters; however, the output of each layer is `x + F(x)`. So, if the desired output is `H(x)` where `H(x) = x + F(x)`, we're asking the model to predict the *residual* `F(x) = H(x) - x`, which is the difference between the desired output and the given input. Therefore at each optimization step, we're minimizing the error (residual). Hence a residual network is good at learning the difference between doing nothing and doing something (by going through the two weight layers).
So, a residual network block looks like this:
```
class ResBlock(Module):
    """Residual block: returns x + F(x), where F is two conv layers and the
    second has its batchnorm gamma zero-initialised (starts as identity)."""

    def __init__(self, ni, nf):
        self.convs = nn.Sequential(
            ConvLayer(ni, nf),
            ConvLayer(nf, nf, norm_type=NormType.BatchZero),
        )

    def forward(self, x):
        # Skip connection: the input plus the learned residual.
        return self.convs(x) + x
```
By passing `NormType.BatchZero` to the second convolutional layer, we initialize the $\gamma$s in the batchnorm equation to 0.
## How to use a resblock in practice
Although we could begin training with the `ResBlock`, how would `x + F(x)` work if `x` and `F(x)` are of different shapes?
Well, how could they become different shapes? When `ni != nf` or we use a stride that's not 1. Remember that `x` has the shape `n_batch * n_channels * height * width`. If we have `ni != nf`, then `n_channels` is different for `x` and `F(x)`. Similarly, if `stride != 1`, then `height` and `width` would be different for `x` and `F(x)`.
Instead of accepting these restrictions, we can resize `x` to become the same shape as `F(x)`. First, we appply average pooling to change `height` and `width`, then apply a 1 $\times$ 1 convolution (a convolution layer with a kernel size of 1 $\times$ 1) with `ni` in-channels and `nf` out-channels to change `n_channels`.
Through changing our code, we have:
```
def _conv_block(ni, nf, stride):
    """Two 3x3 convs; the second has no activation and zero-initialised BN gamma."""
    return nn.Sequential(
        ConvLayer(ni, nf, stride=stride),
        ConvLayer(nf, nf, act_cls=None, norm_type=NormType.BatchZero),
    )

class ResBlock(Module):
    """Residual block whose identity branch is resized to match F(x) when needed."""

    def __init__(self, ni, nf, stride=1):
        self.convs = _conv_block(ni, nf, stride)
        # `noop` is `lambda x: x` — used when no resizing is required.
        self.idconv = noop if ni == nf else ConvLayer(ni, nf, 1, act_cls=None)
        self.pool = noop if stride == 1 else nn.AvgPool2d(stride, ceil_mode=True)

    def forward(self, x):
        # Both branches end without an activation, so a single ReLU covers the sum.
        identity = self.idconv(self.pool(x))
        return F.relu(identity + self.convs(x))
```
In our new `ResBlock`, one pass will lead to `H(G(x)) + F(x)` where `F` is two convolutional layers and `G` is a pooling layer followed by `H`, a convolutional layer. We have a pooling layer to make the height and width of `x` the same as `F(x)` and the convolutional layer to make `G(x)` have the same out-channels as `F(x)`. Then, `H(G(x))` and `F(x)` will have the same shape all the time, allowing them to be added. Overall, a resnet block is 2 layers deep.
```
def block(ni, nf):
    """Swap the plain conv block for a stride-2 residual block."""
    return ResBlock(ni, nf, stride=2)
```
So, let's try retraining with the new model:
```
# Retrain the same architecture, now built from residual blocks.
learn = get_learner(get_model())
learn.fit_one_cycle(5, 3e-3)
```
We didn't get much of an accuracy boost, but that's because we still have the same number of layers. Let's double the layers:
```
def block(ni, nf):
    """Double the depth: a stride-2 ResBlock followed by a stride-1 one."""
    return nn.Sequential(ResBlock(ni, nf, stride=2), ResBlock(nf, nf))

# Retrain with twice as many layers.
learn = get_learner(get_model())
learn.fit_one_cycle(5, 3e-3)
```
## Implementing a resnet
The first step to go from a resblock to a resnet is to improve the stem of the model.
The first few layers of the model are called its *stem*. Through practice, [some researchers](https://arxiv.org/abs/1812.01187) found improvements by beginning the model with a few convolutional layers followed by a max pooling layer. A max pooling layer, unlike average pooling, takes the maximum instead of the average.
The new stem, which we prepend to the model, looks like this:
```
def _resnet_stem(*sizes):
    """Stem: a stride-2 conv, then stride-1 convs, finished by 3x3 max pooling."""
    layers = []
    for i in range(len(sizes) - 1):
        stride = 2 if i == 0 else 1
        layers.append(ConvLayer(sizes[i], sizes[i + 1], 3, stride=stride))
    layers.append(nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
    return layers
```
Keeping the model simple in the beginning helps with training because with CNNs, the vast majority of *computations*, not parameters, occur in the beginning where images have large dimensions.
```
# Inspect the stem's layers for the 3 -> 32 -> 32 -> 64 configuration.
_resnet_stem(3, 32, 32, 64)
```
These same researchers found additional "tricks" to substantially improve the model: using **four** groups of resnet blocks with channels of 64, 128, 256, and 512. Each group starts with a stride of 2 except for the first one since it's after the max pooling layer from the stem.
Below is *the* resnet:
```
class ResNet(nn.Sequential):
    """Resnet: stem, four groups of ResBlocks, global pooling, and a linear head."""

    def __init__(self, n_out, layers, expansion=1):
        stem = _resnet_stem(3, 32, 32, 64)
        self.channels = [64, 64, 128, 256, 512]
        # Bottleneck variants widen every group after the stem by `expansion`.
        for i in range(1, 5):
            self.channels[i] *= expansion
        blocks = [self._make_layer(*o) for o in enumerate(layers)]
        super().__init__(*stem, *blocks,
                         nn.AdaptiveAvgPool2d(1), Flatten(),
                         nn.Linear(self.channels[-1], n_out))

    def _make_layer(self, idx, n_layers):
        # Every group downsamples except the first (the stem's max pool already did).
        stride = 1 if idx == 0 else 2
        ni, nf = self.channels[idx:idx + 2]
        group = []
        for i in range(n_layers):
            group.append(ResBlock(ni if i == 0 else nf, nf, stride if i == 0 else 1))
        return nn.Sequential(*group)
```
The "total" number of layers in a resnet is double the sum of the layers passed in as `layers` when instantiating `ResNet`, plus 2 from `stem` and `nn.Linear`. We consider `stem` as one layer because of the max pooling layer at the end.
So, a resnet-18 is:
```
#collapse_output
# Resnet-18: four groups of two ResBlocks each.
rn18 = ResNet(dls.c, [2, 2, 2, 2])
rn18
```
Where it has 18 layers since we pass in `[2, 2, 2, 2]` for `layers`, which sums to 8, and double that is `16` (we double since each `ResBlock` is 2 layers deep). Then, we add 2 from `stem` and `nn.Linear` to get 18 "total" layers.
By increasing the number of layers to 18, we see a significant increase in our model's accuracy:
```
# Train the 18-layer resnet with the same schedule as the shallower models.
learn = get_learner(rn18)
learn.fit_one_cycle(5, 3e-3)
```
## Adding more layers to resnet-18
As you increase the number of layers in a resnet, the number of parameters increases substantially. Thus, to avoid running out of GPU memory, we can apply *bottlenecking*, which alters the convolutional layer in `ResBlock` for `F` in `x + F(x)` as follows:
<figure>
<img src='https://miro.medium.com/max/588/0*9tCUFp28oQGOK6bE.jpg' alt='Bottleneck layer'>
</figure>
Previously, we stacked two convolutions with a kernel size of 3. However, a bottleneck layer has a convolution with a kernel size of 1, which decreases the channels by a factor of 4, then a convolution with a kernel size of 3 that maintains the same number of channels, and then a convolution with a kernel size of 1 that increases the number of channels by a factor of 4 to return the original dimension. It's coined *bottleneck* because we start with, in this case, a 256-channel image that's "bottlenecked" by the first convolution into 64 channels, and then enlarged to 256 channels by the last convolution.
Although we'll have another layer, it performs faster than the two convolutions with a kernel size of 3 because convolutions with a kernel size of 1 are much faster.
Through bottleneck layers, we can have more channels in more-or-less the same amount of time. Additionally, we'll have fewer parameters since we replace a 3 $\times$ 3 kernel layer with two 1 $\times$ 1 kernel layers, although one has 4 more out-channels. Overall, we have a difference of 4$:$9.
In code, the bottleneck layer looks like this:
```
def _conv_block(ni, nf, stride):
    """Bottleneck residual branch: 1x1 squeeze to nf/4, 3x3 conv, 1x1 expand to nf.

    The final layer has no activation and zero-initialised batchnorm gamma so
    the whole block starts out as the identity.
    """
    return nn.Sequential(
        # Bug fix: the squeeze/expand convolutions must be 1x1 (as the text
        # describes); `stride=1` left ConvLayer's default kernel size of 3.
        ConvLayer(ni, nf // 4, ks=1),
        ConvLayer(nf // 4, nf // 4, stride=stride),
        ConvLayer(nf // 4, nf, ks=1, act_cls=None, norm_type=NormType.BatchZero),
    )
```
Then, when we make our resnet model, we'll have to pass in 4 for our `expansion` to account for the decreasing factor of 4 in our bottleneck layer.
```
# Resnet-50: bottleneck blocks (expansion 4) in groups of 3/4/6/3.
rn50 = ResNet(dls.c, [3, 4, 6, 3], 4)
learn = get_learner(rn50)
learn.fit_one_cycle(20, 3e-3)
```
## MNIST: to CNN or resnet?
In the previous blog, we achieved a 99.2% accuracy with just the CNN. Let's try training a resnet-50 with the MNIST data set and see what we can get:
```
def get_dls(url, bs=512):
    """MNIST dataloaders: greyscale images, split by the training/testing folders."""
    path = untar_data(url)
    datablock = DataBlock(
        blocks=(ImageBlock(cls=PILImageBW), CategoryBlock),
        get_items=get_image_files,
        splitter=GrandparentSplitter('training', 'testing'),
        get_y=parent_label,
        batch_tfms=Normalize(),
    )
    return datablock.dataloaders(path, bs=bs)
#hide_output
# Rebuild the dataloaders over MNIST with the default batch size of 512.
dls = get_dls(URLs.MNIST)
dls.show_batch(max_n=4)
```
We have to change the first input into `_resnet_stem` to 1 from 3 since the MNIST data set is a greyscale image.
```
class ResNet(nn.Sequential):
    """Resnet variant whose stem takes 1 input channel (greyscale MNIST)."""

    def __init__(self, n_out, layers, expansion=1):
        stem = _resnet_stem(1, 32, 32, 64)
        self.channels = [64, 64, 128, 256, 512]
        # Bottleneck variants widen every group after the stem by `expansion`.
        for i in range(1, 5):
            self.channels[i] *= expansion
        blocks = [self._make_layer(*o) for o in enumerate(layers)]
        super().__init__(*stem, *blocks,
                         nn.AdaptiveAvgPool2d(1), Flatten(),
                         nn.Linear(self.channels[-1], n_out))

    def _make_layer(self, idx, n_layers):
        # Only groups after the first downsample; the stem's max pool covers group 0.
        stride = 1 if idx == 0 else 2
        ni, nf = self.channels[idx:idx + 2]
        group = []
        for i in range(n_layers):
            group.append(ResBlock(ni if i == 0 else nf, nf, stride if i == 0 else 1))
        return nn.Sequential(*group)
# Train a ResNet-50 on MNIST for comparison against the plain CNN.
rn50 = ResNet(dls.c, [3, 4, 6, 3], 4)
learn = get_learner(rn50)
lr = learn.lr_find().valley  # pick the learning rate at the loss-curve valley
learn.fit_one_cycle(20, lr)
```
After training the model by 20 epochs, we can confidently say that it's not always the best idea to start with a more complex architecture that takes much longer to train and gives a worse accuracy.
In addition, I mentioned before that resnets aren't the best for OCR problems like digit recognition since we're slicing the image and deciding, on average, what the digit is.
In this scenario, a regular CNN would be a better choice than a resnet.
## Conclusion
In this blog, we covered residual networks, which allow us to train CNNs with more layers by having the model learn indirectly through residuals. It's as if before, we were just training the weights of the activations, when in reality, we also needed bias to have the models learn efficiently. To visualize, imagine the following:
Before, we had
```
weights * x + bias
```
as our activations, where `x` is our input. But, that just gives us activations. Through residual networks, we have
```
(weights * x + bias) + residuals
```
which adds on another parameter that we can train. Through exploring, we found that we'd need some way to make `(weights * x + bias)` have the same shape as `residuals` to allow them to be added. Thus, we formed
```
reshape * (weights * x + bias) + residuals
```
Therefore, a residual network *fixes* a CNN to be more like how we train neural networks since the above can be simplified to:
```
weights * x + bias
```
Then, to use this new kind of layer, we had to use fully convolutional networks that allow us to have input of any shape. Finally, we looked at a bag of tricks like stems, groups, and bottleneck layers that allow us to train ResNets more efficiently and have many more layers.
At this point, we've covered all the main architectures for training great models with computer vision (CNN and resnet), natural language processing (AWD-LSTM), tabular data (random forests and neural networks), and collaborative filtering (probabilistic matrix factorization and neural networks).
From now on, we'll be looking at the foundations of deep learning and fastai through modifying the mentioned architectures, building better optimization functions than the standard SGD, exactly what PyTorch is doing for us, how to visualize what the model is learning, and a deeper look into what fastai is doing for us through its `Learner` class.
| github_jupyter |
<a href="https://colab.research.google.com/github/VishnuM24/LetsUpgrade-Python/blob/master/Assignment_Day_2.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# - DAY 2 -
##**QUESTION 1 :**
List & its default methods
```
# Demonstrate five built-in list methods on a list of vowels.
vowels = ['a', 'e']

# 1) extend(): append every element of another iterable.
vow2 = ['i', 'o', 'u']
vowels.extend(vow2)
print(vowels)

# 2) append(): add a single element at the end.
vowels.append('y')
print(vowels)

# 3) insert(): place an element at a given index.
vowels.insert(2, 'x')
print(vowels)

# 4) pop(): remove and return the element at an index (here the last one).
vowels.pop(-1)
print(vowels)

# 5) remove(): delete the first occurrence of a value.
vowels.remove('x')
print(vowels)
```
## **Question 2:**
Dictionary & its default methods
```
# Demonstrate five built-in dict methods on a meal plan.
dailyFood = {
    'breakfast': 'dosa',
    'lunch': 'rice',
    'dinner': 'chapathi',
}

# 1) keys(): a live view of the dictionary's keys.
keys_list = dailyFood.keys()
print(keys_list)

# 2) items(): a live view of (key, value) pairs.
dailyfood_list = dailyFood.items()
print(dailyfood_list)

# 3) update(): merge in another mapping (the views above reflect the change).
dailyFood.update({'evening_snack': 'biscuits'})
print(dailyfood_list)

# 4) get(): look up a key without risking a KeyError.
food_lunch = dailyFood.get('lunch')
print(food_lunch)

# 5) pop(): remove a key and return its value.
dailyFood.pop('evening_snack')
print(dailyFood)
```
## **Question 3:**
Sets & its default methods
```
# Demonstrate five built-in set methods.
language_set = {'c', 'python', 'java'}

# 1) add(): insert a single element in place.
language_set.add('PHP')
print(language_set)

# 2) union(): return a new set combining both operands.
programming_set = language_set.union({'c#', 'cpp', 'Go', 'R'})
print(programming_set)

# 3) discard(): remove an element if present (no error when absent).
programming_set.discard('PHP')
print(programming_set)

# 4) issubset(): True when every element also belongs to the other set.
under_programming = language_set.issubset(programming_set)
print(under_programming)

# 5) update(): add every element of another iterable in place.
programming_set.update({'html', 'css', 'javascript'})
print(programming_set)
```
## **Question 4:**
Tuple & its default methods
```
# Demonstrate the two tuple methods (tuples are immutable, so only two exist).
oct_values = ('000', '001', '010', '011', '100', '101', '110', '111')

# 1) count(): number of occurrences of a value.
repeat_3 = oct_values.count('011')
print(repeat_3)

# 2) index(): position of the first occurrence of a value.
indx = oct_values.index('101')
print(indx)

# Plain indexing (not a method): the element at position 6.
oct_values[6]
```
## **Question 5:**
Strings & its default methods
```
# Demonstrate five built-in str methods on an introduction template.
intro_template = 'Hi This is <NAME> from <PLACE>'

# 1) replace(): substitute the two placeholders.
my_template = intro_template.replace('<NAME>', 'Vishnu')
my_template = my_template.replace('<PLACE>', 'Kerala')
print(my_template)

# 2) split(): break the sentence into a list of words.
words = my_template.split(' ')
print(words)

# 3) find(): index of the first occurrence of a substring.
x = my_template.find('Vishnu')
print('My name starts at position', x)

# 4) upper(): uppercase copy of the string.
caps_template = my_template.upper()
print(caps_template)

# 5) isnumeric(): True only when every character is numeric.
isNumber = my_template.isnumeric()
print(isNumber)
temp_string = '2412'
isNo = temp_string.isnumeric()
print(isNo)
```
| github_jupyter |
```
import pandas as pd
import numpy as np

# Load the TMDB 5000 credits and movies data sets from local CSVs.
credit_df=pd.read_csv(r'E:\Projects\Movie recommendation System\DataSet\tmdb_5000_credits.csv')
movie_df=pd.read_csv(r'E:\Projects\Movie recommendation System\DataSet\tmdb_5000_movies.csv')
#movie_df.head()
#credit_df.head()
# Align the credits column names, then join credits onto movies by the shared 'id'.
credit_df.columns=['id','title','cast','crew']
movie_df=movie_df.merge(credit_df,on='id')
#movie_df.head()
#movie_df.describe()
#movie_df.info()
from ast import literal_eval
# These columns hold stringified Python literals (lists of dicts); parse them.
features = ["cast", "crew", "keywords", "genres"]
for feature in features:
    movie_df[feature] = movie_df[feature].apply(literal_eval)
#movie_df[features].head(10)
#movie_df['cast'].head()
def get_director(x):
    """Return the director's name from a crew list, or NaN when none is listed."""
    for member in x:
        if member["job"] == "Director":
            return member["name"]
    return np.nan
def get_list(x):
    """Return up to the first three "name" values from a list of dicts, else []."""
    if not isinstance(x, list):
        # Missing/malformed data is normalised to an empty list.
        return []
    names = [entry["name"] for entry in x]
    return names[:3]
# Pull the director out of the crew, and trim cast/keywords/genres to top-3 names.
movie_df['Director']=movie_df['crew'].apply(get_director)
features = ["cast", "keywords", "genres"]
for feature in features:
    movie_df[feature] = movie_df[feature].apply(get_list)
# Use the original title as the canonical 'title' column after the merge.
movie_df['title']=movie_df['original_title']
#movie_df[['title', 'cast', 'Director', 'keywords', 'genres']].head()
def clean_data(row):
    """Normalise text features: lowercase with spaces removed.

    Lists keep only their string entries; non-list, non-string input becomes "".
    """
    if isinstance(row, list):
        return [item.replace(" ", "").lower() for item in row if isinstance(item, str)]
    if isinstance(row, str):
        return row.replace(" ", "").lower()
    return ""
# Normalise every text feature so e.g. "Tom Hanks" and "Tom Hanksen" can't collide on "tom".
features = ['cast', 'keywords', 'Director', 'genres']
for feature in features:
    movie_df[feature] = movie_df[feature].apply(clean_data)
#movie_df['cast'].head()
def create_soup(features):
    """Concatenate keywords, cast, director, and genres into one text "soup"."""
    parts = [
        ' '.join(features['keywords']),
        ' '.join(features['cast']),
        features['Director'],
        ' '.join(features['genres']),
    ]
    return ' '.join(parts)
# Build one text "soup" per movie, then vectorise it with token counts.
movie_df['soup']=movie_df.apply(create_soup, axis=1)
#movie_df['soup'].head()
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics.pairwise import cosine_similarity
count_vectorizer = CountVectorizer(stop_words='english')
count_matrix=count_vectorizer.fit_transform(movie_df['soup'])
#print(count_matrix.shape)
# Pairwise cosine similarity between every pair of movies.
cosine_sim2 = cosine_similarity(count_matrix, count_matrix)
#print(cosine_sim2.shape)
movie_df = movie_df.reset_index()
# Map each title to its row index for O(1) lookup; drop duplicate titles.
indices = pd.Series(movie_df.index, index=movie_df["title"]).drop_duplicates()
#print(indices.head())
def get_recommendations(title, cosine_sim):
    """Return the 10 titles most similar to `title`.

    Relies on the module-level `indices` (title -> row) and `movie_df`.
    """
    idx = indices[title]
    # Rank every movie by similarity; drop rank 0 (the movie itself), keep the next 10.
    ranked = sorted(enumerate(cosine_sim[idx]), key=lambda pair: pair[1], reverse=True)
    top_ten = ranked[1:11]
    # Each pair is (row index, similarity score); we only need the indices.
    picked = [pair[0] for pair in top_ten]
    return movie_df["title"].iloc[picked]
#print(get_recommendations("Iron Man 2", cosine_sim2))
# Prompt the user for a title and show the ten most similar movies.
Movie_name=str(input('Enter Movie Name'))
get_recommendations(Movie_name, cosine_sim2)
```
| github_jupyter |
# ML Strategy
* Collect more data
* Collect more diverse training set
* Train algorithm longer with gradient descent
* Try Adam instead of gradient descent
* Try bigger networks
* Try smaller networks
* Try dropout
* Add L2 regularization
* Network architecture
* Network architecture
  - Activation
- \# hidden units
# Orthogonalization
For a supervised learning system to do well, you usually need to tune the knobs of your system to make sure that four things hold true.
1. **Fit training set well on cost function** First, is that you usually have to make sure that you're at least doing well on the training set. So performance on the training set needs to pass some acceptability assessment. For some applications, this might mean doing comparably to human level performance. But this will depend on your application, and we'll talk more about comparing to human level performance next week.
2. **Fit dev set well on cost function**
3. **Fit test set well on cost function**
3. **Performs well in real world**
The first is addressed with a bigger network or a better optimization algorithm;
the second with regularization or a bigger training set;
the third with a bigger dev set;
and the fourth by changing the dev set or the cost function.
The exact details of what's precision and recall don't matter too much for this example. But briefly, the definition of precision is, of the examples that your classifier recognizes as cats,
Play video starting at 1 minute 23 seconds and follow transcript1:23
What percentage actually are cats?
Play video starting at 1 minute 32 seconds and follow transcript1:32
So if classifier A has 95% precision, this means that when classifier A says something is a cat, there's a 95% chance it really is a cat. And recall is, of all the images that really are cats, what percentage were correctly recognized by your classifier? So what percentage of actual cats, Are correctly recognized?
<img align='center' src='images/metric.PNG' width='650'/>
* I often recommend that you set up a single real number evaluation metric for your problem. Let's look at an example.
precision: the examples that your classifier recognizes as cats, What percentage actually are cats
So if classifier A has 95% precision, this means that when classifier A says something is a cat, there's a 95% chance it really is a cat.
recall: of all the images that really are cats, what percentage were correctly recognized by your classifier? So what percentage of actual cats, Are correctly recognized? So if classifier A is 90% recall, this means that of all of the images in, say, your dev sets that really are cats, classifier A accurately pulled out 90% of them.
trade-off between precision and recall
The problem with using precision recall as your evaluation metric is that if classifier A does better on recall, which it does here, the classifier B does better on precision, then you're not sure which classifier is better.
you just have to find a new evaluation metric that combines precision and recall.
In the machine learning literature, the standard way to combine precision and recall is something called an F1 score. Think as average of precision (P) and recall
$$F1 = \frac{2}{\frac{1}{P}+\frac{1}{R}}$$ Harmonic mean of precition P and Recall R
what I recommend in this example is, in addition to tracking your performance in the four different geographies, to also compute the average. And assuming that average performance is a reasonable single real number evaluation metric, by computing the average, you can quickly tell that it looks like algorithm C has a lowest average error.
---
**Satisficing and Optimizing metric**
To summarize, if there are multiple things you care about by say there's one as the optimizing metric that you want to do as well as possible on and one or more as satisficing metrics were you'll be satisfice. Almost it does better than some threshold you can now have an almost automatic way of quickly looking at multiple core size and picking the, quote, best one. Now these evaluation matrix must be evaluated or calculated on a training set or a development set or maybe on the test set. So one of the things you also need to do is set up training, dev or development, as well as test sets. In the next video, I want to share with you some guidelines for how to set up training, dev, and test sets. So let's go on to the next.
cost = accuracy - 0.5 * running time
maximize accuracy but subject
that maximizes accuracy but subject to that the running time, that is the time it takes to classify an image, that that has to be less than or equal to 100 milliseconds.
that running time is what we call a satisficing metric
So in this case accuracy is the optimizing metric and a number of false positives every 24 hours is the satisficing metric
**Train/dev/test distributions**
The way you set up your training dev, or development sets and test sets, can have a huge impact on how rapidly you or your team can make progress on building machine learning application.
* Dev set So, that dev set is also called the development set, or sometimes called the hold out cross validation set. And, workflow in machine learning is that you try a lot of ideas, train up different models on the training set, and then use the dev set to evaluate the different ideas and pick one. And, keep iterating to improve dev set performance until, finally, you have one clause that you're happy with that you then evaluate on your test set.
* choose a dev set and test set to reflect data you expect to get in future and consider important to do well on. And, in particular, the dev set and the test set here, should come from the same distribution. So, whatever type of data you expect to get in the future, and once you do well on, try to get data that looks like that.
## Size of Dev set
* So if you had a hundred examples in total, these 70/30 or 60/20/20 rule of thumb would be pretty reasonable. If you had thousand examples, maybe if you had ten thousand examples, these heuristics are not unreasonable.
* say you have a million training examples. it might be quite reasonable to set up your data so that you have 98% in the training set, 1% dev, and 1% test.
## Size of test set
* Set your test set to be enough to give high confidence in the overall performance of your system.
* Maybe all you need is a train and dev set, And I think, not having a test set might be okay
* I do find it reassuring to have a separate test set you can use to get an unbiased estimate of how I was doing before you shift it, but if you have a very large dev set so that you think you won't overfit the dev set too bad
So to summarize, in the era of big data, I think the old rule of thumb of a 70/30 is that, that no longer applies. And the trend has been to use more data for training and less for dev and test, especially when you have a very large data sets. And the rule of thumb is really to try to set the dev set to big enough for its purpose, which helps you evaluate different ideas and pick this up from AOP better. And the purpose of test set is to help you evaluate your final cost buys. You just have to set your test set big enough for that purpose, and that could be much less than 30% of the data. So, I hope that gives some guidance or some suggestions on how to set up your dev and test sets in the Deep Learning era. Next, it turns out that sometimes, part way through a machine learning problem, you might want to change your evaluation metric, or change your dev and test sets. Let's talk about it when you might want to do that.
---
### When to change dev/test sets and metrics
You've seen how set to have a dev set and evaluation metric is like placing a target somewhere for your team to aim at.
**Orthogonalization for better performance**
1. PLace target
$$Error = \frac{1}{\sum_i w^{(i)}}\sum_i w^{(i)}L\{Y_{pred}^{(i)}, y^{(i)} \}$$
$w^{(i)}$ = 1 if $x^{(i)}$ is non-porn
$w^{(i)}$ = 10 if $x^{(i)}$ is porn
*Orthogonalization for cat picture: anti-porn*
* So far weve only discussed how to define a metric to evaluate classifiers (Place the target)
* Worry separately about how to do wel on this metric
---
Bayes optimal error: the best possible error, which can never be surpassed.
*Why compare to human-level performance*
Humans are quite good a lot of task. So long as ML is worse than human, you can:
* get labeled data from humans
* Gain insight from manual error analysis. Why did a person get this right
* Better analysis of bias/variances
# Avoidable Bias
* o the fact that there's a huge gap between how well your algorithm does on your training set versus how humans do shows that your algorithm isn't even fitting the training set well. So in terms of tools to reduce bias or variance, in this case I would say focus on reducing bias. So you want to do things like train a bigger neural network or run training set longer, just try to do better on the training set
* Avoidable bias: difference between bayes and training error. You don't actually want to get below Bayes error.
* Variance: Difference betweem training error and dev error.
* Focus on either bias or variance reduction techniques
<img align='center' src='images/AvoidableBIAS.PNG' width='650'/>
# Understanding Human Level Performance
**Proxy for Bayes Error**
So to recap, having an estimate of human-level performance gives you an estimate of Bayes error. And this allows you to more quickly make decisions as to whether you should focus on trying to reduce a bias or trying to reduce the variance of your algorithm.
Suppose:
### Surpassing human-level performance
* once you've surpassed this 0.5% threshold, your options, your ways of making progress on the machine learning problem are just less clear. It doesn't mean you can't make progress, you might still be able to make significant progress, but some of the tools you have for pointing you in a clear direction just don't work as well
*Problems where ML significantly surpasses human -level performance*
* Online advertising
* Product recommendations
* Logistic (predicting transit time)
* Loan approvals
1. All these examples are actually learning from structured data, where you might have a database of what users have clicked on. These are not actual perception problems; these are not computer vision.
2. today there are speech recognition systems that can surpass human-level performance. And there are also some computer vision, some image recognition tasks, where computers have surpassed human-level performance
3. Medical
ECGs, skin cancer, narrow radiology task
## Improving your model performance
**supervised learning algorithm to work well**
1. You can fit the training set pretty well
- LOW Avoidable bias
- Problem is solved by training a bigger network or training longer
2. The Training set performance generalizes pretty well to the dev/test set
- Variance
- Problem is solved regularization or getting more training data that could help you generalize better to dev set dat
**Steps.**
1. looking at the difference between your training error and your proxy for Bayes error and just gives you a sense of the avoidable bias. In other words, just how much better do you think you should be trying to do on your training set.
2. And then look at the difference between your dev error and your training error as an estimate of how much of a variance problem you have. In other words, how much harder you should be working to make your performance generalized from the training set to the dev set that it wasn't trained on explicitly.
<img align='left' src='images/ruleofthumb.PNG' width='650'/>
```
[9]*7  # list repetition: a list of seven nines
```
# Carrying out error analysis
If you're trying to get a learning algorithm to do a task that humans can do. And if your learning algorithm is not yet at the performance of a human. Then manually examining mistakes that your algorithm is making, can give you insights into what to do next. This process is called error analysis.
In machine learning, sometimes we call this the ceiling on performance. Which just means, what's in the best case? How well could working on the dog problem help you?
error analysis, can save you a lot of time. In terms of deciding what's the most important, or what's the most promising direction to focus on.
In this slide, we'll describe using error analysis to evaluate whether or not a single idea, dogs in this case, is worth working on.
*Look at dev examples to evaluate ideas*
Error analysis:
* Get ~100 mislabeled dev set examples
* count up how many are dogs
5/100
"Ceiling" upper bound on how much you could improve performance
In other case 50/100 are dogs, it is worth spending time on the dog problem.
*Evaluate multiple idea in parallel*
Ideas for cat detection:
* Fix pictures of dogs being recognized as cats
* Fix great cats (lions, panther, ) being misrecognized
* Improve performance on blurry images
# Cleaning up incorrectly labeled data
**deep learning algorithms are quite robust to random errors in the training set.**
* They are less robust to systematic errors.
So for example, if your labeler consistently labels white dogs as cats, then that is a problem because your classifier will learn to classify all white colored dogs as cats
**here are a few additional guidelines or principles to consider**
* f you're going in to fix something on the dev set, I would apply the same process to the test set to make sure that they continue to come from the same distribution
* It's super important that your dev and test sets come from the same distribution.
# Build your first system quickly, then iterate
If you're working on a brand new machine learning application, one of the piece of advice I often give people is that, I think you should build your first system quickly and then iterate. your main goal is to build something that works, as opposed to if your main goal is to invent a new machine learning algorithm which is a different goal, then your main goal is to get something that works really well. I'd encourage you to build something quick and dirty. Use that to do bias/variance analysis, use that to do error analysis and use the results of those analysis to help you prioritize where to go next.
* Set up dev/set and metric
* Build initial system quickly
* Use bias/variance analysis & error analysis to prioritize next steps
# Training and testing on different distributions
et. So in this video, you've seen a couple examples of when allowing your training set data to come from a different distribution than your dev and test set allows you to have much more training data. And in these examples, it will cause your learning algorithm to perform better. Now one question you might ask is, should you always use all the data you have? The answer is subtle, it is not always yes.
# Bias and Variance with mismatched data distributions
Previously we had set up some training sets and some dev sets and some test sets as follows. And the dev and test sets have the same distribution, but the training sets will have some different distribution. What we're going to do is randomly shuffle the training sets and then carve out just a piece of the training set to be the training-dev set. So just as the dev and test set have the same distribution, the training set and the training-dev set, also have the same distribution.
**Key QUantities**
- HUman Level error
- Train set error
- Train dev - set error
- Dev error
<img align='center' src='images/biasmismatch.PNG' width='400'/>
**More general formulation**
So what we've seen is that by using training data that can come from a different distribution as a dev and test set, this could give you a lot more data and therefore help the performance of your learning algorithm. But instead of just having bias and variance as two potential problems, you now have this third potential problem, data mismatch. So what if you perform error analysis and conclude that data mismatch is a huge source of error, how do you go about addressing that? It turns out that unfortunately there are super systematic ways to address data mismatch, but there are a few things you can try that could help. Let's take a look at them in the next video.
<img align='center' src='images/data_mismatch.PNG' width='700'/>
# Addressing data mismatch
If your training set comes from a different distribution, than your dev and test set, and if error analysis shows you that you have a data mismatch problem, what can you do?
1. Carry out manual error analysis and try to understand the differences between the training set and the dev/test sets. To avoid overfitting the test set, technically for error analysis, you should manually only look at a dev set and not at the test set
2. try to collect more data similar to your dev and test sets.
So, to summarize, if you think you have a data mismatch problem, I recommend you do error analysis, or look at the training set, or look at the dev set to try this figure out, to try to gain insight into how these two distributions of data might differ. And then see if you can find some ways to get more training data that looks a bit more like your dev set. One of the ways we talked about is artificial data synthesis. And artificial data synthesis does work. In speech recognition, I've seen artificial data synthesis significantly boost the performance of what were already very good speech recognition system. So, it can work very well. But, if you're using artificial data synthesis, just be cautious and bear in mind whether or not you might be accidentally simulating data only from a tiny subset of the space of all possible examples. So, that's it for how to deal with data mismatch.
# Transfer learning
But if you have a lot of data, then maybe you can retrain all the parameters in the network. And if you retrain all the parameters in the neural network, then this initial phase of training on image recognition is sometimes called pre-training, because you're using image recognitions data to pre-initialize or really pre-train the weights of the neural network. And then if you are updating all the weights afterwards, then training on the radiology data sometimes that's called fine tuning.
- Pre-training
- FIne tuning
And the reason this can be helpful is that a lot of the low level features such as detecting edges, detecting curves, detecting positive objects. Learning from that, from a very large image recognition data set, might help your learning algorithm do better in radiology diagnosis. It's just learned a lot about the structure and the nature of how images look like and some of that knowledge will be useful. So having learned to recognize images, it might have learned enough about you know, just what parts of different images look like, that that knowledge about lines, dots, curves, and so on, maybe small parts of objects, that knowledge could help your radiology diagnosis network learn a bit faster or learn with less data
you're transferring from a problem with a lot of data to a problem with relatively little data.
**When transfer learning makes sense**
* Task A and B have the same input X
* You have a lot more data for Task A than Task B
* Low level features from A could be helpful for learning B
# Multi-task learning
So whereas in transfer learning, you have a sequential process where you learn from task A and then transfer that to task B. In multi-task learning, you start off simultaneously, trying to have one neural network do several things at the same time. And then each of these task helps hopefully all of the other task. Let's look at an example.
So to summarize, multi-task learning enables you to train one neural network to do many tasks and this can give you better performance than if you were to do the tasks in isolation. Now one note of caution, in practice I see that transfer learning is used much more often than multi-task learning. So I do see a lot of tasks where if you want to solve a machine learning problem but you have a relatively small data set, then transfer learning can really help. Where if you find a related problem but you have a much bigger data set, you can train in your neural network from there and then transfer it to the problem where we have very low data. So transfer learning is used a lot today. There are some applications of transfer multi-task learning as well, but multi-task learning I think is used much less often than transfer learning. And maybe the one exception is computer vision object detection,
<img align='center' src='images/multi.PNG' width='900'/>
# What is end-to-end deep learning?
Briefly, there have been some data processing systems, or learning systems that require multiple stages of processing. And what end-to-end deep learning does, is it can take all those multiple stages, and replace it usually with just a single neural network.
Example: Face recognition
first crop the face,
Then train ML to recognize the person.
It is not a good approach to train ML to images where people is approaching to the camera
**Pros**
* Let the data speak
* Less hand-designing of components needed
**Cons**
* May need large amount of data
* Excludes potentially useful hand-designed components
## Whether to use end-to-end deep learning
* Use DL to learn individual components
* when applying supervised learning you should carefully choose what types of X to Y mappings you want to learn, depending on what task you can get data for
| github_jupyter |
# Travail Ecrit - Python
* Gymnase du Bugnon, site de l'Ours
* OC informatique
* Sujet : chapitres 1-10 du livre *Pensez en Python*
* Mirko Pirona
* Date : jeudi 13 novembre 2018
## **Exercice : expression arithmรฉtique**
Initialisez les variables `(a, b, c, x)` avec les valeurs `(2, 3, 4, 5)`.
Calculez l'expression
$$y = a x^2 + b x +c$$
et imprimez le résultat.
```
# Evaluate y = a*x**2 + b*x + c for (a, b, c, x) = (2, 3, 4, 5).
a = 2
b = 3
c = 4
x = 5
# BUG FIX: exponentiation is '**'; the original used '^', which is bitwise XOR
# (and has lower precedence than '+'), yielding 31 instead of 69.
y = a * x**2 + b * x + c
print(y)
```
## **Exercice : fonction surface**
Importez le module `math`,
définissez une fonction `surface(r)` qui calcule $s = \pi r^2$,
affichez avec un texte descriptif le résultat pour `r=5`
```
import math

def surface(r):
    """Compute and print the area s = pi * r**2 of a disk of radius r.

    Returns the area so callers can use the value as well.
    """
    # BUG FIX: '^' is bitwise XOR (a TypeError on floats); exponentiation is '**'.
    s = math.pi * r**2
    print(s, '=', 'la surface de rayon r')
    return s
```
## **Exercice : formule quadratique**
La solution d'une formule quadratique de forme
$$ a x^2 + b x +c = 0 $$
dépend du terme $\Delta = b^2 - 4 a c$
* Si $\Delta < 0$ il n'y a pas de solution
* Si $\Delta = 0$ il y a une solution : $x = \frac{-b}{2 a}$
* Si $\Delta > 0$ il y a deux solutions :
$x_1 = \frac{-b +\sqrt\Delta}{2 a}$ and $x_2 = \frac{-b -\sqrt\Delta}{2 a}$
Définissez une fonction `quadratique(a, b, c)` qui retourne la solution à l'équation quadratique dans les 3 cas: `None`, `x`, `[x1, x2]`
.
Montrez la solution pour `quadratique(1, 2, 3)`, `quadratique(1, 2, 1)` et `quadratique(1, 2, -1)`
```
import math

def quadratique(a, b, c):
    """Solve a*x**2 + b*x + c = 0 over the reals.

    Returns None when there is no real solution, the single root x when the
    discriminant is zero, and [x1, x2] when there are two roots.
    """
    # BUG FIX: the original had several syntax errors ('4a*c', 'elif ... = 0',
    # 'else cond:') and used '^' (XOR) instead of '**'; it also printed
    # instead of returning, contrary to the exercise statement.
    delta = b**2 - 4*a*c
    if delta < 0:
        return None
    if delta == 0:
        return -b / (2*a)
    racine = math.sqrt(delta)
    return [(-b + racine) / (2*a), (-b - racine) / (2*a)]

print(quadratique(1, 2, 3))
print(quadratique(1, 2, 1))
print(quadratique(1, 2, -1))
```
## **Exercice : capitalize**
Créez une fonction `capitalize(c)` qui transforme une lettre en majuscule si c'est une minuscule, ou la laisse inchangée autrement.
```
def capitalize(c):
    """Return c uppercased if it is a lowercase letter; otherwise return c unchanged."""
    # BUG FIX: the original had an 'else cond:' syntax error, compared strings
    # with 'is' (identity, not equality), and printed instead of returning.
    if isinstance(c, str) and c.islower():
        return c.upper()
    return c

capitalize('a'), capitalize('B'), capitalize('3')
```
## **Exercice : capitalize words**
Créez une fonction `capitalize_words(s)` qui transforme la première lettre de tous les mots en majuscule.
```
def capitalize_words(s):
    """Return s with the first letter of every space-separated word upper-cased.

    Unlike str.title(), the rest of each word is left untouched.
    """
    # Fix: the draft upper-cased the *entire* string once per character;
    # only the first letter of each word must change.
    return ' '.join(word[:1].upper() + word[1:] for word in s.split(' '))

print(capitalize_words('hello world, how are you?'))
```
## **Exercice : tranches**
Expliquez ce que font les 6 opérateurs de **tranches** ci-dessous.
```
s=['a', 'b', 'c', 'd', 'e',]
s[::2]
s[:2]
s[::-1]
# s[2]   - the third element of the list (index 2)
# s[:2]  - the first two elements
# s[::2] - every second element (step 2)
# s[-1]  - the last element
# s[:-1] - everything except the last element
# s[::-1] - the whole list in reverse order
```
## **Exercice : longueur spécifique**
Le fichier `words.txt` contient 58110 mots en anglais.
Affichez les $n=5$ premiers mots qui ont une longueur de $m=10$ et affichez leur nombre total.
```
# Print the first n=5 words of length m=10 from words.txt, then their total count.
n = 5
m = 10
total = 0    # how many length-m words exist in the file
printed = 0  # how many we have displayed so far
# Fix: the draft used the invalid 'else <cond>:' form, broke out of the loop
# on the first short word, and never counted anything.
with open('words.txt') as fin:
    for line in fin:
        word = line.strip()  # drop the trailing newline before measuring
        if len(word) == m:
            total += 1
            if printed < n:
                print(word)
                printed += 1
print(total)
```
## **Exercice : répétition**
Affichez les $m=5$ premiers mots qui sont composés de deux parties répétées (par exemple **bonbon**).
```
# Print the first m=5 words made of two identical halves (e.g. 'bonbon').
m = 5
printed = 0
# Fixes vs the draft: the 'if' was missing its colon, len(...)/2 is a float
# and cannot index, and it printed an undefined name 'i'.
with open('words.txt') as fin:
    for line in fin:
        word = line.strip()  # drop the trailing newline
        half = len(word) // 2
        if word and len(word) % 2 == 0 and word[:half] == word[half:]:
            print(word)
            printed += 1
            if printed >= m:
                break
```
## **Exercice : minimum**
Créez une fonction `min(L)` qui retourne le minimum d'une liste et l'index de sa position sous forme de liste `[val, pos]`.
```
def min(L):  # intentionally shadows the builtin, as the exercise requires
    """Return [smallest value, index of its first occurrence] for list L."""
    # Fix: the draft printed the *last* element and the length of the list.
    # Scan manually because the builtin min() is shadowed here.
    best = L[0]
    pos = 0
    for i, x in enumerate(L):
        if x < best:
            best, pos = x, i
    return [best, pos]

L = [1, 3, 34, -4, -2, 100]
print(min(L))
```
## **Exercice : moyenne**
Écrivez une fonction `mean(L)` qui retourne la moyenne d'une liste.
```
def mean(L):
    """Return the arithmetic mean of the values in list L."""
    # Fix: the draft divided by 2 instead of by the number of elements.
    return sum(L) / len(L)

L = [1, 3, 34, -4, -2, 100]
print(mean(L))
```
| github_jupyter |
# Pyomo Examples
## Example 19.3: Linear Programming Refinery
```
import pandas as pd

# Problem data for the refinery LP: products, feeds, prices, costs, yields.
PRODUCTS = ['Gasoline', 'Kerosine', 'Fuel Oil', 'Residual']
FEEDS = ['Crude 1', 'Crude 2']

# Sales price ($/bbl) and production caps (bbl) per product.
products = pd.DataFrame(
    {'Price': [72, 48, 42, 20],
     'Max Production': [24000, 2000, 6000, 100000]},
    index=PRODUCTS)

# Per-barrel processing and purchase costs of each crude feed.
crudes = pd.DataFrame(
    {'Processing Cost': [1.00, 2.00],
     'Feed Costs': [48, 30]},
    index=FEEDS)

# Fraction of each product obtained from one barrel of each crude.
yields = pd.DataFrame(
    {'Crude 1': [0.80, 0.05, 0.10, 0.05],
     'Crude 2': [0.44, 0.10, 0.36, 0.10]},
    index=PRODUCTS)

print('\n', products)
print('\n', crudes)
print('\n', yields)

# Small dict demo: price lookups by product name.
price = {'Gasoline': 72, 'Kerosine': 48}
price

products.loc['Gasoline','Price']
# Refinery LP, first formulation.
# NOTE(review): this cell references names defined only in the *next* cell
# (pyomo.environ's ConcreteModel/Var/..., processing_costs, max_production,
# product_yield) — run that cell first.  Also note the feed labels here
# ('Crude 1'/'Crude 2') do not match the dict keys there ('Crude #1'/...) —
# verify before relying on this cell.

# model formulation
model = ConcreteModel()

# decision variables: bbl of each crude processed (x) / product made (y)
model.x = Var(FEEDS, domain=NonNegativeReals)
model.y = Var(PRODUCTS, domain=NonNegativeReals)

# objective: profit = sales income - raw material cost - processing cost
# Fix: DataFrame.ix was deprecated and removed from pandas; use .loc.
income = sum(products.loc[p, 'Price'] * model.y[p] for p in PRODUCTS)
raw_materials_cost = sum(crudes.loc[f, 'Feed Costs'] * model.x[f] for f in FEEDS)
processing_cost = sum(processing_costs[f] * model.x[f] for f in FEEDS)
profit = income - raw_materials_cost - processing_cost
model.objective = Objective(expr = profit, sense=maximize)

# constraints: production caps and product-yield balances
model.constraints = ConstraintList()
for p in PRODUCTS:
    model.constraints.add(0 <= model.y[p] <= max_production[p])
for p in PRODUCTS:
    model.constraints.add(model.y[p] == sum(model.x[f] * product_yield[(f,p)] for f in FEEDS))

solver = SolverFactory('glpk')
solver.solve(model)

model.pprint()
from pyomo.environ import *
import numpy as np

# problem data
FEEDS = ['Crude #1', 'Crude #2']
PRODUCTS = ['Gasoline', 'Kerosine', 'Fuel Oil', 'Residual']

# feed costs ($/bbl purchased)
feed_costs = {'Crude #1': 48,
              'Crude #2': 30}

# processing costs ($/bbl processed)
processing_costs = {'Crude #1': 1.00,
                    'Crude #2': 2.00}

# yield data: fraction of each product per bbl of each crude
# Fix: removed a dangling `product_yield =` line that made this cell a SyntaxError.
product_yield = {('Crude #1', 'Gasoline'): 0.80,
                 ('Crude #1', 'Kerosine'): 0.05,
                 ('Crude #1', 'Fuel Oil'): 0.10,
                 ('Crude #1', 'Residual'): 0.05,
                 ('Crude #2', 'Gasoline'): 0.44,
                 ('Crude #2', 'Kerosine'): 0.10,
                 ('Crude #2', 'Fuel Oil'): 0.36,
                 ('Crude #2', 'Residual'): 0.10}

# product sales prices ($/bbl)
sales_price = {'Gasoline': 72,
               'Kerosine': 48,
               'Fuel Oil': 42,
               'Residual': 20}

# production limits (bbl)
max_production = {'Gasoline': 24000,
                  'Kerosine': 2000,
                  'Fuel Oil': 6000,
                  'Residual': 100000}

# model formulation
model = ConcreteModel()

# variables: x = bbl of each feed processed, y = bbl of each product made
model.x = Var(FEEDS, domain=NonNegativeReals)
model.y = Var(PRODUCTS, domain=NonNegativeReals)

# objective: maximize profit = income - feed cost - processing cost
income = sum(sales_price[p] * model.y[p] for p in PRODUCTS)
raw_materials_cost = sum(feed_costs[f] * model.x[f] for f in FEEDS)
processing_cost = sum(processing_costs[f] * model.x[f] for f in FEEDS)
profit = income - raw_materials_cost - processing_cost
model.objective = Objective(expr = profit, sense=maximize)

# constraints: production caps and yield balances
model.constraints = ConstraintList()
for p in PRODUCTS:
    model.constraints.add(0 <= model.y[p] <= max_production[p])
for p in PRODUCTS:
    model.constraints.add(model.y[p] == sum(model.x[f] * product_yield[(f,p)] for f in FEEDS))

solver = SolverFactory('glpk')
solver.solve(model)

model.pprint()

# evaluate the optimal objective components
profit()
income()
raw_materials_cost()
processing_cost()
## Example: Making Change
One of the important modeling features of Pyomo is the ability to index variables and constraints. The example below indexes the decision variables by the set of available coin denominations.
```
from pyomo.environ import *

def make_change(amount, coins):
    """Return the minimum-cardinality combination of coins summing to `amount`.

    `coins` maps coin name -> face value; the result maps coin name -> count.
    Formulated as a small integer program and solved with GLPK.
    """
    m = ConcreteModel()
    m.C = Set(initialize=coins.keys())
    # how many of each coin to hand out
    m.x = Var(m.C, domain=NonNegativeIntegers)
    # minimize the total number of coins
    m.n = Objective(expr=sum(m.x[c] for c in m.C), sense=minimize)
    # the coins must add up to exactly `amount`
    m.v = Constraint(expr=sum(m.x[c] * coins[c] for c in m.C) == amount)
    SolverFactory('glpk').solve(m)
    return {c: int(m.x[c]()) for c in m.C}

# problem data
coins = {
    'penny' : 1,
    'nickel' : 5,
    'dime' : 10,
    'quarter': 25,
}

make_change(93, coins)
```
## Example: Linear Production Model with Constraints with Duals
```
from pyomo.environ import *

# Linear production model; duals are requested so constraint shadow prices
# can be reported after the solve.
model = ConcreteModel()

# for access to dual solution for constraints
model.dual = Suffix(direction=Suffix.IMPORT)

# declare decision variables
model.x = Var(domain=NonNegativeReals)
model.y = Var(domain=NonNegativeReals)

# declare objective
model.profit = Objective(expr = 40*model.x + 30*model.y, sense = maximize)

# declare constraints
model.demand = Constraint(expr = model.x <= 40)
model.laborA = Constraint(expr = model.x + model.y <= 80)
model.laborB = Constraint(expr = 2*model.x + model.y <= 100)

# solve
SolverFactory('glpk').solve(model).write()

# report each constraint's value, slacks and dual.
# Fix: the format string was named `str`, shadowing the builtin.
row_fmt = " {0:7.2f} {1:7.2f} {2:7.2f} {3:7.2f}"
print("Constraint value lslack uslack dual")
for c in [model.demand, model.laborA, model.laborB]:
    print(c, row_fmt.format(c(), c.lslack(), c.uslack(), model.dual[c]))
```
| github_jupyter |
```
%run homework_modules.ipynb
import torch
from torch.autograd import Variable
import numpy
import unittest
class TestLayers(unittest.TestCase):
    """Check custom NN layers (from homework_modules) against torch.nn references.

    Each test builds the torch reference and the custom layer with identical
    parameters, then compares forward output, input gradient and (where
    applicable) parameter gradients on random float32 batches.
    """

    def test_Linear(self):
        """Linear: forward, input grad and W/b grads must match torch.nn.Linear."""
        np.random.seed(42)
        torch.manual_seed(42)
        batch_size, n_in, n_out = 2, 3, 4
        for _ in range(100):
            # layers initialization: copy torch's weights into the custom layer
            torch_layer = torch.nn.Linear(n_in, n_out)
            custom_layer = Linear(n_in, n_out)
            custom_layer.W = torch_layer.weight.data.numpy()
            custom_layer.b = torch_layer.bias.data.numpy()

            layer_input = np.random.uniform(-10, 10, (batch_size, n_in)).astype(np.float32)
            next_layer_grad = np.random.uniform(-10, 10, (batch_size, n_out)).astype(np.float32)

            # 1. check layer output
            custom_layer_output = custom_layer.updateOutput(layer_input)
            layer_input_var = Variable(torch.from_numpy(layer_input), requires_grad=True)
            torch_layer_output_var = torch_layer(layer_input_var)
            self.assertTrue(np.allclose(torch_layer_output_var.data.numpy(), custom_layer_output, atol=1e-6))

            # 2. check layer input grad
            custom_layer_grad = custom_layer.updateGradInput(layer_input, next_layer_grad)
            torch_layer_output_var.backward(torch.from_numpy(next_layer_grad))
            torch_layer_grad_var = layer_input_var.grad
            self.assertTrue(np.allclose(torch_layer_grad_var.data.numpy(), custom_layer_grad, atol=1e-6))

            # 3. check layer parameters grad
            custom_layer.accGradParameters(layer_input, next_layer_grad)
            weight_grad = custom_layer.gradW
            bias_grad = custom_layer.gradb
            torch_weight_grad = torch_layer.weight.grad.data.numpy()
            torch_bias_grad = torch_layer.bias.grad.data.numpy()
            self.assertTrue(np.allclose(torch_weight_grad, weight_grad, atol=1e-6))
            self.assertTrue(np.allclose(torch_bias_grad, bias_grad, atol=1e-6))

    def test_SoftMax(self):
        """SoftMax: forward and input grad must match torch.nn.Softmax(dim=1)."""
        np.random.seed(42)
        torch.manual_seed(42)
        batch_size, n_in = 2, 4
        for _ in range(100):
            # layers initialization
            torch_layer = torch.nn.Softmax(dim=1)
            custom_layer = SoftMax()

            layer_input = np.random.uniform(-10, 10, (batch_size, n_in)).astype(np.float32)
            # upstream grad: normalized, clipped away from 0, then inverted
            next_layer_grad = np.random.random((batch_size, n_in)).astype(np.float32)
            next_layer_grad /= next_layer_grad.sum(axis=-1, keepdims=True)
            next_layer_grad = next_layer_grad.clip(1e-5,1.)
            next_layer_grad = 1. / next_layer_grad

            # 1. check layer output
            custom_layer_output = custom_layer.updateOutput(layer_input)
            layer_input_var = Variable(torch.from_numpy(layer_input), requires_grad=True)
            torch_layer_output_var = torch_layer(layer_input_var)
            self.assertTrue(np.allclose(torch_layer_output_var.data.numpy(), custom_layer_output, atol=1e-5))

            # 2. check layer input grad
            custom_layer_grad = custom_layer.updateGradInput(layer_input, next_layer_grad)
            torch_layer_output_var.backward(torch.from_numpy(next_layer_grad))
            torch_layer_grad_var = layer_input_var.grad
            self.assertTrue(np.allclose(torch_layer_grad_var.data.numpy(), custom_layer_grad, atol=1e-5))

    def test_LogSoftMax(self):
        """LogSoftMax: forward and input grad must match torch.nn.LogSoftmax(dim=1)."""
        np.random.seed(42)
        torch.manual_seed(42)
        batch_size, n_in = 2, 4
        for _ in range(100):
            # layers initialization
            torch_layer = torch.nn.LogSoftmax(dim=1)
            custom_layer = LogSoftMax()

            layer_input = np.random.uniform(-10, 10, (batch_size, n_in)).astype(np.float32)
            next_layer_grad = np.random.random((batch_size, n_in)).astype(np.float32)
            next_layer_grad /= next_layer_grad.sum(axis=-1, keepdims=True)

            # 1. check layer output
            custom_layer_output = custom_layer.updateOutput(layer_input)
            layer_input_var = Variable(torch.from_numpy(layer_input), requires_grad=True)
            torch_layer_output_var = torch_layer(layer_input_var)
            self.assertTrue(np.allclose(torch_layer_output_var.data.numpy(), custom_layer_output, atol=1e-6))

            # 2. check layer input grad
            custom_layer_grad = custom_layer.updateGradInput(layer_input, next_layer_grad)
            torch_layer_output_var.backward(torch.from_numpy(next_layer_grad))
            torch_layer_grad_var = layer_input_var.grad
            self.assertTrue(np.allclose(torch_layer_grad_var.data.numpy(), custom_layer_grad, atol=1e-6))

    def test_BatchNormalization(self):
        """BatchNormalization: train/eval forward, input grad and running mean vs torch.nn.BatchNorm1d."""
        np.random.seed(42)
        torch.manual_seed(42)
        batch_size, n_in = 32, 16
        for _ in range(100):
            # layers initialization
            # NOTE(review): `slope` looks unused here, but removing it would
            # shift the np.random stream consumed below — verify before cleanup.
            slope = np.random.uniform(0.01, 0.05)
            alpha = 0.9
            custom_layer = BatchNormalization(alpha)
            custom_layer.train()
            # torch momentum is (1 - alpha): torch and the custom layer define
            # the moving-average coefficient from opposite ends
            torch_layer = torch.nn.BatchNorm1d(n_in, eps=custom_layer.EPS, momentum=1.-alpha, affine=False)
            custom_layer.moving_mean = torch_layer.running_mean.numpy().copy()
            custom_layer.moving_variance = torch_layer.running_var.numpy().copy()

            layer_input = np.random.uniform(-5, 5, (batch_size, n_in)).astype(np.float32)
            next_layer_grad = np.random.uniform(-5, 5, (batch_size, n_in)).astype(np.float32)

            # 1. check layer output
            custom_layer_output = custom_layer.updateOutput(layer_input)
            layer_input_var = Variable(torch.from_numpy(layer_input), requires_grad=True)
            torch_layer_output_var = torch_layer(layer_input_var)
            self.assertTrue(np.allclose(torch_layer_output_var.data.numpy(), custom_layer_output, atol=1e-6))

            # 2. check layer input grad
            custom_layer_grad = custom_layer.updateGradInput(layer_input, next_layer_grad)
            torch_layer_output_var.backward(torch.from_numpy(next_layer_grad))
            torch_layer_grad_var = layer_input_var.grad
            # please, don't increase `atol` parameter, it's garanteed that you can implement batch norm layer
            # with tolerance 1e-5
            self.assertTrue(np.allclose(torch_layer_grad_var.data.numpy(), custom_layer_grad, atol=1e-5))

            # 3. check moving mean
            self.assertTrue(np.allclose(custom_layer.moving_mean, torch_layer.running_mean.numpy()))
            # we don't check moving_variance because pytorch uses slightly different formula for it:
            # it computes moving average for unbiased variance (i.e var*N/(N-1))
            #self.assertTrue(np.allclose(custom_layer.moving_variance, torch_layer.running_var.numpy()))

            # 4. check evaluation mode
            custom_layer.moving_variance = torch_layer.running_var.numpy().copy()
            custom_layer.evaluate()
            custom_layer_output = custom_layer.updateOutput(layer_input)
            torch_layer.eval()
            torch_layer_output_var = torch_layer(layer_input_var)
            self.assertTrue(np.allclose(torch_layer_output_var.data.numpy(), custom_layer_output, atol=1e-6))

    def test_Sequential(self):
        """Sequential(BatchNorm + ChannelwiseScaling) must match affine BatchNorm1d."""
        np.random.seed(42)
        torch.manual_seed(42)
        batch_size, n_in = 2, 4
        for _ in range(100):
            # layers initialization: affine torch BN == custom BN + scaling
            alpha = 0.9
            torch_layer = torch.nn.BatchNorm1d(n_in, eps=BatchNormalization.EPS, momentum=1.-alpha, affine=True)
            torch_layer.bias.data = torch.from_numpy(np.random.random(n_in).astype(np.float32))
            custom_layer = Sequential()
            bn_layer = BatchNormalization(alpha)
            bn_layer.moving_mean = torch_layer.running_mean.numpy().copy()
            bn_layer.moving_variance = torch_layer.running_var.numpy().copy()
            custom_layer.add(bn_layer)
            scaling_layer = ChannelwiseScaling(n_in)
            scaling_layer.gamma = torch_layer.weight.data.numpy()
            scaling_layer.beta = torch_layer.bias.data.numpy()
            custom_layer.add(scaling_layer)
            custom_layer.train()

            layer_input = np.random.uniform(-5, 5, (batch_size, n_in)).astype(np.float32)
            next_layer_grad = np.random.uniform(-5, 5, (batch_size, n_in)).astype(np.float32)

            # 1. check layer output
            custom_layer_output = custom_layer.updateOutput(layer_input)
            layer_input_var = Variable(torch.from_numpy(layer_input), requires_grad=True)
            torch_layer_output_var = torch_layer(layer_input_var)
            self.assertTrue(np.allclose(torch_layer_output_var.data.numpy(), custom_layer_output, atol=1e-6))

            # 2. check layer input grad
            custom_layer_grad = custom_layer.backward(layer_input, next_layer_grad)
            torch_layer_output_var.backward(torch.from_numpy(next_layer_grad))
            torch_layer_grad_var = layer_input_var.grad
            self.assertTrue(np.allclose(torch_layer_grad_var.data.numpy(), custom_layer_grad, atol=1e-5))

            # 3. check layer parameters grad (index 1 = the scaling layer)
            weight_grad, bias_grad = custom_layer.getGradParameters()[1]
            torch_weight_grad = torch_layer.weight.grad.data.numpy()
            torch_bias_grad = torch_layer.bias.grad.data.numpy()
            self.assertTrue(np.allclose(torch_weight_grad, weight_grad, atol=1e-6))
            self.assertTrue(np.allclose(torch_bias_grad, bias_grad, atol=1e-6))

    def test_Dropout(self):
        """Dropout: inverted-scaling output, matching grad mask, identity in eval mode."""
        np.random.seed(42)
        batch_size, n_in = 2, 4
        for _ in range(100):
            # layers initialization
            p = np.random.uniform(0.3, 0.7)
            layer = Dropout(p)
            layer.train()

            layer_input = np.random.uniform(-5, 5, (batch_size, n_in)).astype(np.float32)
            next_layer_grad = np.random.uniform(-5, 5, (batch_size, n_in)).astype(np.float32)

            # 1. check layer output: each element is either zeroed or scaled by 1/(1-p)
            layer_output = layer.updateOutput(layer_input)
            self.assertTrue(np.all(np.logical_or(np.isclose(layer_output, 0),
                            np.isclose(layer_output*(1.-p), layer_input))))

            # 2. check layer input grad: same zero/scale structure as the output
            layer_grad = layer.updateGradInput(layer_input, next_layer_grad)
            self.assertTrue(np.all(np.logical_or(np.isclose(layer_grad, 0),
                            np.isclose(layer_grad*(1.-p), next_layer_grad))))

            # 3. check evaluation mode: identity
            layer.evaluate()
            layer_output = layer.updateOutput(layer_input)
            self.assertTrue(np.allclose(layer_output, layer_input))

            # 4. check mask: p=0 keeps everything; forward/backward masks must agree
            p = 0.0
            layer = Dropout(p)
            layer.train()
            layer_output = layer.updateOutput(layer_input)
            self.assertTrue(np.allclose(layer_output, layer_input))

            p = 0.5
            layer = Dropout(p)
            layer.train()
            layer_input = np.random.uniform(5, 10, (batch_size, n_in)).astype(np.float32)
            next_layer_grad = np.random.uniform(5, 10, (batch_size, n_in)).astype(np.float32)
            layer_output = layer.updateOutput(layer_input)
            zeroed_elem_mask = np.isclose(layer_output, 0)
            layer_grad = layer.updateGradInput(layer_input, next_layer_grad)
            self.assertTrue(np.all(zeroed_elem_mask == np.isclose(layer_grad, 0)))

            # 5. dropout mask should be generated independently for every input matrix element, not for row/column
            batch_size, n_in = 1000, 1
            p = 0.8
            layer = Dropout(p)
            layer.train()
            layer_input = np.random.uniform(5, 10, (batch_size, n_in)).astype(np.float32)
            layer_output = layer.updateOutput(layer_input)
            self.assertTrue(np.sum(np.isclose(layer_output, 0)) != layer_input.size)
            layer_input = layer_input.T
            layer_output = layer.updateOutput(layer_input)
            self.assertTrue(np.sum(np.isclose(layer_output, 0)) != layer_input.size)

    def test_LeakyReLU(self):
        """LeakyReLU: forward and input grad must match torch.nn.LeakyReLU."""
        np.random.seed(42)
        torch.manual_seed(42)
        batch_size, n_in = 2, 4
        for _ in range(100):
            # layers initialization with a random negative slope
            slope = np.random.uniform(0.01, 0.05)
            torch_layer = torch.nn.LeakyReLU(slope)
            custom_layer = LeakyReLU(slope)

            layer_input = np.random.uniform(-5, 5, (batch_size, n_in)).astype(np.float32)
            next_layer_grad = np.random.uniform(-5, 5, (batch_size, n_in)).astype(np.float32)

            # 1. check layer output
            custom_layer_output = custom_layer.updateOutput(layer_input)
            layer_input_var = Variable(torch.from_numpy(layer_input), requires_grad=True)
            torch_layer_output_var = torch_layer(layer_input_var)
            self.assertTrue(np.allclose(torch_layer_output_var.data.numpy(), custom_layer_output, atol=1e-6))

            # 2. check layer input grad
            custom_layer_grad = custom_layer.updateGradInput(layer_input, next_layer_grad)
            torch_layer_output_var.backward(torch.from_numpy(next_layer_grad))
            torch_layer_grad_var = layer_input_var.grad
            self.assertTrue(np.allclose(torch_layer_grad_var.data.numpy(), custom_layer_grad, atol=1e-6))

    def test_ELU(self):
        """ELU: forward and input grad must match torch.nn.ELU."""
        np.random.seed(42)
        torch.manual_seed(42)
        batch_size, n_in = 2, 4
        for _ in range(100):
            # layers initialization
            alpha = 1.0
            torch_layer = torch.nn.ELU(alpha)
            custom_layer = ELU(alpha)

            layer_input = np.random.uniform(-5, 5, (batch_size, n_in)).astype(np.float32)
            next_layer_grad = np.random.uniform(-5, 5, (batch_size, n_in)).astype(np.float32)

            # 1. check layer output
            custom_layer_output = custom_layer.updateOutput(layer_input)
            layer_input_var = Variable(torch.from_numpy(layer_input), requires_grad=True)
            torch_layer_output_var = torch_layer(layer_input_var)
            self.assertTrue(np.allclose(torch_layer_output_var.data.numpy(), custom_layer_output, atol=1e-6))

            # 2. check layer input grad
            custom_layer_grad = custom_layer.updateGradInput(layer_input, next_layer_grad)
            torch_layer_output_var.backward(torch.from_numpy(next_layer_grad))
            torch_layer_grad_var = layer_input_var.grad
            self.assertTrue(np.allclose(torch_layer_grad_var.data.numpy(), custom_layer_grad, atol=1e-6))

    def test_SoftPlus(self):
        """SoftPlus: forward and input grad must match torch.nn.Softplus."""
        np.random.seed(42)
        torch.manual_seed(42)
        batch_size, n_in = 2, 4
        for _ in range(100):
            # layers initialization
            torch_layer = torch.nn.Softplus()
            custom_layer = SoftPlus()

            layer_input = np.random.uniform(-5, 5, (batch_size, n_in)).astype(np.float32)
            next_layer_grad = np.random.uniform(-5, 5, (batch_size, n_in)).astype(np.float32)

            # 1. check layer output
            custom_layer_output = custom_layer.updateOutput(layer_input)
            layer_input_var = Variable(torch.from_numpy(layer_input), requires_grad=True)
            torch_layer_output_var = torch_layer(layer_input_var)
            self.assertTrue(np.allclose(torch_layer_output_var.data.numpy(), custom_layer_output, atol=1e-6))

            # 2. check layer input grad
            custom_layer_grad = custom_layer.updateGradInput(layer_input, next_layer_grad)
            torch_layer_output_var.backward(torch.from_numpy(next_layer_grad))
            torch_layer_grad_var = layer_input_var.grad
            self.assertTrue(np.allclose(torch_layer_grad_var.data.numpy(), custom_layer_grad, atol=1e-6))

    def test_ClassNLLCriterionUnstable(self):
        """ClassNLLCriterionUnstable (probability input): compare vs NLLLoss on log-probs."""
        np.random.seed(42)
        torch.manual_seed(42)
        batch_size, n_in = 2, 4
        for _ in range(100):
            # layers initialization
            torch_layer = torch.nn.NLLLoss()
            custom_layer = ClassNLLCriterionUnstable()

            # input: normalized probabilities, clipped into [EPS, 1-EPS]
            layer_input = np.random.uniform(0, 1, (batch_size, n_in)).astype(np.float32)
            layer_input /= layer_input.sum(axis=-1, keepdims=True)
            layer_input = layer_input.clip(custom_layer.EPS, 1. - custom_layer.EPS)  # unifies input
            target_labels = np.random.choice(n_in, batch_size)
            target = np.zeros((batch_size, n_in), np.float32)
            target[np.arange(batch_size), target_labels] = 1  # one-hot encoding

            # 1. check layer output (torch NLLLoss expects log-probabilities)
            custom_layer_output = custom_layer.updateOutput(layer_input, target)
            layer_input_var = Variable(torch.from_numpy(layer_input), requires_grad=True)
            torch_layer_output_var = torch_layer(torch.log(layer_input_var),
                            Variable(torch.from_numpy(target_labels), requires_grad=False))
            self.assertTrue(np.allclose(torch_layer_output_var.data.numpy(), custom_layer_output, atol=1e-6))

            # 2. check layer input grad
            custom_layer_grad = custom_layer.updateGradInput(layer_input, target)
            torch_layer_output_var.backward()
            torch_layer_grad_var = layer_input_var.grad
            self.assertTrue(np.allclose(torch_layer_grad_var.data.numpy(), custom_layer_grad, atol=1e-6))

    def test_ClassNLLCriterion(self):
        """ClassNLLCriterion (log-probability input): compare vs torch.nn.NLLLoss."""
        np.random.seed(42)
        torch.manual_seed(42)
        batch_size, n_in = 2, 4
        for _ in range(100):
            # layers initialization
            torch_layer = torch.nn.NLLLoss()
            custom_layer = ClassNLLCriterion()

            # input: log-probabilities produced by torch's LogSoftmax
            layer_input = np.random.uniform(-5, 5, (batch_size, n_in)).astype(np.float32)
            layer_input = torch.nn.LogSoftmax(dim=1)(Variable(torch.from_numpy(layer_input))).data.numpy()
            target_labels = np.random.choice(n_in, batch_size)
            target = np.zeros((batch_size, n_in), np.float32)
            target[np.arange(batch_size), target_labels] = 1  # one-hot encoding

            # 1. check layer output
            custom_layer_output = custom_layer.updateOutput(layer_input, target)
            layer_input_var = Variable(torch.from_numpy(layer_input), requires_grad=True)
            torch_layer_output_var = torch_layer(layer_input_var,
                            Variable(torch.from_numpy(target_labels), requires_grad=False))
            self.assertTrue(np.allclose(torch_layer_output_var.data.numpy(), custom_layer_output, atol=1e-6))

            # 2. check layer input grad
            custom_layer_grad = custom_layer.updateGradInput(layer_input, target)
            torch_layer_output_var.backward()
            torch_layer_grad_var = layer_input_var.grad
            self.assertTrue(np.allclose(torch_layer_grad_var.data.numpy(), custom_layer_grad, atol=1e-6))

    def test_adam_optimizer(self):
        """adam_optimizer: first two steps must reproduce the reference m, v, t and updates."""
        state = {}
        config = {'learning_rate': 1e-3, 'beta1': 0.9, 'beta2':0.999, 'epsilon':1e-8}
        variables = [[np.arange(10).astype(np.float64)]]
        gradients = [[np.arange(10).astype(np.float64)]]

        # step 1: biased first/second moment estimates and the updated variables
        adam_optimizer(variables, gradients, config, state)
        self.assertTrue(np.allclose(state['m'][0], np.array([0. , 0.1, 0.2, 0.3, 0.4, 0.5,
                        0.6, 0.7, 0.8, 0.9])))
        self.assertTrue(np.allclose(state['v'][0], np.array([0., 0.001, 0.004, 0.009, 0.016, 0.025,
                        0.036, 0.049, 0.064, 0.081])))
        self.assertTrue(state['t'] == 1)
        self.assertTrue(np.allclose(variables[0][0], np.array([0., 0.999, 1.999, 2.999, 3.999, 4.999,
                        5.999, 6.999, 7.999, 8.999])))

        # step 2: moments keep accumulating, t increments, variables move again
        adam_optimizer(variables, gradients, config, state)
        self.assertTrue(np.allclose(state['m'][0], np.array([0., 0.19, 0.38, 0.57, 0.76, 0.95, 1.14,
                        1.33, 1.52, 1.71])))
        self.assertTrue(np.allclose(state['v'][0], np.array([0., 0.001999, 0.007996, 0.017991,
                        0.031984, 0.049975, 0.071964, 0.097951,
                        0.127936, 0.161919])))
        self.assertTrue(state['t'] == 2)
        self.assertTrue(np.allclose(variables[0][0], np.array([0., 0.998, 1.998, 2.998, 3.998, 4.998,
                        5.998, 6.998, 7.998, 8.998])))
# Run the basic layer test-suite with per-test verbose output.
suite = unittest.TestLoader().loadTestsFromTestCase(TestLayers)
unittest.TextTestRunner(verbosity=2).run(suite)
class TestAdvancedLayers(unittest.TestCase):
    """Check custom Conv2d / MaxPool2d layers against their torch.nn references."""

    def test_Conv2d(self):
        """Conv2d (padding=1): forward, input grad and W/b grads vs torch.nn.Conv2d."""
        np.random.seed(42)
        torch.manual_seed(42)
        batch_size, n_in, n_out = 2, 3, 4
        h,w = 5,6
        kern_size = 3
        for _ in range(100):
            # layers initialization: copy torch's kernel and bias into the custom layer
            torch_layer = torch.nn.Conv2d(n_in, n_out, kern_size, padding=1)
            custom_layer = Conv2d(n_in, n_out, kern_size)
            custom_layer.W = torch_layer.weight.data.numpy()  # [n_out, n_in, kern, kern]
            custom_layer.b = torch_layer.bias.data.numpy()

            layer_input = np.random.uniform(-1, 1, (batch_size, n_in, h,w)).astype(np.float32)
            next_layer_grad = np.random.uniform(-1, 1, (batch_size, n_out, h, w)).astype(np.float32)

            # 1. check layer output
            custom_layer_output = custom_layer.updateOutput(layer_input)
            layer_input_var = Variable(torch.from_numpy(layer_input), requires_grad=True)
            torch_layer_output_var = torch_layer(layer_input_var)
            self.assertTrue(np.allclose(torch_layer_output_var.data.numpy(), custom_layer_output, atol=1e-6))

            # 2. check layer input grad
            custom_layer_grad = custom_layer.updateGradInput(layer_input, next_layer_grad)
            torch_layer_output_var.backward(torch.from_numpy(next_layer_grad))
            torch_layer_grad_var = layer_input_var.grad
            self.assertTrue(np.allclose(torch_layer_grad_var.data.numpy(), custom_layer_grad, atol=1e-6))

            # 3. check layer parameters grad
            custom_layer.accGradParameters(layer_input, next_layer_grad)
            weight_grad = custom_layer.gradW
            bias_grad = custom_layer.gradb
            torch_weight_grad = torch_layer.weight.grad.data.numpy()
            torch_bias_grad = torch_layer.bias.grad.data.numpy()
            #m = ~np.isclose(torch_weight_grad, weight_grad, atol=1e-5)
            self.assertTrue(np.allclose(torch_weight_grad, weight_grad, atol=1e-6, ))
            self.assertTrue(np.allclose(torch_bias_grad, bias_grad, atol=1e-6))

    def test_MaxPool2d(self):
        """MaxPool2d: forward and input grad vs torch.nn.MaxPool2d."""
        np.random.seed(42)
        torch.manual_seed(42)
        batch_size, n_in = 2, 3
        h,w = 4,6
        kern_size = 2
        for _ in range(100):
            # layers initialization
            torch_layer = torch.nn.MaxPool2d(kern_size)
            custom_layer = MaxPool2d(kern_size)

            layer_input = np.random.uniform(-10, 10, (batch_size, n_in, h,w)).astype(np.float32)
            # upstream grad has the pooled spatial shape
            next_layer_grad = np.random.uniform(-10, 10, (batch_size, n_in,
                            h // kern_size, w // kern_size)).astype(np.float32)

            # 1. check layer output
            custom_layer_output = custom_layer.updateOutput(layer_input)
            layer_input_var = Variable(torch.from_numpy(layer_input), requires_grad=True)
            torch_layer_output_var = torch_layer(layer_input_var)
            self.assertTrue(np.allclose(torch_layer_output_var.data.numpy(), custom_layer_output, atol=1e-6))

            # 2. check layer input grad
            custom_layer_grad = custom_layer.updateGradInput(layer_input, next_layer_grad)
            torch_layer_output_var.backward(torch.from_numpy(next_layer_grad))
            torch_layer_grad_var = layer_input_var.grad
            self.assertTrue(np.allclose(torch_layer_grad_var.data.numpy(), custom_layer_grad, atol=1e-6))
# Run the advanced (conv/pool) test-suite with per-test verbose output.
suite = unittest.TestLoader().loadTestsFromTestCase(TestAdvancedLayers)
unittest.TextTestRunner(verbosity=2).run(suite)
```
| github_jupyter |
```
# Visualization of the KO+ChIP Gold Standard from:
# Miraldi et al. (2018) "Leveraging chromatin accessibility for transcriptional regulatory network inference in Th17 Cells"
# TO START: In the menu above, choose "Cell" --> "Run All", and network + heatmap will load
# NOTE: Default limits networks to TF-TF edges in top 1 TF / gene model (.93 quantile), to see the full
# network hit "restore" (in the drop-down menu in cell below) and set threshold to 0 and hit "threshold"
# You can search for gene names in the search box below the network (hit "Match"), and find regulators ("targeted by")
# Change "canvas" to "SVG" (drop-down menu in cell below) to enable drag interactions with nodes & labels
# Change "SVG" to "canvas" to speed up layout operations
# More info about jp_gene_viz and user interface instructions are available on Github:
# https://github.com/simonsfoundation/jp_gene_viz/blob/master/doc/dNetwork%20widget%20overview.ipynb

# --- configuration ---
# directory containing gene expression data and network folder
directory = "."
# folder containing networks
netPath = 'Networks'
# network file name
networkFile = 'ChIP_A17_KOall_bias100_TFmRNA_sp.tsv'
# title for network figure
netTitle = 'ChIP/ATAC(Th17)+KO, bias = 100_TFmRNA, TFA = TF mRNA'
# name of gene expression file
expressionFile = 'Th0_Th17_48hTh.txt'
# column of gene expression file to color network nodes
rnaSampleOfInt = 'Th17(48h)'
# edge cutoff -- for Inferelator TRNs, corresponds to signed quantile (rank of edges in 15 TFs / gene models),
# increase from 0 --> 1 to get more significant edges (e.g., .33 would correspond to edges only in 10 TFs / gene
# models)
edgeCutoff = .93

# jp_gene_viz lives one directory up from this notebook
import sys
if ".." not in sys.path:
    sys.path.append("..")
from jp_gene_viz import dNetwork
dNetwork.load_javascript_support()
# from jp_gene_viz import multiple_network
from jp_gene_viz import LExpression
LExpression.load_javascript_support()

# Load network linked to gene expression data
L = LExpression.LinkedExpressionNetwork()
L.show()

# Load Network and Heatmap
L.load_network(directory + '/' + netPath + '/' + networkFile)
L.load_heatmap(directory + '/' + expressionFile)
N = L.network
N.set_title(netTitle)
# NOTE(review): 'threshhold_slider' spelling is presumably the widget's own
# attribute name — confirm against jp_gene_viz before "correcting" it.
N.threshhold_slider.value = edgeCutoff
N.apply_click(None)
N.draw()
# Add labels to nodes
N.labels_button.value=True
# Limit to TFs only, remove unconnected TFs, choose and set network layout
N.restore_click()
N.tf_only_click()
N.connected_only_click()
N.layout_dropdown.value = 'fruchterman_reingold'
N.layout_click()

# Interact with Heatmap
# Limit genes in heatmap to network genes
L.gene_click(None)
# Z-score heatmap values
L.expression.transform_dropdown.value = 'Z score'
L.expression.apply_transform()
# Choose a column in the heatmap (e.g., 48h Th17) to color nodes
L.expression.col = rnaSampleOfInt
L.condition_click(None)
# Switch SVG layout to get line colors, then switch back to faster canvas mode
N.force_svg(None)
```
| github_jupyter |
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('default')
```
### Data Awal
Data berasal dari dataset DARPA 2000. DDoS attack terjadi pada 2 jam 6 menit 15 detik setelah trafik mulai direkam
```
# Load the DARPA 2000 capture and drop columns the analysis never uses.
df = pd.read_csv("../data/darpa2000-all.csv")
for unused_col in ("Info", "Length"):
    del df[unused_col]
df.head()
#df.info()

# Index by elapsed capture time so the frame can be resampled per window.
df["T"] = pd.to_timedelta(df["Time"], unit='s')
df = df.set_index("T")

# Per-3-second-window aggregates used as detection features below.
# unique source IPs per window
unique_source_count = df[['Source']].resample('3s').nunique()
# unique destination IPs per window
unique_destination_count = df[['Destination']].resample('3s').nunique()
# unique protocols per window
unique_protocol_count = df[['Protocol']].resample('3s').nunique()
# packets per window ('No.' is the per-packet row number)
packets_count = df[['No.']].resample('3s').count()
```
### Parameter Algoritma
- A1 = Number of packets
- A2 = Number of Unique Source IP
- A3 = (A2) divided by (Number of Unique Dest. IP)
- A4 = (A2) divided by (Number of Unique Protocol)
- Beta
- T = time interval (resampling)
- K = Window Size
```
# Export the per-window aggregates for external use.
df[['Source']].resample('3s').nunique().to_csv('USIP.csv')
df[['Destination']].resample('3s').nunique().to_csv('UDIP.csv')
df[['Protocol']].resample('3s').nunique().to_csv('UPR.csv')
# Fix: the packet series must be a *count*, not nunique() — 'No.' is the
# packet number; this now matches how packets_count is computed above.
df[['No.']].resample('3s').count().to_csv('Packets.csv')
```
### Plot Data setelah resampling
```
# Plot each per-window series after resampling.
# Make figure with 4 rows of plots
#fig, plots = plt.subplots(4, 1)
#fig.suptitle('DARPA 2000 Internal Tuesday, 7 March 2000, from 9:25 AM to 12:35 PM', fontsize=16)

# Unique source IPs over capture time
plt.plot(unique_source_count.index.seconds, unique_source_count.Source)
plt.title("Unique Source IP Address Count every 3 seconds")
plt.xlabel("Time (seconds)")
plt.ylabel("Unique Source IP Count")
plt.show()

# Unique destination IPs over capture time
plt.plot(unique_destination_count.index.seconds, unique_destination_count.Destination)
plt.title("Unique Destination IP Address Count every 3 seconds")
plt.xlabel("Time (seconds)")
plt.ylabel("Unique Destination IP Count")
plt.show()

# Unique protocols over capture time
plt.plot(unique_protocol_count.index.seconds, unique_protocol_count.Protocol)
plt.title("Unique Protocol Count every 3 seconds")
plt.xlabel("Time (seconds)")
# Fix: the y-label was a copy-paste of the source-IP label.
plt.ylabel("Unique Protocol Count")
plt.show()

# Packet volume over capture time
plt.plot(packets_count.index.seconds, packets_count['No.'])
plt.title("Number of packets every 3 seconds")
plt.xlabel("Time (seconds)")
plt.ylabel("Number of packets")
plt.show()
#fig.tight_layout()
```
### Plot A1, A2, A3, A4
```
##ye = packets_count.iloc[2426:2526, 0].to_list()
##print(ye)
#print(packets_count.loc[packets_count['No.'].idxmax()])
# data => 38215
# second => 7575 => 2525
# Timedelta => 0 days 02:06:15

# Detection features per 3-second window:
# A1 = number of packets
A1_all = packets_count
# A2 = number of unique source IPs
A2_all = unique_source_count
# A3 = unique sources / unique destinations
A3_all = unique_source_count['Source'].divide(unique_destination_count['Destination']).to_frame()
# A4 = unique sources / unique protocols
# NOTE(review): these divisions can yield inf/NaN for windows with zero
# destinations/protocols — verify how the detection loop handles that.
A4_all = unique_source_count['Source'].divide(unique_protocol_count['Protocol']).to_frame()

# Scatter each feature over time.
plt.plot(A1_all, '*')
#plt.plot(A1_all.iloc[:, 0].to_list())
plt.title("A1 value over time")
plt.xlabel("Time (3secs interval)")
plt.ylabel("A1")
plt.show()
plt.plot(A2_all, '*')
#plt.plot(A2_all.iloc[:, 0].to_list())
plt.title("A2 value over time")
plt.xlabel("Time (3secs interval)")
plt.ylabel("A2")
plt.show()
plt.plot(A3_all, '*')
#plt.plot(A3_all.iloc[:, 0].to_list())
plt.title("A3 value over time")
plt.xlabel("Time (3secs interval)")
plt.ylabel("A3")
plt.show()
plt.plot(A4_all, '*')
#plt.plot(A4_all.iloc[:, 0].to_list())
plt.title("A4 value over time")
plt.xlabel("Time (3secs interval)")
plt.ylabel("A4")
plt.show()

# Accumulators for the adaptive-threshold detection loop that follows.
A_all = (A1_all, A2_all, A3_all, A4_all)
Threshold_all = []
N_all = []
beta_all = []
j_to_end = range(len(A1_all.iloc[:, 0].to_list()))
for A in A_all:
# Init values
K = 1
beta = 1.5
j = 0
T = 3
current_threshold_array = list()
current_N_array = list()
current_beta_array = list()
current_moving_A = A.iloc[j: j+K, 0].to_list()
current_moving_mean = np.mean(current_moving_A)
current_moving_variance = np.var(current_moving_A)
current_threshold = (current_moving_mean + current_moving_variance) * beta
current_threshold_array.append([j, current_threshold])
current_beta_array.append([j, beta])
#while j <= K*T-1 and j < len(A1_all.iloc[:, 0].to_list()):
while j < len(A1_all.iloc[:, 0].to_list()):
if j < 0 and j % K*T == 0:
beta = 1.5
current_moving_A = A.iloc[j: j+K, 0].to_list()
current_moving_mean = np.mean(current_moving_A)
current_moving_variance = np.var(current_moving_A)
current_threshold = (current_moving_mean + current_moving_variance) * beta
current_threshold_array.append([j, current_threshold])
current_beta_array.append([j, beta])
else:
if A.iloc[j:j+1, 0].to_list()[0] > current_threshold:
current_N_array.append([j, True])
else:
current_N_array.append([j, False])
j = j + 1
current_j = j
previous_j = j - 1
previous_moving_mean = np.mean(A.iloc[previous_j: previous_j+K, 0].to_list())
current_moving_mean = np.mean(A.iloc[current_j: current_j+K, 0].to_list())
if current_moving_mean > 2 * previous_moving_mean:
beta = beta + 0.5
current_threshold = (current_moving_mean + np.var(A.iloc[current_j: current_j+K, 0].to_list())) * beta
else:
beta = beta - 0.5
if beta < 1.0:
beta = 1
current_threshold = (current_moving_mean + np.var(A.iloc[current_j: current_j+K, 0].to_list())) / beta
current_threshold_array.append([current_j, current_threshold])
current_beta_array.append([j, beta])
Threshold_all.append(current_threshold_array)
N_all.append(current_N_array)
beta_all.append(current_beta_array)
#print(beta_all)
#print(Threshold_all)
#print(N_all)
```
### Nilai Threshold berubah tiap waktu
```
# Extract the threshold value (second element of each [index, value] pair)
# for every signal, then plot one chart per signal.
threshold_series = [[pair[1] for pair in series] for series in Threshold_all]
# Keep the original per-signal list names available for later cells.
beta1y, beta2y, beta3y, beta4y = threshold_series
for signal_no, y_values in enumerate(threshold_series, start=1):
    plt.plot(y_values, '*')
    plt.title("A%d Threshold value over time" % signal_no)
    plt.xlabel("Time (3secs interval)")
    plt.ylabel("Threshold value")
    plt.show()
```
### Nature of Network tiap waktu
Jika 1 maka deteksi positif, jika 0 maka normal
```
# Extract the boolean detection flag (second element of each [index, flag]
# pair) for every signal, then plot one chart per signal.
nature_series = [[pair[1] for pair in series] for series in N_all]
# Keep the original per-signal list names available for later cells.
beta1y, beta2y, beta3y, beta4y = nature_series
for signal_no, flags in enumerate(nature_series, start=1):
    plt.plot(flags, '*')
    plt.title("A%d Network Nature" % signal_no)
    plt.xlabel("Time (3secs interval)")
    plt.ylabel("Nature of Network")
    plt.show()
# Combined verdict per time bucket: 1 only when all four signals flag it.
N_for_testing = []
for idx in j_to_end:
    detections = [series[idx][1] for series in N_all]
    if all(flag is True for flag in detections):
        N_for_testing.append(1)
    else:
        N_for_testing.append(0)
# Ground truth: everything is benign except the three buckets around the
# packet-count peak at Timedelta 0 days 02:06:15 (see the notes above).
A1_all['real'] = 0
A1_all.head()
pckts = packets_count.iloc[2525:2528, 0].index
pckts
attack_windows = ('0 days 02:06:15', '0 days 02:06:18', '0 days 02:06:21')
for window in attack_windows:
    A1_all.at[window, 'real'] = 1
y_real = A1_all.iloc[:, 1].to_list()
y_pred = N_for_testing
def perf_measure(y_actual, y_hat):
    """Tally confusion-matrix cells for binary labels.

    Compares predictions *y_hat* against ground truth *y_actual*
    element-wise and returns the tuple (TP, FP, TN, FN).
    """
    tp = fp = tn = fn = 0
    for actual, predicted in zip(y_actual, y_hat):
        if predicted == 1:
            if actual == 1:
                tp += 1
            else:
                fp += 1
        elif predicted == 0:
            if actual == 0:
                tn += 1
            else:
                fn += 1
    return tp, fp, tn, fn
def sensitivity(true_positive, false_negative):
    """Recall / true-positive rate: TP / (TP + FN)."""
    actual_positives = true_positive + false_negative
    return true_positive / actual_positives
def accuracy(true_positive, false_positive, true_negative, false_negative):
    """Fraction of all predictions that were correct: (TP + TN) / total."""
    correct = true_positive + true_negative
    total = correct + false_positive + false_negative
    return correct / total
def specificity(true_negative, false_positive):
    """True-negative rate: TN / (TN + FP)."""
    actual_negatives = true_negative + false_positive
    return true_negative / actual_negatives
def precision(true_positive, false_positive):
    """Positive predictive value: TP / (TP + FP)."""
    predicted_positives = true_positive + false_positive
    return true_positive / predicted_positives
# Score the combined detector against the hand-labelled ground truth.
tp, fp, tn, fn = perf_measure(y_real, y_pred)
print(
    f"\ntrue positive: {tp}",
    f"\nfalse positive: {fp}",
    f"\ntrue negative: {tn}",
    f"\nfalse negative: {fn}"
)
print("\n")
print(f"Sensitivity (TPR): {sensitivity(tp, fn)}")
print(f"Accuracy: {accuracy(tp, fp, tn, fn)}")
print(f"Specificity (TNR): {specificity(tn, fp)}")
print(f"Precision: {precision(tp, fp)}")
# Grouped bar chart comparing [TPR, Accuracy, TNR] percentages across
# the original paper's figures and the two runs of this implementation.
paper_results = [98, 99.5, 99.6]
implementation_results = [33.33, 99.94, 100]
generated_results = [50, 89.15, 90.12]
x_positions = np.arange(3)
width = 0.3
fig, ax = plt.subplots()
ax.bar(x_positions, paper_results, width, label="Original (DARPA 2000)")
ax.bar(x_positions + width, implementation_results, width,
       label="Implementation (DARPA 2000)")
ax.bar(x_positions + 2 * width, generated_results, width,
       label="Implementation (Generated Dataset)")
ax.set_xlabel('Evaluation Result')
ax.set_ylabel('Percentage')
ax.set_title('Evaluation Result Comparison')
ax.set_xticks(x_positions + width / 2)
ax.set_xticklabels(["TPR", "Accuracy", "TNR"])
ax.legend()
plt.show()
```
| github_jupyter |
# <center> #DHBSI 2016: Computational Text Analysis </center>
## <center> Laura Nelson <br/> <em>Postdoctoral Fellow | Digital Humanities @ Berkeley | Berkeley Institute for Data Science </em> </center>
## <center> Teddy Roland <br/> <em> Coordinator, Digital Humanities @ Berkeley <br/> Lecturer, UC Berkeley </em> </center>
# <center> Summary </center>
## <center> Text Analysis Demystified </center>
### <center> It's Just Counting! <br/> </center>

## <center> The Dark Side of DH: An Invitation </center>

## <center> Text Analysis in Research </center>

## <center> Lessons </center>
### <center> Our workshop included 5 days and 7 lessons to learn how counting, sometimes creative counting, can amplify and augment close readings of text </center>
# Lesson 1: Introduction to Natural Language Processing
```
import nltk
from nltk import word_tokenize
from nltk.corpus import stopwords
import string
punctuations = list(string.punctuation)
# Read the two mystery text files from disk.
text1 = open('../01-Intro-to-NLP/text1.txt').read()
text2 = open('../01-Intro-to-NLP/text2.txt').read()
### word frequencies
# Tokenize texts.
text1_tokens = word_tokenize(text1)
text2_tokens = word_tokenize(text2)
# Pre-process for word frequency: lowercase first.
text1_tokens_lc = [word.lower() for word in text1_tokens]
text2_tokens_lc = [word.lower() for word in text2_tokens]
# PERF: build the stopword set once. The original called
# stopwords.words('english') inside the comprehension condition, re-reading
# the corpus list for every single token (O(n*m) list membership).
english_stopwords = set(stopwords.words('english'))
text1_tokens_clean = [word for word in text1_tokens_lc if word not in english_stopwords]
text2_tokens_clean = [word for word in text2_tokens_lc if word not in english_stopwords]
# Remove punctuation using the list of punctuation from the string package.
text1_tokens_clean = [word for word in text1_tokens_clean if word not in punctuations]
text2_tokens_clean = [word for word in text2_tokens_clean if word not in punctuations]
# Frequency distribution of the cleaned tokens.
text1_word_frequency = nltk.FreqDist(text1_tokens_clean)
text2_word_frequency = nltk.FreqDist(text2_tokens_clean)
print("Frequent Words for Text1")
print("________________________")
for word in text1_word_frequency.most_common(20):
    print(word[0])
print()
print("Frequent Words for Text2")
print("________________________")
for word in text2_word_frequency.most_common(20):
    print(word[0])
### Can you guess the novel from most frequent words?
```
# Lesson 2: Basics of Python
```
# Nothing to see here, folks
```
# Lesson 3: Operationalizing
```
import pandas
# Load the per-character dialogue table and count words spoken by each.
dialogue_df = pandas.read_csv('../03-Operationalizing/antigone_dialogue.csv', index_col=0)
# Whitespace-tokenize each character's lines.
dialogue_tokens = [character.split() for character in dialogue_df['DIALOGUE']]
dialogue_len = [len(tokens) for tokens in dialogue_tokens]
dialogue_df['WORDS_SPOKEN'] = dialogue_len
dialogue_df = dialogue_df.sort_values('WORDS_SPOKEN', ascending = False)
# Let's visualize!
# Tells Jupyter to produce images in notebook (also injects pylab names
# such as `style` used below).
% pylab inline
# Makes images look good
style.use('ggplot')
# Bar chart of words spoken per character, most talkative first.
dialogue_df['WORDS_SPOKEN'].plot(kind='bar')
###Who is the main protagonist? Maybe not Antigone?
```
# Lesson 4: Discriminating Words
```
from sklearn.feature_extraction.text import TfidfVectorizer

# Build a document-term matrix with cells weighted by tf-idf score.
df = pandas.read_csv("../04-Discriminating-Words/BDHSI2016_music_reviews.csv", sep='\t')
tfidfvec = TfidfVectorizer()
weights = tfidfvec.fit_transform(df.body).toarray()
dtm_tfidf_df = pandas.DataFrame(weights, columns=tfidfvec.get_feature_names(), index=df.index)
df_genre = df['genre'].to_frame()
merged_df = df_genre.join(dtm_tfidf_df, how='right', lsuffix='_x')

def _top_words(genre_frame):
    # Highest tf-idf score each word attains within the genre, top 20.
    return genre_frame.max(numeric_only=True).sort_values(ascending=False)[0:20]

# Pull out the reviews for three genres: Rap, Alternative/Indie Rock, Jazz.
rap_reviews = merged_df[merged_df['genre_x'] == "Rap"]
indie_reviews = merged_df[merged_df['genre_x'] == "Alternative/Indie Rock"]
jazz_reviews = merged_df[merged_df['genre_x'] == "Jazz"]
# Print the words with the highest tf-idf scores for each genre.
print("Rap Words")
print(_top_words(rap_reviews))
print()
print("Indie Words")
print(_top_words(indie_reviews))
print()
print("Jazz Words")
print(_top_words(jazz_reviews))
### What words are distinct to reviews of Rap albums, Indie albums, and Jazz albums?
## Notice the word weights for the Rap albums compared to others. Are these reviews more different than other reviews?
```
# Lesson 5: Sentiment Analysis using the Dictionary Method
```
# Dictionary-method sentiment: count tokens that appear in curated
# positive/negative word lists.
pos_sent = open("../05-Dictionary-Method/positive_words.txt").read()
neg_sent = open("../05-Dictionary-Method/negative_words.txt").read()
positive_words = pos_sent.split('\n')
negative_words = neg_sent.split('\n')
# PERF: membership tests against a list are O(len(list)) per token; use
# sets so scoring a whole novel stays linear.
positive_set = set(positive_words)
negative_set = set(negative_words)
# NOTE(review): the positive counts use the *cleaned* token lists while the
# negative counts use the *raw* token lists -- looks like an oversight, but
# it is kept as-is so the reported ratios don't silently change. TODO confirm.
text1_pos = [word for word in text1_tokens_clean if word in positive_set]
text2_pos = [word for word in text2_tokens_clean if word in positive_set]
text1_neg = [word for word in text1_tokens if word in negative_set]
text2_neg = [word for word in text2_tokens if word in negative_set]
print("Postive words in Melville")
print(len(text1_pos)/len(text1_tokens))
print()
print("Negative words in Melville")
print(len(text1_neg)/len(text1_tokens))
print()
print("Postive words in Austen")
print(len(text2_pos)/len(text2_tokens))
print()
print("Negative words in Austen")
print(len(text2_neg)/len(text2_tokens))
## Who is more positive, Melville or Austen?
## Melville has a similar percentage of positive and negative words (a whale is a whale, neither good nor bad)
## Austen is decidedly more positive than negative (it's the gentleman thing to do)
```
# Lesson 6: Literary Distinction
```
from sklearn.naive_bayes import MultinomialNB
from sklearn.feature_extraction.text import CountVectorizer
import os
# Train a Naive Bayes classifier to distinguish poems that were reviewed
# (canonized) from randomly sampled ones, then probe it with two new texts.
review_path = '../06-Literary Distinction (Probably)/poems/reviewed/'
random_path = '../06-Literary Distinction (Probably)/poems/random/'
review_files = os.listdir(review_path)
random_files = os.listdir(random_path)
review_texts = [open(review_path+file_name).read() for file_name in review_files]
random_texts = [open(random_path+file_name).read() for file_name in random_files]
all_texts = review_texts + random_texts
all_file_names = review_files + random_files
# One label per text, aligned with all_texts ordering.
all_labels = ['reviewed'] * len(review_texts) + ['random'] * len(random_texts)
# Binary presence/absence features for words appearing in >= 180 documents.
cv = CountVectorizer(stop_words = 'english', min_df=180, binary = True, max_features = None)
dtm = cv.fit_transform(all_texts).toarray()
nb = MultinomialNB()
nb.fit(dtm, all_labels)
# Two unseen test texts (verbatim; the odd characters are artifacts of the
# source encoding and are preserved exactly).
dickinson_canonic = """Because I could not stop for Death โ
He kindly stopped for me โ
The Carriage held but just Ourselves โ
And Immortality.
We slowly drove โ He knew no haste
And I had put away
My labor and my leisure too,
For His Civility โ
We passed the School, where Children strove
At Recess โ in the Ring โ
We passed the Fields of Gazing Grain โ
We passed the Setting Sun โ
Or rather โ He passed us โ
The Dews drew quivering and chill โ
For only Gossamer, my Gown โ
My Tippet โ only Tulle โ
We paused before a House that seemed
A Swelling of the Ground โ
The Roof was scarcely visible โ
The Cornice โ in the Ground โ
Since then โ โtis Centuries โ and yet
Feels shorter than the Day
I first surmised the Horsesโ Heads
Were toward Eternity โ """
anthem_patriotic = """O! say can you see, by the dawn's early light,
What so proudly we hailed at the twilight's last gleaming,
Whose broad stripes and bright stars through the perilous fight,
O'er the ramparts we watched, were so gallantly streaming?
And the rockets' red glare, the bombs bursting in air,
Gave proof through the night that our flag was still there;
O! say does that star-spangled banner yet wave
O'er the land of the free and the home of the brave?"""
# Vectorize with the fitted vocabulary and predict both labels at once.
unknown_dtm = cv.transform([dickinson_canonic,anthem_patriotic]).toarray()
nb.predict(unknown_dtm)
## Can a computer predict whether a poem would be considered 'prestigious'?
```
# Lesson 6: Topic Modeling
```
import gensim
import pandas
from nltk.corpus import stopwords, words

# Load the corpus metadata and tokenize each novel it points to.
metadata_df = pandas.read_csv('../07-Topic Modeling/txtlab_Novel150_English.csv')
fiction_path = '../07-Topic Modeling/txtalb_Novel150_English/'
novel_list = [open(fiction_path + file_name).read() for file_name in metadata_df['filename']]
novel_tokens_list = [novel.lower().split() for novel in novel_list]
dictionary = gensim.corpora.dictionary.Dictionary(novel_tokens_list)
# Vocabulary pruning: proper names, non-alphabetic or very short tokens,
# stopwords, and anything appearing in fewer than 40 documents.
proper_names = [w.lower() for w in words.words() if w.istitle()]
noise_tokens = [w for w in dictionary.values() if not w.isalpha() or len(w) <= 2]
bad_words = stopwords.words('english') + proper_names + noise_tokens
stop_ids = [token_id for token_id, _count in dictionary.doc2bow(bad_words)]
dictionary.filter_tokens(bad_ids=stop_ids)
dictionary.filter_extremes(no_below=40)
corpus = [dictionary.doc2bow(tokens) for tokens in novel_tokens_list]
# Fit a 25-topic LDA model over the bag-of-words corpus.
lda_model = gensim.models.LdaModel(corpus, num_topics=25, alpha='auto',
                                   id2word=dictionary, iterations=2500, passes=4)
# Per-document topic probabilities, joined back onto the metadata.
list_of_doctopics = [lda_model.get_document_topics(bow, minimum_probability=0) for bow in corpus]
list_of_probabilities = [[probability for _label, probability in distro] for distro in list_of_doctopics]
proba_distro_df = pandas.DataFrame(list_of_probabilities)
metadata_df = pandas.concat([metadata_df, pandas.DataFrame(list_of_probabilities)], axis=1)
# Average topic weight per publication year; chart topic #8 over time.
annual_means_df = metadata_df.groupby('date').mean()
annual_means_df[8].plot(kind='bar', figsize=(8, 8))
lda_model.show_topic(8)
```
| github_jupyter |
# Data Preprocessing for Topic Monitoring(Facebook)
```
import pandas as pd
import numpy as np
import re
import csv
from langdetect import detect
import nltk
# One-time NLTK downloads; keep commented after first run.
# nltk.download('punkt')
# nltk.download('maxent_treebank_pos_tagger')
# nltk.download('wordnet')
# nltk.download('averaged_perceptron_tagger')
# nltk.download('stopwords')
from nltk.corpus import stopwords
from nltk import wordpunct_tokenize
from IPython.display import Image
from IPython.display import display
### Load the crawled Facebook dataset
# Explicit dtypes so the mixed-type columns load consistently.
column_types = {"key": object, "id.x": object, "like_count.x": float, "from_id.x": float,
                "from_name.x": object, "message.x": object, "created_time.x": object, "type": object,
                "link": object, "story": object, "comments_count.x": float, "shares_count": float,
                "love_count": float, "haha_count": float, "wow_count": float, "sad_count": float,
                "angry_count": float, "join_id": object, "from_id.y": float, "from_name.y": object,
                "message.y": object, "created_time.y": object, "likes_count.y": float,
                "comments_count.y": float, "id.y": object}
disease = pd.read_csv('Final_utf16.csv', encoding='utf-16LE', sep=',', dtype=column_types)
# Keep only the identifying / text columns, drop duplicate post+comment
# pairs, and sort chronologically.
keep_columns = ['key', 'created_time.x', 'id.x', 'message.x', 'id.y', 'message.y']
df = pd.DataFrame(disease, columns=keep_columns)
df.columns = keep_columns
rm_duplicates = df.drop_duplicates(subset=['message.x', 'message.y'])
dtime = rm_duplicates.sort_values(['created_time.x'])
dtime.index = range(len(dtime))
# Drop the Johnson & Johnson keyword rows (both spellings).
dlang = dtime[~dtime['key'].isin(['johnson & johnson', 'johnson&johnson'])]
dlang.index = range(len(dlang))
display(dlang.head(3))
print(len(dlang))
# Detect the text language by majority vote
# Detect the text language by majority vote over stopword overlap.
def calculate_languages_ratios(text):
    """Score *text* against every NLTK stopword language.

    Returns a dict mapping language name -> number of the text's distinct
    lowercased words that are stopwords of that language; a larger overlap
    suggests the text is written in that language.
    """
    tokens = wordpunct_tokenize(text)
    # PERF: build the word set once. The original rebuilt set(words) inside
    # the loop for every candidate language.
    words_set = {word.lower() for word in tokens}
    languages_ratios = {}
    for language in stopwords.fileids():
        stopwords_set = set(stopwords.words(language))
        languages_ratios[language] = len(words_set.intersection(stopwords_set))
    return languages_ratios
def detect_language(text):
    """Return the stopword language with the highest overlap score for *text*."""
    scores = calculate_languages_ratios(text)
    best_language, _best_score = max(scores.items(), key=lambda item: item[1])
    return best_language
```
# Final Preprocessing
In this section, preprocessing is implemented into following steps.<br>
| Preprocessing Steps| Packages | Notes |
|------------------- |-----------------------------|-------------------------------------|
| Language Detection | Self-defined function, nltk |Check the language of each post |
| Remove Stopwords | nltk.corpus |Remove stopwords of detected language|
| Remove Url | Regular expression | |
| Remove Punctuation | string.punctuation | |
| Lemmatizing | nltk.stem |Lemmatize words in Noun and Verb |
| Part of Speech(POS)| nltk.pos_tag |Preserve Noun, Adverb and Adjective |
| Tokenize | split |Unigram |
| Remove NA | pandas | |
| Drop Duplicates | pandas | |
```
import gensim
from gensim import corpora, models, similarities
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from nltk.stem import WordNetLemmatizer
import string
import time
import os
# Punctuation characters stripped during cleaning, and a shared lemmatizer.
exclude = set(string.punctuation)
lemma = WordNetLemmatizer()
# Create a new csv file to store the result after data preprocessing
with open('facebook_preprocessing.csv', 'w', encoding = 'UTF-8', newline = '') as csvfile:
    column = [['key', 'created_time.x', 'id.x', 'message.x', 'id.y', 'message.y',
               'lang.x', 're_message.x', 'lang.y', 're_message.y']]
    writer = csv.writer(csvfile)
    writer.writerows(column)
# Data preprocessing steps. For each row, the post text (message.x) and
# the comment text (message.y) go through the same pipeline:
#   detect language -> strip URLs -> tokenize -> drop punctuation and
#   stopwords -> lemmatize (noun pass, then verb pass) -> keep only
#   nouns/adverbs/adjectives longer than 3 chars -> drop junk tokens ->
#   append the cleaned row to the CSV.
for i in range(len(dlang['message.x'])):
    features = []
    features.append(dlang['key'][i])
    features.append(dlang['created_time.x'][i])
    features.append(dlang['id.x'][i])
    features.append(dlang['message.x'][i])
    features.append(dlang['id.y'][i])
    features.append(dlang['message.y'][i])
    # Post text: missing messages pass through and default to 'english'.
    if(str(dlang['message.x'][i]) == "nan"):
        features.append('english')
        features.append(dlang['message.x'][i])
    else:
        lang = detect_language(dlang['message.x'][i])
        features.append(lang)
        stop = set(stopwords.words(lang))
        # Remove URLs, then keep only word-ish tokens, lowercased.
        reurl = re.sub(r"http\S+", "", str(dlang['message.x'][i]))
        tokens = ' '.join(re.findall(r"[\w']+", reurl)).lower().split()
        x = [''.join(c for c in s if c not in string.punctuation) for s in tokens]
        x = ' '.join(x)
        stop_free = " ".join([i for i in x.lower().split() if i not in stop])
        punc_free = ''.join(ch for ch in stop_free if ch not in exclude)
        normalized = " ".join(lemma.lemmatize(word,pos = 'n') for word in punc_free.split())
        normalized = " ".join(lemma.lemmatize(word,pos = 'v') for word in normalized.split())
        word = " ".join(word for word in normalized.split() if len(word)>3)
        postag = nltk.pos_tag(word.split())
        irlist = [',','.',':','#',';','CD','WRB','RB','PRP','...',')','(','-','``','@']
        poslist = ['NN','NNP','NNS','RB','RBR','RBS','JJ','JJR','JJS']
        # NOTE(review): the garbled entries below appear to be mojibake
        # emoji/encoding fragments; kept verbatim (including the original
        # line break) so filtering behaviour is unchanged.
        wordlist = ['co', 'https', 'http','rt','com','amp','fe0f','www','ve','dont',"i'm","it's",'isnt','รขลบฤ','รขฤ
ฤ','รขล_','kf4pdwe64k']
        adjandn = [word for word,pos in postag if pos in poslist and word not in wordlist and len(word)>3]
        stop = set(stopwords.words(lang))
        wordlist = [i for i in adjandn if i not in stop]
        features.append(' '.join(wordlist))
    # Comment text: identical pipeline applied to message.y.
    if(str(dlang['message.y'][i]) == "nan"):
        features.append('english')
        features.append(dlang['message.y'][i])
    else:
        lang = detect_language(dlang['message.y'][i])
        features.append(lang)
        stop = set(stopwords.words(lang))
        reurl = re.sub(r"http\S+", "", str(dlang['message.y'][i]))
        tokens = ' '.join(re.findall(r"[\w']+", reurl)).lower().split()
        x = [''.join(c for c in s if c not in string.punctuation) for s in tokens]
        x = ' '.join(x)
        stop_free = " ".join([i for i in x.lower().split() if i not in stop])
        punc_free = ''.join(ch for ch in stop_free if ch not in exclude)
        normalized = " ".join(lemma.lemmatize(word,pos='n') for word in punc_free.split())
        normalized = " ".join(lemma.lemmatize(word,pos='v') for word in normalized.split())
        word = " ".join(word for word in normalized.split() if len(word)>3)
        postag = nltk.pos_tag(word.split())
        irlist = [',','.',':','#',';','CD','WRB','RB','PRP','...',')','(','-','``','@']
        poslist = ['NN','NNP','NNS','RB','RBR','RBS','JJ','JJR','JJS']
        wordlist = ['co', 'https', 'http','rt','com','amp','fe0f','www','ve','dont',"i'm","it's",'isnt','รขลบฤ','รขฤ
ฤ','รขล_','kf4pdwe64k']
        adjandn = [word for word,pos in postag if pos in poslist and word not in wordlist and len(word)>3]
        stop = set(stopwords.words(lang))
        wordlist = [i for i in adjandn if i not in stop]
        features.append(' '.join(wordlist))
    # Append this row's cleaned features to the output CSV.
    with open('facebook_preprocessing.csv', 'a', encoding='UTF-8', newline='') as csvfile:
        writer = csv.writer(csvfile)
        writer.writerows([features])
# Re-read the incrementally written file, drop rows whose cleaned post
# text is missing, and persist the result under its final name.
df_postncomment = pd.read_csv('facebook_preprocessing.csv', encoding='UTF-8', sep=',')
rm_na = df_postncomment[pd.notnull(df_postncomment['re_message.x'])]
rm_na.index = range(len(rm_na))
output_columns = ['key', 'created_time.x', 'id.x', 'message.x', 'id.y', 'message.y',
                  'lang.x', 're_message.x', 'lang.y', 're_message.y']
dfinal_fb = pd.DataFrame(rm_na, columns=output_columns)
dfinal_fb.to_csv(
    'final_facebook_preprocessing.csv',
    encoding='UTF-8',
    columns=output_columns)
# The intermediate file is no longer needed.
os.remove('facebook_preprocessing.csv')
#print(rm_na['re_message.x'][8])
# Sanity check: re-load the final file and peek at it.
test = pd.read_csv('final_facebook_preprocessing.csv', encoding='UTF-8', sep=',', index_col=0)
display(test.head(3))
print(len(test))
```
| github_jupyter |
```
%load_ext autoreload
%autoreload 2
import sys
import warnings
warnings.filterwarnings("ignore")
# Make the project package importable from the notebook directory.
sys.path.append("../")
from modules.data.conll2003.prc import conll2003_preprocess
# Convert the raw CoNLL-2003 files under data_dir into the CSVs used below.
data_dir = "/home/eartemov/ae/work/conll2003/"
conll2003_preprocess(data_dir)
```
## IO markup
### Train
```
from modules.data import bert_data
# Build train/valid datasets and dataloaders from the preprocessed CSVs.
data = bert_data.LearnData.create(
    train_df_path="/home/eartemov/ae/work/conll2003/eng.train.train.csv",
    valid_df_path="/home/eartemov/ae/work/conll2003/eng.testa.dev.csv",
    idx2labels_path="/home/eartemov/ae/work/conll2003/idx2labels5.txt",
    clear_cache=True,
    model_name="bert-base-cased"
)
from modules.models.bert_models import BERTBiLSTMAttnNCRF
# BERT encoder + BiLSTM + attention + NCRF decoder, sized to the label set.
model = BERTBiLSTMAttnNCRF.create(
    len(data.train_ds.idx2label), model_name="bert-base-cased",
    lstm_dropout=0., crf_dropout=0.3, nbest=len(data.train_ds.idx2label))
from modules.train.train import NerLearner
num_epochs = 100
# Learner handles optimization and checkpoints to the given path.
learner = NerLearner(
    model, data, "/home/eartemov/ae/work/models/conll2003-BERTBiLSTMAttnNCRF-base-IO.cpt",
    t_total=num_epochs * len(data.train_dl))
model.get_n_trainable_params()
learner.fit(epochs=num_epochs)
```
### Predict
```
from modules.data.bert_data import get_data_loader_for_predict
# Predict on the validation split the learner was fit against.
dl = get_data_loader_for_predict(data, df_path=data.valid_ds.config["df_path"])
preds = learner.predict(dl)
from sklearn_crfsuite.metrics import flat_classification_report
from modules.analyze_utils.utils import bert_labels2tokens, voting_choicer
from modules.analyze_utils.plot_metrics import get_bert_span_report
# Collapse word-piece (BERT) labels back onto whole tokens.
pred_tokens, pred_labels = bert_labels2tokens(dl, preds)
true_tokens, true_labels = bert_labels2tokens(dl, [x.bert_labels for x in dl.dataset])
# Token alignment must be identical before comparing label sequences.
assert pred_tokens == true_tokens
# Skip the first 4 entries of idx2label (special/non-entity labels).
tokens_report = flat_classification_report(true_labels, pred_labels, labels=data.train_ds.idx2label[4:], digits=4)
print(tokens_report)
```
### Test
```
from modules.data.bert_data import get_data_loader_for_predict
# Same evaluation as above, but on the held-out testb split.
dl = get_data_loader_for_predict(data, df_path="/home/eartemov/ae/work/conll2003/eng.testb.dev.csv")
preds = learner.predict(dl)
from sklearn_crfsuite.metrics import flat_classification_report
from modules.analyze_utils.utils import bert_labels2tokens, voting_choicer
from modules.analyze_utils.plot_metrics import get_bert_span_report
# Collapse word-piece (BERT) labels back onto whole tokens.
pred_tokens, pred_labels = bert_labels2tokens(dl, preds)
true_tokens, true_labels = bert_labels2tokens(dl, [x.bert_labels for x in dl.dataset])
assert pred_tokens == true_tokens
tokens_report = flat_classification_report(true_labels, pred_labels, labels=data.train_ds.idx2label[4:], digits=4)
print(tokens_report)
```
| github_jupyter |
# Codigo Para Twitter por streaming
```
# Import the necessary methods from the tweepy library.
from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
from tweepy import Stream
import json
import time
# SECURITY NOTE(review): Twitter API credentials are hard-coded below.
# They should be revoked/rotated and loaded from environment variables or
# a secrets store instead of being committed with the notebook.
access_token = "227835837-WD07ixlyOeLqkeywbnMYzk5dnebJjd1pA4sKpOjl"
access_token_secret = "6utbaX2ab3UrpL4PpfSx6ToCuuQZgZ5zDDqKQq2albTLL"
consumer_key = "dwazigqjw1ZIVtx2jKGGsw2wb"
consumer_secret = "Lyy1wItpyPTfPWIBJ5d2qrA250s3iydzwKaCubVAamOTOecK2A"
class StdOutListener(StreamListener):
    """Minimal tweepy listener that echoes each raw tweet payload to stdout."""

    def on_data(self, data):
        # Parse the payload (raises on malformed JSON, as before).
        json.loads(data)
        print(data.encode())
        return True

    def on_error(self, status):
        print(status)
if __name__ == '__main__':
    # This handles Twitter authentication and the connection to the
    # Twitter Streaming API.
    l = StdOutListener()
    auth = OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_token_secret)
    stream = Stream(auth, l)
    # Filter the Twitter stream to capture tweets matching the keyword 'Naruto'.
    stream.filter(track=['Naruto'])
```
# Codigo para Descargar Historico de Twitter
```
import json
import tweepy
from tweepy import Stream
from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
import pandas as pd
import pickle
import sys
##es = Elasticsearch([{'host': 'localhost', 'port': 9200}])
# API credentials.
# SECURITY NOTE(review): hard-coded credentials -- rotate them and move
# them to environment variables / a secrets store.
consumer_key = "dwazigqjw1ZIVtx2jKGGsw2wb"
consumer_secret = "Lyy1wItpyPTfPWIBJ5d2qrA250s3iydzwKaCubVAamOTOecK2A"
access_token = "227835837-WD07ixlyOeLqkeywbnMYzk5dnebJjd1pA4sKpOjl"
access_secret = "6utbaX2ab3UrpL4PpfSx6ToCuuQZgZ5zDDqKQq2albTLL"
auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_secret)
api = tweepy.API(auth)
# NOTE(review): datos_tweets is initialised but never used below; kept in
# case later (unseen) cells rely on it.
datos_tweets = {}
datos_tweets.setdefault('date', [])
datos_tweets.setdefault('texts', [])
datos_tweets.setdefault('user_id', [])
datos_tweets.setdefault('retweet_count', [])
datos_tweet_df = pd.DataFrame()
# [script name, query, since, until]; mirrors sys.argv when run as a script.
args = ['', 'Soen', '2021-03-12T00:00:00', '2021-03-19T24:59:59']  ##sys.argv
print(args[1])
#print(place_id)
# Collect Spanish-language tweets for the query, skipping retweets.
for tweet in tweepy.Cursor(api.search, q=args[1], lang="es", since=args[2], until=args[3]).items():
    if not hasattr(tweet, 'retweeted_status'):
        datos_tweet_df = datos_tweet_df.append(pd.json_normalize(tweet._json))
# BUGFIX: the index reset was duplicated; one call is sufficient.
datos_tweet_df = datos_tweet_df.reset_index(drop=True)
datos_tweet_df
```
| github_jupyter |
## Emerging Technologies
#### Student name : Jina Kim G00353420
***
#### Jupyter notebook that trains a model using the data set. (powerproduction.csv)
***
#### References
Keras Fundamentals for Deep Learning. [1]
Training & evaluation from tf.data Datasets. [2]
***
[1] Keras Fundamentals for Deep Learning; guru99; https://www.guru99.com/keras-tutorial.html
[2] Training & evaluation from tf.data Datasets; Tensorflow; https://www.tensorflow.org/guide/keras/train_and_evaluate
***
```
# Neural networks.
import tensorflow.keras as kr
from tensorflow.keras import layers
from tensorflow.python.keras.layers import Dense
from tensorflow.keras.layers.experimental import preprocessing
import tensorflow as tf
from tensorflow import keras
from tensorflow.python.keras.models import Model
# Numerical arrays
import numpy as np
# Data frames.
import pandas as pd
# Plotting
import matplotlib.pyplot as plt
from sklearn import linear_model
# Plot style.
plt.style.use("ggplot")
# Plot size.
plt.rcParams['figure.figsize'] = [14, 8]
# Dataset from csv file
train = pd.read_csv("powerproduction.csv")
print("TensorFlow version: {}".format(tf.__version__))
print("Eager execution: {}".format(tf.executing_eagerly()))
print("Local copy of the dataset file: {}".format(train))
# turn dataset into dataframe
data = pd.DataFrame(train)
#####################
### plot the datasel
#####################
data.plot.scatter(x='speed', y='power',marker='o', color="blue")
# Create a new neural network.
m = kr.models.Sequential()
# Add a single neuron in a single layer, initialised with weight 1 and bias 0, with sigmoid activation.
m.add(Dense(50, input_shape=(1,), activation='sigmoid', kernel_initializer="glorot_uniform", bias_initializer="glorot_uniform"))
m.add(Dense(1, activation='linear', kernel_initializer="glorot_uniform", bias_initializer="glorot_uniform"))
# Compile the model.
m.compile(kr.optimizers.Adam(lr=0.001), loss='mean_squared_error', metrics=['accuracy'])
# Model configuration
no_epochs = 200
batch_size = 10
# Start the training process
model_one = m.fit(data['speed'], data['power'], epochs=no_epochs, batch_size=batch_size)
# Show a summary of the model. Check the number of trainable parameters
# Call its summary() method to display its contents
print(m.summary())
# The returned "history" object holds a record of the loss values and metric
# values recorded at the end of each training epoch:
model_one.history
# Evaluate the model via evaluate().
# NOTE(review): this re-uses the training data, so the reported "test" loss
# and accuracy are really training-set figures -- there is no held-out split.
print("Evaluate on test data")
results = m.evaluate(data['speed'], data['power'], batch_size=128)
print("test loss, test acc:", results)
# Collect the model's predictions into a data frame.
# NOTE(review): the values are predicted *power*, but the column is labelled
# 'speed' -- confirm before relying on the column name.
prediction = pd.DataFrame(m.predict(data['speed']), columns=['speed'])
# output prediction
prediction
###################
### save the model
###################
m.save("my_h5_model.h5")
print("saving model prediction data...")
# Per-epoch training accuracy into a data frame (mean reported below).
Accuracy = pd.DataFrame(model_one.history['accuracy'], columns=['Average of accuracy is'])
# output mean of model accuracy
Accuracy.mean()
# Per-epoch training accuracy again, relabelled for the maximum.
Accuracy = pd.DataFrame(model_one.history['accuracy'], columns=['maximum of accuracy is'])
# output maximum of model accuracy
Accuracy.max()
# Per-epoch training loss into a data frame (mean reported below).
Loss = pd.DataFrame(model_one.history['loss'], columns=['Average of loss is'])
# output mean loss of the model
Loss.mean()
# Per-epoch training loss again, relabelled for the minimum.
Loss = pd.DataFrame(model_one.history['loss'], columns=['Minium Loss is'])
# output minimum loss of the model
Loss.min()
# Visualize the loss and accuracy curves over the training epochs.
fig, axes = plt.subplots(2, sharex=True, figsize=(14, 10))
fig.suptitle('Training Metrics', fontsize=14)
##########################################
### summarize history for loss & accuracy
##########################################
# plot the losses of the model
axes[0].set_ylabel("Loss", fontsize=10)
axes[0].set_xlabel("Epoch", fontsize=10)
axes[0].set_title("= Model Losses =", fontsize=13)
axes[0].plot(model_one.history['loss'],color="red")
plt.legend(['loss'], loc='upper left')
# plot accuracy of the model
axes[1].set_ylabel("Accuracy", fontsize=10)
axes[1].set_xlabel("Epoch", fontsize=10)
axes[1].set_title("= Model Accuracy =", fontsize=13)
axes[1].plot(model_one.history['accuracy'],color="green")
plt.legend(['accuracy'], loc='upper left')
plt.show()
# The losses drop significantly while the reported accuracy climbs.
#############################
### plot model for comparison
#############################
plt.title('Comparison of Original Data and Predicted Data')
plt.plot(data['speed'], data['power'], 'v', color="green", markersize=2, label='Original Data')
plt.plot(data['speed'], m.predict(data['speed']), '+', color="red", markersize=2, label='Predicted Data')
plt.legend();
```
***
```
########################################################################################
### A program that trains a model that predicts wind turbine power based on wind speed.
########################################################################################
# load the dataset into memory
training_dataset = pd.read_csv("powerproduction.csv")
# create a model using the linear regression algorithm
# and train it with the data from our csv
regression_model = linear_model.LinearRegression()
print ("Training model...")
# model training
regression_model.fit(training_dataset[['speed']], training_dataset.power)
print ("Model trained!")
# ask user to enter an speed and calculate
# its power using our model
input_speed = float(input("Enter speed: "))
proped_power = regression_model.predict([[input_speed]])
print ("Proped power:", round(proped_power[0], 3))
print ("Done")
```
***
#### End of Model 1
## Done
| github_jupyter |
```
import torch
from torchvision import transforms
import torch.nn.functional as F
import torch.nn as nn
from PIL import Image
import imageio
import os
from google.colab import drive
from google.colab import drive  # NOTE(review): duplicate import -- harmless
# Mount Google Drive so the dataset and model weights are reachable under drive/.
drive.mount('/content/drive')
class YOLO(nn.Module):
    """Simplified YOLO-style detector for text lines on resized form scans.

    Three conv/pool stages feed two fully connected layers; the output is
    reshaped to (row_size, 5) rows of (confidence, x, y, width, height),
    each squashed into [0, 1] by a sigmoid.
    """
    def __init__(self, img_width, row_size):
        # NOTE(review): img_width is accepted but never used -- confirm intent.
        super(YOLO, self).__init__()
        self.row_size = row_size
        # Grayscale input (1 channel); feature maps grow 16 -> 32 -> 64.
        self.conv1 = nn.Conv2d(1, 16, 7, stride=2)
        self.mp1 = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(16, 32, (3, 3), stride=1)
        self.mp2 = nn.MaxPool2d(2, 2)
        self.conv3 = nn.Conv2d(32, 64, (3, 3), stride=1)
        self.mp3 = nn.MaxPool2d(2, 2)
        # 64*53*36 is the flattened conv output size; it matches an 885x619
        # input image passed through the conv/pool stack above.
        self.fc1 = nn.Linear(64*53*36, 4096)
        self.fc2 = nn.Linear(4096, row_size * 5)
        self.dropout = nn.Dropout()
    def forward(self, x):
        # Conv + ReLU + max pooling for each of the three conv layers.
        x = F.relu(self.conv1(x))
        x = self.mp1(x)
        x = F.relu(self.conv2(x))
        x = self.mp2(x)
        x = F.relu(self.conv3(x))
        x = self.mp3(x)
        # Flatten to the fixed fully connected input size.
        x = x.view(-1, 64*53*36)
        x = F.relu(self.dropout(self.fc1(x)))
        x = self.fc2(x)
        # One (confidence, x, y, w, h) row per predicted line.
        x = x.view(-1, self.row_size, 5)
        x = torch.sigmoid(x)
        return x
def calc_x_y(row, tensor):
    """Map a normalized prediction row back to absolute image coordinates.

    `tensor` holds (confidence, x, y, w, h) in [0, 1]; the y value is local
    to grid row `row` of 50 rows over an 885x619 image.
    """
    cell_height = 885 / 50  # vertical extent of one grid row
    abs_x = 619 * tensor[1]
    abs_y = cell_height * tensor[2] + cell_height * row
    abs_w = 619 * tensor[3]
    abs_h = 885 * tensor[4]
    return torch.FloatTensor([1, abs_x, abs_y, abs_w, abs_h])
def calc_box(tensor):
    """Convert a (conf, cx, cy, w, h) row into a clamped [x1, y1, x2, y2] box.

    Corners are clipped to the 619x885 image bounds.
    """
    half_w = 0.5 * tensor[3]
    half_h = 0.5 * tensor[4]
    left = max(0, tensor[1] - half_w)
    top = max(0, tensor[2] - half_h)
    right = min(619, tensor[1] + half_w)
    bottom = min(885, tensor[2] + half_h)
    return [left, top, right, bottom]
def non_maximum_suppression(tensor, percent):
    """Greedy non-maximum suppression over the predicted line rows.

    Rows with confidence below 0.5 are zeroed; remaining rows are accepted
    in descending confidence order, and any unaccepted row whose box
    overlaps an accepted row by more than `percent` IoU has its confidence
    zeroed. The tensor is modified in place.

    NOTE(review): loops run over tensor.size(1); for a (rows, 5) tensor that
    is the 5 columns, not the rows -- possibly size(0) was intended. Confirm.
    NOTE(review): calc_iou is not defined in this cell; it must be supplied
    by another notebook cell -- verify it is in scope before running.
    """
    # Zero out every row below the 0.5 confidence threshold.
    for j in range(tensor.size(1)):
        if(tensor[j,0].item() < 0.5):
            tensor[j,0] = torch.tensor(0)
    found = []
    while(True):
        # Pick the highest-confidence row not yet accepted.
        maximum = 0
        index = 0
        for j in range(tensor.size(1)):
            if(tensor[j,0].item() > maximum and j not in found):
                maximum = tensor[j,0].item()
                index = j
        if(maximum == 0):
            break
        found.append(index)
        tensor[index,0] = torch.tensor(1)
        # Suppress remaining candidates that overlap the accepted box.
        for j in range(tensor.size(1)):
            if(j != index and tensor[j,0] >= 0.5):
                x_y_max = calc_x_y(index, tensor[index])
                x_y_other = calc_x_y(j, tensor[j])
                box1 = calc_box(x_y_max)
                box2 = calc_box(x_y_other)
                if(calc_iou(box1, box2) > percent):
                    tensor[j,0] = 0
# Input/output locations on the mounted Google Drive.
imgs_path = "drive/My Drive/data_small/forms/forms_train_small/"
imgs_paths = os.listdir(imgs_path)
weight_path = "drive/My Drive/evaluation_small/weights_small.pt"
predict_path = "drive/My Drive/testlines_predicted_small/"
# Resize every form to the fixed 885x619 model input size.
transform = transforms.Compose([transforms.Resize((885, 619)),
                                transforms.ToTensor()])
# set a boolean flag that indicates whether a cuda capable GPU is available
is_gpu = torch.cuda.is_available()
print("GPU is available:", is_gpu)
print("If you are receiving False, try setting your runtime to GPU")
# set the device to cuda if a GPU is available
device = torch.device("cuda" if is_gpu else "cpu")
# Load the full serialized model object (not just a state_dict).
model = torch.load(weight_path)
print(model)
def predict_lines(model, imgs_path, predict_path):
    """Predict line bounding boxes for every form image in `imgs_path` and
    write each cropped line image into `predict_path`.

    Relies on the module-level `transform` (resize to 885x619 + ToTensor)
    and `device`. Output files are named pic<IMG>line<LINE>.jpg.
    """
    img_count = 0
    # Bug fix: list the directory given by the parameter instead of the
    # module-level `imgs_paths`, which silently ignored the argument.
    for path in os.listdir(imgs_path):
        count = 0
        img_tensor = transform(Image.open(imgs_path + path))
        output = model(torch.stack([img_tensor]).to(device))[0]
        # find right boxes
        non_maximum_suppression(output, 0.5)
        img = imageio.imread(imgs_path + path)
        # Scale factors from the 885x619 model space back to the original image.
        yscale = round(img.shape[0] / 885)
        xscale = round(img.shape[1] / 619)
        # Bug fix: the second printed value was xscale twice.
        print(xscale, yscale)
        for i in range(50):
            if(output[i][0] > 0.5):
                print(output[i])
                box = calc_box(calc_x_y(i, output[i]))
                x1 = (int(box[0])) * xscale
                x2 = (int(box[2])) * xscale
                y1 = (int(box[1])) * yscale
                y2 = (int(box[3])) * yscale
                print(box)
                imageio.imwrite(predict_path + "pic" + str(img_count) + "line" + str(count) + '.jpg', img[y1:y2, x1:x2])
                count += 1
        img_count += 1
predict_lines(model, imgs_path, predict_path)
```
| github_jupyter |
# Db2 11.5.4 RESTful Programming
The following notebook is a brief example of how to use the Db2 11.5.4 RESTful Endpoint service to extend the capabilities of Db2.
Programmers can create Representational State Transfer (REST) endpoints that can be used to interact with Db2.
Each endpoint is associated with a single SQL statement. Authenticated users of web, mobile, or cloud applications can use these REST endpoints from any REST HTTP client without having to install any Db2 drivers.
The Db2 REST server accepts an HTTP request, processes the request body, and returns results in JavaScript Object Notation (JSON).
The Db2 REST server is pre-installed and running on Docker on host3 (10.0.0.4) in the Demonstration cluster. As a programmer you can communicate with the service on port 50050. Your welcome note includes the external port you can use to interact with the Db2 RESTful Endpoint service directly.
You can find more information about this service at: https://www.ibm.com/support/producthub/db2/docs/content/SSEPGG_11.5.0/com.ibm.db2.luw.admin.rest.doc/doc/c_rest.html.
### Finding the Db2 RESTful Endpoint Service API Documentation
If you are running this notebook from a browser running inside the Cloud Pak for Data cluster, click: http://10.0.0.4:50050/docs If you are running this from a browser from your own desktop, check your welcome note for the address of the Db2 RESTful Service at port 50050.
## Getting Started
Before you can start submitting SQL or creating your own services you need to complete a few setup steps.
### Import the required programming libraries
The requests library is the minimum required by Python to construct RESTful service calls. The Pandas library is used to format and manipulate JSON result sets as tables. The urllib3 library is used to manage secure https requests.
```
import requests
import pandas as pd
import urllib3
```
### Create the header required for getting an authentication token
We have to provide the location of the RESTful service for our calls.
The RESTful call to the Db2 RESTful Endpoint service is constructed and transmitted as JSON. The first part of the JSON structure is the headers that define the content type of the request.
```
# HTTP headers for the initial, unauthenticated call: JSON in, JSON out.
headers = {
    "content-type": "application/json"
}
```
### Define the RESTful Host
The next part defines where the request is sent to. It provides the location of the RESTful service for our calls.
```
Db2RESTful = "https://10.0.0.201:32115"
```
### API Authentication Service
Each service has its own path in the RESTful call. For authentication we need to point to the `v1/auth` service.
```
API_Auth = "/v1/auth"
```
### Database Connection Information
To authenticate to the RESTful service you must provide the connection information for the database along with the userid and password that you are using to authenticate with. You can also provide an expiry time so that the access token that gets returned will be invalidated after that time period.
```
# Database connection details plus token lifetime for the auth request.
# NOTE(review): credentials are hardcoded -- acceptable in a lab notebook,
# never in production.
body = {
    "dbParms": {
        "dbHost": "10.0.0.201",
        "dbName": "BLUDB",
        "dbPort": 31684,
        "isSSLConnection": False,
        "username": "admin",
        "password": "password"
    },
    "expiryTime": "8760h"
}
```
### Disabling HTTPS Warnings
```
urllib3.disable_warnings()
```
### Retrieving an Access Token
When communicating with the RESTful service, you must provide the name of the service that you want to interact with. In this case the authentication service is */v1/auth*.
```
# Request an access token from the authentication service.
try:
    response = requests.post("{}{}".format(Db2RESTful,API_Auth), verify=False, headers=headers, json=body)
    print (response)
except Exception as e:
    print("Unable to call RESTful service. Error={}".format(repr(e)))
```
A response code of 200 means that the authentication worked properly, otherwise the error that was generated is printed. The response includes a connection token that is reused throughout the rest of this lab. It ensures a secure connection without requiring you to reenter a userid and password with each request.
```
# 200 means authentication succeeded; keep the token for all later calls.
if (response.status_code == 200):
    token = response.json()["token"]
    print("Token: {}".format(token))
else:
    print(response.json()["errors"])
```
### Creating a standard reusable JSON header
The standard header for all subsequent calls will use this format. It includes the access token.
```
# Reusable header carrying the access token for every subsequent request.
headers = {
    "authorization": f"{token}",
    "content-type": "application/json"
}
```
## Executing an SQL Statement
Before you try creating your own customer service endpoint, you can try using some of the built in services. These let you submit SQL statements in a variety of ways.
Executing SQL requires a different service endpoint. In this case we will use "/services/execsql"
```
API_execsql = "/v1/services/execsql"
```
In this example the code requests that the RESTful function waits until the command is complete.
```
# SQL to run: heavily delayed Boeing 737 flights NJ -> CA, worst delay last.
sql = \
"""
SELECT AC."TAIL_NUMBER", AC."MANUFACTURER", AC."MODEL", OT."FLIGHTDATE", OT."UNIQUECARRIER", OT."AIRLINEID", OT."CARRIER", OT."TAILNUM", OT."FLIGHTNUM", OT."ORIGINAIRPORTID", OT."ORIGINAIRPORTSEQID", OT."ORIGINCITYNAME", OT."ORIGINSTATE", OT."DESTAIRPORTID", OT."DESTCITYNAME", OT."DESTSTATE", OT."DEPTIME", OT."DEPDELAY", OT."TAXIOUT", OT."WHEELSOFF", OT."WHEELSON", OT."TAXIIN", OT."ARRTIME", OT."ARRDELAY", OT."ARRDELAYMINUTES", OT."CANCELLED", OT."AIRTIME", OT."DISTANCE"
FROM "ONTIME"."ONTIME" OT, "ONTIME"."AIRCRAFT" AC
WHERE AC."TAIL_NUMBER" = OT.TAILNUM
AND ORIGINSTATE = 'NJ'
AND DESTSTATE = 'CA'
AND AC.MANUFACTURER = 'Boeing'
AND AC.MODEL LIKE 'B737%'
AND OT.TAXIOUT > 30
AND OT.DISTANCE > 2000
AND OT.DEPDELAY > 300
ORDER BY OT.ARRDELAY;
"""
# Request payload: treat the statement as a query and wait for completion.
body = {
    "isQuery": True,
    "sqlStatement": sql,
    "sync": True
}
print(body)
def runStatement(sql, isQuery):
    """Submit `sql` through the execsql endpoint synchronously.

    Returns the raw requests Response, or None (after printing the error)
    if the HTTP call itself fails.
    """
    payload = {
        "isQuery": isQuery,
        "sqlStatement": sql,
        "sync": True
    }
    url = "{}{}".format(Db2RESTful, API_execsql)
    try:
        return requests.post(url, verify=False, headers=headers, json=payload)
    except Exception as e:
        print("Unable to call RESTful service. Error={}".format(repr(e)))
response = runStatement(sql, True)
```
A successful call returns a **200** response code.
```
print(response)
```
Now that you know the call is a success, you can retrieve the json in the result set.
```
print(response.json()["resultSet"])
```
To format the results, use a Pandas Dataframe class to convert the json result set into a table. Dataframes can be used to further manipulate results in Python.
```
display(pd.DataFrame(response.json()['resultSet']))
```
## Use Parameters in a SQL Statement
Simple parameter passing is also available through the execsql service. In this case we are passing the minimum departure delay into the flight-delay query as a positional parameter. Try substituting different delay values and running the REST call again. For example, you can change 300 to 240, or 360.
```
# Same flight-delay query, but the delay threshold is a positional parameter.
sqlparm = \
"""
SELECT AC."TAIL_NUMBER", AC."MANUFACTURER", AC."MODEL", OT."FLIGHTDATE", OT."UNIQUECARRIER", OT."AIRLINEID", OT."CARRIER", OT."TAILNUM", OT."FLIGHTNUM", OT."ORIGINAIRPORTID", OT."ORIGINAIRPORTSEQID", OT."ORIGINCITYNAME", OT."ORIGINSTATE", OT."DESTAIRPORTID", OT."DESTCITYNAME", OT."DESTSTATE", OT."DEPTIME", OT."DEPDELAY", OT."TAXIOUT", OT."WHEELSOFF", OT."WHEELSON", OT."TAXIIN", OT."ARRTIME", OT."ARRDELAY", OT."ARRDELAYMINUTES", OT."CANCELLED", OT."AIRTIME", OT."DISTANCE"
FROM "ONTIME"."ONTIME" OT, "ONTIME"."AIRCRAFT" AC
WHERE AC."TAIL_NUMBER" = OT.TAILNUM
AND ORIGINSTATE = 'NJ'
AND DESTSTATE = 'CA'
AND AC.MANUFACTURER = 'Boeing'
AND AC.MODEL LIKE 'B737%'
AND OT.TAXIOUT > 30
AND OT.DISTANCE > 2000
AND OT.DEPDELAY > ?
ORDER BY OT.ARRDELAY;
"""
# "1" binds the first (and only) ? marker to the value 300.
body = {
    "isQuery": True,
    "parameters" : {
        "1" : 300
    },
    "sqlStatement": sqlparm,
    "sync": True
}
try:
    response = requests.post("{}{}".format(Db2RESTful,API_execsql), verify=False, headers=headers, json=body)
except Exception as e:
    print("Unable to call RESTful service. Error={}".format(repr(e)))
print(response)
response.json()["resultSet"]
# Render the result set as a pandas table.
display(pd.DataFrame(response.json()['resultSet']))
```
## Generate a Call and don't wait for the results
If you know that your statement will take a long time to return a result, you can check back later. Turn **sync** off to avoid waiting.
```
# Same query again, but submitted asynchronously (sync: False); the service
# returns immediately with a job id instead of the result set.
sql = \
"""
SELECT AC."TAIL_NUMBER", AC."MANUFACTURER", AC."MODEL", OT."FLIGHTDATE", OT."UNIQUECARRIER", OT."AIRLINEID", OT."CARRIER", OT."TAILNUM", OT."FLIGHTNUM", OT."ORIGINAIRPORTID", OT."ORIGINAIRPORTSEQID", OT."ORIGINCITYNAME", OT."ORIGINSTATE", OT."DESTAIRPORTID", OT."DESTCITYNAME", OT."DESTSTATE", OT."DEPTIME", OT."DEPDELAY", OT."TAXIOUT", OT."WHEELSOFF", OT."WHEELSON", OT."TAXIIN", OT."ARRTIME", OT."ARRDELAY", OT."ARRDELAYMINUTES", OT."CANCELLED", OT."AIRTIME", OT."DISTANCE"
FROM "ONTIME"."ONTIME" OT, "ONTIME"."AIRCRAFT" AC
WHERE AC."TAIL_NUMBER" = OT.TAILNUM
AND ORIGINSTATE = 'NJ'
AND DESTSTATE = 'CA'
AND AC.MANUFACTURER = 'Boeing'
AND AC.MODEL LIKE 'B737%'
AND OT.TAXIOUT > 30
AND OT.DISTANCE > 2000
AND OT.DEPDELAY > 300
ORDER BY OT.ARRDELAY;
"""
body = {
    "isQuery": True,
    "sqlStatement": sql,
    "sync": False
}
try:
    response = requests.post("{}{}".format(Db2RESTful,API_execsql), verify=False, headers=headers, json=body)
except Exception as e:
    print("Unable to call RESTful service. Error={}".format(repr(e)))
print(response)
```
Retrieve the job id, so that you can retrieve the results later.
```
# Keep the job id so the result can be fetched later.
job_id = response.json()["id"]
print(job_id)
```
## Retrieve Result set using Job ID
The service API needs to be appended with the Job ID.
```
API_get = "/v1/services/"
```
We can limit the number of rows that we return at a time. Setting the limit to zero means all of the rows are to be returned.
```
# limit = 0 asks the service to return every row at once.
body = {
    "limit": 0
}
```
Get the results.
```
# Poll the service for the stored result of the asynchronous job.
try:
    response = requests.get("{}{}{}".format(Db2RESTful,API_get,job_id), verify=False, headers=headers, json=body)
except Exception as e:
    print("Unable to call RESTful service. Error={}".format(repr(e)))
print(response)
```
Retrieve the results.
```
display(pd.DataFrame(response.json()['resultSet']))
```
Now that you have some experience with the built in SQL service, you can try creating your own endpoint service.
## Using RESTful Endpoint Services
The most common way of interacting with the service is to fully encapsulate an SQL statement, including any parameters, in a unique RESTful service. This creates a secure separation between the database service and the RESTful programming service. It also allows you to create versions of the same service to make maintenance and evolution of programming models simple and predictable.
### Setup the Meta Data Tables and Stored Procedures to manage Endpoint Services
Before you can start defining and running your own RESTful Endpoint services you need call the service to create the table and stored procedures in the database you are using.
```
API_makerest = "/v1/metadata/setup"
```
You can specify the schema that the new table and stored procedures will be created in. In this example we will use **DB2REST**
```
# Create the endpoint-service metadata objects in the DB2REST schema.
body = {
    "schema": "DB2REST"
}
try:
    response = requests.post("{}{}".format(Db2RESTful,API_makerest), verify=False, headers=headers, json=body)
except Exception as e:
    print("Unable to call RESTful service. Error={}".format(repr(e)))
```
If the process is successful the service returns a 201 status code.
```
# 201 means the metadata objects were created successfully.
if (response.status_code == 201):
    print(response.reason)
else:
    print(response.json())
```
### Create a RESTful Service
Now that the RESTful Service metadata is created in your database, you can create your first service. In this example you will pass an origin state (a 2 character string) and a minimum departure delay to the service. It will return the number of matching delayed flights.
```
API_makerest = "/v1/services"
```
The first step is to define the SQL that we want in the RESTful call. Parameters are identified using an at sign "@". Notice that our SQL is nicely formatted to make this notebook easier to read. However when creating a service it is good practice to remove the line break characters from your SQL statement.
```
# Parameterized SQL for the new service; @STATE and @DELAY are placeholders.
sql = \
"""
SELECT COUNT(AC."TAIL_NUMBER") FROM "ONTIME"."ONTIME" OT, "ONTIME"."AIRCRAFT" AC
WHERE AC."TAIL_NUMBER" = OT.TAILNUM
AND ORIGINSTATE = @STATE
AND DESTSTATE = 'CA'
AND AC.MANUFACTURER = 'Boeing'
AND AC.MODEL LIKE 'B737%'
AND OT.TAXIOUT > 30
AND OT.DISTANCE > 2000
AND OT.DEPDELAY > @DELAY
FETCH FIRST 5 ROWS ONLY
"""
# Strip line breaks: single-line SQL is recommended for service definitions.
sql = sql.replace("\n","")
```
The next step is defining the json body to send along with the REST call.
```
# Service definition: name, version, schema, and the typed parameters
# referenced by the SQL above.
body = {"isQuery": True,
    "parameters": [
        {
            "datatype": "CHAR(2)",
            "name": "@STATE"
        },
        {
            "datatype": "INT",
            "name": "@DELAY"
        }
    ],
    "schema": "DEMO",
    "serviceDescription": "Delay",
    "serviceName": "delay",
    "sqlStatement": sql,
    "version": "1.0"
}
```
Now submit the full RESTful call to create the new service.
```
# Register the new service with the endpoint server.
try:
    response = requests.post("{}{}".format(Db2RESTful,API_makerest), verify=False, headers=headers, json=body)
except Exception as e:
    print("Unable to call RESTful service. Error={}".format(repr(e)))
print(response)
```
### Call the new RESTful Service
Now you can call the RESTful service. In this case we will pass the origin state "NY" and a minimum delay of 300 minutes. But like in the previous example you can try rerunning the service call with different states and delay values.
```
# Call version 1.0 of the delay service with concrete parameter values.
API_runrest = "/v1/services/delay/1.0"
body = {
    "parameters": {
        "@STATE": "NY","@DELAY":"300"
    },
    "sync": True
}
try:
    response = requests.post("{}{}".format(Db2RESTful,API_runrest), verify=False, headers=headers, json=body)
except Exception as e:
    print("Unable to call RESTful service. Error={}".format(repr(e)))
print("{}{}".format(Db2RESTful,API_runrest))
print(response)
print(response.json())
```
You can retrieve the result set, convert it into a Dataframe and display the table.
```
display(pd.DataFrame(response.json()['resultSet']))
```
## Loop through the new call
Now you can call the RESTful service with different values.
```
# Call the service repeatedly for several origin states, twice over.
API_runrest = "/v1/services/delay/1.0"
repeat = 2
for x in range(0, repeat):
    for state in ("OH", "NJ", "NY", "FL", "MI"):
        body = {
            "parameters": {
                "@STATE": state,"@DELAY": "240"
            },
            "sync": True
        }
        try:
            response = requests.post("{}{}".format(Db2RESTful,API_runrest), verify=False, headers=headers, json=body)
            print(state + ": " + str(response.json()['resultSet']))
        except Exception as e:
            print("Unable to call RESTful service. Error={}".format(repr(e)))
```
## Managing Your Services
There are several service calls you can use to help manage the Db2 RESTful Endpoint service.
## List Available Services
You can also list all the user defined services you have access to
```
# List every user-defined service visible to this user.
API_listrest = "/v1/services"
try:
    response = requests.get("{}{}".format(Db2RESTful,API_listrest), verify=False, headers=headers)
except Exception as e:
    print("Unable to call RESTful service. Error={}".format(repr(e)))
print(response.json())
display(pd.DataFrame(response.json()['Db2Services']))
```
## Get Service Details
You can also get the details of a service
```
# Fetch the definition of a single service.
# NOTE(review): this asks for version 3.0 while the service above was
# created as 1.0 -- confirm the intended version.
API_getDetails = "/v1/services/delay/3.0"
try:
    response = requests.get("{}{}".format(Db2RESTful,API_getDetails), verify=False, headers=headers)
except Exception as e:
    print("Unable to call RESTful service. Error={}".format(repr(e)))
# NOTE(review): this local name shadows any imported `json` module.
json = response.json()
print(json)
```
You can format the result to make it easier to read. For example, here are the inputs and outputs.
```
# Tabulate the service's input parameters and result fields.
display(pd.DataFrame(json['inputParameters']))
display(pd.DataFrame(json['resultSetFields']))
```
## Delete a Service
A single call is also available to delete a service
```
# Delete version 1.0 of the delay service.
API_deleteService = "/v1/services"
Service = "/delay"
Version = "/1.0"
try:
    response = requests.delete("{}{}{}{}".format(Db2RESTful,API_deleteService,Service,Version), verify=False, headers=headers)
except Exception as e:
    print("Unable to call RESTful service. Error={}".format(repr(e)))
print (response)
```
## Get Service Logs
You can also easily download the Db2 RESTful Endpoint service logs.
```
# Download the endpoint service logs as a zip archive into /tmp.
API_listrest = "/v1/logs"
try:
    response = requests.get("{}{}".format(Db2RESTful,API_listrest), verify=False, headers=headers)
except Exception as e:
    print("Unable to call RESTful service. Error={}".format(repr(e)))
if (response.status_code == 200):
    myFile = response.content
    open('/tmp/logs.zip', 'wb').write(myFile)
    print("Downloaded",len(myFile),"bytes.")
else:
    print(response.json())
```
To see the content of the logs, open the Files browser on machine host3 (10.0.0.4). Navigate to the **/tmp** directory and unzip the logs file.
## Using the Db2 REST Class
For your convenience, everything in the lessons above has been included into a Db2REST Python Class. You can add or use this code as part of your own Jupyter notebooks to make working with the Db2 RESTful Endpoint service quick and easy.
There are also lots of examples in the following lesson on how to use the class.
```
# Run the Db2REST Class library
# Used to construct and reuse an Autentication Key
# Used to construct RESTAPI URLs and JSON payloads
import json
import requests
import pandas as pd
class Db2REST():
def __init__(self, RESTServiceURL):
self.headers = {"content-type": "application/json"}
self.RESTServiceURL = RESTServiceURL
self.version = "/v1"
self.API_auth = self.version + "/auth"
self.API_makerest = self.version + "/metadata/setup"
self.API_services = self.version + "/services/"
self.API_version = self.version + "/version/"
self.API_execsql = self.API_services + "execsql"
self.API_monitor = self.API_services + "monitor"
self.Verify = False
import urllib3
urllib3.disable_warnings()
def connectDatabase(self, dbHost, dbName, dbPort, isSSLConnection, dbUsername, dbPassword, expiryTime="300m"):
self.dbHost = dbHost
self.dbName = dbName
self.dbPort = dbPort
self.isSSLConnection = isSSLConnection
self.dbusername = dbUsername
self.dbpassword = dbPassword
self.connectionBody = {
"dbParms": {
"dbHost": dbHost,
"dbName": dbName,
"dbPort": dbPort,
"isSSLConnection": isSSLConnection,
"username": dbUsername,
"password": dbPassword
},
"expiryTime": expiryTime
}
try:
response = requests.post("{}{}".format(self.RESTServiceURL,self.API_auth), verify=self.Verify, headers=self.headers, json=self.connectionBody)
print (response)
except Exception as e:
print("Unable to call RESTful service. Error={}".format(repr(e)))
if (response.status_code == 200):
self.token = response.json()["token"]
print("Successfully connected and retrieved access token")
else:
print(response)
print(response.json())
print(response.json()["errors"])
self.headers = {
"authorization": f"{self.token}",
"content-type": "application/json"
}
def getConnection(self):
return self.connectionBody
def getService(self):
return self.RESTServiceURL
def getToken(self):
return("Token: {}".format(self.token))
def getVersion(self):
try:
print("{}{}".format(self.RESTServiceURL,self.API_version))
response = requests.get("{}{}".format(self.RESTServiceURL,self.API_version),verify=self.Verify)
except Exception as e:
print("Unable to call RESTful service. Error={}".format(repr(e)))
if (response.status_code == 200):
return response.json()['version']
else:
print(response)
print(response.json()['errors'][0]['more_info'])
def runStatement(self, sql, isQuery=True, sync=True, parameters={}):
body = {
"isQuery": isQuery,
"sqlStatement": sql,
"sync": sync,
"parameters": parameters
}
try:
response = requests.post("{}{}".format(self.RESTServiceURL,self.API_execsql), verify=self.Verify, headers=self.headers, json=body)
except Exception as e:
print("Unable to call RESTful service. Error={}".format(repr(e)))
if (response.status_code == 200):
return pd.DataFrame(response.json()['resultSet'])
elif (response.status_code == 202):
return response.json()["id"]
else:
print(response.json()['errors'][0]['more_info'])
def getResult(self, job_id, limit=0):
body = {"limit": limit}
try:
response = requests.get("{}{}{}".format(self.RESTServiceURL,self.API_services,job_id), verify=self.Verify, headers=self.headers, json=body)
except Exception as e:
print("Unable to call RESTful service. Error={}".format(repr(e)))
if (response.status_code == 200):
json = response.json()
if (json['jobStatus'] == 2):
return json['jobStatusDescription']
elif (json['jobStatus'] == 3):
return pd.DataFrame(json['resultSet'])
elif (json['jobStatus'] == 4):
return pd.DataFrame(json['resultSet'])
else:
return json
elif (response.status_code == 404):
print(response.json()['errors'])
elif (response.status_code == 500):
print(response.json()['errors'][0]['more_info'])
else:
print(response.json())
def createServiceMetadata(self, serviceSchema="Db2REST"):
self.serviceSchema = serviceSchema
body = {"schema": self.serviceSchema}
try:
response = requests.post("{}{}".format(self.RESTServiceURL,self.API_makerest), verify=self.Verify, headers=self.headers, json=body)
if (response.status_code == 201):
print(response.reason)
else:
print(response.json())
except Exception as e:
print("Unable to call RESTful service. Error={}".format(repr(e)))
def listServices(self):
try:
response = requests.get("{}{}".format(self.RESTServiceURL,self.API_services), verify=self.Verify, headers=self.headers)
return pd.DataFrame(response.json()['Db2Services'])
except Exception as e:
print("Unable to call RESTful service. Error={}".format(repr(e)))
def getServiceDetails(self, serviceName, version):
try:
response = requests.get("{}{}{}{}".format(self.RESTServiceURL,self.API_services,"/" + serviceName,"/" + version), verify=self.Verify, headers=self.headers)
print(response.status_code)
if (response.status_code == 200):
description = response.json()
print("Input parameters:")
print(description["inputParameters"])
print("Result format:")
print(description["resultSetFields"])
else:
print(response.json())
except Exception as e:
print("Unable to call RESTful service. Error={}".format(repr(e)))
def createService(self, schema, serviceDescription, serviceName, sql, version, parameters=False, isQuery=True):
if (parameters==False):
body = {"isQuery": isQuery,
"schema": schema,
"serviceDescription": serviceDescription,
"serviceName": serviceName,
"sqlStatement": sql.replace("\n",""),
"version": version
}
else:
body = {"isQuery": isQuery,
"schema": schema,
"serviceDescription": serviceDescription,
"serviceName": serviceName,
"sqlStatement": sql.replace("\n",""),
"version": version,
"parameters": parameters
}
try:
response = requests.post("{}{}".format(self.RESTServiceURL,self.API_services), verify=self.Verify, headers=self.headers, json=body)
except Exception as e:
print("Unable to call RESTful service. Error={}".format(repr(e)))
if (response.status_code == 201):
print("Service: " + serviceName + " Version: " + version + " created")
else:
print(response.json())
def deleteService(self, serviceName, version):
try:
response = requests.delete("{}{}{}{}".format(self.RESTServiceURL,self.API_services,"/" + serviceName,"/" + version), verify=self.Verify, headers=self.headers)
except Exception as e:
print("Unable to call RESTful service. Error={}".format(repr(e)))
if (response.status_code == 204):
print("Service: " + serviceName + " Version: " + version + " deleted")
else:
print(response.json())
def callService(self, serviceName, version, parameters, sync=True):
body = {
"parameters": parameters,
"sync": sync
}
try:
response = requests.post("{}{}{}{}".format(self.RESTServiceURL,self.API_services,"/" + serviceName,"/" + version), verify=self.Verify, headers=self.headers, json=body)
if (response.status_code == 200):
return pd.DataFrame(response.json()['resultSet'])
elif (response.status_code == 202):
return response.json()["id"]
else:
print(response.json()['errors'][0]['more_info'])
except Exception as e:
if (repr(e) == "KeyError('more_info',)"):
print("Service not found")
else:
print("Unable to call RESTful service. Error={}".format(repr(e)))
def monitorJobs(self):
    """Return the endpoint service job queue as a pandas DataFrame.

    Prints the raw JSON payload on a non-200 status, or an error message
    when the HTTP call itself fails; returns None in both of those cases.
    """
    monitor_url = "{}{}".format(self.RESTServiceURL, self.API_monitor)
    try:
        reply = requests.get(monitor_url, verify=self.Verify, headers=self.headers)
        if reply.status_code != 200:
            print(reply.json())
        else:
            return pd.DataFrame(reply.json()['MonitorServices'])
    except Exception as err:
        print("Unable to call RESTful service. Error={}".format(repr(err)))
```
### Setting up a Db2 RESTful Endpoint Service Class instance
To use the class first create an instance of the class. The cell below creates an object called **Db2RESTService** from the **Db2REST** class. The first call to the object is **getVersion** to confirm the version of the RESTful Endpoint Service you are connected to.
#### Connecting to the service to the database
Unless your service is already bound to a single database, the call below connects it to a single Db2 database. You can run this command again to connect to a different database from the same RESTful Endpoint service.
```
Db2RESTService = Db2REST("https://10.0.0.201:31315")
print("Db2 RESTful Endpoint Service Version: " + Db2RESTService.getVersion())
```
#### Connect to Db2 OLTP
```
Db2RESTService.connectDatabase("10.0.0.201", "STOCKS", 32443, False, "admin", "CP4DDataFabric")
```
#### Connect to DV
```
Db2RESTService.connectDatabase("10.0.0.201", "BIGSQL", 31193, False, "admin", "CP4DDataFabric")
```
#### Confirming the service settings
Once the connection to the RESTful Endpoint Service and Db2 is established you can always check your settings using the following calls.
```
print(Db2RESTService.getService())
print(Db2RESTService.getConnection())
print(Db2RESTService.getToken())
```
### Running SQL Through the Service
You can run an SQL Statement through the RESTful service as a simple text string.
Let's start by defining the SQL to run:
```
sql = \
"""
SELECT AC."TAIL_NUMBER", AC."MANUFACTURER", AC."MODEL", OT."FLIGHTDATE", OT."UNIQUECARRIER", OT."AIRLINEID", OT."CARRIER", OT."TAILNUM", OT."FLIGHTNUM", OT."ORIGINAIRPORTID", OT."ORIGINAIRPORTSEQID", OT."ORIGINCITYNAME", OT."ORIGINSTATE", OT."DESTAIRPORTID", OT."DESTCITYNAME", OT."DESTSTATE", OT."DEPTIME", OT."DEPDELAY", OT."TAXIOUT", OT."WHEELSOFF", OT."WHEELSON", OT."TAXIIN", OT."ARRTIME", OT."ARRDELAY", OT."ARRDELAYMINUTES", OT."CANCELLED", OT."AIRTIME", OT."DISTANCE"
FROM "ONTIME"."ONTIME" OT, "ONTIME"."AIRCRAFT" AC
WHERE AC."TAIL_NUMBER" = OT.TAILNUM
AND ORIGINSTATE = 'NJ'
AND DESTSTATE = 'CA'
AND AC.MANUFACTURER = 'Boeing'
AND AC.MODEL LIKE 'B737%'
AND OT.TAXIOUT > 30
AND OT.DISTANCE > 2000
AND OT.DEPDELAY > 300
ORDER BY OT.DEPDELAY DESC
FETCH FIRST 5 ROWS ONLY;
"""
```
Now a single call to the **runStatement** routine runs the SQL synchronously and returns the result as a DataFrame
```
sql = "SELECT * FROM SYSCAT.TABLES"
result = (Db2RESTService.runStatement(sql))
display(result)
```
You can also run the statement asynchronously so you don't have to wait for the result. In this case the result is the statement identifier that you can use to check the statement status.
```
statementID = (Db2RESTService.runStatement(sql, sync=False))
display(statementID)
```
If you have several statements running at the same time you can check to see their status with the **monitorStatus** routine and see where they are in the service queue.
```
services = Db2RESTService.monitorJobs()
display(services)
```
You can try to get the results of the statement by passing the statement identifier into the getResults routine. If the statement has finished running it will return a result set as a DataFrame. If it is still running, a message is returned.
```
result = (Db2RESTService.getResult(statementID))
display(result)
```
#### Passing Parameters when running SQL Statements
You can also define a single SQL statement with ? parameters and call that statement with different values using the same **runStatement** routine.
```
sqlparm = \
"""
SELECT AC."TAIL_NUMBER", AC."MANUFACTURER", AC."MODEL", OT."FLIGHTDATE", OT."UNIQUECARRIER", OT."AIRLINEID", OT."CARRIER", OT."TAILNUM", OT."FLIGHTNUM", OT."ORIGINAIRPORTID", OT."ORIGINAIRPORTSEQID", OT."ORIGINCITYNAME", OT."ORIGINSTATE", OT."DESTAIRPORTID", OT."DESTCITYNAME", OT."DESTSTATE", OT."DEPTIME", OT."DEPDELAY", OT."TAXIOUT", OT."WHEELSOFF", OT."WHEELSON", OT."TAXIIN", OT."ARRTIME", OT."ARRDELAY", OT."ARRDELAYMINUTES", OT."CANCELLED", OT."AIRTIME", OT."DISTANCE"
FROM "ONTIME"."ONTIME" OT, "ONTIME"."AIRCRAFT" AC
WHERE AC."TAIL_NUMBER" = OT.TAILNUM
AND ORIGINSTATE = ?
AND DESTSTATE = ?
AND AC.MANUFACTURER = 'Boeing'
AND AC.MODEL LIKE 'B737%'
AND OT.TAXIOUT > 30
AND OT.DISTANCE > 2000
AND OT.DEPDELAY > ?
ORDER BY OT.DEPDELAY DESC
FETCH FIRST 10 ROWS ONLY;
"""
result = Db2RESTService.runStatement(sqlparm,parameters={"1": 'NY', "2": 'CA', "3" : 300})
display(result)
result = Db2RESTService.runStatement(sqlparm,parameters={"1": 'NJ', "2": 'CA', "3" : 200})
display(result)
```
#### Limiting Results
You also have full control of how many rows in an answer set to return. Run the following statement using **sync=False**
```
statementID = Db2RESTService.runStatement(sqlparm, sync=False, parameters={"1": 'NJ', "2": 'CA', "3" : 200})
display(statementID)
result = (Db2RESTService.getResult(statementID))
display(result)
```
This time the **getResult** routine includes a parameter to limit the result set to 5 rows.
```
result = (Db2RESTService.getResult(statementID, limit=5))
display(result)
```
The next cell retrieves the remaining rows.
```
result = (Db2RESTService.getResult(statementID))
display(result)
```
After all the rows have been returned the job history is removed. If you try to retrieve the results for this statement now the service won't find it.
```
result = (Db2RESTService.getResult(statementID))
display(result)
```
### Creating and Running Endpoint Services
If the MetaData tables have not already been created in your database you can use the following call to create the MetaData in the schema of your choice. In this case **DB2REST**.
```
Db2RESTService.createServiceMetadata("DB2REST")
```
Let's start by defining the SQL statement. It can include parameters that have to be identified with an at sign "@".
```
sql = \
"""
SELECT COUNT(AC."TAIL_NUMBER") FROM "ONTIME"."ONTIME" OT, "ONTIME"."AIRCRAFT" AC
WHERE AC."TAIL_NUMBER" = OT.TAILNUM
AND ORIGINSTATE = @STATE
AND DESTSTATE = 'CA'
AND AC.MANUFACTURER = 'Boeing'
AND AC.MODEL LIKE 'B737%'
AND OT.TAXIOUT > 30
AND OT.DISTANCE > 2000
AND OT.DEPDELAY > @DELAY
FETCH FIRST 5 ROWS ONLY
"""
```
Now we can create the service, including the two parameters, using the **createService** routine.
```
parameters = [{"datatype": "CHAR(2)","name": "@STATE"},{"datatype": "INT","name": "@DELAY"}]
schema = 'DEMO'
serviceDescription = 'Delay'
serviceName = 'delay'
version = '1.0'
Db2RESTService.createService(schema, serviceDescription, serviceName, sql, version, parameters)
```
A call to the **listServices** routine confirms that you have created the new service.
```
services = Db2RESTService.listServices()
display(services)
```
You can also see the details for any service using the **getServiceDetails** routine.
```
details = Db2RESTService.getServiceDetails("delay","1.0")
display(details)
```
You can call the new service using the **callService** routine. The parameters are passed into the call using an array of values. By default the call is synchronous so you have to wait for the results.
```
serviceName = 'delay'
version = '1.0'
parameters = {"@STATE": "NJ","@DELAY":"200"}
result = Db2RESTService.callService(serviceName, version, parameters)
display(result)
```
You can also call the service asynchronously, just like we did with SQL statements earlier. Notice the additional parameter **sync=False**. Since the cell below immediately checks the status of the job you can see it has been queued.
```
serviceName = 'delay'
version = '1.0'
parameters = {"@STATE": "NJ","@DELAY":"200"}
statementID = Db2RESTService.callService(serviceName, version, parameters, sync=False)
display(statementID)
display(Db2RESTService.monitorJobs())
```
Run **monitorJobs** again to confirm that the endpoint service has completed the request.
```
services = Db2RESTService.monitorJobs()
display(services)
```
And retrieve the result set.
```
result = (Db2RESTService.getResult(statementID))
display(result)
```
You can also delete an existing endpoint service with a call to the **deleteService** routine.
```
serviceName = 'delay'
version = '1.0'
Db2RESTService.deleteService(serviceName, version)
```
#### Using a service to query the Catalog
You can also think about creating services to explore the database catalog. For example, here is a service that accepts a schema as an input parameter and returns a list of tables in the schema.
```
sql = \
"""
SELECT TABSCHEMA, TABNAME, ALTER_TIME FROM SYSCAT.TABLES WHERE TABSCHEMA = @SCHEMA
"""
parameters = [{"datatype": "VARCHAR(64)","name": "@SCHEMA"}]
schema = 'DEMO'
serviceDescription = 'Tables'
serviceName = 'tables'
version = '1.0'
Db2RESTService.createService(schema, serviceDescription, serviceName, sql, version, parameters)
serviceName = 'tables'
version = '1.0'
result = Db2RESTService.callService(serviceName, version, parameters = {"@SCHEMA": "SYSCAT"}, sync=True)
display(result)
```
### Incorporating the Db2 RESTFul Endpoint Class into your Python scripts
The Db2 RESTful Endpoint Class is available on GIT at https://github.com/Db2-DTE-POC/CPDDVHOL4/blob/main/RESTfulEndpointServiceClass402.ipynb. You can download a copy into your own Python library and add **%run db2restendpoint.ipynb** to your own Python notebook. You can also include the following two lines which will automatically download a copy of the library from GIT and run the Class code.
```
!wget -O db2restendpoint.ipynb https://raw.githubusercontent.com/Db2-DTE-POC/CPDDVHOL4/main/RESTfulEndpointServiceClass402.ipynb
%run db2restendpoint.ipynb
Db2RESTService = Db2REST("https://10.0.0.201:31315")
print("Db2 RESTful Endpoint Service Version: " + Db2RESTService.getVersion())
```
## What's Next
Try experimenting. Create your own services. You can find out more at: https://www.ibm.com/support/producthub/db2/docs/content/SSEPGG_11.5.0/com.ibm.db2.luw.admin.rest.doc/doc/c_rest.html.
Also check out the OpenAPI specification for the service. It includes coding examples in Python, CURL and JavaScript.
If you are running this notebook from a browser running inside the Cloud Pak for Data cluster, click: http://10.0.0.4:50050/docs If you are running this from a browser from your own desktop, check your welcome note for the address of the Db2 RESTful Service at port 50050 and add **docs** to the end of the URL.
| github_jupyter |
# Inference acceleration of `T5` for large batch size / long sequence length / > large models
Every week or so, a new impressive few shots learning work taking advantage of autoregressive models is released by some team around the world.
Still `LLM` inference is rarely discussed and few projects are focusing on this aspect.
In this notebook, we describe our take to significantly improve autoregressive model latency.
We plan to intensively test large autoregressive models, so we want something:
* which **scales**: the improvement exists on small and large models, for short and long sequences, in greedy and beam search;
* This is very important in a few shots learning where sequences are most of the time hundreds or thousands tokens long and beam search is used to improve text quality.
* that has **no hidden cost**: no big increase in memory usage, no degradation in quality of generated text, support state-of-the-art decoding algorithms;
* that is **generic**: works for any transformer based architecture, and not specific to an inference engine;
* that is **easy to maintain**: no hard-coded behaviors or other technical debt if it doesn't bring a clear advantage.
To be clear, **we are not targeting the best performance ever but the right trade off** (for us at least) between simplicity to use/maintain and acceptable latency.
## The challenge
In most situations, performing inference with `Onnx Runtime` or `TensorRT` usually bring large improvement over `Pytorch` implementations.
It's very true with `transformer` based models.
The main reason is that these tools will perform `kernel fusions` (merging several operations into a single one) and therefore reduce the number of memory bounded operations. Sometimes they also replace some operations by a much faster approximation.
In the very specific case of autoregressive languages, things are a bit more complicated.
On most `Pytorch` implementations of these models, there is a `cache` of `K` and `V` values.
Let's remind us that in attention blocks, each token is projected on 3 matrices called `Query`, `Key`, and `Value`.
Then, those projections will be used to compute a representation of each token which takes into account the information from the related other tokens of the sequence.
As autoregressive models generate the sequence one token at a time, they should recompute final representation of all past tokens for each new token to generate.
Because each token can only attend to the past, the result of these computations never changes; therefore one simple trick to reduce latency is to just memorize them and reuse them later, avoiding lots of computation.
Out of the box, the cache mechanism can't be exported to `Onnx` from `Hugging Face` models (and all other `Pytorch` implementations we are aware of).
The reason is that those models are not `torchscript` scripting compliant (it requires `Pytorch` code to follow some [restrictive rules](https://pytorch.org/docs/stable/jit_builtin_functions.html)).
Because of that, `Onnx` export is done through `tracing` which erases any control flow instructions (including the `If` instruction to enable or not a cache).
## Existing solutions
Some interesting solutions targeting inference latency that we have considered and/or tested:
* [TensorRT](https://developer.nvidia.com/blog/optimizing-t5-and-gpt-2-for-real-time-inference-with-`TensorRT`/), which targets `GPU`, heavily optimizes the computation graph, making `T5` inference very fast (they report X10 speedup on `small-T5`). The trick is that it doesn't use any cache (see below for more details), so it's very fast on short sequence and small models, as it avoids many memory bounded operations by redoing full computation again and again... but as several users have already found ([1](https://github.com/NVIDIA/TensorRT/issues/1807), [2](https://github.com/NVIDIA/TensorRT/issues/1642), [3](https://github.com/NVIDIA/TensorRT/issues/1799), [4](https://github.com/NVIDIA/TensorRT/issues/1845), ...), this approach doesn't scale when the computation intensity increases, i.e., when base or large models are used instead of a small one, when generation is done on moderately long sequence of few hundred of tokens or if beam search is used instead of a greedy search;
* [FastT5](https://github.com/Ki6an/fastT5), which targets `CPU`, exports 2 versions of the decoder, one with cache and one without. You need the `no cache` version to compute the first token and the first `past state` tensors (aka the cached tensors), and for all the other tokens you use the `cache` version of the computation graph. Basically, it makes the memory foot print 2 times bigger as all weights are duplicated. As generative models tend to be huge, they work around the memory issue by using dynamic `int-8` quantization, the final memory foot print of the decoders is now the same as `Hugging Face` in `FP16`... but 1/ dynamic quantization only works on `CPU`, and 2/ according to several reports dynamic quantization degrades significantly generative model output, to a point where it may make them useless ([1](https://github.com/huggingface/transformers/issues/2466#issuecomment-572781378), [2](https://github.com/huggingface/transformers/issues/2466#issuecomment-982710520), and [here](https://github.com/microsoft/onnxruntime/issues/6549#issuecomment-1016948837) you can find a report in the `GPT-2` context from a Microsoft engineer: "*int8 quantization are not recommended due to accuracy loss*").
* [Onnx Runtime T5 export tool](https://github.com/microsoft/onnxruntime/tree/master/onnxruntime/python/tools/transformers/models/t5) targets both `GPU` and `CPU`. It works in a similar way than `FastT5`: `decoder` module is exported 2 times. Like `FastT5`, the memory footprint of the decoder part is doubled (this time there is no `int-8` quantization).
* [FasterTransformer](https://github.com/NVIDIA/FasterTransformer/blob/main/docs/t5_guide.md#translation-process) targets `GPU` and is a mix of `Pytorch` and `CUDA`/`C++` dedicated code. The performance boost is huge on `T5`, they report a 10X speedup like `TensorRT`. However, it may significantly decrease the accuracy of the model ([here](https://github.com/NVIDIA/FasterTransformer/blob/main/docs/t5_guide.md#translation-process) when sampling is enabled, it reduces BLEU score of translation task by 8 points, the cause may be a bug in the decoding algorithm or an approximation a bit too aggressive) plus the speedup is computed on a [translation task](https://github.com/NVIDIA/FasterTransformer/blob/main/examples/pytorch/decoding/utils/translation/test.en) where sequences are 25 tokens long on average. In our experience, improvement on very short sequences tends to decrease by large margins on longer sequences. It seems to us that their objectives are different from ours.
With the existing solutions, you need to choose one or two items of the following:
* double decoder memory footprint;
* be slower than `Hugging Face` for moderately long sequence length / beam search;
* degrade output quality.
## Our approach
Our approach to make autoregressive `transformer` based models 2X faster than `Hugging Face` `Pytorch` implementation (the base line) is based on 3 key ingredients:
* storing 2 computation graphs in a single `Onnx` file: this let us have both cache and no cache support without having any duplicated weights,
* `zero copy` to retrieve output from `Onnx Runtime`: we built over our past work to connect in the most efficient way `Pytorch` tensors (used in the decoding part) and `Onnx Runtime`. Our previous work was to avoid `host` <-> `GPU` tensor copy, but it still required a `GPU` <-> `GPU`. It is now part of the official `Onnx Runtime` documentation (apparently [thanks of our project](https://github.com/microsoft/onnxruntime/pull/10651)!). This time we found out a way to directly expose the internal state of `Onnx Runtime` through a `Pytorch` tensor in zero copy way. Combined with cache mechanism, this is responsible for most of the speedup we have obtained.
* a generic tool to convert any model (whatever the architecture) to `FP16` without any risk of having out of range values or rounding to zero: `FP16` is still the way to reduce memory footprint of a model. The main issue is that some nodes may output values outside of `FP16` range or round others to zero, resulting in `NaN` output; moreover, very small values may be rounded to zero which is an issue for log and div operations. We have built a tool which detect those nodes so we can keep their precision in `FP32`. It's quite important to reduce memory footprint of these models, not just because they tend to be huge, but also because past states (that we cache) and internal buffers can be even bigger than the weights of the model itself.
## Results
As demonstrated at the end of this notebook, **we are able to provide a X2 speedup** whatever the batch size, the sequence length or the model size.
> For `TensorRT` we have our own implementation of our approach described above which helps to provide similar latency to `Onnx Runtime`. It's in a Python script in the same folder as this notebook. We had to work around a documented limitation. Because of that the code is slightly more complex and we wanted to keep this notebook easy to follow.
```
! nvidia-smi
```
## `Onnx Runtime` compilation
Version 1.11.1 of `Onnx Runtime` and older have a bug which makes them much slower when most inputs are used by subgraphs of an `If` node.
Unfortunately, it's exactly what we will do below, so we need to compile our own version of `Onnx Runtime` until version 1.12 is released (in June 2022).
Code below has been tested on Ubuntu 22.04 and supposes that your machine has `CUDA` 11.4 installed.
If not, use the Docker image of this library.
We use a specific commit of `Onnx Runtime` with a better management of `If`/`Else`/`Then` `Onnx` nodes:
```shell
git clone --recursive https://github.com/Microsoft/onnxruntime
cd onnxruntime
git checkout -b fix_if 81d78706feb1dc923f3e43f7ba8ac30b55f5b19b
# Build Onnx Runtime from source with CUDA support.
# Bug fix: the cudnn_home path was split across two lines and lost the "64"
# (".../x86_" + "-linux-gnu/"); the correct Ubuntu multiarch library path
# is /usr/lib/x86_64-linux-gnu/.
CUDACXX=/usr/local/cuda-11.4/bin/nvcc ./build.sh \
    --config Release \
    --build_wheel \
    --parallel \
    --use_cuda \
    --cuda_home /usr/local/cuda-11.4 \
    --cudnn_home /usr/lib/x86_64-linux-gnu/ \
    --skip_test
# pip install ...
# other required dependencies
# pip install nvtx seaborn
```
On our machine, it takes around 20 minutes.
> to clear previous compilation, delete content of `./build` folder
```
import json
import random
from transformer_deploy.backends.ort_utils import get_keep_fp32_nodes
from transformer_deploy.backends.ort_utils import convert_fp16
import time
from typing import Callable, Dict, Optional, List
import matplotlib.pylab as plt
from onnxruntime import IOBinding
import numpy as np
import onnx
import torch
from pathlib import Path
from typing import Tuple
from onnx import GraphProto, ModelProto, helper
from torch.nn import Linear
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PretrainedConfig, T5ForConditionalGeneration, TensorType
from transformers.generation_utils import GenerationMixin
from transformers.modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput
from transformers.models.t5.modeling_t5 import T5Stack
from nvtx import nvtx
from copy import copy
from transformer_deploy.backends.ort_utils import create_model_for_provider, inference_onnx_binding
from transformer_deploy.backends.pytorch_utils import convert_to_onnx
import seaborn as sns
import operator
from collections import defaultdict
import gc
```
## Loading `Hugging Face` model / tokenizer
Below we load the model and set global variables of this notebook.
```
# Fix RNG seeds so random inputs (used later for FP16 node auditing) are reproducible.
np.random.seed(123)
torch.random.manual_seed(123)
# other possible values: t5-small, t5-base, t5-large. t5-3b should work when ORT library is fixed
model_name = "t5-large"
tokenizer = AutoTokenizer.from_pretrained(model_name)
# Tokenize a short prompt used throughout the notebook as the reference input.
input_ids: torch.Tensor = tokenizer(
"translate English to French: This model is now very fast!", return_tensors=TensorType.PYTORCH
).input_ids
# int32 on GPU — Onnx Runtime expects 32-bit ids (TODO confirm against export code).
input_ids = input_ids.type(torch.int32).to("cuda")
pytorch_model: T5ForConditionalGeneration = AutoModelForSeq2SeqLM.from_pretrained(model_name)
# Inference mode: disable dropout and move weights to GPU.
pytorch_model = pytorch_model.eval()
pytorch_model = pytorch_model.cuda()
pytorch_model.config.use_cache = True # not really needed, just to make things obvious
num_layers = pytorch_model.config.num_layers
# tolerance between Onnx FP16 and Pytorch FP32.
# Rounding errors increase with number of layers: 1e-1 for t5-small, 5e-1 for large, 3 for 3b. 11b not tested.
# Do not impact final quality
fp16_default_tolerance = 5e-1
def are_equal(a: torch.Tensor, b: torch.Tensor, atol: float = fp16_default_tolerance) -> None:
    """Assert that two tensors are element-wise equal within `atol`.

    Both tensors are detached and moved to host memory before the comparison;
    the assertion message shows both tensors when the check fails.
    """
    lhs = a.detach().cpu().numpy()
    rhs = b.detach().cpu().numpy()
    assert np.allclose(a=lhs, b=rhs, atol=atol), f"{a}\n\nVS\n\n{b}"
def save_onnx(proto: onnx.ModelProto, model_path: str) -> None:
    """Serialize an Onnx model to `model_path`.

    Protobuf cannot serialize more than 2 GiB in a single file, so when the
    model is larger than that the weights are spilled to a sibling
    "<name>.data" binary file next to the graph.
    """
    two_gib = 2 * 1024**3
    use_external_data: bool = proto.ByteSize() > two_gib
    weights_file = Path(model_path).name + ".data"
    onnx.save_model(
        proto=proto,
        f=model_path,
        save_as_external_data=use_external_data,
        all_tensors_to_one_file=True,
        location=weights_file,
    )
def prepare_folder(path: str) -> Tuple[str, str]:
    """Create `path` if needed, remove any file it contains, and return the
    paths of the FP32 and FP16 model files that will be written inside it.

    Args:
        path: folder that will hold the exported model files.

    Returns:
        Tuple of (model.onnx path, model_fp16.onnx path) inside `path`.
    """
    folder = Path(path)
    folder.mkdir(parents=True, exist_ok=True)
    # Plain loop for the side effect: the original built a throwaway list
    # with a comprehension used only for item.unlink().
    for item in folder.glob("*"):
        if item.is_file():
            item.unlink()
    return path + "/model.onnx", path + "/model_fp16.onnx"
# create/clean folders where each model will be stored.
# as multiple files will be saved for T5-3B and 11B, we use different folders for the encoder and the decoders.
encoder_model_path, encoder_fp16_model_path = prepare_folder(path="./test-enc")
dec_cache_model_path, dec_cache_fp16_model_path = prepare_folder(path="./test-dec-cache")
dec_no_cache_model_path, dec_no_cache_fp16_model_path = prepare_folder(path="./test-dec-no-cache")
# only the FP16 merged (If-node) decoder file is kept for this folder
_, dec_if_fp16_model_path = prepare_folder(path="./test-dec-if")
# some outputs to compare with
# Pytorch reference outputs: encoder hidden states and full model logits,
# used later to validate the exported Onnx graphs.
out_enc: BaseModelOutputWithPastAndCrossAttentions = pytorch_model.encoder(input_ids=input_ids)
out_full: Seq2SeqLMOutput = pytorch_model(input_ids=input_ids, decoder_input_ids=input_ids)
```
# Export to Onnx
First step is to export the model to `Onnx` graph.
`T5` is made of 2 parts, an `encoder` and a `decoder`.
## Export encoder part
The `encoder` part export doesn't imply any specific challenge.
We use export function built for `Bert` like model, exported model is in `FP16`.
```
# Make sure the model is on GPU before tracing the export.
pytorch_model = pytorch_model.to("cuda")
# Export only the encoder stack to Onnx; var_output_seq presumably marks the
# output sequence axis as dynamic — see convert_to_onnx in transformer_deploy.
convert_to_onnx(
model_pytorch=pytorch_model.encoder,
output_path=encoder_model_path,
inputs_pytorch={"input_ids": input_ids},
var_output_seq=True,
quantization=False,
)
```
## Conversion to mixed precision
### Why mixed precision?
As `T5` can have up to 11 billion parameters, it requires lots of computation, and even more important, it takes lots of space in device memory.
We convert the `encoder` to half precision.
If we blindly convert the whole graph to `FP16`, we will have 2 issues:
* `overflow`: some nodes, like exponential nodes, will try to output values out of the `FP16` range, at the end you get some `NaN`.
* `underflow`: values very close to 0 will be rounded to 0, which may be an issue for some operations like `Div` and `Log` .
### The challenge
Mixed precision is done out of the box by `Pytorch` and follow some strict rules described in https://pytorch.org/docs/stable/amp.html
Those rules are generic and quite conservative. Many nodes will be kept in `FP32` even if their output is always in the `FP16` range.
Other approaches we have found:
* `Onnx Runtime T5` [demo](https://github.com/microsoft/onnxruntime/blob/master/onnxruntime/python/tools/transformers/models/t5/t5_helper.py): provide a list of operations to keep in `FP32` (Pow, ReduceMean, Add, Sqrt, Div, Mul, Softmax, Relu). We have found this approach to need more and more tweaking on larger networks and the encoder part (the decoder part seems simpler to manage, https://github.com/microsoft/onnxruntime/issues/11119);
* `TensorRT T5` [demo](https://github.com/NVIDIA/TensorRT/tree/main/demo/HuggingFace/notebooks): provide the exact pattern of nodes to keep in `FP32`. This approach is much more effective, but imply lots of code to describe the patterns and may not generalize well, basically what works for a `base` model may not work for 11 billion parameters model. And it does not scale to other architectures without adaptations, for a library like `transformer-deploy`, it would lead to unmaintainable technical debt.
### Our approach
We have chosen an architecture agnostic approach: we inject random input sequences and audit the output of each computation graph node; finally, we make a list of all nodes that have output values out of the `FP16` range /close to zero values and perform some cleaning (to avoid unnecessary casting).
We have chosen to use random values only for the `input_ids` field as the search space is limited: positive integers lower than the vocabulary size.
You can also decide to send real data from a dataset you want to work on.
To finish, we provide the list of nodes to keep in `FP32` to the conversion function.
```
def get_random_input_encoder() -> Dict[str, torch.Tensor]:
    """Generate a random encoder input batch for FP16 node auditing.

    The sequence length is drawn uniformly in [1, 512] and the batch size is
    scaled inversely (512 // seq_len) so the total token count stays roughly
    constant across samples. Token ids are uniform over the tokenizer
    vocabulary.
    """
    max_seq = 512
    seq_len = random.randint(a=1, b=max_seq)
    batch_size = max_seq // seq_len
    random_ids = torch.randint(
        low=0,
        high=tokenizer.vocab_size,
        size=(batch_size, seq_len),
        dtype=torch.int32,
        device="cuda",
    )
    return {"input_ids": random_ids}
# Audit the graph with random inputs to find nodes whose outputs leave the
# FP16 range (or underflow to zero) — those stay in FP32.
keep_fp32_encoder = get_keep_fp32_nodes(onnx_model_path=encoder_model_path, get_input=get_random_input_encoder)
assert len(keep_fp32_encoder) > 0
# Convert everything else to FP16 and save the mixed-precision encoder.
enc_model_onnx = convert_fp16(onnx_model=encoder_model_path, nodes_to_exclude=keep_fp32_encoder)
save_onnx(proto=enc_model_onnx, model_path=encoder_fp16_model_path)
# Free host/GPU memory held by the in-memory proto before the next steps.
del enc_model_onnx
torch.cuda.empty_cache()
gc.collect()
print(f"20 first nodes to keep in FP32 (total {len(keep_fp32_encoder)}):")
keep_fp32_encoder[:20]
```
Compare the output of the `Onnx` `FP16` model with `Pytorch` one
```
# Load the FP16 encoder in Onnx Runtime on GPU and run it on the reference input.
enc_fp16_onnx = create_model_for_provider(encoder_fp16_model_path, "CUDAExecutionProvider")
enc_fp16_onnx_binding: IOBinding = enc_fp16_onnx.io_binding()
enc_onnx_out = inference_onnx_binding(
model_onnx=enc_fp16_onnx,
binding=enc_fp16_onnx_binding,
inputs={"input_ids": input_ids},
device=input_ids.device.type,
)["output"]
# Compare FP16 Onnx output against the FP32 Pytorch reference (within tolerance).
are_equal(a=enc_onnx_out, b=out_enc.last_hidden_state)
```
## Export decoder
The decoder export part is more challenging:
* we first need to wrap it in a `Pytorch` model to add the final layer so its output provides scores for each vocabulary token and can be directly used by the `Hugging Face` `decoding` algorithm
* then, we need to manipulate the `Onnx` graph to add support of `Key`/`Value` cache
The second point is the key ingredient of the observed acceleration of `Onnx` vs `Hugging Face` inference.
### Wrapper to include some post-processing on the decoder output
The post-processing is mainly a projection of the decoder output on a matrix with one of its dimensions equal to model vocabulary size, so we have scores for each possible token.
```
class ExportT5(torch.nn.Module):
    """Decoder wrapper that appends the language-model head.

    Projects the decoder hidden states onto the vocabulary so the exported
    graph directly produces token scores usable by the Hugging Face decoding
    algorithms.
    """

    def __init__(self, decoder: T5Stack, lm_head: Linear):
        super(ExportT5, self).__init__()
        self.decoder = decoder
        self.lm_head = lm_head

    def forward(self, input_ids: torch.Tensor, encoder_hidden_states: torch.Tensor, past_key_values: Tuple = None):
        decoder_out = self.decoder.forward(
            input_ids=input_ids,
            encoder_hidden_states=encoder_hidden_states,
            past_key_values=past_key_values,
        )
        # Rescale output before projecting on vocab
        scaled = decoder_out["last_hidden_state"] * (pytorch_model.model_dim**-0.5)
        decoder_out["last_hidden_state"] = self.lm_head(scaled)
        return decoder_out
# Sanity check: the wrapped decoder must reproduce the logits of the full model.
pytorch_model.cuda()
model_decoder = ExportT5(decoder=pytorch_model.decoder, lm_head=pytorch_model.lm_head).eval()
out_model_export: torch.Tensor = model_decoder(input_ids=input_ids, encoder_hidden_states=out_enc.last_hidden_state)
are_equal(a=out_model_export["last_hidden_state"], b=out_full.logits)
```
### Export decoder part to `Onnx`
Below we export 2 versions of the decoder, one without cache support and one with it.
Model inputs with past states (cache support):
```
model_decoder.cuda()
# decoder output one step before the last token: provides the past states
out_dec_pytorch = model_decoder(input_ids=input_ids[:, :-1], encoder_hidden_states=out_enc.last_hidden_state)
model_inputs = {
    "input_ids": input_ids[:, -1:].type(torch.int32),
    "encoder_hidden_states": out_enc.last_hidden_state,
    "past_key_values": out_dec_pytorch.past_key_values,
}
# per layer: one K/V pair for self-attention (decoder) and one for cross-attention (encoder)
kv_suffixes = ("decoder.key", "decoder.value", "encoder.key", "encoder.value")
input_names = ["input_ids", "encoder_hidden_states"] + [
    f"past_key_values.{layer}.{suffix}" for layer in range(num_layers) for suffix in kv_suffixes
]
output_names = ["logits"] + [
    f"present.{layer}.{suffix}" for layer in range(num_layers) for suffix in kv_suffixes
]
# mark batch and sequence dims as dynamic so the exported graph accepts any shape
dynamic_axis = {
    "input_ids": {0: "batch", 1: "encoder_sequence"},
    "encoder_hidden_states": {0: "batch", 1: "encoder_sequence"},
    "logits": {0: "batch", 1: "decoder_sequence"},
}
for layer in range(num_layers):
    dynamic_axis[f"past_key_values.{layer}.decoder.key"] = {0: "batch", 2: "past_decoder_sequence"}
    dynamic_axis[f"past_key_values.{layer}.decoder.value"] = {0: "batch", 2: "past_decoder_sequence"}
    dynamic_axis[f"past_key_values.{layer}.encoder.key"] = {0: "batch", 2: "encoder_sequence_length"}
    dynamic_axis[f"past_key_values.{layer}.encoder.value"] = {0: "batch", 2: "encoder_sequence_length"}
    dynamic_axis[f"present.{layer}.decoder.key"] = {0: "batch", 2: "decoder_sequence"}
    dynamic_axis[f"present.{layer}.decoder.value"] = {0: "batch", 2: "decoder_sequence"}
    dynamic_axis[f"present.{layer}.encoder.key"] = {0: "batch", 2: "encoder_sequence_length"}
    dynamic_axis[f"present.{layer}.encoder.value"] = {0: "batch", 2: "encoder_sequence_length"}
```
Export of the model with cache support:
```
with torch.no_grad():
    pytorch_model.config.return_dict = True
    pytorch_model.eval()
    # export can work with named args, but the dict containing the named args has to be the last element of the args tuple
    torch.onnx.export(
        model_decoder,
        (model_inputs,),
        f=dec_cache_model_path,
        input_names=input_names,
        output_names=output_names,
        dynamic_axes=dynamic_axis,
        do_constant_folding=True,
        opset_version=13,
    )
```
Export of the model computing Key/Values for the whole sequence (we basically just remove past states from the input, the `Pytorch` code will recompute them):
```
# Same export but without past states: the decoder recomputes all K/V itself.
model_inputs_no_cache = {
    "input_ids": input_ids,
    "encoder_hidden_states": out_enc.last_hidden_state,
}
with torch.no_grad():
    pytorch_model.config.return_dict = True
    pytorch_model.eval()
    # export can work with named args, but the dict containing the named args has to be the last element of the args tuple
    torch.onnx.export(
        model_decoder,
        (model_inputs_no_cache,),
        f=dec_no_cache_model_path,
        input_names=list(model_inputs_no_cache.keys()),
        output_names=output_names,
        # only the axes of inputs that actually exist in this variant
        dynamic_axes={k: v for k, v in dynamic_axis.items() if "past_key_values" not in k},
        do_constant_folding=True,
        opset_version=13,
    )
_ = pytorch_model.cpu()  # free cuda memory
torch.cuda.empty_cache()
```
## Conversion to mixed precision
Decoder module has different kinds of inputs, `input_ids` but also some float tensors.
It would be a bit more complicated to generate random values for those tensors: in theory they can take any value in the FP32 range, but because of how models are initialized and trained, most of them are close to 0.
To avoid too much guessing, we have decided to just take the output of the real model being fed with random `input_ids`.
```
def get_random_input_no_cache() -> Dict[str, torch.Tensor]:
    """Build random inputs for the no-cache decoder.

    Random `input_ids` are run through the FP16 Onnx encoder so the
    `encoder_hidden_states` fed to the decoder have a realistic distribution.
    """
    inputs = get_random_input_encoder()
    enc_out = inference_onnx_binding(
        model_onnx=enc_fp16_onnx,
        binding=enc_fp16_onnx_binding,
        inputs=inputs,
        device="cuda",
        clone_tensor=False,
    )["output"]
    # cast back to FP32: these hidden states will feed a FP32 model
    inputs["encoder_hidden_states"] = enc_out.type(torch.float32)
    return inputs
# Same FP16 conversion recipe as the encoder, applied to the no-cache decoder.
keep_fp32_no_cache = get_keep_fp32_nodes(onnx_model_path=dec_no_cache_model_path, get_input=get_random_input_no_cache)
onnx_model_no_cache_fp16 = convert_fp16(onnx_model=dec_no_cache_model_path, nodes_to_exclude=keep_fp32_no_cache)
save_onnx(proto=onnx_model_no_cache_fp16, model_path=dec_no_cache_fp16_model_path)
print(f"20 first nodes to keep in FP32 (total {len(keep_fp32_no_cache)}):")
keep_fp32_no_cache[:20]
# FP32 no-cache decoder used below to produce realistic past states
dec_no_cache_ort_model = create_model_for_provider(dec_no_cache_model_path, "CUDAExecutionProvider")
# use info from tokenizer size and max shape provided through the command line
def get_random_input_cache() -> Dict[str, torch.Tensor]:
    """Build random inputs for the cache-enabled decoder.

    Runs the no-cache decoder once to obtain realistic past key/value states,
    renames them to the `past_key_values.*` input names, and appends one extra
    token to `input_ids` (the single new token the cached decoder consumes).
    """
    inputs = get_random_input_no_cache()
    past_states = inference_onnx_binding(
        model_onnx=dec_no_cache_ort_model,
        inputs=inputs,
        device="cuda",
        clone_tensor=False,
    )
    # expose decoder outputs under the input names expected by the cache model
    for name, tensor in past_states.items():
        if name == "logits":
            continue
        inputs[name.replace("present", "past_key_values")] = tensor
    batch_size = inputs["input_ids"].shape[0]
    next_token = torch.randint(low=0, high=tokenizer.vocab_size, size=(batch_size, 1), dtype=torch.int32, device="cuda")
    inputs["input_ids"] = torch.concat(tensors=[inputs["input_ids"], next_token], dim=1)
    return inputs
# FP16 conversion of the cache-enabled decoder.
keep_fp32_cache = get_keep_fp32_nodes(onnx_model_path=dec_cache_model_path, get_input=get_random_input_cache)
del dec_no_cache_ort_model  # free cuda memory
torch.cuda.empty_cache()
gc.collect()
onnx_model_cache_fp16 = convert_fp16(onnx_model=dec_cache_model_path, nodes_to_exclude=keep_fp32_cache)
save_onnx(proto=onnx_model_cache_fp16, model_path=dec_cache_fp16_model_path)
print(f"20 first nodes to keep in FP32 (total {len(keep_fp32_cache)}):")
keep_fp32_cache[:20]
```
## Merge `Onnx` computation graph to deduplicate weights
Finally, we will merge the 2 decoders together.
The idea is simple:
* we prefix the node / edge names of one of them to avoid naming collision
* we deduplicate the weights (the same weight matrix will have different names in the 2 models)
* we join the 2 computation graphs through an `If` node
* we generate the `Onnx` file
The new model will take a new input, `enable_cache`. When it contains a `True` value, computation graph with cache support is used.
> code below is written to be easy to read, but could be made much faster to run
```
prefix = "cache_node_"
# maps a cache-model initializer name -> the name to use in the merged model
mapping_initializer_cache_to_no_cache = dict()
# search for not-duplicated weights, called initializer in Onnx
to_add = list()
for node_cache in onnx_model_cache_fp16.graph.initializer:
    found = False
    for node_no_cache in onnx_model_no_cache_fp16.graph.initializer:
        # byte-wise comparison of the raw weights identifies shared matrices
        if node_cache.raw_data == node_no_cache.raw_data:
            found = True
            mapping_initializer_cache_to_no_cache[node_cache.name] = node_no_cache.name
            break
    if not found:
        # weight only used by the cache model: prefix it and add it to the merged graph
        node_cache.name = prefix + node_cache.name
        to_add.append(node_cache)
        mapping_initializer_cache_to_no_cache[node_cache.name] = node_cache.name
onnx_model_no_cache_fp16.graph.initializer.extend(to_add)
# I/O model names should not be prefixed
model_io_names = [n.name for n in list(onnx_model_cache_fp16.graph.input) + list(onnx_model_cache_fp16.graph.output)]
# replace pointers to duplicated weights to their deduplicated version
for node in onnx_model_cache_fp16.graph.node:
    for index, input_name in enumerate(node.input):
        if input_name in model_io_names:
            continue
        # either a deduplicated initializer name, or a prefixed intermediate edge
        node.input[index] = mapping_initializer_cache_to_no_cache.get(input_name, prefix + input_name)
    for index, output_name in enumerate(node.output):
        if output_name in model_io_names:
            continue
        node.output[index] = prefix + output_name
    node.name = prefix + node.name
# NOTE(review): recomputed here although identical to the list built above — kept as-is
model_io_names = [n.name for n in list(onnx_model_cache_fp16.graph.input) + list(onnx_model_cache_fp16.graph.output)]
# prefix nodes to avoid naming collision
prefix = "init_"
cache = dict()
for node in onnx_model_no_cache_fp16.graph.initializer:
    # an initializer sharing a model I/O name would clash once graphs are merged
    if node.name in model_io_names:
        new_name = prefix + node.name
        cache[node.name] = new_name
        node.name = new_name
for node in onnx_model_no_cache_fp16.graph.node:
    for input_index, n in enumerate(node.input):
        node.input[input_index] = cache.get(n, n)
# mandatory for subgraph in if/else node
assert len(onnx_model_cache_fp16.graph.output) == len(
    onnx_model_no_cache_fp16.graph.output
), f"{len(onnx_model_cache_fp16.graph.output)} vs {len(onnx_model_no_cache_fp16.graph.output)}"
# build a computation graph with cache support
# (empty inputs/initializer: subgraphs of an If node capture them from the outer graph)
graph_cache: onnx.GraphProto = onnx.helper.make_graph(
    nodes=list(onnx_model_cache_fp16.graph.node),
    name="graph-cache",
    inputs=[],
    outputs=list(onnx_model_cache_fp16.graph.output),
    initializer=[],
)
# build a computation which doesn't need past states to run
graph_no_cache: onnx.GraphProto = onnx.helper.make_graph(
    nodes=list(onnx_model_no_cache_fp16.graph.node),
    name="graph-no-cache",
    inputs=[],
    outputs=list(onnx_model_no_cache_fp16.graph.output),
    initializer=[],
)
# a new input to decide if we use past state or not
enable_cache_input = onnx.helper.make_tensor_value_info(name="enable_cache", elem_type=onnx.TensorProto.BOOL, shape=[1])
if_node = onnx.helper.make_node(
    op_type="If",
    inputs=["enable_cache"],
    outputs=[o.name for o in list(onnx_model_no_cache_fp16.graph.output)],
    then_branch=graph_cache,
    else_branch=graph_no_cache,
)
# final model which can disable its cache
if_graph_def: GraphProto = helper.make_graph(
    nodes=[if_node],
    name="if-model",
    inputs=list(onnx_model_cache_fp16.graph.input) + [enable_cache_input],
    outputs=list(onnx_model_no_cache_fp16.graph.output),
    initializer=list(onnx_model_no_cache_fp16.graph.initializer),
)
# serialization and cleaning
model_if: ModelProto = helper.make_model(
    if_graph_def, producer_name="onnx-example", opset_imports=[helper.make_opsetid(onnx.defs.ONNX_DOMAIN, 13)]
)
save_onnx(proto=model_if, model_path=dec_if_fp16_model_path)
del model_if
torch.cuda.empty_cache()
gc.collect()
```
### Check `Onnx` decoder output
Compare `Onnx` output with and without cache, plus compare with `Pytorch` output.
```
# Move everything back to GPU in eval mode and load the merged If-decoder.
pytorch_model = pytorch_model.cuda()
model_decoder = model_decoder.cuda()
input_ids = input_ids.cuda()
pytorch_model = pytorch_model.eval()
model_decoder = model_decoder.eval()
dec_onnx = create_model_for_provider(dec_if_fp16_model_path, "CUDAExecutionProvider", log_severity=3)
dec_onnx_binding: IOBinding = dec_onnx.io_binding()
```
## Zero copy output
Below, we check that the new model output is similar to the ones from `Pytorch`.
We use our new implementation of inference call.
The idea is the following:
* we ask `Onnx Runtime` to output a pointer to the `CUDA` array containing the result of the inference;
* we use `Cupy` API to wrap the array and provide information regarding tensor shape and type. `Cupy` doesn't own the data;
* we use `Dlpack` support to convert the `Cupy` tensor to `Pytorch`, another zero copy process.
This pipeline is unsafe, as the content of the tensor may change or disappear silently: only `Onnx Runtime` has the control of the array containing the data. It will happen at the next inference call. Because we know that during the text generation we discard each output before recalling `Onnx Runtime`, it works well in our case.
A second benefit of this approach is that we do not have anymore to guess the output shape.
Before using this approach, to avoid the output being stored on host memory (RAM), which made inference slower, we had to provide `Onnx Runtime` with a pointer to a `Pytorch` tensor of the right size. As the size changes with the sequence length (so it changes for each generated token), we had to store the logic to guess the size somewhere in the code. The new approach frees us from this burden.
```
# Reference FP16 Pytorch outputs: encoder states, decoder output at step n-1
# (to feed the cache path) and decoder output on the full sequence.
pytorch_model = pytorch_model.half()
with torch.inference_mode():
    out_enc_pytorch: BaseModelOutputWithPastAndCrossAttentions = pytorch_model.encoder(input_ids=input_ids)
    previous_step_pytorch: BaseModelOutputWithPastAndCrossAttentions = model_decoder(
        input_ids=input_ids[:, :-1], encoder_hidden_states=out_enc_pytorch.last_hidden_state
    )
    out_dec_pytorch: BaseModelOutputWithPastAndCrossAttentions = model_decoder(
        input_ids=input_ids, encoder_hidden_states=out_enc_pytorch.last_hidden_state
    )
def decoder_pytorch_inference(decoder_input_ids: torch.Tensor, encoder_hidden_states: torch.Tensor, **_):
    """Run the Pytorch decoder wrapper (baseline for comparison); extra kwargs are ignored."""
    with torch.inference_mode():
        return model_decoder(input_ids=decoder_input_ids, encoder_hidden_states=encoder_hidden_states)
def decoder_onnx_inference(
    decoder_input_ids: torch.Tensor,
    encoder_hidden_states: torch.Tensor,
    enable_cache: torch.Tensor,
    past_key_values: Optional[torch.Tensor],
):
    """Run the merged Onnx decoder and repack outputs like a Hugging Face decoder.

    `enable_cache` selects the If-node branch; when `past_key_values` is given,
    its per-layer 4-tuples are flattened into the flat Onnx input names.
    """
    onnx_inputs = {
        "input_ids": decoder_input_ids,
        "encoder_hidden_states": encoder_hidden_states,
        "enable_cache": enable_cache,
    }
    if past_key_values is not None:
        for layer, (k_dec, v_dec, k_enc, v_enc) in enumerate(past_key_values):
            onnx_inputs[f"past_key_values.{layer}.decoder.key"] = k_dec
            onnx_inputs[f"past_key_values.{layer}.decoder.value"] = v_dec
            onnx_inputs[f"past_key_values.{layer}.encoder.key"] = k_enc
            onnx_inputs[f"past_key_values.{layer}.encoder.value"] = v_enc
    outputs = inference_onnx_binding(
        model_onnx=dec_onnx,
        inputs=onnx_inputs,
        binding=dec_onnx_binding,  # recycle the binding
        device=decoder_input_ids.device.type,
        clone_tensor=False,  # no memory copy -> best perf and lowest memory footprint!
    )
    # regroup the flat present.* outputs into per-layer 4-tuples
    past_states = [
        (
            outputs[f"present.{layer}.decoder.key"],
            outputs[f"present.{layer}.decoder.value"],
            outputs[f"present.{layer}.encoder.key"],
            outputs[f"present.{layer}.encoder.value"],
        )
        for layer in range(pytorch_model.config.num_layers)
    ]
    return BaseModelOutputWithPastAndCrossAttentions(
        last_hidden_state=outputs["logits"],
        past_key_values=past_states,
    )
# No-cache branch: logits of the last position must match Pytorch.
out_dec_onnx_no_cache = decoder_onnx_inference(
    decoder_input_ids=input_ids,
    encoder_hidden_states=out_enc_pytorch.last_hidden_state,
    enable_cache=torch.tensor([False], device="cuda", dtype=torch.bool),
    past_key_values=None,
)
are_equal(a=out_dec_onnx_no_cache.last_hidden_state[:, -1:, :], b=out_dec_pytorch.last_hidden_state[:, -1:, :])
# check that past states are identical between Onnx and Pytorch
assert len(out_dec_onnx_no_cache.past_key_values) == len(out_dec_pytorch.past_key_values)
for (o_dec_k, o_dev_v, o_enc_k, o_enc_v), (p_dec_k, p_dev_v, p_enc_k, p_enc_v) in zip(
    out_dec_onnx_no_cache.past_key_values, out_dec_pytorch.past_key_values
):
    are_equal(a=o_dec_k, b=p_dec_k)
    are_equal(a=o_dev_v, b=p_dev_v)
    are_equal(a=o_enc_k, b=p_enc_k)
    are_equal(a=o_enc_v, b=p_enc_v)
# Cache branch: feed only the last token + past states from the previous step.
out_dec_onnx_cache = decoder_onnx_inference(
    decoder_input_ids=input_ids[:, -1:],
    encoder_hidden_states=out_enc_pytorch.last_hidden_state,
    enable_cache=torch.tensor([True], device="cuda", dtype=torch.bool),
    past_key_values=previous_step_pytorch.past_key_values,
)
are_equal(a=out_dec_onnx_cache.last_hidden_state[:, -1:, :], b=out_dec_pytorch.last_hidden_state[:, -1:, :])
# check that past states are identical between Onnx and Pytorch
assert len(out_dec_onnx_cache.past_key_values) == len(out_dec_pytorch.past_key_values)
for (o_dec_k, o_dev_v, o_enc_k, o_enc_v), (p_dec_k, p_dev_v, p_enc_k, p_enc_v) in zip(
    out_dec_onnx_cache.past_key_values, out_dec_pytorch.past_key_values
):
    are_equal(a=o_dec_k, b=p_dec_k)
    are_equal(a=o_dev_v, b=p_dev_v)
    are_equal(a=o_enc_k, b=p_enc_k)
    are_equal(a=o_enc_v, b=p_enc_v)
```
## Benchmarks!
Finally, we will compare the performance of 4 setups in end-to-end scenarios:
* `Pytorch`
* `Pytorch` + cache
* `Onnx`
* `Onnx` + cache
For the comparison, we first do a sanity check by just generating a short sequence (we already have checked that output tensors are OK).
Then we force each model to generate:
* 256 tokens + batch size 1 (similar to `TensorRT` demo)
* 1000 tokens + batch size 4
```
def encoder_onnx_inference(input_ids: torch.Tensor, **_) -> BaseModelOutputWithPastAndCrossAttentions:
    """Run the FP16 Onnx encoder and wrap its output like a Hugging Face encoder."""
    last_hidden_state = inference_onnx_binding(
        model_onnx=enc_fp16_onnx,  # noqa: F821
        inputs={"input_ids": input_ids},
        device=input_ids.device.type,
        binding=enc_fp16_onnx_binding,
    )["output"]
    return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=last_hidden_state.type(torch.float16))
def encoder_pytorch_inference(input_ids, **_) -> BaseModelOutputWithPastAndCrossAttentions:
    """Run the Pytorch encoder and cast its hidden states to FP16.

    Fix: the original called `.type()` on the encoder's ModelOutput container,
    which is a dict-like object without a `.type()` method; the cast must be
    applied to the `last_hidden_state` tensor instead.
    """
    with torch.inference_mode():
        res = pytorch_model.encoder(input_ids=input_ids)
    return BaseModelOutputWithPastAndCrossAttentions(
        last_hidden_state=res.last_hidden_state.type(torch.float16)
    )
# https://github.com/NVIDIA/TensorRT/blob/main/demo/HuggingFace/T5/export.py
class ExtT5(torch.nn.Module, GenerationMixin):
    """Minimal seq2seq model exposing the GenerationMixin API while delegating
    encoder/decoder computation to injected callables (Pytorch or Onnx backed).
    """

    def __init__(self, config: PretrainedConfig, device: torch.device, encoder_func: Callable, decoder_func: Callable):
        super(ExtT5, self).__init__()
        self.main_input_name = "input_ids"  # https://github.com/huggingface/transformers/pull/14803
        self.config: PretrainedConfig = config
        self.device: torch.device = device
        self.encoder_func = encoder_func
        self.decoder_func = decoder_func
        # when False, the no-cache If branch is used for every step
        self.use_cache = True
        # per-token forward latencies of the current generate() call
        self.timings = list()
    def get_encoder(self):
        """Return the encoder callable (used by GenerationMixin)."""
        return self.encoder_func
    def get_decoder(self):
        """Return the decoder callable (used by GenerationMixin)."""
        return self.decoder_func
    def set_cache(self, enable: bool) -> None:
        """Toggle the K/V cache branch for subsequent generation calls."""
        self.use_cache = enable
    # from transformers library (modeling_t5.py)
    def _reorder_cache(self, past, beam_idx):
        """Reorder cached states to follow beam search reordering."""
        reordered_decoder_past = ()
        for layer_past_states in past:
            # get the correct batch idx from layer past batch dim
            # batch dim of `past` is at 2nd position
            reordered_layer_past_states = ()
            for layer_past_state in layer_past_states:
                # need to set correct `past` for each of the four key / value states
                reordered_layer_past_states = reordered_layer_past_states + (
                    layer_past_state.index_select(0, beam_idx),
                )
            assert reordered_layer_past_states[0].shape == layer_past_states[0].shape
            assert len(reordered_layer_past_states) == len(layer_past_states)
            reordered_decoder_past = reordered_decoder_past + (reordered_layer_past_states,)
        return reordered_decoder_past
    def prepare_inputs_for_generation(self, input_ids, past=None, use_cache=None, **kwargs) -> Dict[str, torch.Tensor]:
        """Build forward() inputs for the next decoding step, selecting the cache branch."""
        params = {
            "encoder_hidden_states": kwargs["encoder_outputs"]["last_hidden_state"],
        }
        if past is None:  # this is the 1st inferred token
            self.timings = list()
        if not self.use_cache:
            past = None
        if past is None:
            # no past states: run the full-sequence (no cache) branch
            params[self.main_input_name] = input_ids
            params["enable_cache"] = torch.tensor([False], device="cuda", dtype=torch.bool)
        else:
            # cache branch only needs the newest token
            params[self.main_input_name] = input_ids[:, -1:]
            params["enable_cache"] = torch.tensor([True], device="cuda", dtype=torch.bool)
            params["past_key_values"] = past
        return params
    def forward(
        self,
        input_ids: torch.Tensor,
        encoder_hidden_states: torch.Tensor,
        enable_cache: torch.Tensor,
        past_key_values: Optional[torch.Tensor] = None,
        **_,
    ):
        """Run one decoding step through the injected decoder and time it."""
        start_timer = time.monotonic()
        dec_output = self.get_decoder()(
            decoder_input_ids=input_ids,
            encoder_hidden_states=encoder_hidden_states,
            enable_cache=enable_cache,
            past_key_values=past_key_values,
        )
        self.timings.append(time.monotonic() - start_timer)
        return Seq2SeqLMOutput(logits=dec_output.last_hidden_state, past_key_values=dec_output.past_key_values)
# Build the Onnx-backed generator (swap the commented funcs for a Pytorch backend).
model_gen = (
    ExtT5(
        config=pytorch_model.config,
        device=pytorch_model.device,
        encoder_func=encoder_onnx_inference,  # encoder_pytorch_inference
        decoder_func=decoder_onnx_inference,  # decoder_pytorch_inference
    )
    .cuda()
    .eval()
)
torch.cuda.synchronize()
# Sanity check: both backends should produce the same short beam-search output.
with torch.inference_mode():
    print("Onnx:")
    print(
        tokenizer.decode(
            model_gen.generate(
                inputs=input_ids,
                min_length=3,
                max_length=60,
                num_beams=4,
                no_repeat_ngram_size=2,
            )[0],
            skip_special_tokens=True,
        )
    )
    print("Pytorch:")
    print(
        tokenizer.decode(
            pytorch_model.generate(
                input_ids=input_ids,
                min_length=3,
                max_length=60,
                num_beams=4,
                no_repeat_ngram_size=2,
            )[0],
            skip_special_tokens=True,
        )
    )
def print_timings(name: str, total: float, inference: float):
    """Print total wall time (s) and the share of it spent inside inference calls."""
    share = 100 * inference / total
    print(f"{name}: {total:.1f}, including inference: {inference:.1f} ({share:.1f}%)")
# maps "seq_len / num_beam" -> {setup name -> per-token timings}
all_timings: Dict[str, Dict[str, List[float]]] = dict()
for seq_len, num_beam in [(256, 1), (1000, 4)]:
    timings = dict()
    print(f"seq len: {seq_len} / # beam (batch size): {num_beam}")
    task = "Onnx"
    with nvtx.annotate(
        task, color="red"
    ):  # nvtx is for Nvidia nsight profiler, you can remove the line or install the library
        model_gen.set_cache(enable=False)
        # warmup
        model_gen.generate(inputs=input_ids, max_length=10, num_beams=num_beam, min_length=10)
        start = time.monotonic()
        model_gen.generate(inputs=input_ids, max_length=seq_len, num_beams=num_beam, min_length=seq_len)
        total_time = time.monotonic() - start
        print_timings(name=task, total=total_time, inference=sum(model_gen.timings))
        timings[f"{task}"] = model_gen.timings
    task = "Onnx + cache"
    with nvtx.annotate(task, color="red"):
        model_gen.set_cache(enable=True)
        # warmup
        model_gen.generate(inputs=input_ids, max_length=10, num_beams=num_beam, min_length=10)
        start = time.monotonic()
        model_gen.generate(inputs=input_ids, max_length=seq_len, num_beams=num_beam, min_length=seq_len)
        total_time = time.monotonic() - start
        print_timings(name=task, total=total_time, inference=sum(model_gen.timings))
        timings[f"{task}"] = model_gen.timings
    # monkey patching of the forward function to add a timer per generated token
    old_fw = pytorch_model.forward
    timing_pytorch = list()
def new_fw(self, *args, **kwargs):
timer_start = time.monotonic()
res = old_fw(self, *args, **kwargs)
torch.cuda.synchronize() # makes timings correct without having significant impact on e2e latency
total_time = time.monotonic() - timer_start
timing_pytorch.append(total_time)
return res
    task = "Pytorch"
    with nvtx.annotate(task, color="orange"):
        pytorch_model.config.use_cache = False
        with torch.inference_mode():
            with torch.cuda.amp.autocast():
                # warmup
                pytorch_model.generate(inputs=input_ids, max_length=10, num_beams=num_beam, min_length=10)
                # install the timing wrapper only for the measured run
                pytorch_model.forward = new_fw.__get__(pytorch_model)
                start = time.monotonic()
                pytorch_model.generate(inputs=input_ids, max_length=seq_len, num_beams=num_beam, min_length=seq_len)
                total_time = time.monotonic() - start
                pytorch_model.forward = old_fw
    inference_time = np.sum(timing_pytorch)
    print_timings(name="Pytorch", total=total_time, inference=inference_time)
    timing_pytorch_no_cache = copy(timing_pytorch)
    timings[f"{task}"] = copy(timing_pytorch)
    timing_pytorch.clear()
    torch.cuda.empty_cache()
    task = "Pytorch + cache"
    with nvtx.annotate("Pytorch + cache", color="green"):
        pytorch_model.config.use_cache = True
        with torch.inference_mode():
            with torch.cuda.amp.autocast():
                # warmup
                pytorch_model.generate(inputs=input_ids, max_length=10, num_beams=num_beam, min_length=10)
                pytorch_model.forward = new_fw.__get__(pytorch_model)
                start = time.monotonic()
                pytorch_model.generate(inputs=input_ids, max_length=seq_len, num_beams=num_beam, min_length=seq_len)
                total_time = time.monotonic() - start
                pytorch_model.forward = old_fw
    print_timings(name="Pytorch + cache", total=total_time, inference=sum(timing_pytorch))
    timings[f"{task}"] = copy(timing_pytorch)
    timing_pytorch.clear()
    all_timings[f"{seq_len} / {num_beam}"] = timings
    torch.cuda.empty_cache()
```
## Benchmark analysis
Below, we plot for each setup (short and long sequence):
* the time spent on each token generation
* the full time to generate the sequence (for each length)
We can see that for short sequence and batch size of 1, cache or not, latency appears to be stable.
However, for longer sequences, we can see that the no-cache approach (whether `Pytorch` or `Onnx` based) doesn't scale well, and at some point, `Onnx` is even slower than the `Hugging Face` code with cache support.
On the other side, `Onnx` timings are mostly stable whatever the sequence length which is quite remarkable.
It's because we are working one token at a time and converted a quadratic complexity in the attention layer into a linear one.
```
sns.set_style("darkgrid")  # darkgrid, whitegrid, dark, white and ticks
plt.rc("axes", titlesize=15)  # fontsize of the axes title
plt.rc("axes", labelsize=14)  # fontsize of the x and y labels
plt.rc("xtick", labelsize=13)  # fontsize of the tick labels
plt.rc("ytick", labelsize=13)  # fontsize of the tick labels
plt.rc("legend", fontsize=15)  # legend fontsize
plt.rc("font", size=13)  # controls default text sizes
colors = sns.color_palette("deep")
fig = plt.figure(constrained_layout=True, figsize=(12, 8))
# one sub-figure per benchmark setup (rows), per-token vs cumulative (cols)
subfigs = fig.subfigures(nrows=2, ncols=1)
fig.supxlabel("seq len (# tokens)")
fig.supylabel("latency (s)")
fig.suptitle(f"Small seq len and greedy search on {model_name} don't tell the whole (inference) story...")
for row, (plot_name, timings) in enumerate(all_timings.items()):
    subfigs[row].suptitle(f"setup #{1+row}: {plot_name} (seq len / beam search)")
    axs = subfigs[row].subplots(nrows=1, ncols=2)
    for col, accumulated in enumerate([False, True]):
        plot_axis = axs[col]
        for index, (k, v) in enumerate(timings.items()):
            axis = range(len(v))
            color = colors[index]  # NOTE(review): assigned but not passed to scatter()
            v = np.array(v)
            # remove extreme values (clip everything above the 99th percentile)
            p99 = np.percentile(v, 99)
            v[v > p99] = p99
            v = np.cumsum(v) if accumulated else v
            plot_axis.scatter(axis, v, label=k, s=2)
        title = f"latency for the full sequence" if accumulated else f"latency for each token"
        plot_axis.title.set_text(title)
# legend deduplication
handles, labels = plt.gca().get_legend_handles_labels()
by_label = dict(zip(labels, handles))
fig.legend(by_label.values(), by_label.keys(), bbox_to_anchor=(1, 1), loc="upper left", markerscale=5)
plt.show()
```
## Profiling model at the kernel level
Below we reload the decoder model with `Onnx Runtime` kernel profiling enabled.
It will help us to understand on which part of the computation graph the GPU spends its time.
The number of events that `Onnx Runtime` can save is limited to [1 million](https://github.com/microsoft/onnxruntime/blob/a4b5fa334aa939fb159bdc571ed3d56ca8d31fc7/onnxruntime/core/common/profiler.cc#L10).
It is not an issue as we have seen that timings per token are mostly stable, so having information for only the first n tokens doesn't change anything.
The main information it gives us is that 30% of the time is spent on matrix multiplication when caching is used.
The rest of the time is spent on mostly memory bound operations:
* element-wise operations which require little computation (`add`, `mul`, `div`, etc.)
* copy pasting tensors `GPU` <-> `GPU` with little transformation in between (`transpose`, `concat`, `cast`, etc.)
It matches the information provided by both `nvidia-smi` and `Nvidia Nsight` (the GPU profiler from Nvidia): the GPU is under utilized.
That's why we think that a tool like `TensorRT` which will perform aggressive kernel fusion, reducing time spent on memory bounded operations, should be a good fit for autoregressive models.
> there is a nice opportunity to increase the speedup by reducing the number of casting operations. We keep this work for the future.
```
# Reload the decoder with Onnx Runtime kernel profiling enabled, run a short
# generation, then aggregate time spent per operator type from the trace file.
dec_onnx = create_model_for_provider(
    dec_if_fp16_model_path, "CUDAExecutionProvider", enable_profiling=True, log_severity=3
)
dec_onnx_binding: IOBinding = dec_onnx.io_binding()
_ = model_gen.generate(inputs=input_ids, max_length=10, num_beams=4, min_length=10)
profile_name = dec_onnx.end_profiling()
with open(profile_name) as f:
    content = json.load(f)
op_timings = defaultdict(lambda: 0)
for c in content:
    if "op_name" not in c["args"]:
        continue
    op_name = c["args"]["op_name"]
    if op_name == "If":
        continue  # subgraph
    time_taken = c["dur"]
    op_timings[op_name] += time_taken
# keep the 10 most expensive operators and express them as % of kernel time
op_timings_filter = dict(sorted(op_timings.items(), key=operator.itemgetter(1), reverse=True)[:10])
total_kernel_timing = sum(op_timings.values())
op_timings_percent = {k: 100 * v / total_kernel_timing for k, v in op_timings_filter.items()}
plt.barh(list(op_timings_percent.keys()), list(op_timings_percent.values()))
plt.title("Time spent per kernel\n(top 10 kernels)")
plt.xlabel("% total inference time")
plt.show()
```
| github_jupyter |
```
!wget -q https://github.com/CISC-372/Notebook/releases/download/a4/test.csv
!wget -q https://github.com/CISC-372/Notebook/releases/download/a4/train.csv
# comment your understanding of each function
import pandas as pd
import csv
# read_csv: parse a CSV file into a DataFrame; index_col='id' makes that column the row index
xy_train_df = pd.read_csv('train.csv')
x_test_df = pd.read_csv('test.csv', index_col='id')
# apply(..., axis=1): row-wise lambda computing the character length of each review
xy_train_df['length'] = xy_train_df.apply(lambda x: len(x.review), axis=1)
# sort_values: order training rows by review length (shortest first)
xy_train_df = xy_train_df.sort_values('length')
xy_train_df
# comment your understanding of each function and each parameter below:
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from sklearn.model_selection import train_test_split
# keep only the 10k most frequent words; pad/truncate reviews to 256 tokens
vocab_size = 10000
max_len = 256
# train_test_split: random 80/20 split into train and validation sets
xy_train, xy_validation = train_test_split(
    xy_train_df, test_size=0.2)
# build vocabulary from training set
tokenizer = Tokenizer(num_words=vocab_size)
tokenizer.fit_on_texts(xy_train.review)
def _preprocess(texts):
    """Convert raw texts to fixed-length token-id arrays (post-padded to max_len)."""
    sequences = tokenizer.texts_to_sequences(texts)
    return pad_sequences(sequences, maxlen=max_len, padding='post')
# Tokenize/pad each split; labels are the raw rating column.
x_train = _preprocess(xy_train.review)
y_train = xy_train.rating
x_valid = _preprocess(xy_validation.review)
y_valid = xy_validation.rating
x_test = _preprocess(x_test_df.review)
# each shape is (num_examples, max_len)
print(x_train.shape)
print(x_valid.shape)
print(x_test.shape)
from __future__ import absolute_import, division, print_function, unicode_literals
import collections
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow import keras
import tensorflow as tf
from tensorflow.keras.optimizers import Adam
# comment your understanding of each line and
# the output shape of each line below. for each dimensionality, explains its
# meaning. (e.g. None is the batch size)
x = keras.Input((max_len))  # (None, 256): batch of padded token-id sequences
embeded = keras.layers.Embedding(vocab_size, 20)(x)  # (None, 256, 20): 20-dim vector per token
averaged = tf.reduce_mean(embeded, axis=1)  # (None, 20): mean over the sequence axis (bag of words)
pred = keras.layers.Dense(1, activation=tf.nn.sigmoid)(averaged)  # (None, 1): probability of the positive class
model = keras.Model(x, pred)
model.compile(
    optimizer=Adam(clipnorm=4.),
    loss='binary_crossentropy',
    metrics=['accuracy'])
history = model.fit(x_train,
                    y_train,
                    epochs=5,
                    batch_size=64,
                    validation_data=(x_valid, y_valid),
                    verbose=1)
model.evaluate(x_valid, y_valid)
def predict_class(_dataset):
    """Binarize sigmoid model outputs into 0/1 labels using a 0.5 threshold."""
    probabilities = model.predict(_dataset)
    return np.squeeze((probabilities > 0.5) * 1)
# Validation F1 score and Kaggle-style submission file.
y_predict = predict_class(x_valid)
from sklearn.metrics import f1_score
from sklearn.metrics import confusion_matrix
print(f1_score(y_valid, y_predict, average='micro'))
# submission
pd.DataFrame(
    {'id': x_test_df.index,
     'rating': predict_class(x_test)}).to_csv('sample_submission.csv', index=False)
```
| github_jupyter |
<a href="https://colab.research.google.com/github/anshradh/trl_custom/blob/test/04_writing_prompt_supervised_baseline_training.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
#Writing Prompt Response Supervised Learning Baseline
We fine-tune a language model to respond to reddit writing prompts using standard supervised learning. This is also known as behavioral cloning or imitation learning in RL.
## Prerequisites
```
## Install needed libraries and log into huggingface
!pip install datasets
!pip install transformers
!pip install accelerate
!pip install huggingface_hub
!apt install git-lfs
from huggingface_hub import notebook_login
notebook_login()
import torch
from tqdm.auto import tqdm
import numpy as np
from torch.utils.data import DataLoader
import pandas as pd
import numpy as np
import torch.nn.functional as F
from torch.optim import Adam
import torch
import collections
import random
tqdm.pandas()
from datasets import load_dataset, ClassLabel, load_metric, concatenate_datasets
from transformers import AutoModel, AutoTokenizer
from transformers import top_k_top_p_filtering
from torch import nn
from torch.nn import Identity
import torch.nn.functional as F
import torch
from transformers import AutoModelForCausalLM, DataCollatorWithPadding, AdamW, get_scheduler
from accelerate import Accelerator
```
## Load and preprocess dataset
```
## Load dataset from huggingface
# First 80% of the prompt/response CSV; the remainder is held back.
prompt_response_dataset = load_dataset("rewardsignal/reddit_writing_prompts", data_files="prompt_responses_full.csv", split='train[:80%]')
## We tokenize the dataset and standardize the prompts and responses
# tokenizer_name = input()
tokenizer_name = 'distilgpt2'
tokenizer = AutoTokenizer.from_pretrained(tokenizer_name, use_fast=True)
# Prefixes used to rewrite the raw reddit text into a consistent format.
prompt_prefix = "Writing Prompt: "
response_prefix = "Response: "
def preprocess_text_function(examples):
    """Standardise the prompt/response text and tokenize them as sentence pairs."""
    # Replace reddit's '[WP] ' tag with our prompt prefix; prepend the response prefix.
    prompts = [p.replace('[WP] ', prompt_prefix) for p in examples["prompt"]]
    responses = [response_prefix + r for r in examples["response"]]
    examples["prompt"] = prompts
    examples["response"] = responses
    return tokenizer(prompts, responses, truncation=True)
# Tokenize prompt/response pairs in parallel and drop every raw CSV column,
# keeping only the tokenizer outputs.
tokenized_prompt_response_dataset = prompt_response_dataset.map(preprocess_text_function, batched=True, remove_columns=['Unnamed: 0', 'prompt_id', 'prompt', 'prompt_score', 'prompt_created_utc', 'response_id', 'response', 'response_score', 'response_created_utc', 'num_responses', 'response_children', 'score_bin', 'response_rank'], num_proc=4)
## We group the prompts and responses together into continuous text blocks to simplify training
# Number of tokens per training block.
block_size = 512
def group_texts(examples, block_size=512):
    """Concatenate tokenized examples and re-chunk them into fixed-size blocks.

    Args:
        examples: dict mapping feature names (e.g. 'input_ids') to lists of
            token-id lists, as produced by the tokenizer in batched mode.
        block_size: length of every output chunk. Defaults to 512, matching
            the module-level `block_size` setting, so existing callers are
            unaffected; the parameter makes the function self-contained.

    Returns:
        A dict with the same keys, each holding chunks of exactly
        `block_size` tokens, plus a 'labels' key copying 'input_ids'
        (causal LM training predicts the input shifted by one).
    """
    # Concatenate all texts, feature by feature.
    concatenated = {k: sum(examples[k], []) for k in examples.keys()}
    total_length = len(concatenated[list(examples.keys())[0]])
    # Drop the trailing remainder so every chunk is exactly block_size long;
    # padding would be the alternative if the model supported it.
    total_length = (total_length // block_size) * block_size
    # Split each concatenated feature into block_size-sized chunks.
    result = {
        k: [t[i : i + block_size] for i in range(0, total_length, block_size)]
        for k, t in concatenated.items()
    }
    # For causal LM training the labels are the inputs themselves.
    result["labels"] = result["input_ids"].copy()
    return result
# Re-chunk the tokenized rows into contiguous 512-token blocks (adds 'labels').
tokenized_prompt_response_dataset = tokenized_prompt_response_dataset.map(group_texts, batched=True, batch_size = 1000, num_proc = 4)
# Return PyTorch tensors when rows are accessed.
tokenized_prompt_response_dataset.set_format("torch")
```
## Prepare for training
```
## Split into training and evaluation datasets
# Both selects shuffle with the same seed, so they index one and the same
# shuffled ordering: the first 80% and the remaining 20% are disjoint.
supervised_train_dataset = tokenized_prompt_response_dataset.shuffle(seed=42).select(range(4*len(tokenized_prompt_response_dataset)//5))
supervised_eval_dataset = tokenized_prompt_response_dataset.shuffle(seed=42).select(range(4*len(tokenized_prompt_response_dataset)//5, len(tokenized_prompt_response_dataset)))
## We prepare for supervised fine-tuning on the dataset
# supervised_model_name = input()
supervised_model_name = 'distilgpt2'
# GPT-2 style tokenizers ship with no pad token; reuse EOS for padding.
tokenizer.pad_token = tokenizer.eos_token
# NOTE(review): group_texts made every row exactly block_size long, so this
# collator should never actually need to pad.
data_collator = DataCollatorWithPadding(tokenizer=tokenizer)
# NOTE(review): num_labels configures a classification head; a causal LM has
# none — presumably harmless, but confirm it can be dropped.
supervised_model = AutoModelForCausalLM.from_pretrained(supervised_model_name, num_labels=2)
train_dataloader = DataLoader(
    supervised_train_dataset, shuffle=True, batch_size=4, collate_fn=data_collator
)
eval_dataloader = DataLoader(
    supervised_eval_dataset, batch_size=4, collate_fn=data_collator
)
optimizer = AdamW(supervised_model.parameters(), lr=3e-5)
# Accelerator wraps model/optimizer/loaders for whatever hardware is available.
accelerator = Accelerator()
train_dataloader, eval_dataloader, supervised_model, optimizer = accelerator.prepare(train_dataloader, eval_dataloader, supervised_model, optimizer)
num_epochs = 1
num_training_steps = num_epochs * len(train_dataloader)
# Linear decay from the initial lr to 0, with no warmup steps.
lr_scheduler = get_scheduler(
    "linear",
    optimizer=optimizer,
    num_warmup_steps=0,
    num_training_steps=num_training_steps,
)
progress_bar = tqdm(range(num_training_steps))
```
## Training Loop
```
## Training loop for fine-tuning
supervised_model.train()
for epoch in range(num_epochs):
    for batch in train_dataloader:
        # 'labels' in the batch makes the model compute and return its LM loss.
        outputs = supervised_model(**batch)
        loss = outputs.loss
        # accelerator.backward handles device placement / mixed-precision scaling.
        accelerator.backward(loss)
        optimizer.step()
        lr_scheduler.step()
        optimizer.zero_grad()
        progress_bar.update(1)
```
## Evaluate Outputs
```
## Look at 10 batch outputs from the evaluation dataset
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
supervised_model.to(device)
count = 0
for batch in eval_dataloader:
    count += 1
    batch = {k: v.to(device) for k, v in batch.items()}
    with torch.no_grad():
        # NOTE(review): batch still contains 'labels'; generate() may reject
        # unexpected keyword arguments — confirm on the installed transformers
        # version.
        outputs = supervised_model.generate(**batch, max_length=512, min_length = 200)
    # NOTE(review): batch_decode has no max_length parameter — presumably
    # ignored; verify.
    print(tokenizer.batch_decode(outputs, max_length = 512))
    if count == 10: break
## Upload model to huggingface hub
supervised_model.push_to_hub(tokenizer_name + "_supervised_model_final", use_temp_dir=True)
```
## Results and Discussion
Overall, this step was pretty straightforward and should provide us a good supervised baseline to apply RL on top of.
| github_jupyter |
# Sentiment Classification & How To "Frame Problems" for a Neural Network
by Andrew Trask
- **Twitter**: @iamtrask
- **Blog**: http://iamtrask.github.io
### What You Should Already Know
- neural networks, forward and back-propagation
- stochastic gradient descent
- mean squared error
- and train/test splits
### Where to Get Help if You Need it
- Re-watch previous Udacity Lectures
- Leverage the recommended Course Reading Material - [Grokking Deep Learning](https://www.manning.com/books/grokking-deep-learning) (Check inside your classroom for a discount code)
- Shoot me a tweet @iamtrask
### Tutorial Outline:
- Intro: The Importance of "Framing a Problem" (this lesson)
- [Curate a Dataset](#lesson_1)
- [Developing a "Predictive Theory"](#lesson_2)
- [**PROJECT 1**: Quick Theory Validation](#project_1)
- [Transforming Text to Numbers](#lesson_3)
- [**PROJECT 2**: Creating the Input/Output Data](#project_2)
- Putting it all together in a Neural Network (video only - nothing in notebook)
- [**PROJECT 3**: Building our Neural Network](#project_3)
- [Understanding Neural Noise](#lesson_4)
- [**PROJECT 4**: Making Learning Faster by Reducing Noise](#project_4)
- [Analyzing Inefficiencies in our Network](#lesson_5)
- [**PROJECT 5**: Making our Network Train and Run Faster](#project_5)
- [Further Noise Reduction](#lesson_6)
- [**PROJECT 6**: Reducing Noise by Strategically Reducing the Vocabulary](#project_6)
- [Analysis: What's going on in the weights?](#lesson_7)
# Lesson: Curate a Dataset<a id='lesson_1'></a>
The cells from here until Project 1 include code Andrew shows in the videos leading up to mini project 1. We've included them so you can run the code along with the videos without having to type in everything.
```
def pretty_print_review_and_label(i):
    """Print label i and the first 80 characters of review i, tab-separated."""
    snippet = reviews[i][:80]
    print("{}\t:\t{}...".format(labels[i], snippet))
# What we know! Read each review, stripping the trailing newline.
# Context managers guarantee the handles are closed (the original leaked
# nothing only because it called close() manually on the happy path).
with open('reviews.txt', 'r') as g:
    reviews = [line[:-1] for line in g]
# What we WANT to know! Labels are upper-cased for consistency.
with open('labels.txt', 'r') as g:
    labels = [line[:-1].upper() for line in g]
```
**Note:** The data in `reviews.txt` we're using has already been preprocessed a bit and contains only lower case characters. If we were working from raw data, where we didn't know it was all lower case, we would want to add a step here to convert it. That's so we treat different variations of the same word, like `The`, `the`, and `THE`, all the same way.
```
len(reviews)
reviews[0]
labels[0]
```
# Lesson: Develop a Predictive Theory<a id='lesson_2'></a>
```
print("labels.txt \t : \t reviews.txt\n")
pretty_print_review_and_label(2137)
pretty_print_review_and_label(12816)
pretty_print_review_and_label(6267)
pretty_print_review_and_label(21934)
pretty_print_review_and_label(5297)
pretty_print_review_and_label(4998)
```
# Project 1: Quick Theory Validation<a id='project_1'></a>
There are multiple ways to implement these projects, but in order to get your code closer to what Andrew shows in his solutions, we've provided some hints and starter code throughout this notebook.
You'll find the [Counter](https://docs.python.org/2/library/collections.html#collections.Counter) class to be useful in this exercise, as well as the [numpy](https://docs.scipy.org/doc/numpy/reference/) library.
```
from collections import Counter
import numpy as np
```
We'll create three `Counter` objects, one for words from postive reviews, one for words from negative reviews, and one for all the words.
```
# Create three Counter objects to store positive, negative and total counts
positive_counts = Counter()
negative_counts = Counter()
total_counts = Counter()
```
**TODO:** Examine all the reviews. For each word in a positive review, increase the count for that word in both your positive counter and the total words counter; likewise, for each word in a negative review, increase the count for that word in both your negative counter and the total words counter.
**Note:** Throughout these projects, you should use `split(' ')` to divide a piece of text (such as a review) into individual words. If you use `split()` instead, you'll get slightly different results than what the videos and solutions show.
```
# Tally word frequencies for positive reviews, negative reviews, and overall.
for review, label in zip(reviews, labels):
    words = review.split(' ')
    if label == 'POSITIVE':
        positive_counts.update(words)
    if label == 'NEGATIVE':
        negative_counts.update(words)
    # Every review's words contribute to the overall tally.
    total_counts.update(words)
```
Run the following two cells to list the words used in positive reviews and negative reviews, respectively, ordered from most to least commonly used.
```
# Examine the counts of the most common words in positive reviews
positive_counts.most_common()
# Examine the counts of the most common words in negative reviews
negative_counts.most_common()
```
As you can see, common words like "the" appear very often in both positive and negative reviews. Instead of finding the most common words in positive or negative reviews, what you really want are the words found in positive reviews more often than in negative reviews, and vice versa. To accomplish this, you'll need to calculate the **ratios** of word usage between positive and negative reviews.
**TODO:** Check all the words you've seen and calculate the ratio of postive to negative uses and store that ratio in `pos_neg_ratios`.
>Hint: the positive-to-negative ratio for a given word can be calculated with `positive_counts[word] / float(negative_counts[word]+1)`. Notice the `+1` in the denominator — that ensures we don't divide by zero for words that are only seen in positive reviews.
```
# Create Counter object to store positive/negative ratios
pos_neg_ratios = Counter()
# Calculate the positive-to-negative usage ratio for "common" words.
# NOTE(review): the exercise text suggests a cutoff of 100 uses, but this
# implementation keeps any word used more than 10 times.
for term, count in list(total_counts.most_common()):
    if count > 10 :
        # +1 in the denominator avoids division by zero for words that only
        # ever appear in positive reviews.
        ratio = positive_counts[term] / float(negative_counts[term]+1)
        pos_neg_ratios[term] = ratio
```
Examine the ratios you've calculated for a few words:
```
print("Pos-to-neg ratio for 'the' = {}".format(pos_neg_ratios["the"]))
print("Pos-to-neg ratio for 'amazing' = {}".format(pos_neg_ratios["amazing"]))
print("Pos-to-neg ratio for 'terrible' = {}".format(pos_neg_ratios["terrible"]))
```
Looking closely at the values you just calculated, we see the following:
* Words that you would expect to see more often in positive reviews — like "amazing" — have a ratio greater than 1. The more skewed a word is toward positive, the farther from 1 its positive-to-negative ratio will be.
* Words that you would expect to see more often in negative reviews — like "terrible" — have positive values that are less than 1. The more skewed a word is toward negative, the closer to zero its positive-to-negative ratio will be.
* Neutral words, which don't really convey any sentiment because you would expect to see them in all sorts of reviews — like "the" — have values very close to 1. A perfectly neutral word — one that was used in exactly the same number of positive reviews as negative reviews — would be almost exactly 1. The `+1` we suggested you add to the denominator slightly biases words toward negative, but it won't matter because it will be a tiny bias and later we'll be ignoring words that are too close to neutral anyway.
Ok, the ratios tell us which words are used more often in postive or negative reviews, but the specific values we've calculated are a bit difficult to work with. A very positive word like "amazing" has a value above 4, whereas a very negative word like "terrible" has a value around 0.18. Those values aren't easy to compare for a couple of reasons:
* Right now, 1 is considered neutral, but the absolute value of the positive-to-negative ratios of very positive words is larger than the absolute value of the ratios for the very negative words. So there is no way to directly compare two numbers and see if one word conveys the same magnitude of positive sentiment as another word conveys negative sentiment. So we should center all the values around neutral so the absolute distance from neutral of the positive-to-negative ratio for a word would indicate how much sentiment (positive or negative) that word conveys.
* When comparing absolute values it's easier to do that around zero than one.
To fix these issues, we'll convert all of our ratios to new values using logarithms.
**TODO:** Go through all the ratios you calculated and convert them to logarithms. (i.e. use `np.log(ratio)`)
In the end, extremely positive and extremely negative words will have positive-to-negative ratios with similar magnitudes but opposite signs.
```
# Convert the raw ratios to logs so positive and negative sentiment get
# symmetric magnitudes centred on zero.
for term, ratio in pos_neg_ratios.most_common():
    if ratio > 1:
        pos_neg_ratios[term] = np.log(ratio)
    else:
        # The 0.01 offset avoids log(0) for words never used positively.
        pos_neg_ratios[term] = -np.log(1 / (ratio + 0.01))
```
Examine the new ratios you've calculated for the same words from before:
```
print("Pos-to-neg ratio for 'the' = {}".format(pos_neg_ratios["the"]))
print("Pos-to-neg ratio for 'amazing' = {}".format(pos_neg_ratios["amazing"]))
print("Pos-to-neg ratio for 'terrible' = {}".format(pos_neg_ratios["terrible"]))
```
If everything worked, now you should see neutral words with values close to zero. In this case, "the" is near zero but slightly positive, so it was probably used in more positive reviews than negative reviews. But look at "amazing"'s ratio - it's above `1`, showing it is clearly a word with positive sentiment. And "terrible" has a similar score, but in the opposite direction, so it's below `-1`. It's now clear that both of these words are associated with specific, opposing sentiments.
Now run the following cells to see more ratios.
The first cell displays all the words, ordered by how associated they are with postive reviews. (Your notebook will most likely truncate the output so you won't actually see *all* the words in the list.)
The second cell displays the 30 words most associated with negative reviews by reversing the order of the first list and then looking at the first 30 words. (If you want the second cell to display all the words, ordered by how associated they are with negative reviews, you could just write `reversed(pos_neg_ratios.most_common())`.)
You should continue to see values similar to the earlier ones we checked — neutral words will be close to `0`, words will get more positive as their ratios approach and go above `1`, and words will get more negative as their ratios approach and go below `-1`. That's why we decided to use the logs instead of the raw ratios.
```
# words most frequently seen in a review with a "POSITIVE" label
pos_neg_ratios.most_common()
# words most frequently seen in a review with a "NEGATIVE" label
list(reversed(pos_neg_ratios.most_common()))[0:30]
# Note: Above is the code Andrew uses in his solution video,
# so we've included it here to avoid confusion.
# If you explore the documentation for the Counter class,
# you will see you could also find the 30 least common
# words like this: pos_neg_ratios.most_common()[:-31:-1]
```
# End of Project 1.
## Watch the next video to see Andrew's solution, then continue on to the next lesson.
# Transforming Text into Numbers<a id='lesson_3'></a>
The cells here include code Andrew shows in the next video. We've included it so you can run the code along with the video without having to type in everything.
```
from IPython.display import Image
review = "This was a horrible, terrible movie."
Image(filename='sentiment_network.png')
review = "The movie was excellent"
Image(filename='sentiment_network_pos.png')
```
# Project 2: Creating the Input/Output Data<a id='project_2'></a>
**TODO:** Create a [set](https://docs.python.org/3/tutorial/datastructures.html#sets) named `vocab` that contains every word in the vocabulary.
```
# TODO: Create set named "vocab" containing all of the words from all of the reviews
vocab = total_counts.keys()
```
Run the following cell to check your vocabulary size. If everything worked correctly, it should print **74074**
```
vocab_size = len(vocab)
print(vocab_size)
```
Take a look at the following image. It represents the layers of the neural network you'll be building throughout this notebook. `layer_0` is the input layer, `layer_1` is a hidden layer, and `layer_2` is the output layer.
```
from IPython.display import Image
Image(filename='sentiment_network_2.png')
```
**TODO:** Create a numpy array called `layer_0` and initialize it to all zeros. You will find the [zeros](https://docs.scipy.org/doc/numpy/reference/generated/numpy.zeros.html) function particularly helpful here. Be sure you create `layer_0` as a 2-dimensional matrix with 1 row and `vocab_size` columns.
```
# TODO: Create layer_0 matrix with dimensions 1 by vocab_size, initially filled with zeros
# One row with a slot per vocabulary word (len(total_counts) == vocab_size).
layer_0 = np.zeros((1, len(total_counts)))
```
Run the following cell. It should display `(1, 74074)`
```
layer_0.shape
from IPython.display import Image
Image(filename='sentiment_network.png')
```
`layer_0` contains one entry for every word in the vocabulary, as shown in the above image. We need to make sure we know the index of each word, so run the following cell to create a lookup table that stores the index of every word.
```
# Create a dictionary of words in the vocabulary mapped to index positions
# (to be used in layer_0)
word2index = {word: i for i, word in enumerate(vocab)}
# display the map of words to indices
word2index
```
**TODO:** Complete the implementation of `update_input_layer`. It should count
how many times each word is used in the given review, and then store
those counts at the appropriate indices inside `layer_0`.
```
def update_input_layer(review):
    """ Modify the global layer_0 to represent the vector form of review.

    The element at a given index of layer_0 should represent
    how many times the given word occurs in the review.

    Args:
        review(string) - the string of the review
    Returns:
        None
    """
    global layer_0
    # clear out previous state by resetting the layer to be all 0s
    layer_0 *= 0
    # Count how many times each word is used in the given review.
    # BUGFIX: the original assigned 1, recording only presence/absence
    # instead of the occurrence counts the docstring (and TODO) call for.
    for term in review.split(' '):
        layer_0[0][word2index[term]] += 1
```
Run the following cell to test updating the input layer with the first review. The indices assigned may not be the same as in the solution, but hopefully you'll see some non-zero values in `layer_0`.
```
update_input_layer(reviews[0])
layer_0
```
**TODO:** Complete the implementation of `get_target_for_labels`. It should return `0` or `1`,
depending on whether the given label is `NEGATIVE` or `POSITIVE`, respectively.
```
def get_target_for_label(label):
    """Convert a label to `0` or `1`.

    Args:
        label(string) - Either "POSITIVE" or "NEGATIVE".
    Returns:
        `0` or `1`.
    """
    # bool -> int: True becomes 1 for "POSITIVE", False becomes 0 otherwise.
    return int(label == 'POSITIVE')
```
Run the following two cells. They should print out`'POSITIVE'` and `1`, respectively.
```
labels[0]
get_target_for_label(labels[0])
```
Run the following two cells. They should print out `'NEGATIVE'` and `0`, respectively.
```
labels[1]
get_target_for_label(labels[1])
```
# End of Project 2.
## Watch the next video to see Andrew's solution, then continue on to the next lesson.
# Project 3: Building a Neural Network<a id='project_3'></a>
**TODO:** We've included the framework of a class called `SentimentNetork`. Implement all of the items marked `TODO` in the code. These include doing the following:
- Create a basic neural network much like the networks you've seen in earlier lessons and in Project 1, with an input layer, a hidden layer, and an output layer.
- Do **not** add a non-linearity in the hidden layer. That is, do not use an activation function when calculating the hidden layer outputs.
- Re-use the code from earlier in this notebook to create the training data (see `TODO`s in the code)
- Implement the `pre_process_data` function to create the vocabulary for our training data generating functions
- Ensure `train` trains over the entire corpus
### Where to Get Help if You Need it
- Re-watch earlier Udacity lectures
- Chapters 3-5 - [Grokking Deep Learning](https://www.manning.com/books/grokking-deep-learning) - (Check inside your classroom for a discount code)
```
import time
import sys
import numpy as np
# Encapsulate our neural network in a class
class SentimentNetwork:
    """Two-layer neural network for binary (POSITIVE/NEGATIVE) sentiment.

    The input layer is a binary bag-of-words vector over the review
    vocabulary, the hidden layer is linear (no activation, per the project
    spec), and the single output node uses a sigmoid activation.
    """

    def __init__(self, reviews, labels, hidden_nodes = 10, learning_rate = 0.1):
        """Create a SentimenNetwork with the given settings
        Args:
            reviews(list) - List of reviews used for training
            labels(list) - List of POSITIVE/NEGATIVE labels associated with the given reviews
            hidden_nodes(int) - Number of nodes to create in the hidden layer
            learning_rate(float) - Learning rate to use while training
        """
        # Seed the RNG so results are reproducible during development.
        np.random.seed(1)
        # Build vocabularies and lookup tables from the training data.
        self.pre_process_data(reviews, labels)
        # One input node per vocabulary word, a single output node.
        self.init_network(len(self.review_vocab), hidden_nodes, 1, learning_rate)

    def pre_process_data(self, reviews, labels):
        """Build the word/label vocabularies and their index lookup tables."""
        # Every distinct word seen in the reviews (split on single spaces,
        # matching the rest of the notebook).
        review_vocab = set()
        for review in reviews:
            review_vocab.update(review.split(' '))
        self.review_vocab = list(review_vocab)

        # Every distinct label; each label is a single word, no splitting needed.
        label_vocab = set(labels)
        self.label_vocab = list(label_vocab)

        # Store the sizes of the review and label vocabularies.
        self.review_vocab_size = len(self.review_vocab)
        self.label_vocab_size = len(self.label_vocab)

        # Map each word / label to its index position.
        self.word2index = {word: idx for idx, word in enumerate(self.review_vocab)}
        self.label2index = {label: idx for idx, label in enumerate(self.label_vocab)}

    def init_network(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
        """Allocate weight matrices and the reusable input layer."""
        # Store the number of nodes in input, hidden, and output layers.
        self.input_nodes = input_nodes
        self.hidden_nodes = hidden_nodes
        self.output_nodes = output_nodes
        # Store the learning rate
        self.learning_rate = learning_rate

        # Input->hidden weights start at zero, as specified by the project.
        self.weights_0_1 = np.zeros((input_nodes, hidden_nodes))
        # Hidden->output weights start as small random values.
        self.weights_1_2 = np.random.normal(0.0, self.output_nodes**-0.5,
                                            (self.hidden_nodes, self.output_nodes))
        # 1 x input_nodes bag-of-words input layer, reused for every review.
        self.layer_0 = np.zeros((1, input_nodes))

    def update_input_layer(self, review):
        """Reset layer_0 and mark which vocabulary words appear in `review`."""
        self.layer_0 *= 0
        for term in review.split(' '):
            # Words never seen during training are silently ignored.
            if term in self.word2index:
                self.layer_0[:, self.word2index[term]] = 1

    def get_target_for_label(self, label):
        """Convert a POSITIVE/NEGATIVE label to the target value 1/0."""
        return 1 if label == 'POSITIVE' else 0

    def sigmoid(self, x):
        """Sigmoid activation function."""
        return 1 / (1 + np.exp(-x))

    def sigmoid_output_2_derivative(self, output):
        """Derivative of the sigmoid expressed in terms of its output."""
        return output * (1 - output)

    def train(self, training_reviews, training_labels):
        """Run one pass of per-example SGD over the training data."""
        # make sure we have a matching number of reviews and labels
        assert(len(training_reviews) == len(training_labels))

        # Keep track of correct predictions to display accuracy during training
        correct_so_far = 0
        # Remember when we started for printing time statistics
        start = time.time()

        for i in range(len(training_reviews)):
            # ---- forward pass --------------------------------------------
            self.update_input_layer(training_reviews[i])
            label = self.get_target_for_label(training_labels[i])
            # Hidden layer is linear: no activation function.
            layer_1 = np.dot(self.layer_0, self.weights_0_1)
            layer_2 = self.sigmoid(np.dot(layer_1, self.weights_1_2))

            # ---- backward pass -------------------------------------------
            error = layer_2 - label
            layer_2_delta = error * self.sigmoid_output_2_derivative(layer_2)
            # Hidden layer is linear, so its delta is just the back-propagated
            # error (computed BEFORE weights_1_2 is updated).
            layer_1_delta = np.dot(layer_2_delta, self.weights_1_2.T)

            # BUGFIX: the hidden->output gradient is layer_1.T @ layer_2_delta.
            # The original used the network *output* (a 1x1 array) here, which
            # broadcast one scalar correction across every hidden weight.
            self.weights_1_2 -= np.dot(layer_1.T, layer_2_delta) * self.learning_rate
            self.weights_0_1 -= np.dot(self.layer_0.T, layer_1_delta) * self.learning_rate

            # A prediction is correct when it lands on the right side of 0.5.
            prediction = layer_2[0][0]
            if (prediction >= 0.5) == (label == 1):
                correct_so_far += 1

            # For debug purposes, print out our prediction accuracy and speed
            # throughout the training process.
            elapsed_time = float(time.time() - start)
            reviews_per_second = i / elapsed_time if elapsed_time > 0 else 0
            sys.stdout.write("\rProgress:" + str(100 * i/float(len(training_reviews)))[:4] \
                             + "% Speed(reviews/sec):" + str(reviews_per_second)[0:5] \
                             + " #Correct:" + str(correct_so_far) + " #Trained:" + str(i+1) \
                             + " Training Accuracy:" + str(correct_so_far * 100 / float(i+1))[:4] + "%")
            if(i % 2500 == 0):
                print("")

    def test(self, testing_reviews, testing_labels):
        """
        Attempts to predict the labels for the given testing_reviews,
        and uses the test_labels to calculate the accuracy of those predictions.
        """
        # keep track of how many correct predictions we make
        correct = 0
        # we'll time how many predictions per second we make
        start = time.time()

        for i in range(len(testing_reviews)):
            pred = self.run(testing_reviews[i])
            if(pred == testing_labels[i]):
                correct += 1

            # For debug purposes, print out our prediction accuracy and speed
            # throughout the prediction process.
            elapsed_time = float(time.time() - start)
            reviews_per_second = i / elapsed_time if elapsed_time > 0 else 0
            sys.stdout.write("\rProgress:" + str(100 * i/float(len(testing_reviews)))[:4] \
                             + "% Speed(reviews/sec):" + str(reviews_per_second)[0:5] \
                             + " #Correct:" + str(correct) + " #Tested:" + str(i+1) \
                             + " Testing Accuracy:" + str(correct * 100 / float(i+1))[:4] + "%")

    def run(self, review):
        """
        Returns a POSITIVE or NEGATIVE prediction for the given review.
        """
        # Reviews passed here may come from anywhere, so normalise to lower
        # case first (training reviews are already lower case, per the TODO).
        self.update_input_layer(review.lower())
        layer_1 = np.dot(self.layer_0, self.weights_0_1)
        layer_2 = self.sigmoid(np.dot(layer_1, self.weights_1_2))
        # Spec: predictions greater-than-or-equal-to 0.5 are POSITIVE
        # (the original used a strict '>').
        return 'POSITIVE' if layer_2[0][0] >= 0.5 else 'NEGATIVE'
```
Run the following cell to create a `SentimentNetwork` that will train on all but the last 1000 reviews (we're saving those for testing). Here we use a learning rate of `0.1`.
```
mlp = SentimentNetwork(reviews[:-1000],labels[:-1000], learning_rate=0.1)
```
Run the following cell to test the network's performance against the last 1000 reviews (the ones we held out from our training set).
**We have not trained the model yet, so the results should be about 50% as it will just be guessing and there are only two possible values to choose from.**
```
mlp.test(reviews[-1000:],labels[-1000:])
```
Run the following cell to actually train the network. During training, it will display the model's accuracy repeatedly as it trains so you can see how well it's doing.
```
mlp.train(reviews[:-1000],labels[:-1000])
```
That most likely didn't train very well. Part of the reason may be because the learning rate is too high. Run the following cell to recreate the network with a smaller learning rate, `0.01`, and then train the new network.
```
mlp = SentimentNetwork(reviews[:-1000],labels[:-1000], learning_rate=0.01)
mlp.train(reviews[:-1000],labels[:-1000])
```
That probably wasn't much different. Run the following cell to recreate the network one more time with an even smaller learning rate, `0.001`, and then train the new network.
```
mlp = SentimentNetwork(reviews[:-1000],labels[:-1000], learning_rate=0.001)
mlp.train(reviews[:-1000],labels[:-1000])
```
With a learning rate of `0.001`, the network should finally have started to improve during training. It's still not very good, but it shows that this solution has potential. We will improve it in the next lesson.
# End of Project 3.
## Watch the next video to see Andrew's solution, then continue on to the next lesson.
# Understanding Neural Noise<a id='lesson_4'></a>
The following cells include includes the code Andrew shows in the next video. We've included it here so you can run the cells along with the video without having to type in everything.
```
from IPython.display import Image
Image(filename='sentiment_network.png')
def update_input_layer(review):
    """Populate the global input layer with word counts for `review`.

    Resets `layer_0` to zeros, then adds 1 to the slot of each word
    occurrence, so repeated words accumulate counts.
    """
    global layer_0
    # wipe out whatever the previous review left behind
    layer_0 *= 0
    tokens = review.split(" ")
    for token in tokens:
        layer_0[0][word2index[token]] += 1
update_input_layer(reviews[0])
layer_0
review_counter = Counter()
for word in reviews[0].split(" "):
review_counter[word] += 1
review_counter.most_common()
```
# Project 4: Reducing Noise in Our Input Data<a id='project_4'></a>
**TODO:** Attempt to reduce the noise in the input data like Andrew did in the previous video. Specifically, do the following:
* Copy the `SentimentNetwork` class you created earlier into the following cell.
* Modify `update_input_layer` so it does not count how many times each word is used, but rather just stores whether or not a word was used.
```
# TODO: -Copy the SentimentNetwork class from Projet 3 lesson
# -Modify it to reduce noise, like in the video
import time
import sys
import numpy as np
# Encapsulate our neural network in a class
class SentimentNetwork:
    """A small 3-layer neural network for binary sentiment classification.

    The input layer is a bag-of-words presence vector (one slot per
    vocabulary word, set to 1 when the word appears in the review), the
    hidden layer has no activation function, and the single output node
    uses a sigmoid.  Labels are the strings 'POSITIVE' / 'NEGATIVE'.
    """

    def __init__(self, reviews, labels, hidden_nodes = 10, learning_rate = 0.1):
        """Create a SentimentNetwork with the given settings

        Args:
            reviews(list) - List of reviews used for training
            labels(list) - List of POSITIVE/NEGATIVE labels associated with the given reviews
            hidden_nodes(int) - Number of nodes to create in the hidden layer
            learning_rate(float) - Learning rate to use while training
        """
        # Seed the RNG so weight initialization is reproducible during development.
        np.random.seed(1)
        # Build vocabularies and index lookup tables from the training data.
        self.pre_process_data(reviews, labels)
        # One input node per vocabulary word, a single output node.
        self.init_network(len(self.review_vocab), hidden_nodes, 1, learning_rate)

    def pre_process_data(self, reviews, labels):
        """Build the word/label vocabularies and their index lookup tables."""
        # Collect every distinct token across all reviews.  split(' ') is
        # used deliberately (not split()) to match the tokenization used
        # elsewhere in the notebook.
        review_vocab = set()
        for review in reviews:
            review_vocab.update(review.split(' '))
        self.review_vocab = list(review_vocab)

        # Labels are single words, so no splitting is required.
        self.label_vocab = list(set(labels))

        # Store the sizes of the review and label vocabularies.
        self.review_vocab_size = len(self.review_vocab)
        self.label_vocab_size = len(self.label_vocab)

        # Map each word / label to its index position.
        self.word2index = {word: idx for idx, word in enumerate(self.review_vocab)}
        self.label2index = {label: idx for idx, label in enumerate(self.label_vocab)}

    def init_network(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
        """Allocate the layers and weight matrices."""
        # Store the number of nodes in input, hidden, and output layers.
        self.input_nodes = input_nodes
        self.hidden_nodes = hidden_nodes
        self.output_nodes = output_nodes
        # Store the learning rate
        self.learning_rate = learning_rate

        # Input->hidden weights start at zero; hidden->output weights are
        # drawn from a normal distribution scaled by the output layer size.
        self.weights_0_1 = np.zeros((input_nodes, hidden_nodes))
        self.weights_1_2 = np.random.normal(0.0, self.output_nodes**-0.5,
                                            (self.hidden_nodes, self.output_nodes))

        # The input layer: a 1 x input_nodes row vector of zeros.
        self.layer_0 = np.zeros((1, input_nodes))

    def update_input_layer(self, review):
        """Reset layer_0 and set a 1 for every known word in `review`.

        Stores word *presence* rather than counts, which reduces noise
        (the Project 4 modification).
        """
        self.layer_0 *= 0
        for term in review.split(' '):
            if term in self.word2index:
                self.layer_0[:, self.word2index[term]] = 1

    def get_target_for_label(self, label):
        """Convert a 'POSITIVE'/'NEGATIVE' label to the network target 1/0."""
        return 1 if label == 'POSITIVE' else 0

    def sigmoid(self, x):
        """Sigmoid activation function."""
        return 1 / (1 + np.exp(-x))

    def sigmoid_output_2_derivative(self, output):
        """Derivative of the sigmoid, expressed via the sigmoid's output."""
        return output * (1 - output)

    def train(self, training_reviews, training_labels):
        """Run one SGD pass over the reviews/labels, printing progress."""
        # make sure we have a matching number of reviews and labels
        assert(len(training_reviews) == len(training_labels))

        # Keep track of correct predictions to display accuracy during training
        correct_so_far = 0
        # Remember when we started for printing time statistics
        start = time.time()

        # loop through all the given reviews and run a forward and backward
        # pass, updating weights for every item
        for i in range(len(training_reviews)):
            # Forward pass: load the review into the input layer, then
            # propagate.  The hidden layer has no activation function;
            # the output layer uses the sigmoid.
            self.update_input_layer(training_reviews[i])
            label = self.get_target_for_label(training_labels[i])
            layer_1 = np.dot(self.layer_0, self.weights_0_1)
            layer_2 = self.sigmoid(np.dot(layer_1, self.weights_1_2))

            # Backward pass: gradient of the error w.r.t. each weight matrix.
            error = layer_2 - label
            layer_2_delta = error * self.sigmoid_output_2_derivative(layer_2)
            layer_1_delta = np.dot(layer_2_delta, self.weights_1_2.T)
            # Bug fix: the hidden->output gradient is the *hidden
            # activations* transposed (layer_1.T), not the network output.
            self.weights_1_2 -= np.dot(layer_1.T, layer_2_delta) * self.learning_rate
            self.weights_0_1 -= np.dot(self.layer_0.T, layer_1_delta) * self.learning_rate

            # A prediction counts as correct when it lands on the right
            # side of the 0.5 decision boundary.
            if(layer_2 >= 0.5 and label == 1):
                correct_so_far += 1
            elif(layer_2 < 0.5 and label == 0):
                correct_so_far += 1

            # For debug purposes, print out our prediction accuracy and
            # speed throughout the training process.
            elapsed_time = float(time.time() - start)
            reviews_per_second = i / elapsed_time if elapsed_time > 0 else 0
            sys.stdout.write("\rProgress:" + str(100 * i/float(len(training_reviews)))[:4] \
                             + "% Speed(reviews/sec):" + str(reviews_per_second)[0:5] \
                             + " #Correct:" + str(correct_so_far) + " #Trained:" + str(i+1) \
                             + " Training Accuracy:" + str(correct_so_far * 100 / float(i+1))[:4] + "%")
            if(i % 2500 == 0):
                print("")

    def test(self, testing_reviews, testing_labels):
        """
        Attempts to predict the labels for the given testing_reviews,
        and uses the test_labels to calculate the accuracy of those predictions.
        """
        # keep track of how many correct predictions we make
        correct = 0
        # we'll time how many predictions per second we make
        start = time.time()

        # Loop through each of the given reviews and call run to predict
        # its label.
        for i in range(len(testing_reviews)):
            pred = self.run(testing_reviews[i])
            if(pred == testing_labels[i]):
                correct += 1

            # For debug purposes, print out our prediction accuracy and
            # speed throughout the prediction process.
            elapsed_time = float(time.time() - start)
            reviews_per_second = i / elapsed_time if elapsed_time > 0 else 0
            sys.stdout.write("\rProgress:" + str(100 * i/float(len(testing_reviews)))[:4] \
                             + "% Speed(reviews/sec):" + str(reviews_per_second)[0:5] \
                             + " #Correct:" + str(correct) + " #Tested:" + str(i+1) \
                             + " Testing Accuracy:" + str(correct * 100 / float(i+1))[:4] + "%")

    def run(self, review):
        """
        Returns a POSITIVE or NEGATIVE prediction for the given review.
        """
        # The review may come from anywhere, so lower-case it before
        # looking words up in the vocabulary (per the project spec).
        self.update_input_layer(review.lower())
        layer_1 = np.dot(self.layer_0, self.weights_0_1)
        layer_2 = self.sigmoid(np.dot(layer_1, self.weights_1_2))
        # Bug fix: the spec says predictions >= 0.5 are POSITIVE (was > 0.5).
        if layer_2 >= 0.5:
            return 'POSITIVE'
        else:
            return 'NEGATIVE'
```
Run the following cell to recreate the network and train it. Notice we've gone back to the higher learning rate of `0.1`.
```
mlp = SentimentNetwork(reviews[:-1000],labels[:-1000], learning_rate=0.1)
mlp.train(reviews[:-1000],labels[:-1000])
```
That should have trained much better than the earlier attempts. It's still not wonderful, but it should have improved dramatically. Run the following cell to test your model with 1000 predictions.
```
mlp.test(reviews[-1000:],labels[-1000:])
```
# End of Project 4.
## Andrew's solution was actually in the previous video, so rewatch that video if you had any problems with that project. Then continue on to the next lesson.
# Analyzing Inefficiencies in our Network<a id='lesson_5'></a>
The following cells include the code Andrew shows in the next video. We've included it here so you can run the cells along with the video without having to type in everything.
```
Image(filename='sentiment_network_sparse.png')
layer_0 = np.zeros(10)
layer_0
layer_0[4] = 1
layer_0[9] = 1
layer_0
weights_0_1 = np.random.randn(10,5)
layer_0.dot(weights_0_1)
indices = [4,9]
layer_1 = np.zeros(5)
for index in indices:
layer_1 += (1 * weights_0_1[index])
layer_1
Image(filename='sentiment_network_sparse_2.png')
layer_1 = np.zeros(5)
for index in indices:
layer_1 += (weights_0_1[index])
layer_1
```
# Project 5: Making our Network More Efficient<a id='project_5'></a>
**TODO:** Make the `SentimentNetwork` class more efficient by eliminating unnecessary multiplications and additions that occur during forward and backward propagation. To do that, you can do the following:
* Copy the `SentimentNetwork` class from the previous project into the following cell.
* Remove the `update_input_layer` function - you will not need it in this version.
* Modify `init_network`:
>* You no longer need a separate input layer, so remove any mention of `self.layer_0`
>* You will be dealing with the old hidden layer more directly, so create `self.layer_1`, a two-dimensional matrix with shape 1 x hidden_nodes, with all values initialized to zero
* Modify `train`:
>* Change the name of the input parameter `training_reviews` to `training_reviews_raw`. This will help with the next step.
>* At the beginning of the function, you'll want to preprocess your reviews to convert them to a list of indices (from `word2index`) that are actually used in the review. This is equivalent to what you saw in the video when Andrew set specific indices to 1. Your code should create a local `list` variable named `training_reviews` that should contain a `list` for each review in `training_reviews_raw`. Those lists should contain the indices for words found in the review.
>* Remove call to `update_input_layer`
>* Use `self`'s `layer_1` instead of a local `layer_1` object.
>* In the forward pass, replace the code that updates `layer_1` with new logic that only adds the weights for the indices used in the review.
>* When updating `weights_0_1`, only update the individual weights that were used in the forward pass.
* Modify `run`:
>* Remove call to `update_input_layer`
>* Use `self`'s `layer_1` instead of a local `layer_1` object.
>* Much like you did in `train`, you will need to pre-process the `review` so you can work with word indices, then update `layer_1` by adding weights for the indices used in the review.
```
# TODO: -Copy the SentimentNetwork class from Project 4 lesson
# -Modify it according to the above instructions
import time
import sys
import numpy as np
# Encapsulate our neural network in a class
class SentimentNetwork:
    """A small 3-layer neural network for binary sentiment classification.

    The input layer is a bag-of-words presence vector (one slot per
    vocabulary word, set to 1 when the word appears in the review), the
    hidden layer has no activation function, and the single output node
    uses a sigmoid.  Labels are the strings 'POSITIVE' / 'NEGATIVE'.

    NOTE(review): the Project 5 instructions ask for a sparse-index
    rewrite (dropping layer_0); that optimization is intentionally left
    as the exercise — this version only fixes the backprop defects.
    """

    def __init__(self, reviews, labels, hidden_nodes = 10, learning_rate = 0.1):
        """Create a SentimentNetwork with the given settings

        Args:
            reviews(list) - List of reviews used for training
            labels(list) - List of POSITIVE/NEGATIVE labels associated with the given reviews
            hidden_nodes(int) - Number of nodes to create in the hidden layer
            learning_rate(float) - Learning rate to use while training
        """
        # Seed the RNG so weight initialization is reproducible during development.
        np.random.seed(1)
        # Build vocabularies and index lookup tables from the training data.
        self.pre_process_data(reviews, labels)
        # One input node per vocabulary word, a single output node.
        self.init_network(len(self.review_vocab), hidden_nodes, 1, learning_rate)

    def pre_process_data(self, reviews, labels):
        """Build the word/label vocabularies and their index lookup tables."""
        # Collect every distinct token across all reviews.  split(' ') is
        # used deliberately (not split()) to match the tokenization used
        # elsewhere in the notebook.
        review_vocab = set()
        for review in reviews:
            review_vocab.update(review.split(' '))
        self.review_vocab = list(review_vocab)

        # Labels are single words, so no splitting is required.
        self.label_vocab = list(set(labels))

        # Store the sizes of the review and label vocabularies.
        self.review_vocab_size = len(self.review_vocab)
        self.label_vocab_size = len(self.label_vocab)

        # Map each word / label to its index position.
        self.word2index = {word: idx for idx, word in enumerate(self.review_vocab)}
        self.label2index = {label: idx for idx, label in enumerate(self.label_vocab)}

    def init_network(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
        """Allocate the layers and weight matrices."""
        # Store the number of nodes in input, hidden, and output layers.
        self.input_nodes = input_nodes
        self.hidden_nodes = hidden_nodes
        self.output_nodes = output_nodes
        # Store the learning rate
        self.learning_rate = learning_rate

        # Input->hidden weights start at zero; hidden->output weights are
        # drawn from a normal distribution scaled by the output layer size.
        self.weights_0_1 = np.zeros((input_nodes, hidden_nodes))
        self.weights_1_2 = np.random.normal(0.0, self.output_nodes**-0.5,
                                            (self.hidden_nodes, self.output_nodes))

        # The input layer: a 1 x input_nodes row vector of zeros.
        self.layer_0 = np.zeros((1, input_nodes))

    def update_input_layer(self, review):
        """Reset layer_0 and set a 1 for every known word in `review`.

        Stores word *presence* rather than counts, which reduces noise.
        """
        self.layer_0 *= 0
        for term in review.split(' '):
            if term in self.word2index:
                self.layer_0[:, self.word2index[term]] = 1

    def get_target_for_label(self, label):
        """Convert a 'POSITIVE'/'NEGATIVE' label to the network target 1/0."""
        return 1 if label == 'POSITIVE' else 0

    def sigmoid(self, x):
        """Sigmoid activation function."""
        return 1 / (1 + np.exp(-x))

    def sigmoid_output_2_derivative(self, output):
        """Derivative of the sigmoid, expressed via the sigmoid's output."""
        return output * (1 - output)

    def train(self, training_reviews, training_labels):
        """Run one SGD pass over the reviews/labels, printing progress."""
        # make sure we have a matching number of reviews and labels
        assert(len(training_reviews) == len(training_labels))

        # Keep track of correct predictions to display accuracy during training
        correct_so_far = 0
        # Remember when we started for printing time statistics
        start = time.time()

        # loop through all the given reviews and run a forward and backward
        # pass, updating weights for every item
        for i in range(len(training_reviews)):
            # Forward pass: load the review into the input layer, then
            # propagate.  The hidden layer has no activation function;
            # the output layer uses the sigmoid.
            self.update_input_layer(training_reviews[i])
            label = self.get_target_for_label(training_labels[i])
            layer_1 = np.dot(self.layer_0, self.weights_0_1)
            layer_2 = self.sigmoid(np.dot(layer_1, self.weights_1_2))

            # Backward pass: gradient of the error w.r.t. each weight matrix.
            error = layer_2 - label
            layer_2_delta = error * self.sigmoid_output_2_derivative(layer_2)
            layer_1_delta = np.dot(layer_2_delta, self.weights_1_2.T)
            # Bug fix: the hidden->output gradient is the *hidden
            # activations* transposed (layer_1.T), not the network output.
            self.weights_1_2 -= np.dot(layer_1.T, layer_2_delta) * self.learning_rate
            self.weights_0_1 -= np.dot(self.layer_0.T, layer_1_delta) * self.learning_rate

            # A prediction counts as correct when it lands on the right
            # side of the 0.5 decision boundary.
            if(layer_2 >= 0.5 and label == 1):
                correct_so_far += 1
            elif(layer_2 < 0.5 and label == 0):
                correct_so_far += 1

            # For debug purposes, print out our prediction accuracy and
            # speed throughout the training process.
            elapsed_time = float(time.time() - start)
            reviews_per_second = i / elapsed_time if elapsed_time > 0 else 0
            sys.stdout.write("\rProgress:" + str(100 * i/float(len(training_reviews)))[:4] \
                             + "% Speed(reviews/sec):" + str(reviews_per_second)[0:5] \
                             + " #Correct:" + str(correct_so_far) + " #Trained:" + str(i+1) \
                             + " Training Accuracy:" + str(correct_so_far * 100 / float(i+1))[:4] + "%")
            if(i % 2500 == 0):
                print("")

    def test(self, testing_reviews, testing_labels):
        """
        Attempts to predict the labels for the given testing_reviews,
        and uses the test_labels to calculate the accuracy of those predictions.
        """
        # keep track of how many correct predictions we make
        correct = 0
        # we'll time how many predictions per second we make
        start = time.time()

        # Loop through each of the given reviews and call run to predict
        # its label.
        for i in range(len(testing_reviews)):
            pred = self.run(testing_reviews[i])
            if(pred == testing_labels[i]):
                correct += 1

            # For debug purposes, print out our prediction accuracy and
            # speed throughout the prediction process.
            elapsed_time = float(time.time() - start)
            reviews_per_second = i / elapsed_time if elapsed_time > 0 else 0
            sys.stdout.write("\rProgress:" + str(100 * i/float(len(testing_reviews)))[:4] \
                             + "% Speed(reviews/sec):" + str(reviews_per_second)[0:5] \
                             + " #Correct:" + str(correct) + " #Tested:" + str(i+1) \
                             + " Testing Accuracy:" + str(correct * 100 / float(i+1))[:4] + "%")

    def run(self, review):
        """
        Returns a POSITIVE or NEGATIVE prediction for the given review.
        """
        # The review may come from anywhere, so lower-case it before
        # looking words up in the vocabulary (per the project spec).
        self.update_input_layer(review.lower())
        layer_1 = np.dot(self.layer_0, self.weights_0_1)
        layer_2 = self.sigmoid(np.dot(layer_1, self.weights_1_2))
        # Bug fix: the spec says predictions >= 0.5 are POSITIVE (was > 0.5).
        if layer_2 >= 0.5:
            return 'POSITIVE'
        else:
            return 'NEGATIVE'
```
Run the following cell to recreate the network and train it once again.
```
mlp = SentimentNetwork(reviews[:-1000],labels[:-1000], learning_rate=0.1)
mlp.train(reviews[:-1000],labels[:-1000])
```
That should have trained much better than the earlier attempts. Run the following cell to test your model with 1000 predictions.
```
mlp.test(reviews[-1000:],labels[-1000:])
```
# End of Project 5.
## Watch the next video to see Andrew's solution, then continue on to the next lesson.
# Further Noise Reduction<a id='lesson_6'></a>
```
Image(filename='sentiment_network_sparse_2.png')
# words most frequently seen in a review with a "POSITIVE" label
pos_neg_ratios.most_common()
# words most frequently seen in a review with a "NEGATIVE" label
list(reversed(pos_neg_ratios.most_common()))[0:30]
from bokeh.models import ColumnDataSource, LabelSet
from bokeh.plotting import figure, show, output_file
from bokeh.io import output_notebook
output_notebook()
# Bug fix: `normed` was deprecated and then removed from np.histogram
# (NumPy 1.24); passing it together with `density` raises an error.
# `density=True` alone produces the normalized histogram we want.
hist, edges = np.histogram(list(map(lambda x: x[1], pos_neg_ratios.most_common())), density=True, bins=100)
p = figure(tools="pan,wheel_zoom,reset,save",
           toolbar_location="above",
           title="Word Positive/Negative Affinity Distribution")
p.quad(top=hist, bottom=0, left=edges[:-1], right=edges[1:], line_color="#555555")
show(p)
# How many words occur exactly `cnt` times in the corpus?
frequency_frequency = Counter()
for word, cnt in total_counts.most_common():
    frequency_frequency[cnt] += 1
hist, edges = np.histogram(list(map(lambda x: x[1], frequency_frequency.most_common())), density=True, bins=100)
p = figure(tools="pan,wheel_zoom,reset,save",
           toolbar_location="above",
           title="The frequency distribution of the words in our corpus")
p.quad(top=hist, bottom=0, left=edges[:-1], right=edges[1:], line_color="#555555")
show(p)
```
# Project 6: Reducing Noise by Strategically Reducing the Vocabulary<a id='project_6'></a>
**TODO:** Improve `SentimentNetwork`'s performance by reducing more noise in the vocabulary. Specifically, do the following:
* Copy the `SentimentNetwork` class from the previous project into the following cell.
* Modify `pre_process_data`:
>* Add two additional parameters: `min_count` and `polarity_cutoff`
>* Calculate the positive-to-negative ratios of words used in the reviews. (You can use code you've written elsewhere in the notebook, but we are moving it into the class like we did with other helper code earlier.)
>* Andrew's solution only calculates a postive-to-negative ratio for words that occur at least 50 times. This keeps the network from attributing too much sentiment to rarer words. You can choose to add this to your solution if you would like.
>* Change so words are only added to the vocabulary if they occur in the vocabulary more than `min_count` times.
>* Change so words are only added to the vocabulary if the absolute value of their postive-to-negative ratio is at least `polarity_cutoff`
* Modify `__init__`:
>* Add the same two parameters (`min_count` and `polarity_cutoff`) and use them when you call `pre_process_data`
```
# TODO: -Copy the SentimentNetwork class from Project 5 lesson
# -Modify it according to the above instructions
```
Run the following cell to train your network with a small polarity cutoff.
```
mlp = SentimentNetwork(reviews[:-1000],labels[:-1000],min_count=20,polarity_cutoff=0.05,learning_rate=0.01)
mlp.train(reviews[:-1000],labels[:-1000])
```
And run the following cell to test its performance.
```
mlp.test(reviews[-1000:],labels[-1000:])
```
Run the following cell to train your network with a much larger polarity cutoff.
```
mlp = SentimentNetwork(reviews[:-1000],labels[:-1000],min_count=20,polarity_cutoff=0.8,learning_rate=0.01)
mlp.train(reviews[:-1000],labels[:-1000])
```
And run the following cell to test its performance.
```
mlp.test(reviews[-1000:],labels[-1000:])
```
# End of Project 6.
## Watch the next video to see Andrew's solution, then continue on to the next lesson.
# Analysis: What's Going on in the Weights?<a id='lesson_7'></a>
```
mlp_full = SentimentNetwork(reviews[:-1000],labels[:-1000],min_count=0,polarity_cutoff=0,learning_rate=0.01)
mlp_full.train(reviews[:-1000],labels[:-1000])
Image(filename='sentiment_network_sparse.png')
def get_most_similar_words(focus = "horrible"):
    """Rank every vocabulary word by the dot product of its input-weight
    vector with that of `focus` — an embedding-similarity measure over
    the trained `mlp_full` network's weights_0_1 rows.
    """
    focus_vector = mlp_full.weights_0_1[mlp_full.word2index[focus]]
    similarities = Counter()
    for term, term_index in mlp_full.word2index.items():
        similarities[term] = np.dot(mlp_full.weights_0_1[term_index], focus_vector)
    return similarities.most_common()
get_most_similar_words("excellent")
get_most_similar_words("terrible")
import matplotlib.colors as colors
words_to_visualize = list()
for word, ratio in pos_neg_ratios.most_common(500):
if(word in mlp_full.word2index.keys()):
words_to_visualize.append(word)
for word, ratio in list(reversed(pos_neg_ratios.most_common()))[0:500]:
if(word in mlp_full.word2index.keys()):
words_to_visualize.append(word)
pos = 0
neg = 0
colors_list = list()
vectors_list = list()
for word in words_to_visualize:
if word in pos_neg_ratios.keys():
vectors_list.append(mlp_full.weights_0_1[mlp_full.word2index[word]])
if(pos_neg_ratios[word] > 0):
pos+=1
colors_list.append("#00ff00")
else:
neg+=1
colors_list.append("#000000")
from sklearn.manifold import TSNE
tsne = TSNE(n_components=2, random_state=0)
words_top_ted_tsne = tsne.fit_transform(vectors_list)
p = figure(tools="pan,wheel_zoom,reset,save",
toolbar_location="above",
title="vector T-SNE for most polarized words")
source = ColumnDataSource(data=dict(x1=words_top_ted_tsne[:,0],
x2=words_top_ted_tsne[:,1],
names=words_to_visualize,
color=colors_list))
p.scatter(x="x1", y="x2", size=8, source=source, fill_color="color")
word_labels = LabelSet(x="x1", y="x2", text="names", y_offset=6,
text_font_size="8pt", text_color="#555555",
source=source, text_align='center')
p.add_layout(word_labels)
show(p)
# green indicates positive words, black indicates negative words
```
| github_jupyter |
#### Importing the libraries
```
import pandas as pd
import numpy as np
```
#### Reading the csv as dataframe
```
df = pd.read_csv('googleplaystore_user_reviews.csv')
df.head(5)
```
*We will perform sentiment analysis on Google Play store user reviews*
#### Renaming the column
```
df = df.rename(columns={'Translated_Review':'Reviews'})
df.columns
```
*The column **Translated_Review** has been renamed to **Reviews**.*
#### Checking information of the dataset
```
df.info()
```
*There are a total of 64295 records and 5 columns in the dataset*
```
df.isnull().sum()
```
*There are 26868 missing values in **Reviews** column and 26863 missing values in **Sentiment**, **Sentiment_Polarity** and **Sentiment_Subjectivity** columns of the dataset.*
#### Checking for duplicate records
```
df.duplicated(subset=None, keep='first').sum()
```
*There are 33616 duplicate rows*
```
df.shape
df = df[df.duplicated(df.columns.tolist(), keep='first')==False]
df.duplicated(subset=None, keep='first').sum()
df.shape
```
*The duplicate rows have been removed*
```
df.isnull().sum()
```
#### Handling missing values in Reviews column
```
df[(df['Reviews'].isnull())].loc[:, 'App']
```
*The above displays the rows for which there are missing values in **Reviews** column*
```
df[(df['Reviews'].isnull())].loc[:, 'App'].unique()
```
*The above displays the unique Apps for which there are missing values in **Reviews** column*
```
'''
def apps_missing_reviews(dataset):
for values in dataset['App'].unique().tolist():
if dataset[dataset['App']==values].App.count() != dataset[dataset['App']==values].Reviews.count():
print(values, " : ", dataset[dataset['App']==values].App.count(), " : ", dataset[dataset['App']==values].Reviews.count())
apps_missing_reviews(df)
'''
```
*The above displays the Apps for which there are 1 or more missing values in the **Reviews** column*
```
'''
def apps_no_missing_reviews(dataset):
for values in dataset['App'].unique().tolist():
#print(values)
if dataset[dataset['App']==values].App.count() == dataset[dataset['App']==values].Reviews.count():
print(values, " : ", dataset[dataset['App']==values].App.count(), " : ", dataset[dataset['App']==values].Reviews.count())
apps_no_missing_reviews(df)
'''
```
*The above displays the Apps for which there are no missing values in the **Reviews** column*
#### Dropping rows having missing values in the Reviews column
```
df = df[df['Reviews'].isnull()==False]
df
df.isnull().sum()
```
*There are no more missing values in the dataset*
```
df.to_csv('file1.csv', index=False)
df = pd.read_csv('file1.csv')
df
df.isnull().sum()
```
#### Cleaning the punctuation marks
```
import re
import string
#df['Reviews'].unique().tolist()
def remove_punctuation(text):
    """Return `text` with every ASCII punctuation character removed."""
    return "".join(ch for ch in text if ch not in string.punctuation)
df['Reviews'] = df['Reviews'].apply(lambda x: remove_punctuation(x))
def cleaning(dataset, characters):
    """Normalize the 'Reviews' column of `dataset` in place.

    Steps: lowercase; map '*' to 'i' (restores censored words like
    "n*ce"); strip heart-emoji artifacts and digits; collapse runs of
    whitespace to a single space; drop every character in `characters`.

    Args:
        dataset: DataFrame with a 'Reviews' string column.
        characters: iterable of characters to remove outright.

    Returns:
        The same DataFrame, with 'Reviews' cleaned.
    """
    dataset['Reviews'] = dataset['Reviews'].str.lower()
    dataset['Reviews'] = dataset['Reviews'].str.replace('*', 'i')
    # Bug fix: iterate over `dataset`, not the global `df` — the original
    # silently ignored the function's argument for these two steps.
    dataset['Reviews'] = [re.sub(r'โฅ๏ธ|โค|\d', '', e) for e in dataset['Reviews']]
    dataset['Reviews'] = [re.sub(r'\s+', ' ', e) for e in dataset['Reviews']]
    for ch in characters:
        dataset['Reviews'] = dataset['Reviews'].str.replace(ch, '')
    return dataset
char=['โ', 'โ', 'โ']
df = cleaning(df, char)
df.Reviews.unique().tolist()
df
```
*The punctuation marks have been cleaned in the dataset*
#### Tokenizing the Reviews
```
from nltk.tokenize import word_tokenize
df['Reviews'].dropna(inplace=True)
df['Reviews'] = df['Reviews'].apply(word_tokenize)
df
```
*The Reviews have been tokenized i.e. split into tokens/pieces*
#### Removing the stop words from Reviews
```
#nltk.download('stopwords')
from nltk.corpus import stopwords
stopwords = stopwords.words('english')
def remove_stopwords(text, stop_list=None):
    """Filter stop words out of a list of tokens.

    Args:
        text: list of word tokens.
        stop_list: collection of words to drop; defaults to the
            module-level `stopwords` list (NLTK English stop words),
            preserving the original call signature.

    Returns:
        A new list containing the tokens not in the stop list.
    """
    if stop_list is None:
        # Fall back to the notebook-global list so existing callers
        # (e.g. the .apply(lambda ...) below) keep working unchanged.
        stop_list = stopwords
    return [w for w in text if w not in stop_list]
df['Reviews'] = df['Reviews'].apply(lambda x: remove_stopwords(x))
df
```
*The stopwords have been removed from Reviews*
#### Lemmatization of Reviews
```
from nltk.stem import WordNetLemmatizer
lemmatizer = WordNetLemmatizer()
def word_lemmatizer(text):
    """Lemmatize every token in *text* with the shared WordNet lemmatizer."""
    return [lemmatizer.lemmatize(token) for token in text]
# Reduce every token to its WordNet lemma.
df['Reviews'] = df['Reviews'].apply(lambda x: word_lemmatizer(x))
df['Reviews'].head(10)
```
#### Determining sentiment polarity and subjectivity using TextBlob library
```
from textblob import TextBlob
# TextBlob sentiment: polarity in [-1, 1], subjectivity in [0, 1].
pol = lambda x: TextBlob(x).sentiment.polarity
sub = lambda x: TextBlob(x).sentiment.subjectivity
df
# TextBlob expects plain strings, so re-join the token lists first.
df['Reviews'] = df['Reviews'].apply(lambda x: " ".join(x))
df
df['Predicted_Polarity'] = df['Reviews'].apply(pol)
df['Predicted_Subjectivity'] = df['Reviews'].apply(sub)
df.head(10)
# Spot-check predictions for a single app.
df[df['App'] == 'Housing-Real Estate & Property']
```
#### Summary
*The actual and the predicted values of sentiment polarity and subjectivity are very close to each other for the respective applications.*
| github_jupyter |
## Exploratory analysis of the US Airport Dataset
This dataset contains data for 25 years[1995-2015] of flights between various US airports and metadata about these routes. Taken from Bureau of Transportation Statistics, United States Department of Transportation.
Let's see what can we make out of this!
```
%matplotlib inline
import networkx as nx
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import warnings
warnings.filterwarnings('ignore')
# Yearly passenger counts per ORIGIN/DEST route and carrier list.
pass_air_data = pd.read_csv('datasets/passengers.csv')
```
In the `pass_air_data` dataframe we have the number of people that fly every year on a particular route, along with the list of airlines that fly that route.
```
pass_air_data.head()
# Create a MultiDiGraph from this dataset:
# one parallel edge per ORIGIN/DEST row, carrying YEAR/PASSENGERS/carrier data.
passenger_graph = nx.from_pandas_edgelist(pass_air_data, source='ORIGIN', target='DEST', edge_attr=['YEAR', 'PASSENGERS', 'UNIQUE_CARRIER_NAME'], create_using=nx.MultiDiGraph())
```
### Cleveland to Chicago, how many people fly this route?
```
# Edge key 25 — presumably the 2015 record for this route; TODO confirm keying.
passenger_graph['CLE']['ORD'][25]
# (year, passengers) pairs across every parallel CLE -> ORD edge.
temp = [(i['YEAR'], i['PASSENGERS'])for i in dict(passenger_graph['CLE']['ORD']).values()]
x, y = zip(*temp)
plt.plot(x, y)
plt.show()
```
## Exercise
Find the busiest route in 1990 and in 2015 according to number of passengers, and plot the time series of number of passengers on these routes.
You can use the DataFrame instead of working with the network. It will be faster ;)
[5 mins]
```
# Mask of rows holding the yearly passenger maximum.
temp = pass_air_data.groupby(['YEAR'])['PASSENGERS'].transform('max') == pass_air_data['PASSENGERS']
# Combine the masks with & rather than chaining pass_air_data[temp][mask],
# which re-indexes against the full frame and triggers a UserWarning.
pass_air_data[temp & pass_air_data.YEAR.isin([1990, 2015])]
# Time series for the two busiest routes found above.
pass_air_data[(pass_air_data['ORIGIN'] == 'LAX') & (pass_air_data['DEST'] == 'HNL')].plot('YEAR', 'PASSENGERS')
pass_air_data[(pass_air_data['ORIGIN'] == 'LAX') & (pass_air_data['DEST'] == 'SFO')].plot('YEAR', 'PASSENGERS')
```
So let's have a look at the important nodes in this network, i.e. important airports in this network. We'll use pagerank, betweenness centrality and degree centrality.
```
# nx.pagerank(passenger_graph)
def year_network(G, year):
    """Collapse the multigraph *G* to a DiGraph of edges flown in *year*.

    The resulting edge weight is that year's PASSENGERS count.
    """
    yearly = nx.DiGraph()
    for origin, dest, attrs in G.edges(data=True):
        if attrs['YEAR'] == year:
            yearly.add_edge(origin, dest, weight=attrs['PASSENGERS'])
    return yearly
pass_2015 = year_network(passenger_graph, 2015)
len(pass_2015)
len(pass_2015.edges())
# Load in the GPS coordinates of all the airports
lat_long = pd.read_csv('datasets/GlobalAirportDatabase.txt', delimiter=':', header=None)
lat_long[lat_long[1].isin(list(pass_2015.nodes()))]
# Map airport code -> (column 15, column 14); used as (x, y) when plotting,
# so presumably (longitude, latitude) — TODO confirm against the file format.
pos_dict = {}
for airport in lat_long[lat_long[1].isin(list(pass_2015.nodes()))].iterrows():
    pos_dict[airport[1][1]] = (airport[1][15], airport[1][14])
pos_dict
```
## Exercise
Using the position dictionary `pos_dict` create a plot of the airports, only the nodes not the edges.
- As we don't have coordinates for all the airports we have to create a subgraph first.
- Use `nx.subgraph(Graph, iterable of nodes)` to create the subgraph
- Use `nx.draw_networkx_nodes(G, pos)` to map the nodes.
or
- Just use a scatter plot :)
```
plt.figure(figsize=(20, 9))
# Restrict to the airports we actually have coordinates for before drawing.
G = nx.subgraph(pass_2015, pos_dict.keys())
nx.draw_networkx_nodes(G, pos=pos_dict, node_size=10, alpha=0.6, node_color='b')
# nx.draw_networkx_edges(G, pos=pos_dict, width=0.1, arrows=False)
plt.show()
# Alternative: a plain scatter of the same coordinates.
plt.figure(figsize=(20, 9))
x = [i[0] for i in pos_dict.values()]
y = [i[1] for i in pos_dict.values()]
plt.scatter(x, y)
```
### What about degree distribution of this network?
```
# Histogram of normalized degree centrality in the 2015 network.
plt.hist(list(nx.degree_centrality(pass_2015).values()))
plt.show()
```
Let's plot a log log plot to get a better overview of this.
```
from collections import Counter
# Frequency of each degree value — Counter replaces the manual dict tally.
d = Counter(dict(nx.degree(pass_2015)).values())
# Log-log scatter of (degree, count): a straight line suggests a power law.
x = np.log2(list((d.keys())))
y = np.log2(list(d.values()))
plt.scatter(x, y, alpha=0.4)
plt.show()
```
### Directed Graphs

```
G = nx.DiGraph()
G.add_edge(1, 2, weight=1)
# print(G.edges())
# G[1][2]
# G[2][1]
# G.is_directed()
# type(G)
# A star: every other node points at hub node 2.
G.add_edges_from([(1, 2), (3, 2), (4, 2), (5, 2), (6, 2), (7, 2)])
nx.draw_circular(G, with_labels=True)
G.in_degree()
nx.pagerank(G)
# How does pagerank change once 5 also points at 6?
G.add_edge(5, 6)
nx.draw_circular(G, with_labels=True)
nx.pagerank(G)
# ...and once the hub itself has an outgoing edge to 8?
G.add_edge(2, 8)
nx.draw_circular(G, with_labels=True)
nx.pagerank(G)
```
### Moving back to Airports
```
# Top-10 airports by unweighted pagerank, betweenness and degree centrality.
sorted(nx.pagerank(pass_2015, weight=None).items(), key=lambda x:x[1], reverse=True)[:10]
sorted(nx.betweenness_centrality(pass_2015).items(), key=lambda x:x[1], reverse=True)[0:10]
sorted(nx.degree_centrality(pass_2015).items(), key=lambda x:x[1], reverse=True)[0:10]
```
'ANC' is the airport code of Anchorage airport, a place in Alaska, and according to pagerank and betweenness centrality it is the most important airport in this network. Isn't that weird? Thoughts?
related blog post: https://toreopsahl.com/2011/08/12/why-anchorage-is-not-that-important-binary-ties-and-sample-selection/
Let's look at weighted version, i.e taking into account the number of people flying to these places.
```
# Same rankings, now weighted by passenger counts.
sorted(nx.betweenness_centrality(pass_2015, weight='weight').items(), key=lambda x:x[1], reverse=True)[0:10]
sorted(nx.pagerank(pass_2015, weight='weight').items(), key=lambda x:x[1], reverse=True)[0:10]
```
## How reachable is this network?
We calculate the average shortest path length of this network, it gives us an idea about the number of jumps we need to make around the network to go from one airport to any other airport in this network.
```
# nx.average_shortest_path_length(pass_2015)
```
Wait, What??? This network is not connected. That seems like a really stupid thing to do.
```
list(nx.weakly_connected_components(pass_2015))
```
### SPB, SSB, AIK anyone?
```
pass_air_data[(pass_air_data['YEAR'] == 2015) & (pass_air_data['ORIGIN'] == 'AIK')]
# Drop the disconnected airports so the network becomes weakly connected.
pass_2015.remove_nodes_from(['SPB', 'SSB', 'AIK'])
nx.is_weakly_connected(pass_2015)
nx.is_strongly_connected(pass_2015)
```
### Strongly vs weakly connected graphs.
```
G = nx.DiGraph()
# A 3-cycle: strongly connected until the dangling edge 3 -> 4 is added.
G.add_edge(1, 2)
G.add_edge(2, 3)
G.add_edge(3, 1)
nx.draw(G)
G.add_edge(3, 4)
nx.draw(G)
nx.is_strongly_connected(G)
list(nx.strongly_connected_components(pass_2015))
pass_air_data[(pass_air_data['YEAR'] == 2015) & (pass_air_data['DEST'] == 'TSP')]
# strongly_connected_component_subgraphs() was removed in networkx 2.4;
# build the subgraph of the largest SCC instead. .copy() makes it mutable
# (subgraph views are frozen), which later cells rely on when adding edges.
largest_scc = max(nx.strongly_connected_components(pass_2015), key=len)
pass_2015_strong = pass_2015.subgraph(largest_scc).copy()
len(pass_2015_strong)
nx.average_shortest_path_length(pass_2015_strong)
```
#### Exercise! (Actually this is a game :D)
How can we decrease the avg shortest path length of this network?
Think of an effective way to add new edges to decrease the avg shortest path length.
Let's see if we can come up with a nice way to do this, and the one who gets the highest decrease wins!!!
The rules are simple:
- You can't add more than 2% of the current edges( ~500 edges)
[10 mins]
```
# Greedy heuristic: connect each of the 20 highest-degree airports to 25
# airports it does not already reach, then re-measure the average path length.
sort_degree = sorted(nx.degree_centrality(pass_2015_strong).items(), key=lambda x:x[1], reverse=True)
top_count = 0
for n, v in sort_degree:
    count = 0
    for node, val in sort_degree:
        if node != n:
            # Only add the edge if it is genuinely new.
            if node not in pass_2015_strong.adj[n]:
                pass_2015_strong.add_edge(n, node)
                count += 1
        if count == 25:
            break
    top_count += 1
    if top_count == 20:
        break
nx.average_shortest_path_length(pass_2015_strong)
```
### What about airlines? Can we find airline specific reachability?
```
passenger_graph['JFK']['SFO'][25]
def str_to_list(a):
    """Split a stringified Python list like "['x', 'y']" into its raw items."""
    inner = a[1:-1]
    return inner.split(', ')
# Print each carrier that flew JFK -> SFO on this edge record.
for i in str_to_list(passenger_graph['JFK']['SFO'][25]['UNIQUE_CARRIER_NAME']):
    print(i)
%%time
# Attach a parsed 'airlines' list to every edge of the multigraph.
for origin, dest in passenger_graph.edges():
    for key in passenger_graph[origin][dest]:
        passenger_graph[origin][dest][key]['airlines'] = str_to_list(passenger_graph[origin][dest][key]['UNIQUE_CARRIER_NAME'])
```
### Exercise
Play around with United Airlines network.
- Extract a network for United Airlines flights from the metagraph `passenger_graph` for the year 2015
- Make sure it's a weighted network, where weight is the number of passengers.
- Find the number of airports and connections in this network
- Find the most important airport, according to PageRank and degree centrality.
```
# Weighted subnetwork of routes United flew; edge key 25 is presumably the
# 2015 record (consistent with the earlier cells) — TODO confirm.
united_network = nx.DiGraph()
for origin, dest in passenger_graph.edges():
    if 25 in passenger_graph[origin][dest]:
        if "'United Air Lines Inc.'" in passenger_graph[origin][dest][25]['airlines']:
            united_network.add_edge(origin, dest, weight=passenger_graph[origin][dest][25]['PASSENGERS'])
len(united_network)
len(united_network.edges())
# Most important airports in the United subnetwork.
sorted(nx.pagerank(united_network, weight='weight').items(), key=lambda x:x[1], reverse=True)[0:10]
sorted(nx.degree_centrality(united_network).items(), key=lambda x:x[1], reverse=True)[0:10]
```
### Exercise
We are in Cleveland so what should we do?
Obviously we will make a time series of number of passengers flying out of Cleveland with United Airlines over the years.
There are 2 ways of doing it.
- Create a new multidigraph specifically for this exercise.
OR
- exploit the `pass_air_data` dataframe.
```
# Yearly passengers out of Cleveland on United, straight from the dataframe.
pass_air_data[(pass_air_data.ORIGIN == 'CLE') &
(pass_air_data.UNIQUE_CARRIER_NAME.str.contains('United Air Lines Inc.'))
].groupby('YEAR')['PASSENGERS'].sum().plot()
```
| github_jupyter |
Before we begin, we will change a few settings to make the notebook look a bit prettier
```
%%html
<style> body {font-family: "Calibri", cursive, sans-serif;} </style>
```
<img src="https://github.com/IKNL/guidelines/blob/master/resources/logos/iknl_nl.png?raw=true" width=200 align="right">
# 01 - Pre-processing
Perform some pre-processing on the (synthetic) data
**Important**: include all your imports here and here only.
## 1.1 Read the data
**Tip**: take a look at [`pandas read_csv`](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_csv.html)
## 1.2 Rename the columns
Rename the columns of the original `DataFrame` as follows:
| Original name | New name |
|---------------|----------|
| `rn` | `nkr_id` |
| `zid` | `disease_id` |
| `eid` | `episode_id` |
| `gesl` | `gender` |
| `gebdat` | `birth_date` |
| `incdat` | `incidence_date` |
| `topo` | `topo` |
| `sublok` | `sublocalization` |
| `topog` | `topography` |
| `later` | `lateralization` |
| `morf` | `morphology` |
| `gedrag` | `behaviour` |
| `diffgr` | `grade` |
| `tumorsoort` | `tumor_type` |
| `basisd` | `diagnosis_basis` |
| `ct` | `t_clin` |
| `cn` | `n_clin` |
| `cm` | `m_clin` |
| `pt` | `t_patho` |
| `pn` | `n_patho` |
| `pm` | `m_patho` |
| `stadiumc` | `stage_clin` |
| `stadium` | `stage_patho` |
| `vitdat` | `contact_date` |
| `ovldat` | `death_date` |
This will make your code much easier to read (and more accessible to
non-Dutch speakers as well 🙂).
**Tip**: take a look at [`pandas rename`](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.rename.html)
As you get familiarized with the data, be sure to complete the table found in `../data/README.md`.
## 1.3 Check data integrity
Data are never perfect. Let's see how imperfect this dataset is.
First, generate a visualization of the missing values.
**Tip**: take a look at the [`missingno` package](https://github.com/ResidentMario/missingno)
How many missing values (in absolute numbers and as a percentage)
are there for each column? Generate a bar plot showing this.
What methods do you know for dealing with missing values?
What are the advantages and disadvantages of each of them?
* ...
For now, we will keep it simple. Drop all the rows that contain missing data. How many records were lost because of this?
*In theory*, `topography` should be the concatenation of `topo` and
`sublocalization`. For what number/percentage of records
is this actually true?
## 1.4 Feature engineering
Let's make a small feature engineering. In all cases, make sure that you
only add these new features (i.e., columns) only if they aren't already present
in the `DataFrame`.
* Create a new column called `age_incidence`, which
contains the age of the patient at the moment of diagnosis. Use the
information contained in the columns `incidence_year` and `birth_date`.
* Create two new columns called `stage_clin_num` and `stage_patho_num`.
Each of them will have the numerical part of the data contained
in the columns `stage_clin` and `state_patho`, respectively.
* Create a new column called `death`, which has a `False` if the patient
is still alive or a `True` if the patient already passed away.
## 1.5 Decode features
Many features in the NKR are encoded. While this will be useful later on,
it can also make the data cryptic at this point.
Decode the column `gender` as indicated in the following table
| Original value | New value |
|----------------|-----------------|
| 1 | `male` |
| 2 | `female` |
| 3 | `hermaphrodite` |
## 1.6 Save the data
Lastly, save the modified DataFrame as `df_preprocessing.csv` in the
`data_processed` directory.
**Tip**: take a look at [`pandas to_csv`](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.to_csv.html)
Make sure that you generate a new section in the data `README.md` corresponding to this new data file. Remember to include information of the new features.
| github_jupyter |
## Before we begin
We need to install the mysql client software. My apologies that I forgot to install it in the virtual machine this year!
open a Terminal window and type:
sudo apt-get update
sudo apt-get install mysql-client
This will install the client software we need to connect to our Docker mySQL database.
# Introduction to Databases and Structured Query Language (SQL)
As Data Scientists, you will frequently want to store data in an organized, structured manner that allows you to do complex queries. Because you are good Data Scientists, [**you do not use Excel!!!**](https://bmcbioinformatics.biomedcentral.com/articles/10.1186/1471-2105-5-80)
In this course, we will only discuss **Relational Databases**, because those are the most common in bioinformatics. (There are other kinds!!). So when I say "database" I mean "relational database".
Databases are used to store information in a manner that, when used properly, is:
a) highly structured
b) constrained (i.e. detects errors)
c) transactional (i.e. can undo a command if it discovers a problem)
d) indexed (for speed of search)
e) searchable
The core concept of a database is a **Table**. Tables contain one particular "kind" of information (e.g. a Table could represent a Student, a University, a Book, or a portion of a Clinical Record).
Tables contain **Rows** and **Columns** where, generally, every column represents a "feature" of that information (e.g. a Student table might have **["name", "gender", "studentID", "age"]** as its columns/features). Every row represents an "individual", and their values for each feature (e.g. a Row in a Student table might have **["Mark Wilkinson", "M", "163483", "35"]** as its values.
A Database may have many Tables that represent various kinds of related information. For example, a library database might have a Books table, a Publishers table, and a Locations table. A Book has a Publisher, and a Location, so the tables need to be connected to one another. This is achieved using **keys**. Generally, every row (individual) in a table has a unique identifier (generally a number), and this is called its **key**. Because it is unique, it is possible to refer unambiguously to that individual record.
I think the easiest way to learn about databases and SQL is to start building one! We will use the MySQL Docker Container that we created in the previous lesson. We are going to create a Germplasm database (seed stocks). It will contain information about the seed (its amount, its harvest date, its location), the germplasm (its species, the allele it carries), and about the genetics related to that allele (the gene_id, the gene name, the protein name, and a link to the GenBank record)
(if that Docker Container isn't running, please **docker start course-mysql** now!)
**Note: This Jupyter Notebook is running the Python kernel. This allows us to use some nice tools in Python (the sql extension and SqlMagic) that provide access to the mysql database server from inside of the Notebook. You don't need to know any Python to do this. Note also that you can do exactly the same commands in your Terminal window.**
To connect to the MySQL Docker Container from your terminal window, type:
mysql -h 127.0.0.1 -P 3306 --protocol=tcp -u root -p
(then enter your password 'root' to access the database)
<pre>
</pre>
# SQL
Structured Query Language is a way to interact with a database server. It is used to create, delete, edit, fill, and query tables and their contents.
First, we will learn the SQL commands that allow us to explore the database server, and create new databases and tables.. Later, we will use SQL to put information into those tables. Finally, we will use SQL to query those tables.
## Python SQL Extension
The commands below are used to connect to the MySQL server in our Docker Container. You need to execute them ONCE. In every subsequent Juputer code window, you will have access to the database.
all SQL commands are preceded by
%sql
(**only in the Python extension! Not in your terminal window!**)
all SQL commands end with a ";"
```
# UNCOMMENT THIS SECTION IF YOU ARE RUNNING THE DOCKER IMAGE OF THIS COURSE
#import os
#os.system("echo jovyan | sudo -S service mysql start")
#%load_ext sql
#%sql mysql+pymysql://root@127.0.0.1:3306/mysql
#%sql DROP DATABASE IF EXISTS germplasm;
# ===============================================================================================
# COMMENT THIS SECTION IF YOU ARE RUNNING THE DOCKER IMAGE OF THIS COURSE
%load_ext sql
#%config SqlMagic.autocommit=False
# Connect as root/root to the MySQL server exposed by the Docker container.
%sql mysql+pymysql://root:root@127.0.0.1:3306/mysql
#%sql mysql+pymysql://anonymous@ensembldb.ensembl.org/homo_sapiens_core_92_38
#%sql DROP DATABASE IF EXISTS germplasm;
## show databases
**show databases** is the command to see what databases exist in the server. The ones you see now are the default databases that MySQL uses to organize itself. _**DO NOT TOUCH THESE DATABASES**_ **EVER EVER EVER EVER**
```
%sql show databases;
```
## create database
The command to create a database is **create database** (surprise! ;-) )
We will create a database called "germplasm"
```
%sql create database germplasm;
%sql show databases
```
## use database_name
the **use** command tells the server which database you want to interact with. Here we will use the database we just created
```
%sql use germplasm;
```
## show tables
The show tables command shows what tables the database contains (right now, none!)
```
%sql show tables;
```
# Planning your data structure
This is the hard part. What does our data "look like" in a well-structured, relational format?
Starting simply:
<center>stock table</center>
amount | date | location
--- | --- | ---
5 | 10/5/2013 | Room 2234
9.8 | 12/1/2015 | Room 998
-----------------------------
<center>germplasm table</center>
taxonid | allele
--- | ---
4150 | def-1
3701 | ap3
--------------------------------
<center>gene table</center>
gene | gene_name | embl
--- | --- | ---
DEF | Deficiens | https://www.ebi.ac.uk/ena/data/view/AB516402
AP3 | Apetala3 | https://www.ebi.ac.uk/ena/data/view/AF056541
## add indexes
It is usually a good idea to have an index column on every table, so let's add that first:
<center>stock table</center>
id | amount | date | location
--- | --- | --- | ---
1 | 5 | 10/5/2013 | Room 2234
2 | 9.8 | 12/1/2015 | Room 998
-----------------------------
<center>germplasm table</center>
id | taxonid | allele
--- | --- | ---
1 | 4150 | def-1
2 | 3701 | ap3
--------------------------------
<center>gene table</center>
id | gene | gene_name | embl
--- | --- | --- | ---
1 | DEF | Deficiens | https://www.ebi.ac.uk/ena/data/view/AB516402
2 | AP3 | Apetala3 | https://www.ebi.ac.uk/ena/data/view/AF056541
## find linkages
* Every germplasm has a stock record. This is a 1:1 relationship.
* Every germplasm represents a specific gene. This is a 1:1 relationship
So every germplasm must point to the index of a stock, and also to the index of a gene
Adding that into our tables we have:
<center>stock table</center>
id | amount | date | location
--- | --- | --- | ---
1 | 5 | 10/5/2013 | Room 2234
2 | 9.8 | 12/1/2015 | Room 998
-----------------------------
<center>germplasm table</center>
id | taxonid | allele | stock_id | genetics_id
--- | --- | --- | --- | ---
1 | 4150 | def-1 | 2 | 1
2 | 3701 | ap3 | 1 | 2
--------------------------------
<center>gene table</center>
id | gene | gene_name | embl
--- | --- | --- | ---
1 | DEF | Deficiens | https://www.ebi.ac.uk/ena/data/view/AB516402
2 | AP3 | Apetala3 | https://www.ebi.ac.uk/ena/data/view/AF056541
## data types in MySQL
I will not discuss [all MySQL Datatypes](https://dev.mysql.com/doc/refman/5.7/en/data-types.html), but we will look at only the ones we need. We need:
* Integers (type INTEGER)
* Floating point (type FLOAT)
* Date (type DATE [in yyyy-mm-dd format](https://dev.mysql.com/doc/refman/5.7/en/datetime.html) )
* Characters (small, variable-length --> type [VARCHAR(x)](https://dev.mysql.com/doc/refman/5.7/en/char.html) )
<pre>
</pre>
## create table
tables are created using the **create table** command (surprise!)
The [syntax of create table](https://dev.mysql.com/doc/refman/5.7/en/create-table.html) can be quite complicated, but we are only going to do the most simple examples.
create table table_name (column_name column_definition, column_name column_definition, ........)
column definitions include the data-type, and other options like if it is allowed to be null(blank), or if it should be treated as an "index" column.
Examples are easier to understand than words... so here are our table definitions:
```
#%sql drop table stock
# Seed stock: auto-increment primary key, amount (grams), harvest date, location.
%sql CREATE TABLE stock(id INTEGER NOT NULL AUTO_INCREMENT PRIMARY KEY, amount FLOAT NOT NULL, date DATE NOT NULL, location VARCHAR(20) NOT NULL);
%sql DESCRIBE stock
#%sql drop table germplasm
# Germplasm links a taxon/allele to its stock and gene rows via their ids.
%sql CREATE TABLE germplasm(id INTEGER NOT NULL AUTO_INCREMENT PRIMARY KEY, taxonid INTEGER NOT NULL, allele VARCHAR(10) NOT NULL, stock_id INTEGER NOT NULL, gene_id INTEGER NOT NULL);
%sql DESCRIBE germplasm
#%sql drop table gene
# Gene metadata: symbol, full name and EMBL record URL.
%sql CREATE TABLE gene(id INTEGER NOT NULL AUTO_INCREMENT PRIMARY KEY, gene VARCHAR(10) NOT NULL, gene_name VARCHAR(30) NOT NULL, embl VARCHAR(70) NOT NULL);
%sql DESCRIBE gene
%sql show tables;
```
## loading data
There are many ways to import data into MySQL. If you have data in another (identical) MySQL database, you can "dump" the data, and then import it directly. If you have tab or comma-delimited (tsv, csv) you can **sometimes** import it directly from these formats. You can also enter data using SQL itself. This is usually the safest way, when you have to keep multiple tables synchronized (as we do, since the germplasm table is "linked to" the other two tables)
## insert into
The command to load data is:
insert into table_name (field1, field2, field3) values (value1, value2, value3)
Now... what data do we need to add, in what order?
The germplasm table needs the ID number from both the gene table and the stock table, so we cannot enter the germplasm information first. We must therefore enter the gene and stock data first.
```
# NOTE - we DO NOT put data into the "id" column! This column is auto_increment, so it "magically" creates its own value
# gene and stock rows must exist before germplasm rows can reference their ids.
%sql INSERT INTO gene (gene, gene_name, embl) VALUES ('DEF', "Deficiens", 'https://www.ebi.ac.uk/ena/data/view/AB516402');
%sql INSERT INTO gene (gene, gene_name, embl) VALUES ('AP3', "Apetala3", 'https://www.ebi.ac.uk/ena/data/view/AF056541');
%sql SELECT last_insert_id(); # just to show you that this function exists!
%sql INSERT INTO stock(amount, date, location) VALUES (5, '2013-05-10', 'Room 2234');
%sql INSERT INTO stock(amount, date, location) VALUES (9.8, '2015-01-12', 'Room 998');
```
#### Almost ready!
We now need to know the index numbers from the stock and gene databases that correspond to the data for the germplasm table. For this, we need to learn another function: **select**
## Select statements
**Select** is the command used to query the database. We will look in more detail later, but now all you need to know is that the most basic structure is:
select * from table_name
```
%sql SELECT * FROM stock; # notice that the id number was automatically generated
%sql SELECT * FROM gene;
```
<pre>
</pre>
Just a reminder, our germplasm data is:
id | taxonid | allele | stock_id | gene_id
--- | --- | --- | --- | --- |
1 | 4150 |def-1
2 | 3701 | ap3
We need to connect the *germplasm* table **gene_id** to the appropriate **id** from the *gene* table. i.e.
def-1 allele ---> DEF gene (id = 1)
ap3 allele ---> AP3 gene (id = 2)
We need to connect the *germplasm* table **stock_id** to the appropriate **id** from the *stock* table. i.e.
def-1 allele ---> Room 998 (id = 2)
ap3 allele ---> Room 2234 (id = 1)
Now we are ready to do our ("manual") insert of data into the *germplasm* table:
```
# stock_id / gene_id come from the lookups above: def-1 -> stock 2, gene 1.
%sql INSERT INTO germplasm (taxonid, allele, stock_id, gene_id) VALUES (4150, 'def-1', 2, 1 );
%sql INSERT INTO germplasm (taxonid, allele, stock_id, gene_id) VALUES (3701, 'ap3', 1, 2 );
%sql SELECT * FROM germplasm;
```
## SQL UPDATE & SQL WHERE
Imagine that we are going to plant some seed from our def-1 germplasm. We need to update the *stock* record to show that there is now less seed available. We do this using an [UPDATE statement](https://www.techonthenet.com/mysql/update.php). UPDATE is used to change the values of a particular column or set of columns. But we don't want to change **all** of the values in that column, we only want to change the values for the DEF stock. For that, we need a WHERE clause.
WHERE allows you to set the conditions for an update. The general form is:
UPDATE table_name SET column = value WHERE column = value;
We will sow 1g of seed from DEF (stock.id = 2) (note that I am now starting to use the MySQL syntax for referring to *table*.**column** - the tablename followed by a "." followed by the column name).
The simplest UPDATE statement is:
```
%sql UPDATE stock SET amount = 8.8 WHERE id = 2;
%sql SELECT * FROM stock;
```
<pre>
</pre>
This simple solution is not very "friendly"... you are asking the database user to already know what the remaining amount is! It would be better if we simply reduced the amount by 1g.
That is done using in-line equations, like this:
```
%sql UPDATE stock SET amount = amount-1 WHERE id = 2;
%sql SELECT * FROM stock;
```
<pre>
</pre>
## Using indexes and 'joining' tables
The UPDATE we did is still not very friendly! My stock table does not have any information about what gene or allele is in that stock, so we have to **know** that the stock record is stock.id=2. This is bad!
It would be better if we could say "plant 1 gram of the stock that represents gene record DEF", but that information exists in two different tables. How do we join tables?
This is the main purpose of the "id" column. Note that, when we defined that column, we said that it is "auto_increment, not null, primary key", meaning that every record must have an id, and every id must be unique (NOTE: an auto-increment id *should never be manually modified/added*!!! <span style="color:red;">You Have Been Warned!!!</span>). Being a 'primary key' means that this column was intended to be the "pointer" from other tables in the database (like our germplasm table, that points to the id of the stock, and the id of the gene, tables)
When using UPDATE with multiple tables, we must name all of the tables, and then make the connection between them in the "where" clause, using *table*.**column** notation.
The update clause below shows how this is done (a "\" character means that the command continues on the next line):
```
# Join stock to germplasm in the WHERE clause so the stock can be addressed
# by allele name instead of by its numeric id.
%sql UPDATE stock, germplasm SET stock.amount = stock.amount-1 \
    WHERE \
    stock.id = germplasm.stock_id \
    AND \
    germplasm.allele = 'def-1';
%sql SELECT * FROM stock;
```
<pre>
</pre>
# Challenges for you!
1. (hard) when we plant our seeds, we should update both the quantity, and the date. What does that UPDATE statement look like?
2. (very hard!) when we plant our seed, instead of using the allele designation (def-1) I want to use the gene designation (DEF). This query spans **all three tables**. What does the UPDATE statement look like?
<span style="visibility:hidden;">
Challenge 1
%sql UPDATE stock,germplasm SET stock.amount=stock.amount-1, stock.date="2018-09-06" WHERE \
stock.id = germplasm.stock_id AND \
germplasm.allele='def-1';
Challenge2
%sql UPDATE stock,germplasm,gene SET stock.amount=stock.amount-0.2, stock.date="2018-09-06" WHERE \
stock.id = germplasm.stock_id AND \
gene.id = germplasm.gene_id AND \
gene.gene='DEF';
</span>
```
# challenge 1
# challenge 2
```
<pre>
</pre>
# SELECT queries
Querying the data is the most common operation on a database. You have seen simple SELECT queries, but now we will look at more complex ones.
The general structure is:
SELECT table1.column1, ... FROM table1, ... WHERE condition1 [AND|OR] condition2....
You probably understand this enough to show you the query that will show you all of the data:
```
%sql SELECT * FROM gene, stock, germplasm WHERE \
germplasm.stock_id = stock.id AND \
germplasm.gene_id = gene.id;
```
# Dealing with missing records - JOIN clauses
**Credit for the Venn diagrams used in this section goes to [Flatiron School](https://learn.co/) and are linked from their tutorial on [JOINs in SQL](https://learn.co/lessons/sql-complex-joins-readme) published under the [CC-BY-NC 4.0 license](https://creativecommons.org/licenses/by-nc-sa/4.0/)**
Your first database will probably be complete, and perfect! You will be very proud of it! ;-)
Over time, things will happen. Records will be deleted, and records will be added where there is incomplete information, for example, a germplasm stock record where the gene is unknown. You should think about these situations, because there are NO RULES telling you what you should do! You have to make a decision - a *policy* for your database - and you should follow that policy like a religion!
For example:
* If there is no known gene for a given germplasm, what does in the stock.allele column? What goes in the stock.gene_id column? What goes in the gene table? Discuss....
* If a stock is fully planted - no more seeds - what do you do? Should you delete the record? If you do, then your germplasm table is linked through the stock_id to a stock that no longer exists. If you gather more seed in the future, is that the same stock? (answer: NO, it is not!!!).... so do you update the existing stock record to say there is now 10g of seed?
* Remember, you are trapped! In your table definition you declared all columns to be "NOT NULL", meaning that if the row exists, there must be a value for each column in the row! What do you do if there isn't a value to put into that column?
* zero?
* What if you change the column definition to allow NULL?
* What does NULL mean? What does zero mean?
* How does software respond to NULL or zero values? (you don't know this yet, but we can talk about it)
For our database, I am going to suggest this policy:
1. if we don't know the allele, we put "unknown" in the allele column
2. We put '0' into the gene_id column (auto_increment starts with 1 in the gene table, so a 0 will match nothing!)
3. We DO NOT add a gene record at all.
Let's add a record like this one to our database:
```
%sql INSERT INTO stock(amount, date, location) VALUES (23, '2018-05-12', 'Room 289');
# gene_id 0 matches no gene row: this is the "unknown allele" policy in action.
%sql INSERT INTO germplasm (taxonid, allele, stock_id, gene_id) VALUES (4150, 'unknown', LAST_INSERT_ID(), 0 );
# note that I am using LAST_INSERT_ID to capture the auto_increment value from the stock table insert
# this ensures that the germplasm and stock tables are 'synchronized'
%sql SELECT * FROM germplasm;
#%sql SELECT * FROM gene;
```
<pre>
</pre>
That looks good! ...but we have just created a problem! gene_id=0 doesn't exist in the gene table, so what happens with our beautiful SELECT query that we just created above?
```
# implicit (comma-style) INNER join: rows whose gene_id matches no gene.id are dropped
%sql SELECT * FROM gene, germplasm WHERE \
germplasm.gene_id = gene.id;
```
### OH CRAP!!!! We lost our data!!
Our "unknown" germplasm has disappeared!! Or has it?
The problem is that stock.gene_id = gene.id failed for the "unknown" record, and so it isn't reflected in the output from the query. THIS IS BAD, if (for example) you were trying to take an inventory of all germplasm stocks you had!
How do we solve this? The answer is to use SQL's "JOIN" instruction.
There are four kinds of JOIN: INNER, LEFT OUTER, RIGHT OUTER, and FULL OUTER.
The join we are doing with our current SELECT query is an INNER join. Using a Venn diagram, the query looks like this:
<a href='https://learn.co/lessons/sql-complex-joins-readme'><img src='http://readme-pics.s3.amazonaws.com/Inner%20Join%20Venn%20Diagram.png' width=300px/></a>
Effectively, the intersection where BOTH the 'left' (gene.id) and 'right' (germplasm.id) are true.
You can duplicate this behavior using the INNER JOIN instruction. The syntax is a little bit different - look:
```
# explicit INNER JOIN — equivalent to the comma-join above
%sql SELECT * FROM gene INNER JOIN germplasm ON \
germplasm.gene_id = gene.id;
```
<pre>
</pre>
What we want is a query that allows one side to be "missing/absent/NULL", but the other side to exist.
Perhaps we need a "LEFT JOIN"?
... gene LEFT JOIN germplasm ...
Again, in this situation, "LEFT" means the table on the Left side of the SQL JOIN statement (gene)
As a Venn diagram, Left joins look like this:
<a href='https://learn.co/lessons/sql-complex-joins-readme'><img src='http://readme-pics.s3.amazonaws.com/Left%20Outer%20Join%20Venn%20Diagram.png' width=300px/></a>
What it means is that, **in addition to the perfect matches at the intersection**, the record on the left (the gene record) should be included in the result set, **even if it doesn't match** with a germplasm record (on the right). Is that the solution to our problem?
```
# LEFT JOIN keeps every gene row even without a matching germplasm — not the direction we need
%sql SELECT * FROM gene LEFT JOIN germplasm ON \
germplasm.gene_id = gene.id;
```
## PFFFFFF!! No, that was not the solution
Why?
What about a RIGHT JOIN?
... gene RIGHT JOIN germplasm ...
<a href='https://learn.co/lessons/sql-complex-joins-readme'><img src='http://readme-pics.s3.amazonaws.com/Right%20Outer%20Join%20Venn%20Diagram.png' width=300px/></a>
Again, in this situation, "RIGHT" means the table on the Right side of the SQL JOIN statement (germplasm). So the germplasm record should be included in the result set, even if a gene record does not exist. ...that sounds much more likely to be correct!
```
# RIGHT JOIN keeps every germplasm row even when no gene record matches (gene columns come back NULL)
%sql SELECT * FROM gene RIGHT JOIN germplasm ON \
germplasm.gene_id = gene.id;
```
### Voila!!
## Your turn
1) Create another record, where in this case, there is no **stock**, but there is a germplasm and a gene record.
2) Create the JOIN query between germplasm and stock that includes all germplasm records
<pre>
</pre>
# Other SELECT "magic"
You can do many other useful things with SELECTS, such as:
## COUNT()
If you want to count the number of records returned from a query, use the **COUNT() AS your_name** function:
```
# COUNT(*) with an AS alias to label the single-value result column
%sql SELECT COUNT(*) AS "Number Of Matches" FROM gene RIGHT JOIN germplasm ON \
germplasm.gene_id = gene.id;
```
## SUM(), AVG(), MAX()
You can do mathematical functions on results also, for example, you can take the SUM of a column - how much seed do we have in total?
(look carefully at this query! It's quite complicated!):
```
# total seed on hand: RIGHT JOIN keeps all germplasm, INNER JOIN attaches each stock row
%sql SELECT SUM(stock.amount) FROM gene RIGHT JOIN germplasm ON \
germplasm.gene_id = gene.id \
INNER JOIN stock ON germplasm.stock_id = stock.id;
```
<pre>
</pre>
Or you could take the **average AVG()** of a column - what is the average quantity of seed we have?
```
# average seed quantity across all stocks
%sql SELECT AVG(stock.amount) FROM gene RIGHT JOIN germplasm ON \
germplasm.gene_id = gene.id \
INNER JOIN stock ON germplasm.stock_id = stock.id;
```
<pre>
</pre>
Or you could take the **max MAX()** value of a column - what is the largest quantity of seed we have in our stocks?
```
# largest single stock quantity
%sql SELECT MAX(stock.amount) FROM gene RIGHT JOIN germplasm ON \
germplasm.gene_id = gene.id \
INNER JOIN stock ON germplasm.stock_id = stock.id;
```
## ORDER BY
You can put your results in a specific order:
```
# ORDER BY sorts the result set; DESC = largest first, ASC = smallest first
%sql SELECT gene.gene_name, stock.amount FROM gene RIGHT JOIN germplasm ON \
germplasm.gene_id = gene.id \
INNER JOIN stock ON germplasm.stock_id = stock.id \
ORDER BY stock.amount DESC; # change this to ASC
```
<pre>
</pre>
## Conclusion
1) Databases are a very powerful way to store structured information - far far better than Excel Spreadsheets!
2) It will take you years (literally, years!) to become an expert in MySQL! We have only explored the most common functions here.
```
%sql drop database germplasm;
```
| github_jupyter |
500 hPa Vorticity Advection
===========================
Plot a 500-hPa map, calculating vorticity advection using MetPy calculations.
Beyond just plotting 500-hPa level data, this uses calculations from `metpy.calc` to find
the vorticity and vorticity advection. Currently, this needs an extra helper function to
calculate the distance between lat/lon grid points.
Imports
```
from datetime import datetime
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import metpy.calc as mpcalc
import numpy as np
import scipy.ndimage as ndimage
from metpy.units import units
from netCDF4 import num2date
from siphon.catalog import TDSCatalog
```
Data Acquisition
---------------
```
# analysis time of the archived NAM run to fetch
dt = datetime(2016, 4, 16, 18)
# Assemble our URL to the THREDDS Data Server catalog,
# and access our desired dataset within via NCSS
base_url = 'https://www.ncei.noaa.gov/thredds/catalog/model-namanl-old/'
cat = TDSCatalog(f'{base_url}{dt:%Y%m}/{dt:%Y%m%d}/catalog.xml')
ncss = cat.datasets[f'namanl_218_{dt:%Y%m%d}_{dt:%H}00_000.grb'].subset()
# Query for Latest GFS Run
# NOTE(review): the dataset requested above is the NAM analysis, not GFS — comment looks stale
query = ncss.query()
query.time(dt)
query.accept('netcdf')
query.variables('Geopotential_height_isobaric',
                'u-component_of_wind_isobaric',
                'v-component_of_wind_isobaric')
query.add_lonlat()
# Obtain our queried data
ds = ncss.get_data(query)
lon = ds.variables['lon'][:]
lat = ds.variables['lat'][:]
# valid time is read from the time coordinate attached to the height variable
times = ds.variables[ds.variables['Geopotential_height_isobaric'].dimensions[0]]
vtime = num2date(times[:].squeeze(), units=times.units)
# index of the 500 level in the isobaric coordinate
lev_500 = np.where(ds.variables['isobaric'][:] == 500)[0][0]
hght_500 = ds.variables['Geopotential_height_isobaric'][0, lev_500, :, :]
# smooth the height field before contouring, then attach units
hght_500 = ndimage.gaussian_filter(hght_500, sigma=3, order=0) * units.meter
uwnd_500 = units('m/s') * ds.variables['u-component_of_wind_isobaric'][0, lev_500, :, :]
vwnd_500 = units('m/s') * ds.variables['v-component_of_wind_isobaric'][0, lev_500, :, :]
```
Begin Data Calculations
-----------------------
```
# grid spacing between lat/lon points, needed for the finite differences below
dx, dy = mpcalc.lat_lon_grid_deltas(lon, lat)
# Coriolis parameter f on the grid
f = mpcalc.coriolis_parameter(np.deg2rad(lat)).to(units('1/sec'))
# absolute vorticity = relative vorticity + f
# NOTE(review): the dim_order keyword was removed in newer MetPy releases — confirm the pinned MetPy version
avor = mpcalc.vorticity(uwnd_500, vwnd_500, dx, dy, dim_order='yx') + f
# smooth for plotting; gaussian_filter strips units, so re-attach them
avor = ndimage.gaussian_filter(avor, sigma=3, order=0) * units('1/s')
# advection of absolute vorticity by the 500-hPa wind, scaled by 1e9 for display
vort_adv = mpcalc.advection(avor, [uwnd_500, vwnd_500], (dx, dy), dim_order='yx') * 1e9
```
Map Creation
------------
```
# Set up Coordinate System for Plot and Transforms
# data CRS is rebuilt from the projection metadata stored in the file
dproj = ds.variables['LambertConformal_Projection']
globe = ccrs.Globe(ellipse='sphere', semimajor_axis=dproj.earth_radius,
                   semiminor_axis=dproj.earth_radius)
datacrs = ccrs.LambertConformal(central_latitude=dproj.latitude_of_projection_origin,
                                central_longitude=dproj.longitude_of_central_meridian,
                                standard_parallels=[dproj.standard_parallel],
                                globe=globe)
# display CRS centered on CONUS
plotcrs = ccrs.LambertConformal(central_latitude=45., central_longitude=-100.,
                                standard_parallels=[30, 60])
# two rows: the map on top, a thin colorbar strip below
fig = plt.figure(1, figsize=(14., 12))
gs = gridspec.GridSpec(2, 1, height_ratios=[1, .02], bottom=.07, top=.99,
                       hspace=0.01, wspace=0.01)
ax = plt.subplot(gs[0], projection=plotcrs)
# Plot Titles
plt.title(r'500-hPa Heights (m), AVOR$*10^5$ ($s^{-1}$), AVOR Adv$*10^8$ ($s^{-2}$)',
          loc='left')
plt.title(f'VALID: {vtime}', loc='right')
# Plot Background
ax.set_extent([235., 290., 20., 58.], ccrs.PlateCarree())
ax.coastlines('50m', edgecolor='black', linewidth=0.75)
ax.add_feature(cfeature.STATES, linewidth=.5)
# Plot Height Contours (solid black, 60 m interval)
clev500 = np.arange(5100, 6061, 60)
cs = ax.contour(lon, lat, hght_500.m, clev500, colors='black', linewidths=1.0,
                linestyles='solid', transform=ccrs.PlateCarree())
plt.clabel(cs, fontsize=10, inline=1, inline_spacing=10, fmt='%i',
           rightside_up=True, use_clabeltext=True)
# Plot Absolute Vorticity Contours (dashed grey, scaled by 1e5)
clevvort500 = np.arange(-9, 50, 5)
cs2 = ax.contour(lon, lat, avor*10**5, clevvort500, colors='grey',
                 linewidths=1.25, linestyles='dashed', transform=ccrs.PlateCarree())
plt.clabel(cs2, fontsize=10, inline=1, inline_spacing=10, fmt='%i',
           rightside_up=True, use_clabeltext=True)
# Plot Colorfill of Vorticity Advection (zero level excluded so it stays white)
clev_avoradv = np.arange(-30, 31, 5)
cf = ax.contourf(lon, lat, vort_adv.m, clev_avoradv[clev_avoradv != 0], extend='both',
                 cmap='bwr', transform=ccrs.PlateCarree())
cax = plt.subplot(gs[1])
cb = plt.colorbar(cf, cax=cax, orientation='horizontal', extendrect='True', ticks=clev_avoradv)
cb.set_label(r'$1/s^2$', size='large')
# Plot Wind Barbs
# Transform Vectors and plot wind barbs.
ax.barbs(lon, lat, uwnd_500.m, vwnd_500.m, length=6, regrid_shape=20,
         pivot='middle', transform=ccrs.PlateCarree())
```
| github_jupyter |
```
"""
Snowflake Batch Prediction API Snowflake S3 scoring job
v1.0 Mike Taveirne (doyouevendata) 3/21/2020
"""
import pandas as pd
import requests
import time
from pandas.io.json import json_normalize
import snowflake.connector
import my_creds
#from imp import reload
#reload(my_creds)
# datarobot parameters
# secrets and environment-specific ids are kept out of the notebook in my_creds
API_KEY = my_creds.API_KEY
USERNAME = my_creds.USERNAME
DEPLOYMENT_ID = my_creds.DEPLOYMENT_ID
DATAROBOT_KEY = my_creds.DATAROBOT_KEY
# replace with the load balancer for your prediction instance(s)
DR_PREDICTION_HOST = my_creds.DR_PREDICTION_HOST
DR_APP_HOST = 'https://app.datarobot.com'
DR_MODELING_HEADERS = {'Content-Type': 'application/json', 'Authorization': 'token %s' % API_KEY}
# snowflake parameters
SNOW_ACCOUNT = my_creds.SNOW_ACCOUNT
SNOW_USER = my_creds.SNOW_USER
SNOW_PASS = my_creds.SNOW_PASS
SNOW_DB = 'TITANIC'
SNOW_SCHEMA = 'PUBLIC'
# ETL parameters
# JOB_NAME keys the etl_history watermark row and names the S3 files
JOB_NAME = 'pass_scoring'
```
### Retrieve or Create S3 Credentials
```
# get a saved credential set, return None if not found
def dr_get_catalog_credentials(name, cred_type):
    """Look up a stored DataRobot catalog credential set by name and type.

    Parameters:
        name: display name of the credential set in the DataRobot catalog.
        cred_type: credential type, either 'basic' or 's3'.

    Returns:
        The matching credentialId string, or None if no match (or on error).
    """
    if cred_type not in ('basic', 's3'):
        print('credentials type must be: basic, s3 - value passed was {ct}'.format(ct=cred_type))
        return None
    credentials_id = None
    response = requests.get(
        DR_APP_HOST + '/api/v2/credentials/',
        headers=DR_MODELING_HEADERS,
    )
    if response.status_code == 200:
        # use the json_normalize imported at the top of the file;
        # pd.io.json.json_normalize is deprecated in pandas >= 1.0
        df = json_normalize(response.json()['data'])[['credentialId', 'name', 'credentialType']]
        # filter once on both name and type instead of repeating the mask
        matches = df[(df['name'] == name) & (df['credentialType'] == cred_type)]['credentialId']
        if matches.size > 0:
            credentials_id = matches.iloc[0]
    else:
        print('Request failed; http error {code}: {content}'.format(code=response.status_code, content=response.content))
    return credentials_id
# create credentials set
def dr_create_catalog_credentials(name, cred_type, user, password, token=None):
    """Create a credential set in the DataRobot catalog.

    Parameters:
        name: display name for the new credential set.
        cred_type: 'basic' (user/password) or 's3' (AWS access key/secret).
        user: username, or AWS access key id for 's3'.
        password: password, or AWS secret access key for 's3'.
        token: optional AWS session token, used only for 's3'.

    Returns:
        The new credentialId on success (HTTP 201), otherwise None.
    """
    if cred_type not in ('basic', 's3'):
        print('credentials type must be: basic, s3 - value passed was {ct}'.format(ct=cred_type))
        return None
    # build the request body; named `payload` to avoid shadowing the json module name
    if cred_type == 'basic':
        payload = {
            "credentialType": cred_type,
            "user": user,
            "password": password,
            "name": name
        }
    else:  # 's3'
        payload = {
            "credentialType": cred_type,
            "awsAccessKeyId": user,
            "awsSecretAccessKey": password,
            "name": name
        }
        # the session token is optional for S3 credentials
        if token is not None:
            payload["awsSessionToken"] = token
    response = requests.post(
        url=DR_APP_HOST + '/api/v2/credentials/',
        headers=DR_MODELING_HEADERS,
        json=payload
    )
    if response.status_code == 201:
        return response.json()['credentialId']
    print('Request failed; http error {code}: {content}'.format(code=response.status_code, content=response.content))
    # explicit failure value (the original fell off the end and returned None implicitly)
    return None
# get or create a credential set
def dr_get_or_create_catalog_credentials(name, cred_type, user, password, token=None):
    """Return the id of an existing credential set, creating it if absent.

    Bug fix: the original hard-coded token=None in the create call, silently
    dropping any AWS session token supplied by the caller; it is now passed through.
    """
    cred_id = dr_get_catalog_credentials(name, cred_type)
    if cred_id is None:
        return dr_create_catalog_credentials(name, cred_type, user, password, token=token)
    return cred_id
# NOTE(review): this stores Snowflake username/password under an 's3'
# credential type — the Batch Prediction API expects an AWS access key id
# and secret key here; confirm my_creds supplies the intended values.
credentials_id = dr_get_or_create_catalog_credentials('s3_community',
's3', my_creds.SNOW_USER, my_creds.SNOW_PASS)
```
### Extract Data to S3 via Snowflake
```
# create a connection
ctx = snowflake.connector.connect(
    user=SNOW_USER,
    password=SNOW_PASS,
    account=SNOW_ACCOUNT,
    database=SNOW_DB,
    schema=SNOW_SCHEMA,
    protocol='https'
)
# create a cursor
cur = ctx.cursor()
# execute sql to get start/end timestamps to use
# (incremental window: from the last recorded watermark up to "now")
sql = "select last_ts_scored_through, current_timestamp::TIMESTAMP_NTZ cur_ts " \
"from etl_history " \
"where job_nm = '{job}' " \
"order by last_ts_scored_through desc " \
"limit 1 ".format(job=JOB_NAME)
cur.execute(sql)
# fetch results into dataframe
df = cur.fetch_pandas_all()
start_ts = df['LAST_TS_SCORED_THROUGH'][0]
end_ts = df['CUR_TS'][0]
# execute sql to dump data into a single file in S3 stage bucket
# AWS single file snowflake limit 5 GB
# rows selected are those created/updated within the [start_ts, end_ts) window
sql = "COPY INTO @S3_SUPPORT/titanic/community/" + JOB_NAME + ".csv " \
"from " \
"( " \
" select passengerid, pclass, name, sex, age, sibsp, parch, ticket, fare, cabin, embarked " \
" from passengers_500k_ts " \
" where nvl(updt_ts, crt_ts) >= '{start}' " \
" and nvl(updt_ts, crt_ts) < '{end}' " \
") " \
"file_format = (format_name='default_csv' compression='none') header=true overwrite=true single=true;".format(start=start_ts, end=end_ts)
cur.execute(sql)
```
### Create DataRobot Session and Running Batch Prediction API Job
```
# authenticated session for the DataRobot public API
session = requests.Session()
session.headers = {
    'Authorization': 'Bearer {}'.format(API_KEY)
}
# S3 locations the Batch Prediction API reads from and writes to
INPUT_FILE = 's3://'+ my_creds.S3_BUCKET + '/titanic/community/' + JOB_NAME + '.csv'
OUTPUT_FILE = 's3://'+ my_creds.S3_BUCKET + '/titanic/community/' + JOB_NAME + '_scored.csv'
job_details = {
    'deploymentId': DEPLOYMENT_ID,
    # carry the key column through so scores can be joined back to source rows
    'passthroughColumns': ['PASSENGERID'],
    'numConcurrent': 4,
    "predictionInstance" : {
        "hostName": DR_PREDICTION_HOST,
        "datarobotKey": DATAROBOT_KEY
    },
    'intakeSettings': {
        'type': 's3',
        'url': INPUT_FILE,
        'credentialId': credentials_id
    },
    'outputSettings': {
        'type': 's3',
        'url': OUTPUT_FILE,
        'credentialId': credentials_id
    }
}
# submit the batch prediction job (the next cell expects HTTP 202 on success)
response = session.post(
    DR_APP_HOST + '/api/v2/batchPredictions',
    json=job_details
)
```
### Monitor S3 Scoring Status and Return Control Upon Completion
```
if response.status_code == 202:
    job = response.json()
    print('queued batch job: {}'.format(job['links']['self']))
    # poll the job's self link until it leaves the INITIALIZING state
    while job['status'] == 'INITIALIZING':
        time.sleep(3)
        response = session.get(job['links']['self'])
        response.raise_for_status()
        job = response.json()
    print('completed INITIALIZING')
    # then poll through the RUNNING state, if the job entered it
    if job['status'] == 'RUNNING':
        while job['status'] == 'RUNNING':
            time.sleep(3)
            response = session.get(job['links']['self'])
            response.raise_for_status()
            job = response.json()
        print('completed RUNNING')
    print('status is now {status}'.format(status=job['status']))
    # any terminal status other than COMPLETED is a failure: dump the job's logs
    if job['status'] != 'COMPLETED':
        for i in job['logs']:
            print(i)
else:
    print('Job submission failed; http error {code}: {content}'.format(code=response.status_code, content=response.content))
```
### Truncate and Reload STG Staging Table with Results
```
# multi-statement executions
# https://docs.snowflake.com/en/user-guide/python-connector-api.html#execute_string
# truncate and load STG schema table with scored results
# (full replace of the staging table with this run's scored file)
sql = "truncate titanic.stg.PASSENGERS_SCORED_BATCH_API; " \
" copy into titanic.stg.PASSENGERS_SCORED_BATCH_API from @S3_SUPPORT/titanic/community/" + JOB_NAME + "_scored.csv" \
" FILE_FORMAT = 'DEFAULT_CSV' ON_ERROR = 'ABORT_STATEMENT' PURGE = FALSE;"
ctx.execute_string(sql)
```
### Update Presentation Target Table With Results
```
# update target presentation table and ETL history table in transaction
# (begin/commit keeps the score update and the watermark insert atomic,
#  so a failed run does not advance last_ts_scored_through)
sql = \
"begin; " \
"update titanic.public.passengers_500k_ts trg " \
"set trg.survival = src.survived_1_prediction " \
"from titanic.stg.PASSENGERS_SCORED_BATCH_API src " \
"where src.passengerid = trg.passengerid; " \
"insert into etl_history values ('{job}', '{run_through_ts}'); " \
"commit; ".format(job=JOB_NAME, run_through_ts=end_ts)
ctx.execute_string(sql)
```
| github_jupyter |
```
import pandas as pd
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import StandardScaler
from sklearn import ensemble
from sklearn.utils import shuffle
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
from sklearn.metrics import accuracy_score
from sklearn import metrics
from sklearn.model_selection import train_test_split
from sklearn.multioutput import MultiOutputRegressor
from sklearn.ensemble import RandomForestRegressor
from matplotlib import pyplot as plt
from sklearn.model_selection import cross_val_score
from sklearn.decomposition.pca import PCA
from sklearn.preprocessing import scale
def find_nearest(array,value):
    """Return the element of *array* closest in absolute distance to *value*.

    Ties go to the first occurrence (np.argmin returns the first minimum).
    """
    distances = np.abs(array - value)
    return array[distances.argmin()]
def refine_data(arr1, arr2, arr3):
    """Pick, per row, the arr1 value nearest to the row's target in arr3.

    For each row i, finds the element of arr1[i] closest to arr3[i][0] and
    pairs it with the arr2[i] entry at the same position. Uses the argmin
    index directly instead of the original find_nearest + tolist().index()
    rescan; behavior is identical (argmin and index() both take the first
    occurrence, and equal values have equal distances) but each row is a
    single O(m) pass instead of two.

    Parameters:
        arr1: 2-D array of candidate values, one row per sample.
        arr2: 2-D array aligned elementwise with arr1.
        arr3: 2-D array whose first column holds each row's target value.

    Returns:
        (n, 2) ndarray of [nearest_arr1_value, matching_arr2_value] rows.
    """
    refined_data = []
    for i in range(arr1.shape[0]):
        idx = np.abs(arr1[i] - arr3[i][0]).argmin()
        refined_data.append([arr1[i][idx], arr2[i][idx]])
    return np.array(refined_data)
def get_metrics(arr1,arr2):
    """Return (mean squared error, R^2 score) comparing arr1 (truth) with arr2 (predictions)."""
    return mean_squared_error(arr1, arr2), r2_score(arr1, arr2)
# features and targets live in separate CSVs; concatenate column-wise
d1 = pd.read_csv('X_data_t3',sep = ',')
d2 = pd.read_csv('y_data',sep = ',')
d3 = pd.concat([d1,d2],axis=1)
#data = d3.loc[(d3['maxRT_ab'] < 1e6) & (d3['maxRT_ab'] > 3e4)]
# NOTE(review): `data` (the filtered frame) is never used below — X and y are
# built from the unfiltered d3; confirm whether the filter should apply.
data = d3.loc[(d3['maxRT_ab'] > 3e4)]
#data = d3.loc[(d3['maxRT_ab'] < 1e6)]
#d3 = shuffle(d3)
#X = np.array(d3[['maxRT_t','x_start_t','diff_start','diff_end']])
#X = np.array(d3[['maxRT_ab','maxRT_baseline','x_start_ab','x_end_ab']])
#X = np.array(d3[['maxRT_t','maxRT_ab','maxRT_baseline','x_start_ab','x_end_ab']])
# feature matrix: retention-time / abundance / peak-boundary columns
X = np.array(d3[['rt','maxRT_t','maxRT_ab','maxRT_base','x_start_t','x_start_ab','diff_start','x_end_t','x_end_ab','diff_end','width']])
#X = np.array(d3[['rt','maxRT_ab','x_start_t','x_end_t','width']])
# two regression targets, predicted jointly
y = np.array(d3[['y_left_t','y_left_ab']])
# raw trace arrays (one row per sample) used elsewhere for refinement
time = np.array(pd.read_csv('time',sep = ',',header = None).dropna(axis = 'columns').round(3))
abundance = np.array(pd.read_csv('abundance',sep = ',',header = None).dropna(axis = 'columns'))
baseline = np.array(pd.read_csv('baseline',sep = ',',header = None).dropna(axis = 'columns'))
#time = time[0:420]
#abundance = abundance[0:420]
#baseline = baseline[0:420]
# quick sanity check of the target-abundance range and feature matrix shape
np.amax(d3['maxRT_ab']), np.amin(d3['maxRT_ab']), X.shape
# fixed random_state for a reproducible 80/20 split
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.2,random_state=42)
scaler_train = MinMaxScaler(feature_range=(0,1))
#scaler_train = StandardScaler()
scaler_train.fit(X_train)
X_train =scaler_train.transform(X_train)
# NOTE(review): fitting a *second* scaler on the test set is data leakage and
# puts train/test features on different scales — the conventional approach is
# scaler_train.transform(X_test); confirm before changing reported results.
scaler_test = MinMaxScaler(feature_range=(0,1))
#scaler_test = StandardScaler()
scaler_test.fit(X_test)
X_test = scaler_test.transform(X_test)
# forest hyperparameters
n_estimators = 1000
max_depth = 1000
random_state = 1000
ite = 1
#mse_t,r2_t = [[]for i in range(len(n_estimators))], [[] for i in range(len(n_estimators))]
#mse_ab,r2_ab = [[]for i in range(len(n_estimators))], [[] for i in range(len(n_estimators))]
# n_jobs=-1 trains trees on all available cores
params = {'n_estimators': n_estimators, 'max_depth': max_depth, 'min_samples_split': 20,
'n_jobs': -1, 'random_state':random_state}
clf = RandomForestRegressor(**params)
# 5-fold cross-validation on the training split (scores computed but not printed here)
scores = cross_val_score(clf, X_train, y_train, cv=5)
clf.fit(X_train,y_train)
y_predict = clf.predict(X_test).round(3)
# per-target metrics: column 0 = y_left_t, column 1 = y_left_ab
mse_1,r2_1 = get_metrics(y_test[:,0],y_predict[:,0])
mse_2,r2_2 = get_metrics(y_test[:,1],y_predict[:,1])
print(mse_1,r2_1,mse_2,r2_2)
# display truth vs prediction for the first target
y_test[:,0]
y_predict[:,0]
# NOTE(review): sklearn.decomposition.pca is a private module path removed in
# newer scikit-learn; the public import is `from sklearn.decomposition import PCA`.
from sklearn.decomposition.pca import PCA
from sklearn.preprocessing import scale
# PCA() with no n_components keeps all components (rotation only, no reduction)
pca = PCA()
X_reduced = pca.fit_transform(X_train)
scores = cross_val_score(clf, X_reduced, y_train, cv=5)
print(scores)
clf.fit(X_reduced,y_train)
# NOTE(review): X_test is not projected with pca.transform() before predicting;
# shapes happen to match because all components are kept, but the test features
# are in a different basis than the training data — confirm intent.
y_predict = clf.predict(X_test).round(3)
mse_1,r2_1 = get_metrics(y_test[:,0],y_predict[:,0])
mse_2,r2_2 = get_metrics(y_test[:,1],y_predict[:,1])
print(r2_1,r2_2)
print(mse_1,mse_2)
y_predict[:,0]
# cumulative percent of variance explained by the leading components
np.cumsum(np.round(pca.explained_variance_ratio_, decimals=4)*100)
```
| github_jupyter |
## ENGR 213 Lecture Notes
### Lecture 10
#### Shear Stress:
Now that we have some sense of shear and moment diagrams and how they relate to the normal stresses across the face of a cross section of a beam it's time to add another layer to our thinking. An important characteristic of the normal stress across the cross section of a beam is that it is **NOT** constant. That means that there are different normal forces between adjacent longitudinal slices of the beam. This results in what are called **longitudinal shear** forces that can also lead to failure of the beam. Be sure to distinguish the longitudinal shear forces from transverse shear forces which are perpendicular to the axis of the beam.
<img src="https://raw.githubusercontent.com/smithrockmaker/ENGR213/main/images/beamBendingVisual.jpg" width="600"/>
### Longitudinal Shear: Conceptual
If you think of a beam as being made up of a lot of thin layers of the material the concept of transverse shear is relatively apparent. Take a stack of paper and flex the stack (bending a beam) and notice that the ends of the paper stick out relative to each other. This illustrates that the individual layers of paper have slipped (sheared) relative to each other. If the stack of paper were a solid block then the individual layers would be prevented from slipping leading to shear stress between the layers. A simplified illustration is shown below.
<img src="https://raw.githubusercontent.com/smithrockmaker/ENGR213/master/images/beamLongitudinalShear.jpg" width="600"/>
### Shear Formula
The derivation of the shear formula is pretty cool but quite complex and I'm not sure how much it adds to our understanding. Like many of the formulae we have developed it involves considering a small cross section of the beam and slicing it parallel to the axis of beam. Then it's just a 'simple' matter of setting the sum of the forces and moments to 0 and solving for the shear stress.
The **shear formula** is deceptively simple. Knowing meaning of each term and how to determine it is the challenge.
.$$\large \tau_{longitudinal} = \tau = \frac{V\: Q}{I\: t}$$
V: V is the transverse shear force at the particular cross section we are considering. Note that this means that any point on the shear diagram where V = 0 there are no longitudinal shear stresses anywhere along that cross section.
I: I is the 2nd moment of inertia around the neutral axis of the cross section of the beam. This is the same moment of inertia we used in the flexure formula for normal stresses in the beam. Nothing new here.
t: t is the width of the shear surface. In general this is the width of the beam at the location of interest. Note that because it is in the denominator of the expression the shear stress is higher where the beam is narrower, typically on the vertical web.
Q: Q is the first moment of the area above the shear surface we are interested in. Consider the graphic below...
<img src="https://raw.githubusercontent.com/smithrockmaker/ENGR213/master/images/beamQDef.png" width="300"/>
For simple geometries $Q = y_s A_s$ while more generally...
.$$\large Q = \int_{y_{shear}}^{y_{max}} dA = y_s A_s$$
Notice that Q is **NOT** 0 for the neutral axis because $y_s$ is the distance from the neutral axis to the centroid of the area above the shear surface. In fact, Q reaches its maximum value at the neutral axis (an interesting proof to consider).
#### Conceptually:
An important conceptual feature is that the shear stress is generally a maximum on the neutral axis. This is precisely the opposite of the normal stress in a beam.
#### Beam Design:
I don't have a great personal sense of the relationship between the shear strength of materials used in beams vs the normal strength. The fact that it is generally prudent to put holes through floor joists (a wooden I beam) at the neutral axis is a suggestion that shear stresses are not the primary concern (see image below).
<img src="https://raw.githubusercontent.com/smithrockmaker/ENGR213/master/images/TJIPenetrations.png" width="600"/>
A clear counterexample are wooden beams made up of laminates of smaller stock (called glulams) where the shear stresses on the glue joints can lead to delamination of the beams. Here's a related example that suggests shear failure though it might be more complex.
<img src="https://raw.githubusercontent.com/smithrockmaker/ENGR213/master/images/beamDelamination.jpg" width="600"/>
| github_jupyter |
# Inference and Validation
Now that you have a trained network, you can use it for making predictions. This is typically called **inference**, a term borrowed from statistics. However, neural networks have a tendency to perform *too well* on the training data and aren't able to generalize to data that hasn't been seen before. This is called **overfitting** and it impairs inference performance. To test for overfitting while training, we measure the performance on data not in the training set called the **validation** set. We avoid overfitting through regularization such as dropout while monitoring the validation performance during training. In this notebook, I'll show you how to do this in PyTorch.
As usual, let's start by loading the dataset through torchvision. You'll learn more about torchvision and loading data in a later part. This time we'll be taking advantage of the test set which you can get by setting `train=False` here:
```python
testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform)
```
The test set contains images just like the training set. Typically you'll see 10-20% of the original dataset held out for testing and validation with the rest being used for training.
```
import torch
from torchvision import datasets, transforms
# Define a transform to normalize the data
# (mean 0.5, std 0.5 maps pixel values from [0, 1] to [-1, 1])
transform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.5,), (0.5,))])
# Download and load the training data
trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True)
# Download and load the test data
testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True)
```
Here I'll create a model like normal, using the same one from my solution for part 4.
```
from torch import nn, optim
import torch.nn.functional as F
class Classifier(nn.Module):
    """Feed-forward Fashion-MNIST classifier: 784 -> 256 -> 128 -> 64 -> 10.

    The forward pass flattens the input and emits log-probabilities
    (log_softmax), so it pairs with nn.NLLLoss.
    """

    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(784, 256)
        self.fc2 = nn.Linear(256, 128)
        self.fc3 = nn.Linear(128, 64)
        self.fc4 = nn.Linear(64, 10)

    def forward(self, x):
        # flatten any (batch, ...) input to (batch, 784)
        flat = x.view(x.shape[0], -1)
        hidden = F.relu(self.fc1(flat))
        hidden = F.relu(self.fc2(hidden))
        hidden = F.relu(self.fc3(hidden))
        # log-probabilities over the 10 classes
        return F.log_softmax(self.fc4(hidden), dim=1)
```
The goal of validation is to measure the model's performance on data that isn't part of the training set. Performance here is up to the developer to define though. Typically this is just accuracy, the percentage of classes the network predicted correctly. Other options are [precision and recall](https://en.wikipedia.org/wiki/Precision_and_recall#Definition_(classification_context)) and top-5 error rate. We'll focus on accuracy here. First I'll do a forward pass with one batch from the test set.
```
# untrained model; one batch from the test set
model = Classifier()
images, labels = next(iter(testloader))
# Get the class probabilities
# (the model returns log-probabilities, so exponentiate to recover probabilities)
ps = torch.exp(model(images))
# Make sure the shape is appropriate, we should get 10 class probabilities for 64 examples
print(ps.shape)
```
With the probabilities, we can get the most likely class using the `ps.topk` method. This returns the $k$ highest values. Since we just want the most likely class, we can use `ps.topk(1)`. This returns a tuple of the top-$k$ values and the top-$k$ indices. If the highest value is the fifth element, we'll get back 4 as the index.
```
# top-1 prediction: the highest-probability class (and its probability) per example
top_p, top_class = ps.topk(1, dim=1)
# Look at the most likely classes for the first 10 examples
print(top_class[:10,:])
```
Now we can check if the predicted classes match the labels. This is simple to do by equating `top_class` and `labels`, but we have to be careful of the shapes. Here `top_class` is a 2D tensor with shape `(64, 1)` while `labels` is 1D with shape `(64)`. To get the equality to work out the way we want, `top_class` and `labels` must have the same shape.
If we do
```python
equals = top_class == labels
```
`equals` will have shape `(64, 64)`, try it yourself. What it's doing is comparing the one element in each row of `top_class` with each element in `labels` which returns 64 True/False boolean values for each row.
```
# reshape labels to (64, 1) so the comparison is elementwise, not a (64, 64) broadcast
equals = top_class == labels.view(top_class.shape)
equals
```
Now we need to calculate the percentage of correct predictions. `equals` has binary values, either 0 or 1. This means that if we just sum up all the values and divide by the number of values, we get the percentage of correct predictions. This is the same operation as taking the mean, so we can get the accuracy with a call to `torch.mean`. If only it was that simple. If you try `torch.mean(equals)`, you'll get an error
```
RuntimeError: mean is not implemented for type torch.ByteTensor
```
This happens because `equals` has type `torch.ByteTensor` but `torch.mean` isn't implemented for tensors with that type. So we'll need to convert `equals` to a float tensor. Note that when we take `torch.mean` it returns a scalar tensor, to get the actual value as a float we'll need to do `accuracy.item()`.
```
# cast ByteTensor -> FloatTensor first, since torch.mean is not implemented for ByteTensor
accuracy = torch.mean(equals.type(torch.FloatTensor))
print(f'Accuracy: {accuracy.item()*100}%')
```
The network is untrained so it's making random guesses and we should see an accuracy around 10%. Now let's train our network and include our validation pass so we can measure how well the network is performing on the test set. Since we're not updating our parameters in the validation pass, we can speed up our code by turning off gradients using `torch.no_grad()`:
```python
# turn off gradients
with torch.no_grad():
# validation pass here
for images, labels in testloader:
...
```
>**Exercise:** Implement the validation loop below and print out the total accuracy after the loop. You can largely copy and paste the code from above, but I suggest typing it in because writing it out yourself is essential for building the skill. In general you'll always learn more by typing it rather than copy-pasting. You should be able to get an accuracy above 80%.
```
model = Classifier()
# NLLLoss pairs with the model's log_softmax output
criterion = nn.NLLLoss()
optimizer = optim.Adam(model.parameters(), lr=0.003)
epochs = 30
steps = 0
train_losses, test_losses = [], []
for e in range(epochs):
    running_loss = 0
    for images, labels in trainloader:
        # flatten here; the model's forward also flattens, so this is redundant but harmless
        images = images.view(images.shape[0], -1)
        optimizer.zero_grad()
        log_ps = model(images)
        loss = criterion(log_ps, labels)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
    else:
        # for/else: this branch runs once after the inner loop finishes each epoch
        test_loss = 0
        accuracy = 0
        ## TODO: Implement the validation pass and print out the validation accuracy
        # gradients are not needed for validation, so disable autograd for speed
        with torch.no_grad():
            for images, labels in testloader:
                images = images.view(images.shape[0], -1)
                log_ps_test = model(images)
                # NOTE(review): accumulates a 0-d tensor rather than .item() — works,
                # but test_losses ends up holding tensors instead of plain floats
                test_loss += criterion(log_ps_test, labels)
                output = torch.exp(log_ps_test)
                # top-1 class per example, compared against reshaped labels
                top_p, top_class = output.topk(1, dim=1)
                equals = top_class == labels.view(top_class.shape)
                accuracy += torch.mean(equals.type(torch.FloatTensor))
        # per-epoch averages over the number of batches
        train_losses.append(running_loss/len(trainloader))
        test_losses.append(test_loss/len(testloader))
        print("Epoch: {}/{}.. ".format(e+1, epochs),
              "Training Loss: {:.3f}.. ".format(running_loss/len(trainloader)),
              "Test Loss: {:.3f}.. ".format(test_loss/len(testloader)),
              "Test Accuracy: {:.3f}".format(accuracy/len(testloader)))
```
## Overfitting
If we look at the training and validation losses as we train the network, we can see a phenomenon known as overfitting.
<img src='assets/overfitting.png' width=450px>
The network learns the training set better and better, resulting in lower training losses. However, it starts having problems generalizing to data outside the training set leading to the validation loss increasing. The ultimate goal of any deep learning model is to make predictions on new data, so we should strive to get the lowest validation loss possible. One option is to use the version of the model with the lowest validation loss, here the one around 8-10 training epochs. This strategy is called *early-stopping*. In practice, you'd save the model frequently as you're training then later choose the model with the lowest validation loss.
The most common method to reduce overfitting (outside of early-stopping) is *dropout*, where we randomly drop input units. This forces the network to share information between weights, increasing its ability to generalize to new data. Adding dropout in PyTorch is straightforward using the [`nn.Dropout`](https://pytorch.org/docs/stable/nn.html#torch.nn.Dropout) module.
```python
class Classifier(nn.Module):
    """Feed-forward Fashion-MNIST classifier with dropout regularization.

    Architecture: 784 -> 256 -> 128 -> 64 -> 10 with ReLU activations,
    dropout (p=0.2) after each hidden layer, and a log-softmax output.
    """

    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(784, 256)
        self.fc2 = nn.Linear(256, 128)
        self.fc3 = nn.Linear(128, 64)
        self.fc4 = nn.Linear(64, 10)
        # Dropout module with 0.2 drop probability
        self.dropout = nn.Dropout(p=0.2)

    def forward(self, x):
        # Flatten whatever comes in to (batch, features).
        hidden = x.view(x.shape[0], -1)
        # Hidden layers share one pattern: linear -> ReLU -> dropout.
        for layer in (self.fc1, self.fc2, self.fc3):
            hidden = self.dropout(F.relu(layer(hidden)))
        # Output layer gets no dropout; return log-probabilities.
        return F.log_softmax(self.fc4(hidden), dim=1)
```
During training we want to use dropout to prevent overfitting, but during inference we want to use the entire network. So, we need to turn off dropout during validation, testing, and whenever we're using the network to make predictions. To do this, you use `model.eval()`. This sets the model to evaluation mode where the dropout probability is 0. You can turn dropout back on by setting the model to train mode with `model.train()`. In general, the pattern for the validation loop will look like this, where you turn off gradients, set the model to evaluation mode, calculate the validation loss and metric, then set the model back to train mode.
```python
# turn off gradients
with torch.no_grad():
# set model to evaluation mode
model.eval()
# validation pass here
for images, labels in testloader:
...
# set model back to train mode
model.train()
```
> **Exercise:** Add dropout to your model and train it on Fashion-MNIST again. See if you can get a lower validation loss or higher accuracy.
```
## TODO: Define your model with dropout added
from torch import nn
import torch.nn.functional as F
class Classifier(nn.Module):
    """Fashion-MNIST classifier: three dropout-regularized hidden layers.

    Layers 784 -> 256 -> 128 -> 64 -> 10, ReLU activations, p=0.2
    dropout after each hidden layer; forward returns log-probabilities.
    """

    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(784, 256)
        self.fc2 = nn.Linear(256, 128)
        self.fc3 = nn.Linear(128, 64)
        self.fc4 = nn.Linear(64, 10)
        self.dropout = nn.Dropout(p=0.2)

    def forward(self, x):
        # Collapse all trailing dimensions into one feature axis.
        out = x.view(x.shape[0], -1)
        out = self.dropout(F.relu(self.fc1(out)))
        out = self.dropout(F.relu(self.fc2(out)))
        out = self.dropout(F.relu(self.fc3(out)))
        # No dropout on the output layer; log-softmax over the classes.
        return F.log_softmax(self.fc4(out), dim=1)
## TODO: Train your model with dropout, and monitor the training progress with the validation loss and accuracy
from torch import optim
from tqdm import tqdm

# NLLLoss pairs with the model's log-softmax output.
model = Classifier()
criterion = nn.NLLLoss()
optimizer = optim.Adam(model.parameters(), lr = 0.005)

epochs = 30
steps = 0

train_losses, test_losses = [], []
for e in tqdm(range(epochs)):
    running_loss = 0
    for images, labels in trainloader:
        optimizer.zero_grad()
        log_ps = model(images)
        loss = criterion(log_ps, labels)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
    else:
        # for/else: this branch runs once after the epoch's batches finish.
        test_loss = 0
        accuracy = 0
        # Gradients are not needed for validation.
        with torch.no_grad():
            model.eval()  # evaluation mode: dropout probability becomes 0
            for images, labels in testloader:
                log_ps_test = model(images)
                test_loss += criterion(log_ps_test, labels)
                # Convert log-probabilities back to probabilities.
                output = torch.exp(log_ps_test)
                top_p, top_class = output.topk(1, dim = 1)
                # Reshape labels so the elementwise comparison lines up.
                equals = top_class == labels.view(top_class.shape)
                accuracy += torch.mean(equals.type(torch.FloatTensor))
        model.train()  # re-enable dropout for the next training epoch
        train_losses.append(running_loss/len(trainloader))
        test_losses.append(test_loss/len(testloader))
        print("Epoch: {}/{}.. ".format(e+1, epochs),
              "Training Loss: {:.3f}.. ".format(running_loss/len(trainloader)),
              "Test Loss: {:.3f}.. ".format(test_loss/len(testloader)),
              "Test Accuracy: {:.3f}".format(accuracy/len(testloader)))
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import matplotlib.pyplot as plt
plt.plot(train_losses, label='Training loss')
plt.plot(test_losses, label='Validation loss')
plt.legend(frameon=False)
```
## Inference
Now that the model is trained, we can use it for inference. We've done this before, but now we need to remember to set the model in inference mode with `model.eval()`. You'll also want to turn off autograd with the `torch.no_grad()` context.
```
# Import helper module (should be in the repo)
import helper

# Test out your network!
model.eval()  # inference mode: disables dropout

dataiter = iter(testloader)
# Use the builtin next(); the .next() method was Python 2 only and is
# not provided by modern DataLoader iterators.
images, labels = next(dataiter)
img = images[0]
# Convert 2D image to 1D vector
img = img.view(1, 784)

# Calculate the class probabilities (softmax) for img
with torch.no_grad():
    # Call the module itself rather than .forward() so registered hooks run.
    output = model(img)

ps = torch.exp(output)

# Plot the image and probabilities
helper.view_classify(img.view(1, 28, 28), ps, version='Fashion')

import numpy as np
A = np.random.randn(100, 100)
# reshape returns a new array/view; A itself keeps shape (100, 100)
A.reshape(100*100, -1)
A.shape
```
## Next Up!
In the next part, I'll show you how to save your trained models. In general, you won't want to train a model every time you need it. Instead, you'll train once, save it, then load the model when you want to train more or use it for inference.
| github_jupyter |
```
from __future__ import print_function
```
# Jupyter
We'll be using Jupyter for all of our examples -- this allows us to run python in a web-based notebook, keeping a history of input and output, along with text and images.
For Jupyter help, visit:
https://jupyter.readthedocs.io/en/latest/content-quickstart.html
We interact with python by typing into _cells_ in the notebook. By default, a cell is a _code_ cell, which means that you can enter any valid python code into it and run it. Another important type of cell is a _markdown_ cell. This lets you put text, with different formatting (italics, bold, etc) that describes what the notebook is doing.
You can change the cell type via the menu at the top, or using the shortcuts:
* ctrl-m m : mark down cell
* ctrl-m y : code cell
Some useful short-cuts:
* shift+enter = run cell and jump to the next (creating a new cell if there is no other new one)
* ctrl+enter = run cell in place
* alt+enter = run cell and insert a new one below
ctrl+m h lists other commands
A "markdown cell" enables you to typeset LaTeX equations right in your notebook. Just put them in <span>$</span> or <span>$$</span>:
$$\frac{\partial \rho}{\partial t} + \nabla \cdot (\rho U) = 0$$
<div style="background-color: palevioletred; color: white; padding: 10px;">
**Important**: when you work through a notebook, everything you did in previous cells is still in memory and _known_ by python, so you can refer to functions and variables that were previously defined. Even if you go up to the top of a notebook and insert a cell, all the information done earlier in your notebook session is still defined -- it doesn't matter where physically you are in the notebook. If you want to reset things, you can use the options under the _Kernel_ menu.
</div>
<div style="background-color:yellow; padding: 10px"><h3><span class="fa fa-flash"></span> Quick Exercise:</h3></div>
Create a new cell below this one. Make sure that it is a _code_ cell, and enter the following code and run it:
```
print("Hello, World")
```
<hr>
```
print("Hello, World")
```
`print()` is a _function_ in python that takes arguments (in the `()`) and outputs to the screen. You can print multiple quantities at once like:
```
print(1, 2, 3)
```
# Basic Datatypes
Now we'll look at some of the basic datatypes in python -- these are analogous to what you will find in most programming languages, including numbers (integers and floating point), and strings.
Some examples come from the python tutorial:
http://docs.python.org/3/tutorial/
## integers
Integers are numbers without a decimal point. They can be positive or negative. Most programming languages use a finite-amount of memory to store a single integer, but in python will expand the amount of memory as necessary to store large integers.
The basic operators, `+`, `-`, `*`, and `/` work with integers
```
2+2+3
2*-4
```
Note: integer division is one place where python 2 and python 3 differ
In python 3.x, dividing 2 integers results in a float. In python 2.x, dividing 2 integers results in an integer. The latter is consistent with many strongly-typed programming languages (like Fortran or C), since the data-type of the result is the same as the inputs, but the former is more inline with our expectations
```
1/2
```
To get an integer result, we can use the // operator.
```
1//2
```
Python is a _dynamically-typed language_—this means that we do not need to declare the datatype of a variable before initializing it.
Here we'll create a variable (think of it as a descriptive label that can refer to some piece of data). The `=` operator assigns a value to a variable.
```
a = 1
b = 2
```
Functions operate on variables and return a result. Here, `print()` will output to the screen.
```
print(a+b)
print(a*b)
```
Note that variable names are case sensitive, so a and A are different
```
A = 2048
print(a, A)
```
Here we initialize 3 variable all to `0`, but these are still distinct variables, so we can change one without affecting the others.
```
x = y = z = 0
print(x, y, z)
z = 1
print(x, y, z)
```
Python has some built in help (and Jupyter/ipython has even more)
```
help(x)
x?
```
Another function, `type()` returns the data type of a variable
```
print(type(x))
```
Note in languages like Fortran and C, you specify the amount of memory an integer can take (usually 2 or 4 bytes). This puts a restriction on the largest size integer that can be represented. Python will adapt the size of the integer so you don't *overflow*
```
a = 12345678901234567890123456789012345123456789012345678901234567890
print(a)
print(a.bit_length())
print(type(a))
```
## floating point
when operating with both floating point and integers, the result is promoted to a float. This is true of both python 2.x and 3.x
```
1./2
```
but note the special integer division operator
```
1.//2
```
It is important to understand that since there are infinitely many real numbers between any two bounds, on a computer we have to approximate this by a finite number. There is an IEEE standard for floating point that pretty much all languages and processors follow.
The means two things
* not every real number will have an exact representation in floating point
* there is a finite precision to numbers -- below this we lose track of differences (this is usually called *roundoff* error)
On our course website, I posted a link to a paper, _What every computer scientist should know about floating-point arithmetic_ -- this is a great reference on understanding how a computer stores numbers.
Consider the following expression, for example:
```
0.3/0.1 - 3
```
Here's another example: The number 0.1 cannot be exactly represented on a computer. In our print, we use a format specifier (the stuff inside of the {}) to ask for more precision to be shown:
```
a = 0.1
print("{:1.25}".format(a))
```
we can ask python to report the limits on floating point
```
import sys
print(sys.float_info)
```
Note that this says that we can only store numbers between 2.2250738585072014e-308 and 1.7976931348623157e+308
We also see that the precision is 2.220446049250313e-16 (this is commonly called _machine epsilon_). To see this, consider adding a small number to 1.0. We'll use the equality operator (`==`) to test if two numbers are equal:
<div style="background-color:yellow; padding: 10px"><h3><span class="fa fa-flash"></span> Quick Exercise:</h3></div>
Define two variables, $a = 1$, and $e = 10^{-16}$.
Now define a third variable, `b = a + e`
We can use the python `==` operator to test for equality. What do you expect `b == a` to return? run it an see if it agrees with your guess.
<hr>
```
a = 1
e = 10**-16
b = a+e
if b==a:
print(True)
else:
print(False)
```
## modules
The core python language is extended by a standard library that provides additional functionality. These added pieces are in the form of modules that we can _import_ into our python session (or program).
The `math` module provides functions that do the basic mathematical operations as well as provide constants (note there is a separate `cmath` module for complex numbers).
In python, you `import` a module. The functions are then defined in a separate _namespace_—this is a separate region that defines names and variables, etc. A variable in one namespace can have the same name as a variable in a different namespace, and they don't clash. You use the "`.`" operator to access a member of a namespace.
By default, when you type stuff into the python interpreter or here in the Jupyter notebook, or in a script, it is in its own default namespace, and you don't need to prefix any of the variables with a namespace indicator.
```
import math
```
`math` provides the value of pi
```
print(math.pi)
```
This is distinct from any variable `pi` we might define here
```
pi = 3
print(pi, math.pi)
```
Note here that `pi` and `math.pi` are distinct from one another—they are in different namespaces.
### floating point operations
The same operators, `+`, `-`, `*`, `/` work are usual for floating point numbers. To raise an number to a power, we use the `**` operator (this is the same as Fortran)
```
R = 2.0
print(math.pi*R**2)
```
operator precedence follows that of most languages. See
https://docs.python.org/3/reference/expressions.html#operator-precedence
in order of precedence:
* quantites in `()`
* slicing, calls, subscripts
* exponentiation (`**`)
* `+x`, `-x`, `~x`
* `*`, `@`, `/`, `//`, `%`
* `+`, `-`
(after this are bitwise operations and comparisons)
Parentheses can be used to override the precedence.
<div style="background-color:yellow; padding: 10px"><h3><span class="fa fa-flash"></span> Quick Exercise:</h3></div>
Consider the following expressions. Using the ideas of precedence, think about what value will result, then try it out in the cell below to see if you were right.
* `1 + 3*2**2`
* `1 + (3*2)**2`
* `2**3**2`
<hr>
```
"""
My guesses:
1 + 3*2**2 = 13
1 + (3*2)**2 = 37
2**3**2 = 64?
"""
print(1 + 3*2**2)
print(1 + (3*2)**2)
print(2**3**2)
#Okay, so if precedence is same, order is right to left. Whoops!
```
The math module provides a lot of the standard math functions we might want to use.
For the trig functions, the expectation is that the argument to the function is in radians—you can use `math.radians()` to convert from degrees to radians, ex:
```
print(math.cos(math.radians(45)))
```
Notice that in that statement we are feeding the output of one function (`math.radians()`) into a second function, `math.cos()`
When in doubt, ask for help to discover all of the things a module provides:
```
help(math.sin)
```
## complex numbers
python uses '`j`' to denote the imaginary unit
```
print(1.0 + 2j)
a = 1j
b = 3.0 + 2.0j
print(a + b)
print(a*b)
```
we can use `abs()` to get the magnitude and separately get the real or imaginary parts
```
print(abs(b))
print(a.real)
print(a.imag)
```
## strings
python doesn't care if you use single or double quotes for strings:
```
a = "this is my string"
b = 'another string'
print(a)
print(b)
```
Many of the usual mathematical operators are defined for strings as well. For example to concatenate or duplicate:
```
print(a+b)
print(a + ". " + b)
print(a*2)
```
There are several escape codes that are interpreted in strings. These start with a backwards-slash, `\`. E.g., you can use `\n` for new line
```
a = a + "\n"
print(a)
```
<div style="background-color:yellow; padding: 10px"><h3><span class="fa fa-flash"></span> Quick Exercise:</h3></div>
The `input()` function can be used to ask the user for input.
* Use `help(input)` to see how it works.
* Write code to ask for input and store the result in a variable. `input()` will return a string.
* Use the `float()` function to convert a number entered as input to a floating point variable.
* Check to see if the conversion worked using the `type()` function.
<hr>
```
help(input)
num = input()
num = float(num)
print(type(num))
```
""" can enclose multiline strings. This is useful for docstrings at the start of functions (more on that later...)
```
c = """
Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor
incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis
nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.
Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore
eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt
in culpa qui officia deserunt mollit anim id est laborum."""
print(c)
```
a raw string does not replace escape sequences (like \n). Just put a `r` before the first quote:
```
d = r"this is a raw string\n"
print(d)
```
slicing is used to access a portion of a string.
slicing a string can seem a bit counterintuitive if you are coming from Fortran. The trick is to think of the index as representing the left edge of a character in the string. When we do arrays later, the same will apply.
Also note that python (like C) uses 0-based indexing
Negative indices count from the right.
```
a = "this is my string"
print(a)
print(a[5:7])
print(a[0])
print(d)
print(d[-2])
```
<div style="background-color:yellow; padding: 10px"><h3><span class="fa fa-flash"></span> Quick Exercise:</h3></div>
Strings have a lot of _methods_ (functions that know how to work with a particular datatype, in this case strings). A useful method is `.find()`. For a string `a`,
`a.find(s)` will return the index of the first occurrence of `s`.
For our string `c` above, find the first `.` (identifying the first full sentence), and print out just the first sentence in `c` using this result
<hr>
```
index = c.find('.')
first = c[:index+1]
print(index, first)
```
there are also a number of methods and functions that work with strings. Here are some examples:
```
print(a.replace("this", "that"))
print(len(a))
print(a.strip()) # Also notice that strip removes the \n
print(a.strip()[-1])
```
Note that our original string, `a`, has not changed. In python, strings are *immutable*. Operations on strings return a new string.
```
print(a)
print(type(a))
```
As usual, ask for help to learn more:
```
help(str)
```
We can format strings when we are printing to insert quantities in particular places in the string. A `{}` serves as a placeholder for a quantity and is replaced using the `.format()` method:
```
a = 1
b = 2.0
c = "test"
print("a = {}; b = {}; c = {}".format(a, b, c))
print("a = {}".format(a))
```
| github_jupyter |
```
from sklearn import model_selection, preprocessing, linear_model, naive_bayes, metrics, svm
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn import decomposition, ensemble
#import textblob
from keras.preprocessing import text, sequence
from keras import layers, models, optimizers
import numpy as np
import pandas as pd
import string
import nltk
nltk.download('punkt')
# create a dataframe using texts and lables
df= pd.read_csv('data_1000.csv')
df2= pd.DataFrame()
df2['text']= df['post_description_en']
df2['labels']= df['categories']
df2['labels']= df2['labels'].replace('Journalist, Writing And Translation', 'Government Works')
df2.groupby('labels').count()
df2= df2[(df2.labels!='Agriculture And Farming')& (df2.labels!='Artist')& (df2.labels!='Brokering / Agent')& (df2.labels!='Labor')& (df2.labels!='Plumber')& (df2.labels!='Sanitation Works')& (df2.labels!='Travel And Transport')]
#We remove all those categories whose count is in single digits
df2.groupby('labels').count()
df3= pd.read_csv('report_04-11.csv')
#len(df3)
df_04= pd.DataFrame()
df_04['text']= df3['translation']
df_04['labels']= df3['classifier']
df_04['labels']= df_04['labels'].apply(lambda x: x.lower())
df_04['text']= df_04['text'].apply(lambda x: x.lower())
def text_cleaning(a):
    """Tokenize, lemmatize, strip punctuation and drop English stopwords.

    Parameters
    ----------
    a : str
        Raw document text.

    Returns
    -------
    list of str: surviving tokens. Relies on the module-level
    `lemmatizer` (WordNetLemmatizer) defined alongside this function.
    """
    tokens = nltk.word_tokenize(a)
    a_lemma = ' '.join(lemmatizer.lemmatize(word) for word in tokens)
    # Drop punctuation characters, then rejoin into a single string.
    rem_punc = ''.join(ch for ch in a_lemma if ch not in string.punctuation)
    # Build the stopword set once per call: the original re-read
    # stopwords.words('english') (a fresh list) for every single token.
    stop_set = set(stopwords.words('english'))
    return [word for word in rem_punc.split() if word.lower() not in stop_set]
from nltk.stem import WordNetLemmatizer
nltk.download('wordnet')
lemmatizer = WordNetLemmatizer()
import nltk
from nltk import word_tokenize
from nltk.corpus import stopwords
stopWords= set(stopwords.words('english'))
df_04['text_cleaned']= df_04.iloc[:,0].apply(text_cleaning)
for i,row in df_04.iterrows():
row['text_cleaned']= ' '.join([str(elem) for elem in row['text_cleaned']])
df_04
df2['text_cleaned']= df2.iloc[:,0].apply(text_cleaning)
for i,row in df2.iterrows():
row['text_cleaned']= ' '.join([str(elem) for elem in row['text_cleaned']])
df2['labels']= df2['labels'].apply(lambda x: x.lower())
df2['text']= df2['text'].apply(lambda x: x.lower())
df2['text_cleaned']= df2['text_cleaned'].apply(lambda x: x.lower())
df2
# split the dataset into training and validation datasets
train_x, valid_x, train_y, valid_y = model_selection.train_test_split(df2['text_cleaned'], df2['labels'], test_size=0.25, random_state=0, stratify= df2['labels'])
from sklearn.model_selection import StratifiedShuffleSplit
# sss = StratifiedShuffleSplit(n_splits=5, test_size=0.25, random_state=0)
# sss.get_n_splits(X, y)
# for train_index, test_index in sss.split(X, y):
# #print("TRAIN:", train_index, "TEST:", test_index)
# train_x, valid_x = X[train_index], X[test_index]
# train_y, valid_y = y[train_index], y[test_index]
x_test= df_04.text_cleaned
y_test= df_04.labels
# label encode the target variable
encoder = preprocessing.LabelEncoder()
train_y = encoder.fit_transform(train_y)
valid_y = encoder.fit_transform(valid_y)
y_test = encoder.fit_transform(y_test)
# create a count vectorizer object
count_vect = CountVectorizer(analyzer='word', token_pattern=r'\w{1,}')
#count_vect.fit(df2['text_cleaned'])
count_vect.fit(train_x)
# transform the training and validation data using count vectorizer object
xtrain_count = count_vect.transform(train_x)
xvalid_count = count_vect.transform(valid_x)
xtest_count = count_vect.transform(x_test)
# word level tf-idf
tfidf_vect = TfidfVectorizer(analyzer='word', token_pattern=r'\w{1,}', max_features=5000)
tfidf_vect.fit(df2['text'])
xtrain_tfidf = tfidf_vect.transform(train_x)
xvalid_tfidf = tfidf_vect.transform(valid_x)
xtest_tfidf = tfidf_vect.transform(x_test)
# ngram level tf-idf
tfidf_vect_ngram = TfidfVectorizer(analyzer='word', token_pattern=r'\w{1,}', ngram_range=(2,3), max_features=5000)
tfidf_vect_ngram.fit(df2['text'])
xtrain_tfidf_ngram = tfidf_vect_ngram.transform(train_x)
xvalid_tfidf_ngram = tfidf_vect_ngram.transform(valid_x)
xtest_tfidf_ngram = tfidf_vect_ngram.transform(x_test)
# characters level tf-idf
tfidf_vect_ngram_chars = TfidfVectorizer(analyzer='char', token_pattern=r'\w{1,}', ngram_range=(2,3), max_features=5000)
tfidf_vect_ngram_chars.fit(df2['text'])
xtrain_tfidf_ngram_chars = tfidf_vect_ngram_chars.transform(train_x)
xvalid_tfidf_ngram_chars = tfidf_vect_ngram_chars.transform(valid_x)
xtest_tfidf_ngram_chars = tfidf_vect_ngram_chars.transform(x_test)
# load the pre-trained word-embedding vectors
embeddings_index = {}
for i, line in enumerate(open('wiki-news-300d-1M.vec')):
values = line.split()
embeddings_index[values[0]] = np.asarray(values[1:], dtype='float32')
# create a tokenizer
token = text.Tokenizer()
token.fit_on_texts(df2['text'])
word_index = token.word_index
# convert text to sequence of tokens and pad them to ensure equal length vectors
train_seq_x = sequence.pad_sequences(token.texts_to_sequences(train_x), maxlen=70)
valid_seq_x = sequence.pad_sequences(token.texts_to_sequences(valid_x), maxlen=70)
# create token-embedding mapping
embedding_matrix = np.zeros((len(word_index) + 1, 300))
for word, i in word_index.items():
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
embedding_matrix[i] = embedding_vector
def train_model(classifier, feature_vector_train, label, feature_vector_valid, is_neural_net=False, valid_label=None):
    """Fit `classifier` on the training vectors and return validation accuracy.

    Parameters
    ----------
    classifier : estimator with fit/predict (sklearn-style).
    feature_vector_train : training feature matrix.
    label : training labels.
    feature_vector_valid : validation feature matrix.
    is_neural_net : bool, keyword; if True, predictions are class
        probabilities and are argmax-ed into class indices.
    valid_label : validation labels; defaults to the module-level
        `valid_y` for backward compatibility with the original code,
        which read that global implicitly.

    Returns
    -------
    float: accuracy on the validation set.
    """
    # fit the training dataset on the classifier
    classifier.fit(feature_vector_train, label)

    # predict the labels on validation dataset
    predictions = classifier.predict(feature_vector_valid)

    if is_neural_net:
        predictions = predictions.argmax(axis=-1)

    if valid_label is None:
        # Backward-compatible fallback onto the notebook-level global.
        valid_label = valid_y

    # y_true first, y_pred second (accuracy is symmetric, but this
    # matches the sklearn signature).
    return metrics.accuracy_score(valid_label, predictions)
# Naive Bayes on Count Vectors
accuracy = train_model(naive_bayes.MultinomialNB(), xtrain_count, train_y, xvalid_count)
print("NB, Count Vectors: ", accuracy)
# Naive Bayes on Word Level TF IDF Vectors
accuracy = train_model(naive_bayes.MultinomialNB(), xtrain_tfidf, train_y, xvalid_tfidf)
print("NB, WordLevel TF-IDF: ", accuracy)
# Naive Bayes on Ngram Level TF IDF Vectors
accuracy = train_model(naive_bayes.MultinomialNB(), xtrain_tfidf_ngram, train_y, xvalid_tfidf_ngram)
print("NB, N-Gram Vectors: ", accuracy)
# Naive Bayes on Character Level TF IDF Vectors
accuracy = train_model(naive_bayes.MultinomialNB(), xtrain_tfidf_ngram_chars, train_y, xvalid_tfidf_ngram_chars)
print("NB, CharLevel Vectors: ", accuracy)
# Linear Classifier on Count Vectors
accuracy = train_model(linear_model.LogisticRegression(), xtrain_count, train_y, xvalid_count)
print ("LR, Count Vectors: ", accuracy)
# Linear Classifier on Word Level TF IDF Vectors
accuracy = train_model(linear_model.LogisticRegression(), xtrain_tfidf, train_y, xvalid_tfidf)
print("LR, WordLevel TF-IDF: ", accuracy)
# Linear Classifier on Ngram Level TF IDF Vectors
accuracy = train_model(linear_model.LogisticRegression(), xtrain_tfidf_ngram, train_y, xvalid_tfidf_ngram)
print("LR, N-Gram Vectors: ", accuracy)
# Linear Classifier on Character Level TF IDF Vectors
accuracy = train_model(linear_model.LogisticRegression(), xtrain_tfidf_ngram_chars, train_y, xvalid_tfidf_ngram_chars)
print("LR, CharLevel Vectors: ", accuracy)
# SVM on Ngram Level TF IDF Vectors
accuracy = train_model(svm.SVC(), xtrain_tfidf_ngram, train_y, xvalid_tfidf_ngram)
print("SVM, N-Gram Vectors: ", accuracy)
# RF on Count Vectors
accuracy = train_model(ensemble.RandomForestClassifier(), xtrain_count, train_y, xvalid_count)
print("RF, Count Vectors: ", accuracy)
# RF on Word Level TF IDF Vectors
accuracy = train_model(ensemble.RandomForestClassifier(), xtrain_tfidf, train_y, xvalid_tfidf)
print("RF, WordLevel TF-IDF: ", accuracy)
```
| github_jupyter |
**Import library**
```
import pandas as pd
import numpy as np
import calendar
from datetime import datetime
import time
# Standard plotly imports
import plotly.express as px
import plotly.graph_objects as go
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_context("paper", font_scale=1.3)
sns.set_style('white')
# stats
from statsmodels.tsa.statespace.sarimax import SARIMAX
from random import random
from statsmodels.tsa.stattools import adfuller
#Prophet
from fbprophet import Prophet
# SKLEARN
from sklearn.metrics import mean_squared_error
```
**Import data**
```
# Read in the raw temperature dataset
raw_global = pd.read_csv('GLB.Ts+dSST.csv', skiprows=1)
raw_global = raw_global.iloc[:,:13]
raw_global.head()
raw_global.tail()
```
**Data Preprocessing**
```
def clean_value(raw_value):
    """Coerce a raw temperature reading to float, mapping junk to NaN.

    The GISTEMP CSV marks missing readings with placeholder strings such
    as '***'; those (and any other non-numeric value) become NaN so they
    can be forward-filled later.
    """
    try:
        return float(raw_value)
    except (TypeError, ValueError):
        # Narrowed from a bare `except:` so unrelated errors (e.g.
        # KeyboardInterrupt) are no longer swallowed.
        return np.nan  # the np.NaN alias was removed in NumPy 2.0
def preprocess_data(raw):
    """Flatten the GISTEMP year-by-month grid into a monthly time series.

    Parameters
    ----------
    raw : pd.DataFrame
        One row per year; column 0 is the year label, columns 1-12 hold
        the twelve monthly anomaly values (possibly non-numeric
        placeholders).

    Returns
    -------
    pd.DataFrame with 'Date' (month-end timestamps, 1880-2019) and
    'Temp' (float anomalies, forward-filled).

    NOTE(review): assumes `raw` covers exactly 1880-2019 so the
    flattened value list lines up with the fixed date range — confirm.
    """
    data_horizon = pd.date_range(start='1/1/1880', end='12/31/2019', freq='M')
    data = pd.DataFrame(data_horizon, columns=['Date'])
    #extract temperature data
    temp_list = []
    for idx in range(raw.shape[0]):
        # columns 1: of each row are that year's 12 monthly readings
        temp_list.extend(raw.iloc[idx,1:])
    data['Temp'] = temp_list
    #clean value
    data['Temp'] = data['Temp'].apply(lambda x: clean_value(x))
    # forward-fill gaps (leading NaNs, if any, would remain NaN)
    data.fillna(method='ffill', inplace=True)
    return data
global_t = preprocess_data(raw_global)
global_t.head()
global_t.tail()
```
**Data Visualization**
```
fig = px.line(global_t, x="Date", y="Temp", title='Global-mean monthly Combined Land-Surface Air and Sea-Surface Water Temperature Anomalies')
fig.show()
fig = px.line(global_t.resample('A', on='Date').mean().reset_index(), x="Date", y="Temp", title='Global-mean yearly Combined Land-Surface Air and Sea-Surface Water Temperature Anomalies')
fig.show()
```
Test stationarity
```
def test_stationarity(timeseries):
    """Plot rolling statistics and run the augmented Dickey-Fuller test.

    Shows the series alongside its 30-observation rolling mean and
    standard deviation, then prints the ADF statistic, p-value, lag
    count and critical values. A p-value above the significance level
    means the unit-root null hypothesis cannot be rejected (series
    treated as non-stationary).
    """
    # 30-observation window (30 months for this monthly series)
    rolmean = timeseries.rolling(window=30).mean()
    rolstd = timeseries.rolling(window=30).std()

    plt.figure(figsize=(14,5))
    sns.despine(left=True)
    orig = plt.plot(timeseries, color='blue',label='Original')
    mean = plt.plot(rolmean, color='red', label='Rolling Mean')
    std = plt.plot(rolstd, color='black', label = 'Rolling Std')
    plt.legend(loc='best'); plt.title('Rolling Mean & Standard Deviation')
    plt.show()

    print ('<Results of Dickey-Fuller Test>')
    # autolag='AIC' selects the lag order minimizing the AIC criterion
    dftest = adfuller(timeseries, autolag='AIC')
    dfoutput = pd.Series(dftest[0:4],
                         index=['Test Statistic','p-value','#Lags Used','Number of Observations Used'])
    for key,value in dftest[4].items():
        dfoutput['Critical Value (%s)'%key] = value
    print(dfoutput)
test_stationarity(global_t.Temp.dropna())
```
since the p-value > 0.05, we fail to reject the null hypothesis (H0): the data has a unit root and is non-stationary.
**Time Series Prediction - SARIMA**
The Seasonal Autoregressive Integrated Moving Average (SARIMA) method models the next step in the sequence as a linear function of the differenced observations, errors, differenced seasonal observations, and seasonal errors at prior time steps.
It combines the ARIMA model with the ability to perform the same autoregression, differencing, and moving average modeling at the seasonal level.
The notation for the model involves specifying the order for the AR(p), I(d), and MA(q) models as parameters to an ARIMA function and AR(P), I(D), MA(Q) and m parameters at the seasonal level, e.g. SARIMA(p, d, q)(P, D, Q)m where *m* is the number of time steps in each season (the seasonal period). A SARIMA model can be used to develop AR, MA, ARMA and ARIMA models.
The method is suitable for univariate time series with trend and/or seasonal components.
```
def plot(y_true,y_pred):
    """Plot predicted vs. actual temperatures for the held-out tail.

    Aligns both series against the last len(y_true) dates of the
    module-level `global_t` frame.

    NOTE(review): the chart title says "Southern Hemisphere" but this
    is called on the global-mean series — confirm which is intended.
    """
    # Plot
    fig = go.Figure()
    # x-axis: the dates of the final len(y_true) observations
    x = global_t['Date'][global_t.shape[0]-len(y_true):]
    fig.add_trace(go.Scatter(x=x, y=y_true, mode='lines', name='actual'))
    fig.add_trace(go.Scatter(x=x, y=y_pred, mode='lines', name='predicted'))

    # Edit the layout
    fig.update_layout(title='Southern Hemisphere-mean Temperature: Predicted v.s. Actual',
                      xaxis_title='Month',
                      yaxis_title='Temperature')
    fig.show()
def SARIMA_prediction(temp_data):
    """Walk-forward SARIMA forecast of the last 336 monthly values.

    Trains on everything except the final 336 observations (~20% of the
    series), then predicts one step at a time, feeding each prediction
    back into the history — so later steps compound earlier forecast
    error. Prints the MSE and plots predicted vs. actual.
    """
    y_true = []
    y_pred = []
    temperature = temp_data['Temp'].tolist()
    train = temperature[:-336]
    test = temperature[len(train):]
    #predict the latest 336 values (20% of data)
    for idx in range(len(test)):
        true_val = test[idx]
        # history = training data plus every prediction made so far
        if len(y_pred)>0:
            record = train+y_pred
        else:
            record = train
        # fit model
        # NOTE(review): seasonal_order period m=1 makes the seasonal
        # terms degenerate (season of one step); m=12 is the usual
        # choice for monthly data — confirm this is intended.
        model = SARIMAX(record, order=(1, 1, 1), seasonal_order=(1, 1, 1, 1))
        model_fit = model.fit(disp=False,low_memory=True)
        # make predictions
        # one-step-ahead forecast for index len(record)
        yhat = model_fit.predict(len(record), len(record))
        # save value
        y_true.append(true_val)
        y_pred.extend(yhat)
    print(mean_squared_error(y_true, y_pred))
    plot(y_true,y_pred)
start_time = time.time()
SARIMA_prediction(global_t)
print("--- %s seconds ---" % (time.time() - start_time))
```
**Time Series Prediction - Prophet**
Prophet is a procedure for forecasting time series data based on an additive model where non-linear trends are fit with yearly, weekly, and daily seasonality, plus holiday effects. It works best with time series that have strong seasonal effects and several seasons of historical data. Prophet is robust to missing data and shifts in the trend, and typically handles outliers well.
```
def prophet_prediction(temp_data):
    """Fit Prophet on all but the last 336 months and forecast them.

    Returns the full forecast frame (fitted history plus 336 forecast
    rows) and shows Prophet's built-in forecast plot.
    """
    #removing the last 336 values (10 years)
    # NOTE(review): 336 months is 28 years, not 10 — the comment and
    # the code disagree; confirm which horizon was intended.
    df = temp_data.iloc[:-336]
    # Prophet requires the columns to be named 'ds' (date) and 'y'.
    df = df.rename(columns={'Date':'ds', 'Temp':'y'})
    #load prophet model
    model = Prophet(weekly_seasonality=True)
    model.fit(df)
    #prediction
    future = model.make_future_dataframe(periods=336, freq = 'm')
    forecast = model.predict(future)
    model.plot(forecast)
    return forecast
start_time = time.time()
prophet_forecast = prophet_prediction(global_t)
print("--- %s seconds ---" % (time.time() - start_time))
prophet_forecast_last = prophet_forecast.iloc[prophet_forecast.shape[0]-336:]
global_t_last = global_t.iloc[global_t.shape[0]-336:]
mean_squared_error(global_t_last.Temp, prophet_forecast_last.yhat)
```
**Time series prediction - LSTM**
```
from keras.models import Sequential
from keras.layers.recurrent import LSTM
from keras.layers.core import Dense, Activation, Dropout
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
from sklearn.utils import shuffle
from keras.callbacks import EarlyStopping
earlyStop=EarlyStopping(monitor="val_loss",verbose=2,mode='min',patience=5)
```
**Data preparation**
```
# Univariate temperature series as a (n, 1) float32 column vector.
temp_raw = np.array(global_t.Temp.astype("float32")).reshape(-1,1)
# Apply the MinMax scaler from sklearn to normalize data in the (0, 1) interval.
scaler = MinMaxScaler(feature_range = (0, 1))
temp_LSTM = scaler.fit_transform(temp_raw)
# Train/val/test split: 60% train, 20% validation, remainder (~20%) test.
ratio = 0.6
train_size = int(len(temp_LSTM) * ratio)
val_size = int(len(temp_LSTM) * 0.2)
test_size = len(temp_LSTM) - train_size - val_size
# Chronological split (no shuffling), so the test set is the most recent data.
train, val, test = temp_LSTM[0:train_size, :], temp_LSTM[train_size:train_size+val_size, :], temp_LSTM[train_size+val_size:len(temp_LSTM), :]
print("Number of entries (training set, val set, test set): " + str((len(train), len(val), len(test))))
def create_dataset(dataset, window_size=1):
    """Turn a (n, 1) series into supervised one-step-ahead pairs.

    Generalized: the window length is now a parameter instead of a
    hard-coded local; the default (1) reproduces the original behavior.

    Parameters
    ----------
    dataset : 2-D array-like of shape (n, 1)
        Scaled univariate series (e.g. output of MinMaxScaler).
    window_size : int, default 1
        Number of consecutive observations forming one input sample.

    Returns
    -------
    (data_X, data_Y) : tuple of np.ndarray
        data_X has shape (m, window_size); data_Y[i] is the value that
        immediately follows window data_X[i].
    """
    data_X, data_Y = [], []
    # NOTE(review): the upper bound drops one usable sample
    # (range(n - window_size) would keep it); preserved so the split
    # sizes match the rest of the notebook.
    for i in range(len(dataset) - window_size - 1):
        data_X.append(dataset[i:(i + window_size), 0])
        data_Y.append(dataset[i + window_size, 0])
    return (np.array(data_X), np.array(data_Y))
# Create test and training sets for one-step-ahead regression.
train_X, train_Y = create_dataset(train)
val_X, val_Y = create_dataset(val)
test_X, test_Y = create_dataset(test)
# Reshape into the (samples, timesteps, features) layout Keras LSTMs expect.
train_X = np.reshape(train_X, (train_X.shape[0], 1,train_X.shape[1]))
val_X = np.reshape(val_X, (val_X.shape[0], 1,val_X.shape[1]))
test_X = np.reshape(test_X, (test_X.shape[0], 1,test_X.shape[1]))
print("Training data for Keras shape:")
print(train_X.shape)
```
**LSTM Model**
The LSTM architecture here consists of:
- One input layer.
- One LSTM layer of 4 blocks.
- One Dense layer to produce a single output.
- Use MSE as loss function.
```
def LSTM_modelone(train_X, train_Y, window_size):
    """Build and fit a small LSTM (4 units -> Dense(1)) regressor.

    NOTE(review): reads val_X, val_Y and earlyStop from module scope rather
    than taking them as parameters -- they must exist before calling.
    """
    model = Sequential()
    model.add(LSTM(4,
        input_shape = (1, window_size)))
    model.add(Dense(1))
    model.compile(loss = "mean_squared_error",
        optimizer = "adam")
    model.fit(train_X,
        train_Y,
        epochs = 100,
        batch_size = 10,
        verbose = 2,
        validation_data=(val_X,val_Y),callbacks=[earlyStop])
    return model

# Time the run (early stopping usually ends it well before 100 epochs).
start_time = time.time()
LSTM_model1 = LSTM_modelone(train_X, train_Y, window_size=1)
print("--- %s seconds ---" % (time.time() - start_time))
def predict_and_score(model, X, Y):
    """Return the MSE of model predictions on X, on the original data scale."""
    # Make predictions on the original scale of the data.
    pred = scaler.inverse_transform(model.predict(X))
    # Prepare Y data to also be on the original scale for interpretability.
    orig_data = scaler.inverse_transform([Y])
    # Calculate MSE (mean_squared_error; no square root is taken, so this
    # is MSE rather than RMSE, matching the printed label below).
    score = mean_squared_error(orig_data[0], pred[:, 0])
    return score

print("Test data score: %.3f MSE" % predict_and_score(LSTM_model1,test_X, test_Y))
```
The second model architecture is slightly more complex. Its elements are:
- Define the LSTM with 50 neurons in the first hidden layer and 1 neuron in the output layer
- Dropout 20%.
- Use the MSE loss function and the efficient Adam version of stochastic gradient descent.
- The model will be fit for 50 training epochs with a batch size of 5.
```
def LSTM_modeltwo(train_X, train_Y):
    """Build and fit a larger LSTM (50 units + 20% dropout -> Dense(1)).

    NOTE(review): reads val_X, val_Y and earlyStop from module scope.
    """
    model = Sequential()
    model.add(LSTM(50, input_shape=(train_X.shape[1], train_X.shape[2])))
    model.add(Dropout(0.2))
    model.add(Dense(1))
    model.compile(loss='mean_squared_error', optimizer='adam')
    print(model.summary())
    # shuffle=False keeps the chronological order of the series during fitting.
    model.fit(train_X, train_Y, epochs=50, batch_size=5, verbose=2, shuffle=False, validation_data=(val_X,val_Y),callbacks=[earlyStop])
    return model

start_time = time.time()
LSTM_model2 = LSTM_modeltwo(train_X, train_Y)
print("--- %s seconds ---" % (time.time() - start_time))
print("Test data score: %.3f MSE" % predict_and_score(LSTM_model2,test_X, test_Y))
def predict_and_plot(model, X, Y):
    """Plot model predictions against actuals on the original data scale."""
    # Make predictions on the original scale of the data.
    pred = scaler.inverse_transform(model.predict(X))
    # Prepare Y data to also be on the original scale for interpretability.
    orig_data = scaler.inverse_transform([Y])
    # Plot with plotly; the x-axis is the matching tail of the date column.
    fig = go.Figure()
    x = global_t['Date'][global_t.shape[0]-len(orig_data[0]):]
    fig.add_trace(go.Scatter(x=x, y=orig_data[0], mode='lines', name='actual'))
    fig.add_trace(go.Scatter(x=x, y=pred[:, 0], mode='lines', name='predicted'))
    # Edit the layout
    fig.update_layout(title='Global Temperature: Predicted v.s. Actual',
        xaxis_title='Month',
        yaxis_title='Temperature')
    fig.show()

predict_and_plot(LSTM_model2,test_X, test_Y)
```
**MLP Model**
```
def MLP_model(train_X, train_Y):
    """Build and fit a 2-hidden-layer MLP baseline (100 -> 50 -> 1).

    NOTE(review): input_shape=(1,) while train_X is (samples, 1, 1) after the
    Keras reshape above -- confirm the resulting output shape matches train_Y.
    Reads val_X, val_Y and earlyStop from module scope.
    """
    model = Sequential()
    model.add(Dense(100, input_shape=(1,)))
    model.add(Activation('relu'))
    model.add(Dropout(0.25))
    model.add(Dense(50))
    model.add(Activation('relu'))
    model.add(Dense(1))
    model.add(Activation('linear'))
    model.compile(optimizer='adam', loss='mse')
    print(model.summary())
    model.fit(train_X, train_Y, epochs=50, batch_size=10, verbose=2, shuffle=False, validation_data=(val_X,val_Y),callbacks=[earlyStop])
    return model

start_time = time.time()
MLP_model_result = MLP_model(train_X, train_Y)
print("--- %s seconds ---" % (time.time() - start_time))
print("Test data score: %.3f MSE" % predict_and_score(MLP_model_result,test_X, test_Y))
```
| github_jupyter |
```
from eppy import modeleditor
from eppy.modeleditor import IDF
import pandas as pd

# NOTE(review): the original cell contained a bare `pip install pandas`
# line, which is a SyntaxError in plain Python; install pandas separately
# (e.g. `%pip install pandas` in a notebook) before running this cell.

IDF.setiddname('/usr/local/bin/Energy+.idd')

# Every building variant uses the same Dublin weather file.
EPW = 'IRL_Dublin.039690_IWEC.epw'


def _run_simulation(idf_path, epw_path, output_directory, result_dir):
    """Run one EnergyPlus simulation and extract the headline demand figures.

    Parameters
    ----------
    idf_path : str
        Path to the building's .idf model file.
    epw_path : str
        Path to the weather file to simulate with.
    output_directory : str
        Directory EnergyPlus writes its output files into.
    result_dir : str
        Directory the result files (eplusmtr.csv / eplustbl.htm) are read from.

    Returns
    -------
    tuple
        (peak hourly electricity demand [J],
         annual energy demand [kWh],
         annual electricity demand [kWh],
         annual heat demand [kWh])
    """
    idf = IDF(idf_path)
    idf.epw = epw_path
    idf.run(expandobjects=True, readvars=True, output_directory=output_directory)
    meters = pd.read_csv(result_dir + "/eplusmtr.csv")
    peak_joule = meters["Electricity:Facility [J](Hourly)"].max()
    tables = pd.read_html(result_dir + "/eplustbl.htm")
    # Table 0: energy summary; cell [1, 1] is the annual total (kWh).
    ann_energy_kwh = pd.DataFrame(tables[0]).iloc[1, 1]
    # Table 3: end-use breakdown; row 16 holds electricity (col 1)
    # and heat (col 5) annual totals (kWh).
    end_uses = pd.DataFrame(tables[3])
    ann_elec_kwh = end_uses.iloc[16, 1]
    ann_heat_kwh = end_uses.iloc[16, 5]
    return peak_joule, ann_energy_kwh, ann_elec_kwh, ann_heat_kwh


# --- Detached house, pre / post retrofit -----------------------------------
(peak_demand_joule_det_pre, ann_energy_demand_kwh_det_pre,
 ann_elec_demand_kwh_det_pre, ann_heat_demand_kwh_det_pre) = _run_simulation(
    '/idf/det_pre/detatched_pre.idf', 'idf/det_pre/' + EPW,
    '/idf/det_pre', 'idf/det_pre')
print(peak_demand_joule_det_pre, ann_energy_demand_kwh_det_pre,
      ann_elec_demand_kwh_det_pre, ann_heat_demand_kwh_det_pre)

(peak_demand_joule_det_post, ann_energy_demand_kwh_det_post,
 ann_elec_demand_kwh_det_post, ann_heat_demand_kwh_det_post) = _run_simulation(
    '/idf/det_post/detatched_post.idf', 'idf/det_post/' + EPW,
    '/idf/det_post', 'idf/det_post')
print(peak_demand_joule_det_post, ann_energy_demand_kwh_det_post,
      ann_elec_demand_kwh_det_post, ann_heat_demand_kwh_det_post)

# --- Semi-detached house, pre / post retrofit ------------------------------
# NOTE(review): results are written to /idf/semid_pre but read back from
# idf/semi_d_pre (and likewise for post). Preserved from the original code;
# confirm the read directory actually holds this run's output.
(peak_demand_joule_semid_pre, ann_energy_demand_kwh_semid_pre,
 ann_elec_demand_kwh_semid_pre, ann_heat_demand_kwh_semid_pre) = _run_simulation(
    '/idf/semi_d_pre/semi_d_pre.idf', 'idf/semi_d_pre/' + EPW,
    '/idf/semid_pre', 'idf/semi_d_pre')
print(peak_demand_joule_semid_pre, ann_energy_demand_kwh_semid_pre,
      ann_elec_demand_kwh_semid_pre, ann_heat_demand_kwh_semid_pre)

(peak_demand_joule_semid_post, ann_energy_demand_kwh_semid_post,
 ann_elec_demand_kwh_semid_post, ann_heat_demand_kwh_semid_post) = _run_simulation(
    '/idf/semi_d_post/semi_d_post.idf', 'idf/semi_d_post/' + EPW,
    '/idf/semid_post', 'idf/semi_d_post')
print(peak_demand_joule_semid_post, ann_energy_demand_kwh_semid_post,
      ann_elec_demand_kwh_semid_post, ann_heat_demand_kwh_semid_post)

# --- Terraced house, pre / post retrofit -----------------------------------
(peak_demand_joule_terr_pre, ann_energy_demand_kwh_terr_pre,
 ann_elec_demand_kwh_terr_pre, ann_heat_demand_kwh_terr_pre) = _run_simulation(
    '/idf/terr_pre/terraced_pre.idf', 'idf/terr_pre/' + EPW,
    '/idf/terr_pre', 'idf/terr_pre')
print(peak_demand_joule_terr_pre, ann_energy_demand_kwh_terr_pre,
      ann_elec_demand_kwh_terr_pre, ann_heat_demand_kwh_terr_pre)

(peak_demand_joule_terr_post, ann_energy_demand_kwh_terr_post,
 ann_elec_demand_kwh_terr_post, ann_heat_demand_kwh_terr_post) = _run_simulation(
    '/idf/terr_post/terraced_post.idf', 'idf/terr_post/' + EPW,
    '/idf/terr_post', 'idf/terr_post')
print(peak_demand_joule_terr_post, ann_energy_demand_kwh_terr_post,
      ann_elec_demand_kwh_terr_post, ann_heat_demand_kwh_terr_post)

# --- Mid-floor apartment, pre / post retrofit ------------------------------
# NOTE(review): the pre-retrofit run writes output to /idf/apt_pre but reads
# results from idf/mid_apt_pre. Preserved from the original; verify this is
# intended (the post-retrofit run uses mid_apt_post consistently).
(peak_demand_joule_mid_apt_pre, ann_energy_demand_kwh_mid_apt_pre,
 ann_elec_demand_kwh_mid_apt_pre, ann_heat_demand_kwh_mid_apt_pre) = _run_simulation(
    '/idf/apt_pre/mid_apt_pre.idf', 'idf/apt_pre/' + EPW,
    '/idf/apt_pre', 'idf/mid_apt_pre')
print(peak_demand_joule_mid_apt_pre, ann_energy_demand_kwh_mid_apt_pre,
      ann_elec_demand_kwh_mid_apt_pre, ann_heat_demand_kwh_mid_apt_pre)

(peak_demand_joule_mid_apt_post, ann_energy_demand_kwh_mid_apt_post,
 ann_elec_demand_kwh_mid_apt_post, ann_heat_demand_kwh_mid_apt_post) = _run_simulation(
    '/idf/mid_apt_post/mid_apt_post.idf', 'idf/mid_apt_post/' + EPW,
    '/idf/mid_apt_post', 'idf/mid_apt_post')
print(peak_demand_joule_mid_apt_post, ann_energy_demand_kwh_mid_apt_post,
      ann_elec_demand_kwh_mid_apt_post, ann_heat_demand_kwh_mid_apt_post)

# --- Top-floor apartment, pre / post retrofit ------------------------------
(peak_demand_joule_top_apt_pre, ann_energy_demand_kwh_top_apt_pre,
 ann_elec_demand_kwh_top_apt_pre, ann_heat_demand_kwh_top_apt_pre) = _run_simulation(
    '/idf/top_apt_pre/top_apt_pre.idf', 'idf/top_apt_pre/' + EPW,
    '/idf/top_apt_pre', 'idf/top_apt_pre')
print(peak_demand_joule_top_apt_pre, ann_energy_demand_kwh_top_apt_pre,
      ann_elec_demand_kwh_top_apt_pre, ann_heat_demand_kwh_top_apt_pre)

(peak_demand_joule_top_apt_post, ann_energy_demand_kwh_top_apt_post,
 ann_elec_demand_kwh_top_apt_post, ann_heat_demand_kwh_top_apt_post) = _run_simulation(
    '/idf/top_apt_post/top_apt_post.idf', 'idf/top_apt_post/' + EPW,
    '/idf/top_apt_post', 'idf/top_apt_post')
print(peak_demand_joule_top_apt_post, ann_energy_demand_kwh_top_apt_post,
      ann_elec_demand_kwh_top_apt_post, ann_heat_demand_kwh_top_apt_post)

# --- Aggregate all runs into one table -------------------------------------
# (labels preserved byte-for-byte from the original output)
peak_data = [
    ['Detatched housepre', peak_demand_joule_det_pre, ann_energy_demand_kwh_det_pre,
     ann_elec_demand_kwh_det_pre, ann_heat_demand_kwh_det_pre],
    ['Detatched housepost', peak_demand_joule_det_post, ann_energy_demand_kwh_det_post,
     ann_elec_demand_kwh_det_post, ann_heat_demand_kwh_det_post],
    ['Semi detatched housepre', peak_demand_joule_semid_pre, ann_energy_demand_kwh_semid_pre,
     ann_elec_demand_kwh_semid_pre, ann_heat_demand_kwh_semid_pre],
    ['Semi detatched housepost', peak_demand_joule_semid_post, ann_energy_demand_kwh_semid_post,
     ann_elec_demand_kwh_semid_post, ann_heat_demand_kwh_semid_post],
    ['Terraced housepre', peak_demand_joule_terr_pre, ann_energy_demand_kwh_terr_pre,
     ann_elec_demand_kwh_terr_pre, ann_heat_demand_kwh_terr_pre],
    ['Terraced housepost', peak_demand_joule_terr_post, ann_energy_demand_kwh_terr_post,
     ann_elec_demand_kwh_terr_post, ann_heat_demand_kwh_terr_post],
    ['Apartmentpre', peak_demand_joule_mid_apt_pre, ann_energy_demand_kwh_mid_apt_pre,
     ann_elec_demand_kwh_mid_apt_pre, ann_heat_demand_kwh_mid_apt_pre],
    ['Apartmentpost', peak_demand_joule_mid_apt_post, ann_energy_demand_kwh_mid_apt_post,
     ann_elec_demand_kwh_mid_apt_post, ann_heat_demand_kwh_mid_apt_post],
    ['Top floor apt.pre', peak_demand_joule_top_apt_pre, ann_energy_demand_kwh_top_apt_pre,
     ann_elec_demand_kwh_top_apt_pre, ann_heat_demand_kwh_top_apt_pre],
    ['Top floor apt.post', peak_demand_joule_top_apt_post, ann_energy_demand_kwh_top_apt_post,
     ann_elec_demand_kwh_top_apt_post, ann_heat_demand_kwh_top_apt_post],
]
df_peaks = pd.DataFrame(peak_data, columns = ['dwelling_type','peak_hourly_elec_demand(J)', "annual_energy_demand_kwh", "annual_elec_demand_kwh", "annual_heat_demand_kwh"])
df_peaks
# Peak hourly energy in joules over one hour -> average power in kW.
df_peaks["peak_elec_demand(kW)"] = df_peaks["peak_hourly_elec_demand(J)"]/3600000
# NOTE(review): kVA is conventionally kW / power-factor; multiplying by 0.85
# *reduces* the figure instead. Preserved from the original -- confirm the
# intended formula before relying on this column.
df_peaks["peak_elec_demand(kVA)"] = df_peaks["peak_elec_demand(kW)"]*0.85
df_peaks
df_peaks.to_csv("/idf/resi_modelling/energy_demand_by_building_type.csv")
```
| github_jupyter |
<a href="https://colab.research.google.com/github/ewotawa/secure_private_ai/blob/master/Section_2_Federated_Learning_Final_Project.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Federated Learning Final Project
## Overview
* See <a href="https://classroom.udacity.com/nanodegrees/nd185/parts/3fe1bb10-68d7-4d84-9c99-9539dedffad5/modules/28d685f0-0cb1-4f94-a8ea-2e16614ab421/lessons/c8fe481d-81ea-41be-8206-06d2deeb8575/concepts/a5fb4b4c-e38a-48de-b2a7-4e853c62acbe">video</a> for additional details.
* Do Federated Learning where the central server is not trusted with the raw gradients.
* In the final project notebook, you'll receive a dataset.
* Train on the dataset using Federated Learning.
* The gradients should not come up to the server in raw form.
* Instead, use the new .move() command to move all of the gradients to one of the workers, sum them up there, and then bring that aggregated batch up to the central server.
* Idea: the central server never actually sees the raw gradient for any person.
* We'll look at secure aggregation in course 3.
* For now, do a larger-scale Federated Learning case where you handle the gradients in a special way.
## Approach
* Use the method illustrated in the "DEEP LEARNING" article referenced below. Update the code such that the MNIST model trains locally. Updated for my personal code style preferences.
* Per conversation in the SPAIC Slack channel, use of a federated data loader approach trains the model and keeps the disaggregated gradients off of the local machine. The aggregate model returns when model.get() is called.
* Contacted the team at OpenMined. They confirmed that PySyft currently does not work with GPUs, although updates are in progress. (7/18/2019).
## References
* <a href = "https://blog.openmined.org/upgrade-to-federated-learning-in-10-lines/">DEEP LEARNING -> FEDERATED LEARNING IN 10 LINES OF PYTORCH + PYSYFT</a>
* <a href ="https://github.com/udacity/private-ai/pull/10">added data for Federated Learning project</a>
* <a href="https://github.com/OpenMined/PySyft/blob/master/examples/tutorials/Part%206%20-%20Federated%20Learning%20on%20MNIST%20using%20a%20CNN.ipynb">Part 6 - Federated Learning on MNIST using a CNN.ipynb</a>
* <a href="https://docs.google.com/spreadsheets/d/1x-QQK-3Wn86bvSbNTf2_p2FXVCqiic2QwjcArQEuQlg/edit#gid=0">Slack Channel's reference sheet </a>
* <a href="https://github.com/ucalyptus/Federated-Learning/blob/master/Federated%20Learning.ipynb">Federated Learning Example from Slack Channel reference sheet</a>
### Install libraries and dependencies
```
!pip install syft
import syft as sy
!pip install torch
!pip install torchvision
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
from torchvision import datasets, transforms
import numpy as np

# Hook PyTorch so tensors gain the extra methods PySyft needs for
# federated learning (.send(), .get(), ...).
hook = sy.TorchHook(torch)
# Two virtual workers hold the training-data shards; `aggr` is a third
# worker reserved for aggregating gradients away from the central server.
vw00 = sy.VirtualWorker(hook, id="vw00")
vw01 = sy.VirtualWorker(hook, id="vw01")
aggr = sy.VirtualWorker(hook, id="aggr")
class Arguments():
    """Hyper-parameter container for the federated MNIST experiment."""

    def __init__(self):
        # Training configuration, mirroring the usual argparse defaults.
        defaults = {
            'batch_size': 64,         # federated training batch size
            'test_batch_size': 1000,  # batch size for evaluation
            'epochs': 10,             # full passes over the federated data
            'lr': 0.01,               # SGD learning rate
            'momentum': 0.5,          # declared but unused by the SGD call below
            'no_cuda': False,         # set True to force CPU
            'seed': 1,                # torch RNG seed
            'log_interval': 10,       # batches between progress prints
            'save_model': False,      # persist weights after training
        }
        for name, value in defaults.items():
            setattr(self, name, value)
args = Arguments()
use_cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
device = torch.device("cuda" if use_cuda else "cpu")
# DataLoader worker settings only matter when feeding a GPU.
kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
# Note: removed **kwargs from end of federated_train_loader and test_loader definitions.
# Standard MNIST normalization constants (dataset mean / std).
# (the duplicated `transform =` below is a harmless chained assignment)
transform = transform=transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081,))
])
# .federate() splits the training set across the two virtual workers; the
# FederatedDataLoader then yields batches that live on those workers.
federated_train_loader = sy.FederatedDataLoader(datasets.MNIST('../data', train=True, download=True, transform=transform).federate((vw00, vw01)),
    batch_size=args.batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(datasets.MNIST('../data', train=False, transform=transform),
    batch_size=args.test_batch_size, shuffle=True)
class Net(nn.Module):
    """LeNet-style CNN for 28x28 MNIST digits, emitting log-probabilities."""

    def __init__(self):
        super(Net, self).__init__()
        # Two 5x5 conv stages followed by a two-layer classifier head.
        self.conv1 = nn.Conv2d(1, 20, 5, 1)
        self.conv2 = nn.Conv2d(20, 50, 5, 1)
        self.fc1 = nn.Linear(4*4*50, 500)
        self.fc2 = nn.Linear(500, 10)

    def forward(self, x):
        # Stage 1: 28x28 -> conv 24x24 -> pool 12x12.
        out = F.max_pool2d(F.relu(self.conv1(x)), 2, 2)
        # Stage 2: 12x12 -> conv 8x8 -> pool 4x4.
        out = F.max_pool2d(F.relu(self.conv2(out)), 2, 2)
        # Flatten the 50-channel 4x4 feature map and classify.
        flat = out.view(-1, 4*4*50)
        hidden = F.relu(self.fc1(flat))
        return F.log_softmax(self.fc2(hidden), dim=1)
def train(args, model, device, train_loader, optimizer, epoch):
    """Run one federated training epoch.

    For each batch, the model is sent to the worker holding the data,
    trained there, and retrieved -- raw per-sample data never leaves
    the worker.

    Parameters: args (hyper-parameters), model (syft-hooked nn.Module),
    device (unused: data stays on the workers), train_loader
    (sy.FederatedDataLoader), optimizer, epoch (for logging only).
    """
    model.train()
    # BUG FIX: iterate the `train_loader` parameter; the original closed
    # over the module-level `federated_train_loader`, ignoring its argument.
    for batch_idx, (data, target) in enumerate(train_loader):
        model.send(data.location)  # send the model to the worker holding this batch
        # NOTE: no .to(device) move -- PySyft did not support GPU tensors here.
        optimizer.zero_grad()
        output = model(data)
        loss = F.nll_loss(output, target)
        loss.backward()
        optimizer.step()
        model.get()  # retrieve the updated model from the worker
        if batch_idx % args.log_interval == 0:
            loss = loss.get()  # fetch the loss value back from the worker
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * args.batch_size, len(train_loader) * args.batch_size,
                100. * batch_idx / len(train_loader), loss.item()))
def test(args, model, device, test_loader):
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
# data, target = data.to(device), target.to(device)
output = model(data)
test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss
pred = output.argmax(1, keepdim=True) # get the index of the max log-probability
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)))
# model = Net().to(device)
# GPU path above left disabled; the model stays on CPU.
model = Net()
optimizer = optim.SGD(model.parameters(), lr=args.lr) # TODO momentum is not supported at the moment
# Alternate one federated training epoch with a centralized evaluation pass.
for epoch in range(1, args.epochs + 1):
    train(args, model, device, federated_train_loader, optimizer, epoch)
    test(args, model, device, test_loader)
# Optionally persist the trained weights.
if (args.save_model):
    torch.save(model.state_dict(), "mnist_cnn.pt")
```
| github_jupyter |
## Import the necessary libraries
```
import numpy as np
import missingno as miss
import os
import glob
from pathlib import Path
from datetime import datetime
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import sklearn
from xgboost import XGBClassifier
%matplotlib inline
```
## Importing Data
```
print(f"Current directory: {Path.cwd()}")
print(f"Home directory: {Path.home()}")
# Machine-specific absolute paths to the interim/processed datasets.
path = "/Users/davidburton/DS/M3/KickstarterSuccessClassifier/data/interim/ks-projects-201801.csv"
path_processed = "/Users/davidburton/DS/M3/KickstarterSuccessClassifier/data/processed/ks-projects-201801.csv"
path_processed2 = "../data/processed/explored.csv"
df = pd.read_csv(path,
    encoding = "ISO-8859-1")
df.head(2)
df.shape
df.isnull().any()
# Primer
# What I want to predict
target = "outcome"
# The Features or Attributes
# NOTE(review): the positional axis argument in drop(target, 1) is
# deprecated in newer pandas; prefer df.drop(columns=target).
features = df.drop(target,1).columns
# Group feature names by dtype for a quick schema overview.
features_by_dtype = {}
for f in features:
    dtype = str(df[f].dtype)
    if dtype not in features_by_dtype.keys():
        features_by_dtype[dtype] = [f]
    else:
        features_by_dtype[dtype] += [f]
# How many features fall under each dtype.
for k in features_by_dtype.keys():
    string = "%s: %s" % (k , len(features_by_dtype[k]))
    print(string)
# Peek at the first dtype group: unique-value count per feature.
keys = iter(features_by_dtype.keys())
k = next(keys)
dtype_list = features_by_dtype[k]
for d in dtype_list:
    string = "%s: %s" % (d,len(df[d].unique()))
    print(string)
sns.pairplot(df)
# Pairwise relationships among the numeric campaign features.
g = sns.pairplot(df[["backers", "pledged", "goal", "deadline_dayofweek",
    "deadline_weekofyear", "launched_dayofweek",
    "launched_weekofyear", "launch_hourofday", "duration_days"]],
    diag_kind="hist")
for ax in g.axes.flat:
    plt.setp(ax.get_xticklabels(), rotation=45)
df.columns
df['outcome'].value_counts().plot(kind='box');
df.columns
# Project names are free text; drop them for modelling.
df.drop(['name'], axis = 1, inplace=True)
df.category.value_counts()
df.corr(method='spearman')
df.corr(method='pearson')
df.groupby('outcome').nunique()
# less backers means more likely to fail
# longer campaigns more likely to fail
df.category.unique()
# Create a category dictionary.
# NOTE(review): category_dict/category_list are built here but never used below.
category_dict = {}
category_list = ['Publishing', 'Film & Video', 'Music', 'Food', 'Crafts', 'Games',
    'Design', 'Comics', 'Fashion', 'Theater', 'Art', 'Photography',
    'Technology', 'Dance', 'Journalism']
df.category.value_counts(normalize=True).plot(kind = 'pie', figsize=(14,7))
# Bar chart of project counts per category.
df_cat = df.category.value_counts()
df_cat = pd.DataFrame(df_cat)
df_cat = df_cat.rename(columns={'category':'count'})
df_cat['category'] = df_cat.index
ax = sns.barplot(x="category" , y= "count", data= df_cat, palette='rainbow')
ax.set_xlabel("Category", fontweight='bold')
ax.set_ylabel("Number of Projects", fontweight='bold')
plt.xticks(rotation=90);
```
Start doing some modeling
```
#encode
df.dtypes
# Convert date-like object columns into proper datetimes.
df.launch_time = pd.to_datetime(df.launch_time)
df.launched = pd.to_datetime(df.launched)
df.deadline = pd.to_datetime(df.deadline)
# One-hot encode the categorical features (drop_first avoids collinearity).
df_dummies = pd.get_dummies(df[['sub_category', 'category',
    'currency', 'country']], drop_first=True)
df = df_dummies.merge(df, left_index=True, right_index=True)
# Drop the original categorical object features now that dummies exist.
# NOTE(review): positional axis argument is deprecated in newer pandas.
df = df.drop(['sub_category', 'category',
    'currency', 'country'],1)
df.shape
df.head(2)
df.columns
#https://elitedatascience.com/python-machine-learning-tutorial-scikit-learn
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
#families
#random forest
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomForestClassifier
#cross-validation pipeline
from sklearn.pipeline import make_pipeline
from sklearn.model_selection import GridSearchCV
#evaluation metrics
from sklearn.metrics import mean_squared_error, r2_score
#for saving models
from sklearn.externals import joblib
# Keep only features known at the start of a campaign -- dropping
# backers/pledged/duration avoids leaking outcome information.
X = df.drop(['outcome', 'backers', 'pledged','duration','deadline',
    'launched', 'launch_time'],1)
#Target Variable
y = df.outcome
X=X.astype(int);
y.unique()
# Binarise the target: failed -> 0, successful -> 1.
y.replace('failed',value=0,inplace=True)
y.replace('successful',value=1,inplace=True)
y.value_counts(normalize=True)
y = y.astype(int)
# Stratified split keeps the class balance in both train and test.
X_train, X_test, y_train, y_test = train_test_split(X, y,
    test_size=0.2,
    random_state=123,
    stratify=y)
```
Fit the transformer on the training set (saving the means and standard deviations)<br>
Apply the transformer to the training set (scaling the training data)<br>
Apply the transformer to the test set (using the same means and standard deviations)<br>
```
# Fit the transformer (stores the training means and standard deviations).
scaler = preprocessing.StandardScaler().fit(X_train)
# sanity check
X_train_scaled = scaler.transform(X_train)
# NOTE(review): %%time is a cell magic and only works as the first line of a cell.
%%time
X_train_scaled.mean(axis=0)[:10]
X_train_scaled.std()
# Scale the test data with the *training* statistics (no refit).
X_test_scaled = scaler.transform(X_test)
# sanity check
X_test_scaled.mean(axis=0)[:10]
X_test_scaled.std()
# Easier method: a pipeline bundling preprocessing with the model, so
# cross-validation scales each fold correctly.
pipeline = make_pipeline(preprocessing.StandardScaler(),
    RandomForestClassifier(n_estimators=100))
```
The above is a modeling pipeline that first transforms the data using StandardScaler() and then fits a model using a random forest classifier.
```
# figuring out hyperparameters
pipeline.get_params()
# Hyper-parameter grid to tune for the random-forest step of the pipeline.
hyperparameters = { 'randomforestclassifier__max_features' : ['auto', 'sqrt', 'log2'],
    'randomforestclassifier__max_depth': [None, 5, 3, 1]}
```
Cross-validation<br>
1. Split data into k equal parts, or "folds" (typically k=10).<br>
2. Preprocess k-1 training folds.<br>
3. Train model on k-1 folds (e.g. the first 9 folds).<br>
4. Preprocess the hold-out fold using the same transformations from step(2)<br>
5. Evaluate it on the remaining "hold-out" fold (e.g. the 10th fold).<br>
6. Perform steps (2) and (3) k times, each time holding out a different fold.<br>
7. Aggregate the performance across all k folds. This is the performance metric.<br>
```
%%time
# 10-fold cross-validated grid search over the full pipeline.
clf = GridSearchCV(pipeline, hyperparameters, cv=10)
%%time
# Fit and tune model
#clf.fit(X_train, y_train)
# Baseline forest for parameter inspection (grid-search fit left disabled above).
rf = RandomForestClassifier(random_state = 42)
from pprint import pprint
# Look at parameters used by our current forest
print('Parameters currently in use:\n')
pprint(rf.get_params())
```
| github_jupyter |
```
% load_ext autoreload
% autoreload 2
% matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import torch
device = 'cuda' if torch.cuda.is_available() else 'cpu'
import os, sys
opj = os.path.join
from tqdm import tqdm
from ex_mnist import p
from dset import get_dataloader
sys.path.append('../../src/models')
from models import CNN, FFN
# load data
train_loader, test_loader = get_dataloader(p.data_path,
    batch_size=p.batch_size)
# Instantiate the two models to compare (CNN vs feed-forward net).
cnn = CNN().to(device)
ffn = FFN().to(device)
```
# train cnn
```
optimizer = torch.optim.Adam(cnn.parameters(), lr=0.001)
criterion = torch.nn.CrossEntropyLoss()
num_epochs = 50
train_losses = []
for epoch in range(num_epochs):
    epoch_loss = 0.
    for batch_idx, (data, y) in enumerate(train_loader):
        data = data.to(device)
        y = y.to(device)
        # zero grad
        optimizer.zero_grad()
        output = cnn(data)
        loss = criterion(output, y)
        # backward
        loss.backward()
        # update step
        optimizer.step()
        iter_loss = loss.item()
        epoch_loss += iter_loss
        # \r keeps the progress print on a single line per epoch.
        print('\rTrain Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
            epoch, batch_idx * len(data), len(train_loader.dataset),
            100. * batch_idx / len(train_loader), iter_loss), end='')
    # Mean loss over the epoch's batches.
    mean_epoch_loss = epoch_loss / (batch_idx + 1)
    train_losses.append(mean_epoch_loss)
# save model
torch.save(cnn.state_dict(), opj(p.model_path, 'CNN.pth'))
plt.plot(train_losses)
```
# train ffn
```
# Same training procedure as the CNN above, applied to the feed-forward net.
optimizer = torch.optim.Adam(ffn.parameters(), lr=0.001)
criterion = torch.nn.CrossEntropyLoss()
num_epochs = 50
train_losses = []
for epoch in range(num_epochs):
    epoch_loss = 0.
    for batch_idx, (data, y) in enumerate(train_loader):
        data = data.to(device)
        y = y.to(device)
        # zero grad
        optimizer.zero_grad()
        output = ffn(data)
        loss = criterion(output, y)
        # backward
        loss.backward()
        # update step
        optimizer.step()
        iter_loss = loss.item()
        epoch_loss += iter_loss
        # \r keeps the progress print on a single line per epoch.
        print('\rTrain Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
            epoch, batch_idx * len(data), len(train_loader.dataset),
            100. * batch_idx / len(train_loader), iter_loss), end='')
    # Mean loss over the epoch's batches.
    mean_epoch_loss = epoch_loss / (batch_idx + 1)
    train_losses.append(mean_epoch_loss)
# save model
torch.save(ffn.state_dict(), opj(p.model_path, 'FFN.pth'))
plt.plot(train_losses)
```
# model prediction
```
# check prediction: collect test-set predictions from both models.
m = len(test_loader.dataset)
batch_size = test_loader.batch_size
y_pred_cnn = np.zeros(m)
y_pred_ffn = np.zeros(m)
y_true = np.zeros(m)
with torch.no_grad():
    for batch_idx, (data, y) in tqdm(enumerate(test_loader, 0), total=int(np.ceil(m / batch_size))):
        data = data.to(device)
        # cnn prediction (class = argmax over logits)
        outputs_cnn = cnn(data)
        _, y_pred = torch.max(outputs_cnn.data, 1)
        y_pred_cnn[batch_idx * batch_size:(batch_idx + 1) * batch_size] = y_pred.cpu().numpy()
        # ffn prediction
        outputs_ffn = ffn(data)
        _, y_pred = torch.max(outputs_ffn.data, 1)
        y_pred_ffn[batch_idx * batch_size:(batch_idx + 1) * batch_size] = y_pred.cpu().numpy()
        # labels (loader yields CPU labels, so no device move is needed)
        y_true[batch_idx * batch_size:(batch_idx + 1) * batch_size] = y.numpy()
print("CNN accuracy {:.5f}% FFN accuracy {:.5f}%".format((y_true == y_pred_cnn).sum() / m * 100,
    (y_true == y_pred_ffn).sum() / m * 100))
```
| github_jupyter |
# File I/O
So far we discussed how to process data, how to build, train and test deep learning models. However, at some point we are likely happy with what we obtained and we want to save the results for later use and distribution. Likewise, when running a long training process it is best practice to save intermediate results (checkpointing) to ensure that we don't lose several days worth of computation when tripping over the power cord of our server. At the same time, we might want to load a pretrained model (e.g. we might have word embeddings for English and use it for our fancy spam classifier). For all of these cases we need to load and store both individual weight vectors and entire models. This section addresses both issues.
## NDArray
In its simplest form, we can directly use the `save` and `load` functions to store and read NDArrays separately. This works just as expected.
```
from mxnet import nd
from mxnet.gluon import nn
# Save a single NDArray to disk.
x = nd.arange(4)
nd.save('x-file', x)
```
Then, we read the data from the stored file back into memory.
```
# Read the stored NDArray back into memory.
x2 = nd.load('x-file')
x2
```
We can also store a list of NDArrays and read them back into memory.
```
# Store and reload a list of NDArrays in one file.
y = nd.zeros(4)
nd.save('x-files', [x, y])
x2, y2 = nd.load('x-files')
(x2, y2)
```
We can even write and read a dictionary that maps from a string to an NDArray. This is convenient, for instance when we want to read or write all the weights in a model.
```
# Store and reload a string -> NDArray mapping (e.g. named model weights).
mydict = {'x': x, 'y': y}
nd.save('mydict', mydict)
mydict2 = nd.load('mydict')
mydict2
```
## Gluon Model Parameters
Saving individual weight vectors (or other NDArray tensors) is useful but it
gets very tedious if we want to save (and later load) an entire model. After
all, we might have hundreds of parameter groups sprinkled throughout. Writing a
script that collects all the terms and matches them to an architecture is quite
some work. For this reason Gluon provides built-in functionality to load and
save entire networks rather than just single weight vectors. An important detail
to note is that this saves model *parameters* and not the entire model. I.e. if
we have a 3 layer MLP we need to specify the *architecture* separately. The
reason for this is that the models themselves can contain arbitrary code, hence
they cannot be serialized quite so easily (there is a way to do this for
compiled models - please refer to the [MXNet documentation](http://www.mxnet.io)
for the technical details on it). The result is that in order to reinstate a
model we need to generate the architecture in code and then load the parameters
from disk. The deferred initialization (:numref:`chapter_deferred_init`) is quite advantageous here since we can simply define a model without the need to put actual values in place. Let's start with our favorite MLP.
```
class MLP(nn.Block):
    """A minimal multilayer perceptron: one 256-unit ReLU hidden layer and a
    10-unit linear output layer.

    NOTE: the attribute names `hidden` and `output` determine the parameter
    names written by save_parameters(), so renaming them would invalidate a
    previously saved 'mlp.params' file.
    """
    def __init__(self, **kwargs):
        super(MLP, self).__init__(**kwargs)
        self.hidden = nn.Dense(256, activation='relu')
        self.output = nn.Dense(10)
    def forward(self, x):
        # Hidden layer (with ReLU) followed by the linear output layer.
        return self.output(self.hidden(x))
net = MLP()
net.initialize()
# Run one batch through the net so deferred initialization fixes the shapes.
x = nd.random.uniform(shape=(2, 20))
y = net(x)
```
Next, we store the parameters of the model as a file with the name 'mlp.params'.
```
# Persist only the model *parameters* (not the architecture) to 'mlp.params'.
net.save_parameters('mlp.params')
```
To check whether we are able to recover the model we instantiate a clone of the original MLP model. Unlike the random initialization of model parameters, here we read the parameters stored in the file directly.
```
# Rebuild the architecture in code, then restore the saved parameters into it.
clone = MLP()
clone.load_parameters('mlp.params')
```
Since both instances have the same model parameters, the computation result of the same input `x` should be the same. Let's verify this.
```
# Same parameters + same input => the clone must produce an identical output.
yclone = clone(x)
yclone == y
```
## Summary
* The `save` and `load` functions can be used to perform File I/O for NDArray objects.
* The `load_parameters` and `save_parameters` functions allow us to save entire sets of parameters for a network in Gluon.
* Saving the architecture has to be done in code rather than in parameters.
## Exercises
1. Even if there is no need to deploy trained models to a different device, what are the practical benefits of storing model parameters?
1. Assume that we want to reuse only parts of a network to be incorporated into a network of a *different* architecture. How would you go about using, say the first two layers from a previous network in a new network.
1. How would you go about saving network architecture and parameters? What restrictions would you impose on the architecture?
## Scan the QR Code to [Discuss](https://discuss.mxnet.io/t/2329)

| github_jupyter |
# Twitter Konversationen zu einem Thema als Netzwerk untersuchen
- Aus Twitter-Daten kann man besonders gut Netzwerke basteln.
- Dabei können wir frei definieren, wann eigentlich ein Nutzer mit einem anderen verbunden ist. Die gebräuchlichsten Definitionen sind:
 1. Nutzer A retweetet Nutzer B (RT plotti was für ein super tweet)
 2. Nutzer A erwähnt Nutzer B (Ich geh das so die Straße lang und seh @plotti)
3. Nutzer A schreibt Nutzer B (@plotti was geht heute)
 4. (Nutzer A folgt Nutzer B (Leider um die Struktur einer Konversationen nicht sooo hilfreich. Außerdem muss man über Twarc recht viele User sammeln um diese Information zu erhalten, es geht aber.))
# Daten Sammeln รผber Twarc
- https://github.com/DocNow/twarc
- Twarc: A command line tool (and Python library) for archiving Twitter JSON
- Sehr praktisch um Tweets zu einem Stichwort zu sammeln.
- Man muss eine Twitter app beantragen :(
- ```pip install twarc```
- ```twarc configure```

## Daten Sammeln
```twarc search zรผrich > zรผrich.json```
```
import sys
import json
import re
import numpy as np
from datetime import datetime
import pandas as pd
import networkx as nx
# Path to the twarc search output (one JSON-encoded tweet per line).
tweetfile = 'zรผrich.json'
```
# 1. Kanten erzeugen durch Retweets
- Personen retweeten sich und deswegen erzeugen wir eine Kante zwischen ihnen.
```
# 1. Export edges from Retweets
# Each retweet contributes one directed edge: retweeter -> original author.
# Rows are accumulated in plain lists and converted to DataFrames once at the
# end: DataFrame.append() was removed in pandas 2.0 and re-allocating a frame
# per row is quadratic anyway. The file is opened via `with` so it is closed
# even on error (the original handle was never closed).
user_rows = []
edge_rows = []
with open(tweetfile, 'r') as fh:
    for line in fh:
        try:
            tweet = json.loads(line)
        except ValueError:      # skip lines that are not valid JSON
            continue
        if 'retweeted_status' not in tweet:
            continue            # only retweets create edges in this cell
        for user in (tweet['user'], tweet['retweeted_status']['user']):
            user_rows.append({'Id': user['id_str'],
                              'Label': user['screen_name'],
                              'user_created_at': user['created_at'],
                              'profile_image': user['profile_image_url_https'],
                              'followers_count': user['followers_count'],
                              'friends_count': user['friends_count']})
        edge_rows.append({'Source': tweet['user']['id_str'],
                          'Target': tweet['retweeted_status']['user']['id_str'],
                          'Time': str(datetime.strptime(tweet['created_at'],
                                                        '%a %b %d %H:%M:%S +0000 %Y')),
                          'Strength': 1})
userdata = pd.DataFrame(user_rows, columns=('Id', 'Label', 'user_created_at',
                                            'profile_image', 'followers_count',
                                            'friends_count'))
edges = pd.DataFrame(edge_rows, columns=('Source', 'Target', 'Time', 'Strength'))
userdata.head()
edges.head()
```
# 2. Kanten erzeugen durch Mentions
- Personen erwähnen sich und deshalb erzeugen wir eine Kante zwischen den Personen.
```
# 2. Edges from mentions: author -> every account mentioned in the tweet.
# Rows are collected in lists and turned into DataFrames once at the end
# (DataFrame.append() was removed in pandas 2.0 and was O(n^2) per row).
user_rows = []
edge_rows = []
seen_ids = set()    # user Ids already present in user_rows
with open(tweetfile, 'r') as fh:
    for line in fh:
        try:
            tweet = json.loads(line)
        except ValueError:      # skip malformed lines
            continue
        mentions = tweet['entities']['user_mentions']
        if not mentions:
            continue
        author = tweet['user']
        for mention in mentions:
            # The tweet author always gets a full profile row.
            user_rows.append({'Id': author['id_str'],
                              'Label': author['screen_name'],
                              'user_created_at': author['created_at'],
                              'profile_image': author['profile_image_url_https'],
                              'followers_count': author['followers_count'],
                              'friends_count': author['friends_count']})
            seen_ids.add(author['id_str'])
            if mention['id_str'] not in seen_ids:
                # BUGFIX: the original appended the *author's* Id/name again
                # here, so mentioned-only users never entered the node table.
                # Record the mentioned user instead (profile details unknown).
                # Also: exact-Id set membership replaces the original
                # str.contains() substring/regex match, which could match
                # one Id inside another.
                user_rows.append({'Id': mention['id_str'],
                                  'Label': mention['screen_name'],
                                  'user_created_at': np.nan,
                                  'profile_image': np.nan,
                                  'followers_count': np.nan,
                                  'friends_count': np.nan})
                seen_ids.add(mention['id_str'])
            # NOTE: as in the original, the 'Strength' column holds the tweet
            # timestamp; the later groupby only *counts* rows per pair.
            edge_rows.append({'Source': author['id_str'],
                              'Target': mention['id_str'],
                              'Strength': str(datetime.strptime(tweet['created_at'],
                                                                '%a %b %d %H:%M:%S +0000 %Y'))})
userdata = pd.DataFrame(user_rows, columns=('Id', 'Label', 'user_created_at',
                                            'profile_image', 'followers_count',
                                            'friends_count'))
edges = pd.DataFrame(edge_rows, columns=('Source', 'Target', 'Strength'))
```
# 3. Kanten erzeugen durch gemeinsame Kommunikation
- Personen diskutieren miteinander und deshalb erzeugen wir eine Kante zwischen ihnen.
```
# 3. Edges from replies: author -> the user being replied to.
# Same list-accumulation pattern as above (DataFrame.append() was removed in
# pandas 2.0), and exact-Id membership instead of str.contains() substring
# matching.
user_rows = []
edge_rows = []
seen_ids = set()    # user Ids already present in user_rows
with open(tweetfile, 'r') as fh:
    for line in fh:
        try:
            tweet = json.loads(line)
        except ValueError:      # skip malformed lines
            continue
        target_id = tweet['in_reply_to_user_id_str']
        if target_id is None:
            continue            # not a reply
        author = tweet['user']
        user_rows.append({'Id': author['id_str'],
                          'Label': author['screen_name'],
                          'user_created_at': author['created_at'],
                          'profile_image': author['profile_image_url_https'],
                          'followers_count': author['followers_count'],
                          'friends_count': author['friends_count']})
        seen_ids.add(author['id_str'])
        if target_id not in seen_ids:
            # The replied-to user only appears with Id + screen name; the
            # remaining profile fields are unknown from this tweet.
            user_rows.append({'Id': target_id,
                              'Label': tweet['in_reply_to_screen_name'],
                              'user_created_at': np.nan,
                              'profile_image': np.nan,
                              'followers_count': np.nan,
                              'friends_count': np.nan})
            seen_ids.add(target_id)
        # 'Strength' holds the timestamp, as in the original; the later
        # groupby only counts rows per (Source, Target) pair.
        edge_rows.append({'Source': author['id_str'],
                          'Target': target_id,
                          'Strength': str(datetime.strptime(tweet['created_at'],
                                                            '%a %b %d %H:%M:%S +0000 %Y'))})
userdata = pd.DataFrame(user_rows, columns=('Id', 'Label', 'user_created_at',
                                            'profile_image', 'followers_count',
                                            'friends_count'))
edges = pd.DataFrame(edge_rows, columns=('Source', 'Target', 'Strength'))
```
# Nur jene Kanten behalten, die eine gewisse Stärke haben.
```
# Minimum tie strength: how many times, in total, one tweeter mentioned or
# replied to the other. With 1 every pair that interacted at least once is
# kept; with 5 only pairs with five or more interactions survive, i.e. only
# the strongest bonds remain.
strengthLevel = 3
edges2 = (
    edges.groupby(['Source', 'Target'])['Strength']
    .count()
    .reset_index()
)
edges2 = edges2.loc[edges2['Strength'] >= strengthLevel]
len(edges2)
```
# Daten als Gephi Netzwerk Exportieren
```
def robust_decode(bs):
    """Decode *bs* to text.

    Tries UTF-8 first and falls back to Latin-1 (which can decode any byte
    sequence) - the behaviour the original docstring promised, whereas the
    old body silently *dropped* every non-ASCII byte instead. Inputs that
    are already str are returned unchanged, so the function also works on
    Python 3 where the ids/labels below are str objects.
    """
    if isinstance(bs, bytes):
        try:
            return bs.decode('utf-8')
        except UnicodeDecodeError:
            return bs.decode('latin-1')
    return bs
import sys
# reload() and sys.setdefaultencoding() only exist on Python 2; guard them so
# this cell also runs on Python 3, where str is already unicode.
if sys.version_info[0] == 2:
    reload(sys)  # noqa: F821 - Python 2 builtin
    sys.setdefaultencoding('utf8')
# Keep, per user Id, the row with the highest follower count.
userdata = userdata.sort_values(['Id', 'followers_count'], ascending=[True, False])
userdata = userdata.drop_duplicates(['Id'], keep='first')
# Nodes are all Ids that appear as Source or Target of a kept edge.
# (Series.append was removed in pandas 2.0 - concatenate instead.)
ids = pd.concat([edges2['Source'], edges2['Target']]).to_frame()
ids.columns = ['Id']
ids = ids.drop_duplicates()
nodes = pd.merge(ids, userdata, on='Id', how='left')
nodes = nodes.dropna()
nodes["Label"] = nodes["Label"].astype(str)
nodes["Id"] = nodes["Id"].astype(str)
G = nx.DiGraph(name="zรผrich")
for i, row in nodes.iterrows():
    G.add_node(robust_decode(row["Id"]), label=robust_decode(row["Label"]))
for i, row in edges2.iterrows():
    G.add_edge(robust_decode(row["Source"]), robust_decode(row["Target"]), weight=row["Strength"])
nx.write_gexf(G, "Zรผrich.gexf")
```
# Alternativ als csv speichern für Kumu.io
```
# Export nodes from the edges and add node attributes for both Sources and Targets.
# Keep, per user Id, the row with the highest follower count.
userdata = userdata.sort_values(['Id', 'followers_count'], ascending=[True, False])
userdata = userdata.drop_duplicates(['Id'], keep='first')
# Series.append was removed in pandas 2.0 - concatenate instead.
ids = pd.concat([edges2['Source'], edges2['Target']]).to_frame()
ids.columns = ['Id']
ids = ids.drop_duplicates()
nodes = pd.merge(ids, userdata, on='Id', how='left')
# change column names for Kumu import (Run this when using Kumu)
nodes.columns = ['Id', 'Label', 'Date', 'Image', 'followers_count', 'friends_count']
edges2.columns = ['From', 'To', 'Strength']
# Export nodes and edges to csv files
nodes.to_csv('nodes.csv', encoding='utf-8', index=False)
edges2.to_csv('edges.csv', encoding='utf-8', index=False)
```
| github_jupyter |
```
%matplotlib inline
import matplotlib.pyplot as plt # for plotting
import numpy as np # for matrix and vector computations
import pandas as pd
import seaborn as sns
```
### Debugging
* Python array indices start from zero
* Vector/matrix operations work only with numpy arrays.Inspect matrix operations to make sure that you are adding and multiplying matrices of compatible dimensions. Printing the dimensions of numpy arrays using the shape property will help you debug.
* If you want to do matrix multiplication, you need to use the dot function in numpy. For, example if A and B are two numpy matrices, then the matrix operation AB is np.dot(A, B)
## Return a 5x5 Identity Matrix
```
# A 5x5 identity matrix; np.identity(n) is equivalent to np.eye(n).
A = np.identity(5)
A
```
Implement linear regression with one variable to predict profits for a food truck. Suppose you are the CEO of a restaurant franchise and are considering different cities for opening a new outlet. The chain already has trucks in various cities and you have data for profits and populations from the cities. You would like to use this data to help you select which city to expand to next.
The file Data/ex1data1.txt contains the dataset for our linear regression problem. The first column is the population of a city (in 10,000s) and the second column is the profit of a food truck in that city (in $10,000s). A negative value for profit indicates a loss.
## 1) Load the dataset
```
# Load the dataset: comma-separated, column 0 = population, column 1 = profit.
data = np.loadtxt('ex1data1.txt',delimiter=',')
X = data[:,0]  # population of a city, in units of 10,000
y = data[:,1]  # food-truck profit in that city, in units of $10,000
# X and y are 1-D numpy arrays of shape (m,)
m = y.size # number of training samples
m
X.shape, y.shape, X.ndim, y.ndim
```
## 2) Plotting the Data
Before starting on any task, it is often useful to understand the data by visualizing it. For this dataset has only two properties to plot (profit and population).
```
"""
Plots the data points x and y into a new figure. Plots the data
points and gives the figure axes labels of population and profit.
Parameters
----------
x : array_like
Data point values for x-axis.
y : array_like
Data point values for y-axis. Note x and y should have the same size.
----
You can use the 'ro' option with plot to have the markers
appear as red circles. Furthermore, you can make the markers larger by
using plot(..., 'ro', ms=10), where `ms` refers to marker size. You
can also set the marker edge color using the `mec` property.
"""
def plotData(x,y):
fig = plt.figure(figsize=(8,6))
plt.plot(x,y,'ro',ms=10,mec='k')
plt.xlabel('Profit in $10,000')
plt.ylabel('Population of a city in 10,000')
plotData(X,y)
```
## 3) Gradient Descent
Fit the linear regression parameters $\theta$ to the dataset using gradient descent.
<a id="section2"></a>
### 3.1 Update Equations
The objective of linear regression is to minimize the cost function $J(\theta)$
$$ J(\theta) = \frac{1}{2m} \sum_{i=1}^m \left( h_{\theta}(x^{(i)}) - y^{(i)}\right)^2$$
where the hypothesis $h_\theta(x)$ is given by the linear model
$$ h_\theta(x) = \theta^Tx = \theta_0 + \theta_1 x_1$$
Recall that the parameters of your model are the $\theta_j$ values. These are
the values you will adjust to minimize cost $J(\theta)$. One way to do this is to
use the **batch gradient descent algorithm**. In batch gradient descent, each
iteration performs the update
$$ \theta_j = \theta_j - \alpha \frac{1}{m} \sum_{i=1}^m \left( h_\theta(x^{(i)}) - y^{(i)}\right)x_j^{(i)} \qquad \text{simultaneously update } \theta_j \text{ for all } j$$
With each step of gradient descent, your parameters $\theta_j$ come closer to the optimal values that will achieve the lowest cost J($\theta$).
<div class="alert alert-block alert-warning">
**Implementation Note:** We store each sample as a row in the the $X$ matrix in Python `numpy`. To take into account the intercept term ($\theta_0$), we add an additional first column to $X$ and set it to all ones. This allows us to treat $\theta_0$ as simply another 'feature'.
</div>
```
# Initially X holds the single feature x1 (population); prepend the intercept
# feature x0 = 1 so theta0 can be treated like any other weight.
#### Add a column of ones to X. The numpy function stack() joins arrays along a given axis.
# The first axis (axis=0) refers to rows (training samples), and second axis (axis=1) refers to columns (features).
X = np.stack([np.ones(m),X],axis=1) # This cell is executed only once!
```
<a id="section2"></a>
### 3.2 Computing the cost $J(\theta)$
As you perform gradient descent to minimize the cost function $J(\theta)$, it is helpful to monitor the convergence by computing the cost. Implement a function to calculate $J(\theta)$ so you can check the convergence of your gradient descent implementation.
Remember that the variables $X$ and $y$ are not scalar values. $X$ is a matrix whose rows represent the samples from the training set (feature) and $y$ (label) is a vector whose each element represent the value at a given row of $X$.
<a id="computeCost"></a>
```
"""
Compute cost for linear regression. Computes the cost of using theta as the
parameter for linear regression to fit the data points in X and y.
Parameters
----------
X : array_like
The input dataset of shape (m x n+1) dimesnions, where m is the number of samples,
and n is the number of features. We assume a vector of one's already
appended to the features so we have n+1 columns.
y : array_like
The values of the function at each data point. This is a vector of
shape (m, ) i.e. (mx1) dimensions
theta : array_like
The parameters for the hypothesis/regression function. This is a vector of
shape (n+1, ) i.e. (n+1)x1 dimensions.
Returns
-------
J : float - The value of the regression cost function.
"""
def computeCost(X,y,theta):
m = y.size # no. of training samples
J = 0
h = np.dot(X,theta) # X and theta are matrices
J = (1/(2 * m)) * np.sum(np.square(np.dot(X, theta) - y))
return J
# Sanity-check the cost function with two hand-picked parameter vectors.
J = computeCost(X,y ,theta=np.array([0.0,0.0])) # theta0 = 0, theta1 = 0
print(f"With theta = [0, 0] \nCost computed = {J:.2f}")
print()
J = computeCost(X,y ,theta=np.array([-1,2])) # theta0 = -1, theta1 = 2
print(f"With theta = [-1, 2] \nCost computed = {J:.2f}")
```
<a id="section3"></a>
### 3.3 Gradient descent
Complete a function which Implements gradient descent. Update $\theta$ with each iteration of the loop.
As you program, make sure you understand what you are trying to optimize and what is being updated. Keep in mind that the cost $J(\theta)$ is parameterized by the vector $\theta$, not $X$ and $y$. That is, we minimize the value of $J(\theta)$ by changing the values of the vector $\theta$, not by changing $X$ or $y$.
A good way to verify that gradient descent is working correctly is to look at the value of $J(\theta)$ and check that it is decreasing with each step.
```
"""
Performs gradient descent to learn `theta`. Updates theta by taking `num_iters`
gradient steps with learning rate `alpha`.
Parameters
----------
X : array_like
The input dataset of shape (m x n+1).
y : array_like
Value at given features. A vector of shape (m, ), i.e. (mx1) dimensions
theta : array_like
Initial values for the linear regression parameters.
A vector of shape (n+1, ), i.e. (n+1)x1 dimensions
alpha : float
The learning rate.
num_iters : int
The number of iterations for gradient descent.
Returns
-------
theta : array_like
The learned linear regression parameters. A vector of shape (n+1, ). This is the optimal theta
for which J is minimum
J_history : list
A python list for the values of the cost function after each iteration.
Instructions
------------
Peform a single gradient step on the parameter vector theta.
While debugging, it can be useful to print out the values of
the cost function (computeCost) and gradient here.
"""
def gradient_descent(X,y,theta,alpha,num_iters):
m = y.size # or y.shape[0] # number of training samples
# make a copy of theta, to avoid changing the original array, since numpy arrays are passed by reference to functions
theta = theta.copy()
J_history = [] # Use a python list to store cost in every iteration
for i in range(num_iters):
theta = theta - (alpha/m) * (np.dot(X,theta) - y).dot(X)
# print(theta)
# save the cost J in every iteration
min_cost = computeCost(X,y,theta)
J_history.append(min_cost)
# print(J_history[i])
return theta, J_history # theta will return 2 values --> theta0, theta1
# Initialize the fitting parameters (note: zeros, not random values).
theta = np.zeros(2)
# some gradient descent settings
iterations = 1500
alpha = 0.01
theta, J_history = gradient_descent(X,y,theta,alpha,iterations)
print('Theta found by gradient descent: {:.4f}, {:.4f}'.format(*theta)) # unpack theta0, theta1 into the format string
```
## 4) Plot the linear fit
```
plotData(X[:,1],y) # plot the samples; column 1 is the population feature (column 0 is the all-ones intercept)
# Overlay the fitted hypothesis line: h(x) = theta0 + theta1 * x
plt.plot(X[:,1],np.dot(X,theta),ls='-')
plt.legend(['Training Data','Linear Regression']); # markers = samples, line = fitted model
```
## 5) Predict some values
```
# we now have the optimal theta
# Predict values for population sizes of 35,000 and 70,000.
# The first argument to np.dot is a plain python list; numpy converts valid
# lists to arrays automatically. Populations and profits are both in 10,000s,
# so 3.5 -> 35,000 people and the predicted profit is scaled by 10,000.
predict1 = np.dot([1, 3.5], theta)
print(f"For population = 35,000, we predict a profit of {predict1 * 10000:.2f}")
predict2 = np.dot([1, 7], theta)
# BUGFIX: this message previously said "35,000" even though predict2 is the
# prediction for a population of 70,000.
print(f"For population = 70,000, we predict a profit of {predict2 * 10000:.2f}")
```
| github_jupyter |
# 과제
๋ฐ์ดํฐ์
์ ์ข ๋ ์ฝ๊ฒ ๋ค๋ฃฐ ์ ์๋๋ก ์ ์ฉํ ๋๊ตฌ๋ก์ torch.utils.data.Dataset๊ณผ torch.utils.data.DataLoader๋ฅผ ์ ๊ณต ํ๋ค.
์ด๋ฅผ ์ฌ์ฉํ๋ฉด ๋ฏธ๋ ๋ฐฐ์น ํ์ต, ๋ฐ์ดํฐ ์
ํ(shuffle), ๋ณ๋ ฌ ์ฒ๋ฆฌ๊น์ง ๊ฐ๋จํ ์ํํ ์ ์๋ค. ๊ธฐ๋ณธ์ ์ธ ์ฌ์ฉ ๋ฐฉ๋ฒ์ Dataset์ ์ ์ํ๊ณ , ์ด๋ฅผ DataLoader์ ์ ๋ฌํ๋ ๊ฒ์ด๋ค
# ์ปค์คํ
๋ฐ์ดํฐ์
(Custom Dataset)
torch.utils.data.Dataset์ ์์๋ฐ์ ์ง์ ์ปค์คํ
๋ฐ์ดํฐ์
(Custom Dataset)์ ๋ง๋ค ์ ์๋ค.
Dataset์ ์์๋ฐ์ ๋ค์ ๋ฉ์๋๋ค์ ์ค๋ฒ๋ผ์ด๋ ํ์ฌ ์ปค์คํ
๋ฐ์ดํฐ์
์ ์์ฑํด ๋ณด์.
์ปค์คํ
๋ฐ์ดํฐ์
์ ๋ง๋ค ๋, ์ผ๋จ ๊ฐ์ฅ ๊ธฐ๋ณธ์ ์ธ ๋ผ๋๋ ์๋์ ๊ฐ๋ค.
```
class CustomDataset(torch.utils.data.Dataset):        # skeleton only - bodies are filled in below
    def __init__(self):           # load / prepare the underlying data here
    def __len__(self):            # return the number of samples
    def __getitem__(self, idx):   # return the idx-th sample
```
```
import numpy as np
import pandas as pd
import torch
from torch.utils.data import Dataset
```
### Diabetes dataset
Diabetes dataset์ ์ด 442๋ช
์ ๋น๋จ๋ณ ํ์์ ๋ํ ์๋ฃ์ด๋ค.
age, sex, body mass index, average blood pressure, 6๊ฐ์ ํ์ฒญ๊ฐ์ผ๋ก ์ด๋ฃจ์ด์ ธ ์๋ค.
442๋ช
์ ๋น๋จ๋ณ ํ์๋ฅผ ๋์์ผ๋กํ ๊ฒ์ฌ ๊ฒฐ๊ณผ๋ฅผ ๋ํ๋ด๋ ๋ฐ์ดํฐ์ด๋ค.
- ํ๊ฒ ๋ฐ์ดํฐ : 1๋
๋ค ์ธก์ ํ ๋น๋จ๋ณ์ ์งํ๋ฅ
- ํน์ง ๋ฐ์ดํฐ (์ด ๋ฐ์ดํฐ์
์ ํน์ง ๋ฐ์ดํฐ๋ ๋ชจ๋ ์ ๊ทํ๋ ๊ฐ์ด๋ค.)
- Age
- Sex
- Body mass index
- Average blood pressure
- S1
- S2
- S3
- S4
- S5
- S6
```
# Quick look at the raw csv: column dtypes and non-null counts.
df = pd.read_csv('diabetes.csv')
df.info()
```
### ๋ฌธ์ 1
๋ค์์ ์ฐธ์กฐํ์ฌ `diabetes.csv` ํ์ผ์ ์ฝ๊ณ custom dataset์ผ๋ก ์์ฑํด ๋ณด์์ค.
* len(dataset)์ ํ์ ๋ ๋ฐ์ดํฐ์
์ ํฌ๊ธฐ๋ฅผ ๋ฆฌํดํ len
* dataset[i]์ ํ์ ๋ i๋ฒ์งธ ์ํ์ ๊ฐ์ ธ์ค๋๋ก ํ๋ ์ธ๋ฑ์ฑ์ ์ํ get_item
# TODO : ๋ค์์ ์ฝ๋๋ฅผ ์์ฑํ์์ค.
class CustomDataset(torch.utils.data.Dataset):
def __init__(self):
# ์ฌ๊ธฐ์ ์ฝ๋๋ฅผ ์์ฑํ์์ค.
def __len__(self):
# ์ฌ๊ธฐ์ ์ฝ๋๋ฅผ ์์ฑํ์์ค.
def __getitem__(self, idx):
# ์ฌ๊ธฐ์ ์ฝ๋๋ฅผ ์์ฑํ์์ค.
```
class CustomDataset(Dataset):
    """Diabetes dataset: 10 normalised features per patient plus one target
    column (disease progression), loaded from 'diabetes.csv'."""

    def __init__(self):
        raw = np.loadtxt('diabetes.csv', delimiter=',', dtype=np.float32, skiprows=1)
        # Every column except the last is a feature; the last is the target,
        # reshaped to (m, 1) so it matches the model's output shape.
        self.x_data = torch.from_numpy(raw[:, :-1])
        self.y_data = torch.from_numpy(raw[:, -1]).view(-1, 1)

    def __len__(self):
        """Number of samples in the dataset."""
        return len(self.x_data)

    def __getitem__(self, idx):
        """Return the (features, target) pair at position `idx`."""
        return self.x_data[idx], self.y_data[idx]
```
### ๋ฌธ์ 2
dataset์ ์์ฑํ๊ณ `__getitem__()` ์์ฑ์ ์ฌ์ฉํ์ฌ ๋ฐ์ดํฐ๋ฅผ ์กฐํํด ๋ณด์์ค.
```
dataset = CustomDataset()
# Fetch the first sample; the idiomatic spelling would be dataset[0].
dataset.__getitem__([0])
```
### ๋ฌธ์ 3
์ฝ์ด๋ค์ธ ๋ฐ์ด์
์ ์ ๊ฒฝ๋ง ๋ชจํ์ ์
๋ ฅ์ ์ฌ์ฉํ ์ ์๋๋ก `DataLoader` ๋ฅผ ์ฌ์ฉํ์์ค.
์ ์ ํ batch_size๋ฅผ ์ค์ ํ์์ค.
```
from torch.utils.data import DataLoader
# Mini-batches of 10 samples, reshuffled every epoch.
dataloader = DataLoader(dataset, batch_size=10, shuffle=True)
# BUGFIX: Python 3 iterators have no .next() method - use the builtin next().
x, y = next(iter(dataloader))
print(x.shape, y.shape)
```
### ๋ฌธ์ 4
diabetes๋ฅผ ์์ธกํ๋ ์ ๊ฒฝ๋ง ๋ชจํ์ ์์ฑํ์์ค.
```
import torch
from torch import nn
# A single linear layer: 10 input features -> 1 output value.
model = nn.Sequential(nn.Linear(10, 1))
print(model)
```
### ๋ฌธ์ 5
loss ํจ์์ optimizer๋ฅผ ์ค์ ํ์์ค. learning_rate๋ฅผ ์ ์ ํ ์ ํํ์์ค.
```
# Mean-squared-error loss with plain SGD and a small learning rate.
criterion = torch.nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.0001)
```
### ๋ฌธ์ 6
๋ชจํ์ ํ๋ จํ์์ค. epoch์ ํ์๋ฅผ ์ ์ ํ ์ ํํ์์ค.
```
epochs = 1000
for epoch in range(epochs):
    running_loss = 0
    for data, target in dataloader:
        optimizer.zero_grad()       # clear gradients from the previous step
        loss = criterion(model(data), target)
        loss.backward()             # backpropagate
        optimizer.step()            # update the weights
        running_loss += loss.item()
    # Report the mean batch loss every 100 epochs. (The original used a
    # for/else here; without a `break` that is identical to plain code
    # after the loop.)
    if epoch % 100 == 0:
        print(f"Training loss: {running_loss/len(dataloader)}")
```
### ๋ฌธ์ 7
```
# One normalised 10-feature sample, shaped (1, 10), to feed the trained model.
new_var = torch.FloatTensor([[ 0.0381, 0.0507, 0.0617, 0.0219, -0.0442, -0.0348,
                              -0.0434, -0.0026, 0.0199, -0.0176]])
```
์๋ก์ด ๋ฐ์ดํฐ๋ก diabets๋ฅผ ์์ธกํด ๋ณด์์ค.
```
# One normalised 10-feature sample, shaped (1, 10).
new_var = torch.FloatTensor([[ 0.0381, 0.0507, 0.0617, 0.0219, -0.0442, -0.0348,
                              -0.0434, -0.0026, 0.0199, -0.0176]])
# no_grad() disables autograd bookkeeping for this inference-only pass.
with torch.no_grad():
    y_hat = model(new_var)
    print(y_hat)
```
| github_jupyter |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.