code stringlengths 2.5k 150k | kind stringclasses 1 value |
|---|---|
<a href="https://colab.research.google.com/github/Imotep460/FastAIBlog/blob/master/_notebooks/2021-02-16-Kapitel-2%3A-hotdogs.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
!pip install -Uqq fastbook
import fastbook
fastbook.setup_book()
from fastbook import *
from fastai.vision.widgets import *
# Bing Custom Search credentials — fill these in before running the notebook.
configId = ""
subscriptionKey = ""
def search_images_bing_new(key, term, customConfigId, min_sz=128):
    """Search the Bing Custom Image Search API for `term`.

    key: Azure subscription key, sent as the Ocp-Apim-Subscription-Key header.
    term: search query; passed via `params` so it is properly URL-encoded.
    customConfigId: Bing Custom Search configuration id.
    min_sz: kept for interface compatibility; not used by this endpoint.

    Returns an `L` (fastai list) of up to 150 thumbnail URLs with ".jpg" appended.
    """
    url = 'https://api.bing.microsoft.com/v7.0/custom/images/search'
    # BUG FIX: the original built the query string by raw concatenation, so a
    # term containing spaces or operators (e.g. "food -hotdog") was not
    # URL-encoded. Letting requests build the query string fixes that.
    r = requests.get(url,
                     headers={'Ocp-Apim-Subscription-Key': key},
                     params={'q': term,
                             'customconfig': customConfigId,
                             'count': 150})
    # Surface HTTP errors here instead of failing later on a missing "value" key.
    r.raise_for_status()
    search_results = r.json()
    return L([img["thumbnailUrl"] + ".jpg" for img in search_results["value"][:150]])
# Download a sample hotdog image and preview it.
hotdogImages = search_images_bing_new(subscriptionKey, "hotdog", configId)
firstHotdogImage = hotdogImages[0]
hotdogDest = "hotdog.jpg"
download_url(firstHotdogImage, hotdogDest)
hotdogImg = Image.open(hotdogDest)
hotdogImg.to_thumb(128, 128)

# Create the image directories.
imagesPath = Path('images')
# BUG FIX: the original tested `not imagesPath.exists` without calling the
# method; a bound method is always truthy, so mkdir() never ran.
imagesPath.mkdir(exist_ok=True)
hotdogPath = imagesPath / 'hotdogs'
hotdogPath.mkdir(exist_ok=True)

# Download the hotdog training images and list any that fail verification.
download_images(hotdogPath, urls=hotdogImages)
hotdogImageFiles = get_image_files(hotdogPath)
failedHotdogs = verify_images(hotdogImageFiles)
failedHotdogs

# Download non-hotdog food images for the negative class.
foodImages = search_images_bing_new(subscriptionKey, "food -hotdog", configId)
foodPath = imagesPath / 'food'
# BUG FIX: the original called foodPath.exists() here instead of mkdir(),
# so the 'food' folder was never created and download_images would fail.
foodPath.mkdir(exist_ok=True)
download_images(foodPath, urls=foodImages)
foodImageFiles = get_image_files(foodPath)
failedFood = verify_images(foodImageFiles)
failedFood
# Define the DataBlock: images labelled by their parent folder name
# ('hotdogs' vs 'food'), with a reproducible 80/20 train/valid split.
foods = DataBlock(
blocks=(ImageBlock, CategoryBlock),
get_items=get_image_files,
splitter=RandomSplitter(valid_pct=0.2, seed=42),
get_y=parent_label,
item_tfms=Resize(128))
# Re-create the DataBlock with augmentation: random 224px crops covering at
# least half of each image, plus fastai's standard batch transforms.
foods = foods.new(
item_tfms=RandomResizedCrop(224, min_scale=0.5),
batch_tfms=aug_transforms())
dls = foods.dataloaders(imagesPath)
dls.valid.show_batch(max_n=12, nrows=1)
# Fine-tune a pretrained ResNet-18 for 8 epochs, tracking error rate.
learn = cnn_learner(dls, resnet18, metrics=error_rate)
learn.fine_tune(8)
# Inspect where the model confuses the two classes.
interp = ClassificationInterpretation.from_learner(learn)
interp.plot_confusion_matrix()
# Serialize the trained Learner (export.pkl) for later inference.
learn.export()
# Sanity-check predictions on a few sample images
# (burger.jpg / dog.jpg / hotdogdog.jpg are assumed to exist locally).
Image.open("hotdog.jpg").to_thumb(128,128)
pred,pred_idx,probs = learn.predict("hotdog.jpg")
f'Prediction: {pred}; Probability: {probs[pred_idx]:.04f}'
Image.open("burger.jpg").to_thumb(128,128)
pred,pred_idx,probs = learn.predict("burger.jpg")
f'Prediction: {pred}; Probability: {probs[pred_idx]:.04f}'
Image.open("dog.jpg").to_thumb(128,128)
pred,pred_idx,probs = learn.predict("dog.jpg")
f'Prediction: {pred}; Probability: {probs[pred_idx]:.04f}'
Image.open("hotdogdog.jpg").to_thumb(128,128)
pred,pred_idx,probs = learn.predict("hotdogdog.jpg")
f'Prediction: {pred}; Probability: {probs[pred_idx]:.04f}'
```
| github_jupyter |
The neural network consists of a CNN layer (Kim, 2014) and 4 fully connected layers.
Source: https://github.com/jojonki/cnn-for-sentence-classification
```
from google.colab import drive
drive.mount('/content/drive')
import os
os.chdir('/content/drive/MyDrive/sharif/DeepLearning/ipython(guide)')
import numpy as np
import codecs
import os
import random
import pandas
from keras import backend as K
from keras.models import Model
from keras.layers.embeddings import Embedding
from keras.layers import Input, Dense, Lambda, Permute, Dropout
from keras.layers import Conv2D, MaxPooling1D
from keras.optimizers import SGD
import ast
import re
from sklearn.preprocessing import MultiLabelBinarizer
from sklearn.model_selection import train_test_split
import gensim
from keras.models import load_model
from keras.callbacks import EarlyStopping, ModelCheckpoint
limit_number = 750

# Load the preprocessed posts. The 'body' column is stored as a stringified
# Python list, so a converter turns it back into a real list per row.
data = pandas.read_csv('../Data/limited_to_' + str(limit_number) + '.csv',
                       index_col=0, converters={'body': eval})
data = data.dropna().reset_index(drop=True)
X = data["body"].values.tolist()

# Re-read the same file to get the raw 'tag' column.
y = pandas.read_csv('../Data/limited_to_' + str(limit_number) + '.csv')

# Strip quoting/bracket characters from each tag string and split on commas,
# collecting both the flat label set and the per-post tag lists.
labels = []
tag = []
for item in y['tag']:
    cleaned = [t for t in re.sub('\"|\[|\]|\'| |=', '', item.lower()).split(",")
               if t != '' and t != ' ']
    labels += cleaned
    tag.append(cleaned)
labels = list(set(labels))

# One-hot encode the multi-label targets.
mlb = MultiLabelBinarizer()
Y = mlb.fit_transform(tag)
len(labels)

sentence_maxlen = max(map(len, (d for d in X)))
print('sentence maxlen', sentence_maxlen)

# Build the vocabulary from the sorted frequency list, dropping zero-width
# characters and internal spaces; rows that fail (e.g. NaN) are skipped.
freq_dist = pandas.read_csv('../Data/FreqDist_sorted.csv', index_col=False)
vocab = []
for item in freq_dist["word"]:
    try:
        word = re.sub(r"[\u200c-\u200f]", "", item.replace(" ", ""))
        vocab.append(word)
    except:
        pass
print(vocab[10])
vocab = sorted(vocab)
vocab_size = len(vocab)
print('vocab size', len(vocab))
w2i = {w: i for i, w in enumerate(vocab)}
# i2w = {i:w for i,w in enumerate(vocab)}
print(w2i["زبان"])
def vectorize(data, sentence_maxlen, w2i):
    """Map each tokenized sentence to a fixed-length vector of word indices.

    data: iterable of token lists.
    sentence_maxlen: target row length.
    w2i: dict mapping word -> integer index.

    Words missing from `w2i` are dropped; each row is right-padded with 0 up
    to `sentence_maxlen` (rows that end up longer are left untouched).
    Returns a numpy array of the rows.
    """
    rows = []
    for sentence in data:
        indices = []
        for token in sentence:
            if token in w2i:
                indices.append(w2i[token])
        # Pad with zeros; note index 0 doubles as both pad and vocab[0].
        indices.extend([0] * max(0, sentence_maxlen - len(indices)))
        rows.append(indices)
    return np.array(rows)
# Vectorize the corpus and split 60/20/20 into train/val/test
# (0.25 of the remaining 80% equals 20% of the whole).
vecX = vectorize(X, sentence_maxlen, w2i)
vecY=Y
X_train, X_test, y_train, y_test = train_test_split(vecX, vecY, test_size=0.2)
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.25)
print('train: ', X_train.shape , '\ntest: ', X_test.shape , '\nval: ', X_val.shape ,"\ny_tain:",y_train.shape )
# print(vecX[0])
# Dimensionality of the word2vec embeddings.
embd_dim = 300
```
# ***If the word2vec model is not generated before, we should run the next block.***
```
# embed_model = gensim.models.Word2Vec(X, size=embd_dim, window=5, min_count=5)
# embed_model.save('word2vec_model')
```
# ***Otherwise, we can run the next block.***
```
embed_model = gensim.models.Word2Vec.load('word2vec_model')

# Build the embedding weight matrix: row i holds the word2vec vector for
# vocab word i. Words absent from the trained model keep an all-zero row.
word2vec_embd_w = np.zeros((vocab_size, embd_dim))
for word, i in w2i.items():
    if word in embed_model.wv.vocab:
        embedding_vector = embed_model[word]
        word2vec_embd_w[i] = embedding_vector
from keras.layers import LSTM
def Net(vocab_size, embd_size, sentence_maxlen, glove_embd_w):
    """Build the CNN + 4-fully-connected-layer multi-label classifier.

    vocab_size: number of rows in the embedding table.
    embd_size: embedding dimensionality.
    sentence_maxlen: fixed input length (padded word-index sequences).
    glove_embd_w: pretrained embedding weights, shape (vocab_size, embd_size).

    Returns a compiled Keras Model that maps word-index sequences to one
    sigmoid probability per label (uses the global `labels` for the width
    of the output layer).
    """
    sentence = Input((sentence_maxlen,), name='SentenceInput')

    # Frozen pretrained embedding layer.
    # BUG FIX: the original referenced the global `word2vec_embd_w` here,
    # silently ignoring the `glove_embd_w` argument.
    embd_layer = Embedding(input_dim=vocab_size,
                           output_dim=embd_size,
                           weights=[glove_embd_w],
                           trainable=False,
                           name='shared_embd')
    embd_sentence = embd_layer(sentence)
    # Rearrange to (embd_size, sentence_maxlen, 1) so Conv2D can be applied.
    embd_sentence = Permute((2, 1))(embd_sentence)
    embd_sentence = Lambda(lambda x: K.expand_dims(x, -1))(embd_sentence)

    # Single-filter CNN with a (5, sentence_maxlen) kernel, then summed and
    # max-pooled down to a feature vector.
    cnn = Conv2D(1,
                 kernel_size=(5, sentence_maxlen),
                 activation='relu')(embd_sentence)
    cnn = Lambda(lambda x: K.sum(x, axis=3))(cnn)
    cnn = MaxPooling1D(3)(cnn)
    cnn = Lambda(lambda x: K.sum(x, axis=2))(cnn)

    # Four fully connected layers, then one sigmoid unit per label
    # (independent probabilities — multi-label, not softmax).
    hidden1 = Dense(400, activation="relu")(cnn)
    hidden2 = Dense(300, activation="relu")(hidden1)
    hidden3 = Dense(200, activation="relu")(hidden2)
    hidden4 = Dense(150, activation="relu")(hidden3)
    out = Dense(len(labels), activation='sigmoid')(hidden4)

    sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    model = Model(inputs=sentence, outputs=out, name='sentence_claccification')
    model.compile(optimizer=sgd, loss='binary_crossentropy',
                  metrics=["accuracy", "binary_accuracy",
                           "categorical_accuracy",])
    return model
model = Net(vocab_size, embd_dim, sentence_maxlen, word2vec_embd_w)
print(model.summary())

# Stop once validation loss has not improved for 5 epochs, and checkpoint
# the weights from the best epoch only.
es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=5)
mc = ModelCheckpoint('best_cnn_4fc.h5', monitor='val_loss', mode='min',
                     verbose=1, save_best_only=True)
# epochs=250 is an upper bound; early stopping usually ends training sooner.
model.fit(X_train, y_train, batch_size=32, epochs=250, verbose=1,
          validation_data=(X_val, y_val), callbacks=[es, mc])
```
# ***If the model is generated before:***
```
model = load_model('best_cnn_4fc_with_binary.h5')
# model.save('best_cnn_4fc_with_binary.h5')
pred = model.predict(X_test)
print(pred[0])  # example of one row of per-label probabilities

# Binarize predictions against a threshold derived from the first row's
# score distribution (mean + half a standard deviation, scaled by 9).
# NOTE(review): the comment above mentions "probability > 0.5" but the 9x
# factor produces a very different cutoff — confirm this is intentional.
measure = 9 * (np.mean(pred[0]) + 0.5 * np.sqrt(np.var(pred[0])))
y_pred = []
for row in pred:
    y_pred.append([1 if value >= measure else 0 for value in row])
measure

from sklearn.metrics import classification_report, accuracy_score
print("accuracy=", accuracy_score(y_test, y_pred))
print(classification_report(y_test, y_pred))
```
| github_jupyter |
##### Copyright 2018 The TensorFlow Authors.
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# Ragged Tensors
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/beta/guide/ragged_tensors"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/r2/guide/ragged_tensors.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/r2/guide/ragged_tensors.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/docs/site/en/r2/guide/ragged_tensors.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
</td>
</table>
## Setup
```
from __future__ import absolute_import, division, print_function, unicode_literals
import math
try:
%tensorflow_version 2.x # Colab only.
except Exception:
pass
import tensorflow as tf
```
## Overview
Your data comes in many shapes; your tensors should too.
*Ragged tensors* are the TensorFlow equivalent of nested variable-length
lists. They make it easy to store and process data with non-uniform shapes,
including:
* Variable-length features, such as the set of actors in a movie.
* Batches of variable-length sequential inputs, such as sentences or video
clips.
* Hierarchical inputs, such as text documents that are subdivided into
sections, paragraphs, sentences, and words.
* Individual fields in structured inputs, such as protocol buffers.
### What you can do with a ragged tensor
Ragged tensors are supported by more than a hundred TensorFlow operations,
including math operations (such as `tf.add` and `tf.reduce_mean`), array operations
(such as `tf.concat` and `tf.tile`), string manipulation ops (such as
`tf.strings.substr`), and many others:
```
digits = tf.ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []])
words = tf.ragged.constant([["So", "long"], ["thanks", "for", "all", "the", "fish"]])
print(tf.add(digits, 3))
print(tf.reduce_mean(digits, axis=1))
print(tf.concat([digits, [[5, 3]]], axis=0))
print(tf.tile(digits, [1, 2]))
print(tf.strings.substr(words, 0, 2))
```
There are also a number of methods and operations that are
specific to ragged tensors, including factory methods, conversion methods,
and value-mapping operations.
For a list of supported ops, see the `tf.ragged` package
documentation.
As with normal tensors, you can use Python-style indexing to access specific
slices of a ragged tensor. For more information, see the section on
**Indexing** below.
```
print(digits[0]) # First row
print(digits[:, :2]) # First two values in each row.
print(digits[:, -2:]) # Last two values in each row.
```
And just like normal tensors, you can use Python arithmetic and comparison
operators to perform elementwise operations. For more information, see the section on
**Overloaded Operators** below.
```
print(digits + 3)
print(digits + tf.ragged.constant([[1, 2, 3, 4], [], [5, 6, 7], [8], []]))
```
If you need to perform an elementwise transformation to the values of a `RaggedTensor`, you can use `tf.ragged.map_flat_values`, which takes a function plus one or more arguments, and applies the function to transform the `RaggedTensor`'s values.
```
times_two_plus_one = lambda x: x * 2 + 1
print(tf.ragged.map_flat_values(times_two_plus_one, digits))
```
### Constructing a ragged tensor
The simplest way to construct a ragged tensor is using
`tf.ragged.constant`, which builds the
`RaggedTensor` corresponding to a given nested Python `list`:
```
sentences = tf.ragged.constant([
["Let's", "build", "some", "ragged", "tensors", "!"],
["We", "can", "use", "tf.ragged.constant", "."]])
print(sentences)
paragraphs = tf.ragged.constant([
[['I', 'have', 'a', 'cat'], ['His', 'name', 'is', 'Mat']],
[['Do', 'you', 'want', 'to', 'come', 'visit'], ["I'm", 'free', 'tomorrow']],
])
print(paragraphs)
```
Ragged tensors can also be constructed by pairing flat *values* tensors with
*row-partitioning* tensors indicating how those values should be divided into
rows, using factory classmethods such as `tf.RaggedTensor.from_value_rowids`,
`tf.RaggedTensor.from_row_lengths`, and
`tf.RaggedTensor.from_row_splits`.
#### `tf.RaggedTensor.from_value_rowids`
If you know which row each value belongs in, then you can build a `RaggedTensor` using a `value_rowids` row-partitioning tensor:

```
print(tf.RaggedTensor.from_value_rowids(
values=[3, 1, 4, 1, 5, 9, 2, 6],
value_rowids=[0, 0, 0, 0, 2, 2, 2, 3]))
```
#### `tf.RaggedTensor.from_row_lengths`
If you know how long each row is, then you can use a `row_lengths` row-partitioning tensor:

```
print(tf.RaggedTensor.from_row_lengths(
values=[3, 1, 4, 1, 5, 9, 2, 6],
row_lengths=[4, 0, 3, 1]))
```
#### `tf.RaggedTensor.from_row_splits`
If you know the index where each row starts and ends, then you can use a `row_splits` row-partitioning tensor:

```
print(tf.RaggedTensor.from_row_splits(
values=[3, 1, 4, 1, 5, 9, 2, 6],
row_splits=[0, 4, 4, 7, 8]))
```
See the `tf.RaggedTensor` class documentation for a full list of factory methods.
### What you can store in a ragged tensor
As with normal `Tensor`s, the values in a `RaggedTensor` must all have the same
type; and the values must all be at the same nesting depth (the *rank* of the
tensor):
```
print(tf.ragged.constant([["Hi"], ["How", "are", "you"]])) # ok: type=string, rank=2
print(tf.ragged.constant([[[1, 2], [3]], [[4, 5]]])) # ok: type=int32, rank=3
try:
tf.ragged.constant([["one", "two"], [3, 4]]) # bad: multiple types
except ValueError as exception:
print(exception)
try:
tf.ragged.constant(["A", ["B", "C"]]) # bad: multiple nesting depths
except ValueError as exception:
print(exception)
```
### Example use case
The following example demonstrates how `RaggedTensor`s can be used to construct
and combine unigram and bigram embeddings for a batch of variable-length
queries, using special markers for the beginning and end of each sentence.
For more details on the ops used in this example, see the `tf.ragged` package documentation.
```
# Demo: build unigram + bigram hash embeddings for a batch of
# variable-length queries, then average per sentence.
queries = tf.ragged.constant([['Who', 'is', 'Dan', 'Smith'],
['Pause'],
['Will', 'it', 'rain', 'later', 'today']])
# Create an embedding table.
num_buckets = 1024
embedding_size = 4
embedding_table = tf.Variable(
tf.random.truncated_normal([num_buckets, embedding_size],
stddev=1.0 / math.sqrt(embedding_size)))
# Look up the embedding for each word.
word_buckets = tf.strings.to_hash_bucket_fast(queries, num_buckets)
word_embeddings = tf.ragged.map_flat_values(
tf.nn.embedding_lookup, embedding_table, word_buckets) # ①
# Add markers to the beginning and end of each sentence.
marker = tf.fill([queries.nrows(), 1], '#')
padded = tf.concat([marker, queries, marker], axis=1) # ②
# Build word bigrams & look up embeddings.
bigrams = tf.strings.join([padded[:, :-1],
padded[:, 1:]],
separator='+') # ③
bigram_buckets = tf.strings.to_hash_bucket_fast(bigrams, num_buckets)
bigram_embeddings = tf.ragged.map_flat_values(
tf.nn.embedding_lookup, embedding_table, bigram_buckets) # ④
# Find the average embedding for each sentence
all_embeddings = tf.concat([word_embeddings, bigram_embeddings], axis=1) # ⑤
avg_embedding = tf.reduce_mean(all_embeddings, axis=1) # ⑥
print(avg_embedding)
```

## Ragged tensors: definitions
### Ragged and uniform dimensions
A *ragged tensor* is a tensor with one or more *ragged dimensions*,
which are dimensions whose slices may have different lengths. For example, the
inner (column) dimension of `rt=[[3, 1, 4, 1], [], [5, 9, 2], [6], []]` is
ragged, since the column slices (`rt[0, :]`, ..., `rt[4, :]`) have different
lengths. Dimensions whose slices all have the same length are called *uniform
dimensions*.
The outermost dimension of a ragged tensor is always uniform, since it consists
of a single slice (and so there is no possibility for differing slice lengths).
In addition to the uniform outermost dimension, ragged tensors may also have
uniform inner dimensions. For example, we might store the word embeddings for
each word in a batch of sentences using a ragged tensor with shape
`[num_sentences, (num_words), embedding_size]`, where the parentheses around
`(num_words)` indicate that the dimension is ragged.

Ragged tensors may have multiple ragged dimensions. For example, we could store
a batch of structured text documents using a tensor with shape `[num_documents,
(num_paragraphs), (num_sentences), (num_words)]` (where again parentheses are
used to indicate ragged dimensions).
#### Ragged tensor shape restrictions
The shape of a ragged tensor is currently restricted to have the following form:
* A single uniform dimension
* Followed by one or more ragged dimensions
* Followed by zero or more uniform dimensions.
Note: These restrictions are a consequence of the current implementation, and we
may relax them in the future.
### Rank and ragged rank
The total number of dimensions in a ragged tensor is called its ***rank***, and
the number of ragged dimensions in a ragged tensor is called its ***ragged
rank***. In graph execution mode (i.e., non-eager mode), a tensor's ragged rank
is fixed at creation time: it can't depend
on runtime values, and can't vary dynamically for different session runs.
A ***potentially ragged tensor*** is a value that might be
either a `tf.Tensor` or a `tf.RaggedTensor`. The
ragged rank of a `tf.Tensor` is defined to be zero.
### RaggedTensor shapes
When describing the shape of a RaggedTensor, ragged dimensions are indicated by
enclosing them in parentheses. For example, as we saw above, the shape of a 3-D
RaggedTensor that stores word embeddings for each word in a batch of sentences
can be written as `[num_sentences, (num_words), embedding_size]`.
The `RaggedTensor.shape` attribute returns a `tf.TensorShape` for a
ragged tensor, where ragged dimensions have size `None`:
```
tf.ragged.constant([["Hi"], ["How", "are", "you"]]).shape
```
The method `tf.RaggedTensor.bounding_shape` can be used to find a tight
bounding shape for a given `RaggedTensor`:
```
print(tf.ragged.constant([["Hi"], ["How", "are", "you"]]).bounding_shape())
```
## Ragged vs sparse tensors
A ragged tensor should *not* be thought of as a type of sparse tensor, but
rather as a dense tensor with an irregular shape.
As an illustrative example, consider how array operations such as `concat`,
`stack`, and `tile` are defined for ragged vs. sparse tensors. Concatenating
ragged tensors joins each row to form a single row with the combined length:

```
ragged_x = tf.ragged.constant([["John"], ["a", "big", "dog"], ["my", "cat"]])
ragged_y = tf.ragged.constant([["fell", "asleep"], ["barked"], ["is", "fuzzy"]])
print(tf.concat([ragged_x, ragged_y], axis=1))
```
But concatenating sparse tensors is equivalent to concatenating the corresponding dense tensors,
as illustrated by the following example (where Ø indicates missing values):

```
sparse_x = ragged_x.to_sparse()
sparse_y = ragged_y.to_sparse()
sparse_result = tf.sparse.concat(sp_inputs=[sparse_x, sparse_y], axis=1)
print(tf.sparse.to_dense(sparse_result, ''))
```
For another example of why this distinction is important, consider the
definition of “the mean value of each row” for an op such as `tf.reduce_mean`.
For a ragged tensor, the mean value for a row is the sum of the
row’s values divided by the row’s width.
But for a sparse tensor, the mean value for a row is the sum of the
row’s values divided by the sparse tensor’s overall width (which is
greater than or equal to the width of the longest row).
## Overloaded operators
The `RaggedTensor` class overloads the standard Python arithmetic and comparison
operators, making it easy to perform basic elementwise math:
```
x = tf.ragged.constant([[1, 2], [3], [4, 5, 6]])
y = tf.ragged.constant([[1, 1], [2], [3, 3, 3]])
print(x + y)
```
Since the overloaded operators perform elementwise computations, the inputs to
all binary operations must have the same shape, or be broadcastable to the same
shape. In the simplest broadcasting case, a single scalar is combined
elementwise with each value in a ragged tensor:
```
x = tf.ragged.constant([[1, 2], [3], [4, 5, 6]])
print(x + 3)
```
For a discussion of more advanced cases, see the section on
**Broadcasting**.
Ragged tensors overload the same set of operators as normal `Tensor`s: the unary
operators `-`, `~`, and `abs()`; and the binary operators `+`, `-`, `*`, `/`,
`//`, `%`, `**`, `&`, `|`, `^`, `<`, `<=`, `>`, and `>=`. Note that, as with
standard `Tensor`s, binary `==` is not overloaded; you can use
`tf.equal()` to check elementwise equality.
## Indexing
Ragged tensors support Python-style indexing, including multidimensional
indexing and slicing. The following examples demonstrate ragged tensor indexing
with a 2-D and a 3-D ragged tensor.
### Indexing a 2-D ragged tensor with 1 ragged dimension
```
queries = tf.ragged.constant(
[['Who', 'is', 'George', 'Washington'],
['What', 'is', 'the', 'weather', 'tomorrow'],
['Goodnight']])
print(queries[1])
print(queries[1, 2]) # A single word
print(queries[1:]) # Everything but the first row
print(queries[:, :3]) # The first 3 words of each query
print(queries[:, -2:]) # The last 2 words of each query
```
### Indexing a 3-D ragged tensor with 2 ragged dimensions
```
rt = tf.ragged.constant([[[1, 2, 3], [4]],
[[5], [], [6]],
[[7]],
[[8, 9], [10]]])
print(rt[1]) # Second row (2-D RaggedTensor)
print(rt[3, 0]) # First element of fourth row (1-D Tensor)
print(rt[:, 1:3]) # Items 1-3 of each row (3-D RaggedTensor)
print(rt[:, -1:]) # Last item of each row (3-D RaggedTensor)
```
`RaggedTensor`s supports multidimensional indexing and slicing, with one
restriction: indexing into a ragged dimension is not allowed. This case is
problematic because the indicated value may exist in some rows but not others.
In such cases, it's not obvious whether we should (1) raise an `IndexError`; (2)
use a default value; or (3) skip that value and return a tensor with fewer rows
than we started with. Following the
[guiding principles of Python](https://www.python.org/dev/peps/pep-0020/)
("In the face
of ambiguity, refuse the temptation to guess" ), we currently disallow this
operation.
## Tensor Type Conversion
The `RaggedTensor` class defines methods that can be used to convert
between `RaggedTensor`s and `tf.Tensor`s or `tf.SparseTensors`:
```
ragged_sentences = tf.ragged.constant([
['Hi'], ['Welcome', 'to', 'the', 'fair'], ['Have', 'fun']])
print(ragged_sentences.to_tensor(default_value=''))
print(ragged_sentences.to_sparse())
x = [[1, 3, -1, -1], [2, -1, -1, -1], [4, 5, 8, 9]]
print(tf.RaggedTensor.from_tensor(x, padding=-1))
st = tf.SparseTensor(indices=[[0, 0], [2, 0], [2, 1]],
values=['a', 'b', 'c'],
dense_shape=[3, 3])
print(tf.RaggedTensor.from_sparse(st))
```
## Evaluating ragged tensors
### Eager execution
In eager execution mode, ragged tensors are evaluated immediately. To access the
values they contain, you can:
* Use the
`tf.RaggedTensor.to_list()`
method, which converts the ragged tensor to a Python `list`.
```
rt = tf.ragged.constant([[1, 2], [3, 4, 5], [6], [], [7]])
print(rt.to_list())
```
* Use Python indexing. If the tensor piece you select contains no ragged
dimensions, then it will be returned as an `EagerTensor`. You can then use
the `numpy()` method to access the value directly.
```
print(rt[1].numpy())
```
* Decompose the ragged tensor into its components, using the
`tf.RaggedTensor.values`
and
`tf.RaggedTensor.row_splits`
properties, or row-partitioning methods such as `tf.RaggedTensor.row_lengths()`
and `tf.RaggedTensor.value_rowids()`.
```
print(rt.values)
print(rt.row_splits)
```
### Broadcasting
Broadcasting is the process of making tensors with different shapes have
compatible shapes for elementwise operations. For more background on
broadcasting, see:
* [Numpy: Broadcasting](https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
* `tf.broadcast_dynamic_shape`
* `tf.broadcast_to`
The basic steps for broadcasting two inputs `x` and `y` to have compatible
shapes are:
1. If `x` and `y` do not have the same number of dimensions, then add outer
dimensions (with size 1) until they do.
2. For each dimension where `x` and `y` have different sizes:
* If `x` or `y` have size `1` in dimension `d`, then repeat its values
across dimension `d` to match the other input's size.
* Otherwise, raise an exception (`x` and `y` are not broadcast
compatible).
Where the size of a tensor in a uniform dimension is a single number (the size
of slices across that dimension); and the size of a tensor in a ragged dimension
is a list of slice lengths (for all slices across that dimension).
#### Broadcasting examples
```
# x (2D ragged): 2 x (num_rows)
# y (scalar)
# result (2D ragged): 2 x (num_rows)
x = tf.ragged.constant([[1, 2], [3]])
y = 3
print(x + y)
# x (2d ragged): 3 x (num_rows)
# y (2d tensor): 3 x 1
# Result (2d ragged): 3 x (num_rows)
x = tf.ragged.constant(
[[10, 87, 12],
[19, 53],
[12, 32]])
y = [[1000], [2000], [3000]]
print(x + y)
# x (3d ragged): 2 x (r1) x 2
# y (2d ragged): 1 x 1
# Result (3d ragged): 2 x (r1) x 2
x = tf.ragged.constant(
[[[1, 2], [3, 4], [5, 6]],
[[7, 8]]],
ragged_rank=1)
y = tf.constant([[10]])
print(x + y)
# x (3d ragged): 2 x (r1) x (r2) x 1
# y (1d tensor): 3
# Result (3d ragged): 2 x (r1) x (r2) x 3
x = tf.ragged.constant(
[
[
[[1], [2]],
[],
[[3]],
[[4]],
],
[
[[5], [6]],
[[7]]
]
],
ragged_rank=2)
y = tf.constant([10, 20, 30])
print(x + y)
```
Here are some examples of shapes that do not broadcast:
```
# x (2d ragged): 3 x (r1)
# y (2d tensor): 3 x 4 # trailing dimensions do not match
x = tf.ragged.constant([[1, 2], [3, 4, 5, 6], [7]])
y = tf.constant([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
try:
x + y
except tf.errors.InvalidArgumentError as exception:
print(exception)
# x (2d ragged): 3 x (r1)
# y (2d ragged): 3 x (r2) # ragged dimensions do not match.
x = tf.ragged.constant([[1, 2, 3], [4], [5, 6]])
y = tf.ragged.constant([[10, 20], [30, 40], [50]])
try:
x + y
except tf.errors.InvalidArgumentError as exception:
print(exception)
# x (3d ragged): 3 x (r1) x 2
# y (3d ragged): 3 x (r1) x 3 # trailing dimensions do not match
x = tf.ragged.constant([[[1, 2], [3, 4], [5, 6]],
[[7, 8], [9, 10]]])
y = tf.ragged.constant([[[1, 2, 0], [3, 4, 0], [5, 6, 0]],
[[7, 8, 0], [9, 10, 0]]])
try:
x + y
except tf.errors.InvalidArgumentError as exception:
print(exception)
```
## RaggedTensor encoding
Ragged tensors are encoded using the `RaggedTensor` class. Internally, each
`RaggedTensor` consists of:
* A `values` tensor, which concatenates the variable-length rows into a
flattened list.
* A `row_splits` vector, which indicates how those flattened values are
divided into rows. In particular, the values for row `rt[i]` are stored in
the slice `rt.values[rt.row_splits[i]:rt.row_splits[i+1]]`.

```
rt = tf.RaggedTensor.from_row_splits(
values=[3, 1, 4, 1, 5, 9, 2],
row_splits=[0, 4, 4, 6, 7])
print(rt)
```
### Multiple ragged dimensions
A ragged tensor with multiple ragged dimensions is encoded by using a nested
`RaggedTensor` for the `values` tensor. Each nested `RaggedTensor` adds a single
ragged dimension.

```
rt = tf.RaggedTensor.from_row_splits(
values=tf.RaggedTensor.from_row_splits(
values=[10, 11, 12, 13, 14, 15, 16, 17, 18, 19],
row_splits=[0, 3, 3, 5, 9, 10]),
row_splits=[0, 1, 1, 5])
print(rt)
print("Shape: {}".format(rt.shape))
print("Number of ragged dimensions: {}".format(rt.ragged_rank))
```
The factory function `tf.RaggedTensor.from_nested_row_splits` may be used to construct a
RaggedTensor with multiple ragged dimensions directly, by providing a list of
`row_splits` tensors:
```
rt = tf.RaggedTensor.from_nested_row_splits(
flat_values=[10, 11, 12, 13, 14, 15, 16, 17, 18, 19],
nested_row_splits=([0, 1, 1, 5], [0, 3, 3, 5, 9, 10]))
print(rt)
```
### Uniform Inner Dimensions
Ragged tensors with uniform inner dimensions are encoded by using a
multidimensional `tf.Tensor` for `values`.

```
rt = tf.RaggedTensor.from_row_splits(
values=[[1, 3], [0, 0], [1, 3], [5, 3], [3, 3], [1, 2]],
row_splits=[0, 3, 4, 6])
print(rt)
print("Shape: {}".format(rt.shape))
print("Number of ragged dimensions: {}".format(rt.ragged_rank))
```
### Alternative row-partitioning schemes
The `RaggedTensor` class uses `row_splits` as the primary mechanism to store
information about how the values are partitioned into rows. However,
`RaggedTensor` also provides support for four alternative row-partitioning
schemes, which can be more convenient to use depending on how your data is
formatted. Internally, `RaggedTensor` uses these additional schemes to improve
efficiency in some contexts.
<dl>
<dt>Row lengths</dt>
<dd>`row_lengths` is a vector with shape `[nrows]`, which specifies the
length of each row.</dd>
<dt>Row starts</dt>
<dd>`row_starts` is a vector with shape `[nrows]`, which specifies the start
offset of each row. Equivalent to `row_splits[:-1]`.</dd>
<dt>Row limits</dt>
<dd>`row_limits` is a vector with shape `[nrows]`, which specifies the stop
offset of each row. Equivalent to `row_splits[1:]`.</dd>
<dt>Row indices and number of rows</dt>
<dd>`value_rowids` is a vector with shape `[nvals]`, corresponding
one-to-one with values, which specifies each value's row index. In
particular, the row `rt[row]` consists of the values `rt.values[j]` where
`value_rowids[j]==row`. \
`nrows` is an integer that specifies the number of rows in the
`RaggedTensor`. In particular, `nrows` is used to indicate trailing empty
rows.</dd>
</dl>
For example, the following ragged tensors are equivalent:
```
values = [3, 1, 4, 1, 5, 9, 2, 6]
print(tf.RaggedTensor.from_row_splits(values, row_splits=[0, 4, 4, 7, 8, 8]))
print(tf.RaggedTensor.from_row_lengths(values, row_lengths=[4, 0, 3, 1, 0]))
print(tf.RaggedTensor.from_row_starts(values, row_starts=[0, 4, 4, 7, 8]))
print(tf.RaggedTensor.from_row_limits(values, row_limits=[4, 4, 7, 8, 8]))
print(tf.RaggedTensor.from_value_rowids(
values, value_rowids=[0, 0, 0, 0, 2, 2, 2, 3], nrows=5))
```
The RaggedTensor class defines methods which can be used to construct
each of these row-partitioning tensors.
```
rt = tf.ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []])
print(" values: {}".format(rt.values))
print(" row_splits: {}".format(rt.row_splits))
print(" row_lengths: {}".format(rt.row_lengths()))
print(" row_starts: {}".format(rt.row_starts()))
print(" row_limits: {}".format(rt.row_limits()))
print("value_rowids: {}".format(rt.value_rowids()))
```
(Note that `tf.RaggedTensor.values` and `tf.RaggedTensor.row_splits` are properties, while the remaining row-partitioning accessors are all methods. This reflects the fact that the `row_splits` are the primary underlying representation, and the other row-partitioning tensors must be computed.)
Some of the advantages and disadvantages of the different row-partitioning
schemes are:
+ **Efficient indexing**:
The `row_splits`, `row_starts`, and `row_limits` schemes all enable
constant-time indexing into ragged tensors. The `value_rowids` and
`row_lengths` schemes do not.
+ **Small encoding size**:
The `value_rowids` scheme is more efficient when storing ragged tensors that
have a large number of empty rows, since the size of the tensor depends only
on the total number of values. On the other hand, the other four encodings
are more efficient when storing ragged tensors with longer rows, since they
require only one scalar value for each row.
+ **Efficient concatenation**:
The `row_lengths` scheme is more efficient when concatenating ragged
tensors, since row lengths do not change when two tensors are concatenated
together (but row splits and row indices do).
+ **Compatibility**:
The `value_rowids` scheme matches the
[segmentation](../api_guides/python/math_ops.md#Segmentation)
format used by operations such as `tf.segment_sum`. The `row_limits` scheme
matches the format used by ops such as `tf.sequence_mask`.
```
```
| github_jupyter |
# Modeling and Simulation in Python
Chapter 14
Copyright 2017 Allen Downey
License: [Creative Commons Attribution 4.0 International](https://creativecommons.org/licenses/by/4.0)
```
# Configure Jupyter so figures appear in the notebook
%matplotlib inline
# Configure Jupyter to display the assigned value after an assignment
%config InteractiveShell.ast_node_interactivity='last_expr_or_assign'
# import functions from the modsim.py module
from modsim import *
```
### Code from previous chapters
```
def make_system(beta, gamma):
    """Build a System object for the SIR model.

    beta: contact rate in days
    gamma: recovery rate in days

    returns: System object
    """
    # Start with 89 susceptible, 1 infected, 0 recovered, then
    # normalize so the state holds fractions of the population.
    initial = State(S=89, I=1, R=0)
    initial /= np.sum(initial)

    # Simulate 14 weeks, one step per day.
    return System(init=initial,
                  t0=0,
                  t_end=14 * 7,
                  beta=beta, gamma=gamma)
def update_func(state, t, system):
    """Advance the SIR model by one time step.

    state: State (s, i, r)
    t: time
    system: System object

    returns: State (sir)
    """
    s, i, r = state

    # Flows between compartments during this step.
    new_infections = system.beta * i * s
    new_recoveries = system.gamma * i

    return State(S=s - new_infections,
                 I=i + new_infections - new_recoveries,
                 R=r + new_recoveries)
def run_simulation(system, update_func):
    """Runs a simulation of the system.

    system: System object
    update_func: function that updates state

    returns: TimeFrame with one row per time step and one column
             per state variable (S, I, R)
    """
    # NOTE: modsim's `unpack` injects the System's attributes
    # (init, t0, t_end, ...) into this function's local namespace,
    # which is why `init`, `t0`, and `t_end` appear below unbound.
    unpack(system)
    frame = TimeFrame(columns=init.index)
    frame.row[t0] = init
    # Step one unit of time at a time; each new row is computed
    # from the previous row's state.
    for t in linrange(t0, t_end):
        frame.row[t+1] = update_func(frame.row[t], t, system)
    return frame
def calc_total_infected(results):
    """Fraction of population infected during the simulation.

    results: DataFrame with columns S, I, R

    returns: fraction of population
    """
    # Everyone who leaves S was infected at some point, so the
    # total drop in S equals the fraction ever infected.
    s_initial = get_first_value(results.S)
    s_final = get_last_value(results.S)
    return s_initial - s_final
def sweep_beta(beta_array, gamma):
    """Sweep a range of values for beta.

    beta_array: array of beta values
    gamma: recovery rate

    returns: SweepSeries that maps from beta to total infected
    """
    metrics = SweepSeries()
    # Run one full simulation per contact rate and record the
    # resulting fraction infected.
    for contact_rate in beta_array:
        sim = make_system(contact_rate, gamma)
        frame = run_simulation(sim, update_func)
        metrics[sim.beta] = calc_total_infected(frame)
    return metrics
```
## SweepFrame
The following sweeps two parameters and stores the results in a `SweepFrame`
```
def sweep_parameters(beta_array, gamma_array):
    """Sweep a range of values for beta and gamma.

    beta_array: array of infection rates
    gamma_array: array of recovery rates

    returns: SweepFrame with one row for each beta
             and one column for each gamma
    """
    # Each column of the frame is a complete beta sweep at one gamma.
    result = SweepFrame(columns=gamma_array)
    for recovery_rate in gamma_array:
        result[recovery_rate] = sweep_beta(beta_array, recovery_rate)
    return result
```
Here's what the results look like.
```
beta_array = linspace(0.1, 0.9, 11)
gamma_array = linspace(0.1, 0.7, 4)
frame = sweep_parameters(beta_array, gamma_array)
frame.head()
```
And here's how we can plot the results.
```
for gamma in gamma_array:
label = 'gamma = ' + str(gamma)
plot(frame[gamma], label=label)
decorate(xlabel='Contacts per day (beta)',
ylabel='Fraction infected',
loc='upper left')
```
It's often useful to separate the code that generates results from the code that plots the results, so we can run the simulations once, save the results, and then use them for different analysis, visualization, etc.
### Contact number
After running `sweep_parameters`, we have a `SweepFrame` with one row for each value of `beta` and one column for each value of `gamma`.
```
frame.shape
```
The following loop shows how we can loop through the columns and rows of the `SweepFrame`. With 11 rows and 4 columns, there are 44 elements.
```
for gamma in frame.columns:
series = frame[gamma]
for beta in series.index:
frac_infected = series[beta]
print(beta, gamma, frac_infected)
```
Now we can wrap that loop in a function and plot the results. For each element of the `SweepFrame`, we have `beta`, `gamma`, and `frac_infected`, and we plot `beta/gamma` on the x-axis and `frac_infected` on the y-axis.
```
def plot_sweep_frame(frame):
    """Plot the values from a SweepFrame.

    For each (beta, gamma), compute the contact number,
    beta/gamma

    frame: SweepFrame with one row per beta, one column per gamma
    """
    # One red dot per (beta, gamma) pair, at x = contact number.
    for gamma in frame.columns:
        column = frame[gamma]
        for beta, frac_infected in column.items():
            plot(beta / gamma, frac_infected, 'ro')
```
Here's what it looks like:
```
plot_sweep_frame(frame)
decorate(xlabel='Contact number (beta/gamma)',
ylabel='Fraction infected',
legend=False)
savefig('figs/chap06-fig03.pdf')
```
It turns out that the ratio `beta/gamma`, called the "contact number" is sufficient to predict the total number of infections; we don't have to know `beta` and `gamma` separately.
We can see that in the previous plot: when we plot the fraction infected versus the contact number, the results fall close to a curve.
### Analysis
In the book we figured out the relationship between $c$ and $s_{\infty}$ analytically. Now we can compute it for a range of values:
```
s_inf_array = linspace(0.0001, 0.9999, 101);
c_array = log(s_inf_array) / (s_inf_array - 1);
```
`total_infected` is the change in $s$ from the beginning to the end.
```
frac_infected = 1 - s_inf_array
frac_infected_series = Series(frac_infected, index=c_array);
```
Now we can plot the analytic results and compare them to the simulations.
```
plot_sweep_frame(frame)
plot(frac_infected_series, label='Analysis')
decorate(xlabel='Contact number (c)',
ylabel='Fraction infected')
savefig('figs/chap06-fig04.pdf')
```
The agreement is generally good, except for values of `c` less than 1.
## Exercises
**Exercise:** If we didn't know about contact numbers, we might have explored other possibilities, like the difference between `beta` and `gamma`, rather than their ratio.
Write a version of `plot_sweep_frame`, called `plot_sweep_frame_difference`, that plots the fraction infected versus the difference `beta-gamma`.
What do the results look like, and what does that imply?
```
def plot_sweep_frame_difference(frame):
    """Plot the values from a SweepFrame.

    For each (beta, gamma), plot the fraction infected versus the
    difference beta - gamma (rather than the ratio beta / gamma).

    frame: SweepFrame with one row per beta, one column per gamma
    """
    for gamma in frame.columns:
        series = frame[gamma]
        for beta in series.index:
            frac_infected = series[beta]
            # x-axis is the difference, not the contact number.
            plot(beta - gamma, frac_infected, 'ro')
plot_sweep_frame_difference(frame)
decorate(xlabel='Contact difference (beta - gamma)',
ylabel='Fraction infected',
legend=False)
savefig('figs/chap06-fig03.pdf')
# Solution goes here
```
**Exercise:** Suppose you run a survey at the end of the semester and find that 26% of students had the Freshman Plague at some point.
What is your best estimate of `c`?
Hint: if you print `frac_infected_series`, you can read off the answer.
```
print(frac_infected_series)
# Alternative solution
"""We can use `np.interp` to look up `s_inf` and
estimate the corresponding value of `c`, but it only
works if the index of the series is sorted in ascending
order. So we have to use `sort_index` first.
"""
frac_infected_series.sort_index(inplace=True)
np.interp(0.26, frac_infected_series, frac_infected_series.index)
```
| github_jupyter |
## Business Understanding
Now let's look at the 2nd question of interest. That is - What part of the StackOverflow affects the users satisfaction towards StackOverflow?
I use the data from the Stack Overflow survey answered by more than 64,000 reviewers, which includes personal information, coding experience, attitude towards coding, etc.
To answer this question we need to use the data related to the StackOverflow satisfaction such as the answers to question regarding the StackOverflow's service, moderation, community and etc.
## Data Understanding
To get started let's read in the necessary libraries and take a look at some of our columns of interest.
```
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score, mean_squared_error
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
df = pd.read_csv('./survey_results_public.csv')
df.head()
# Pick the questionaire answers related to StackOverflow services.
rel_col = [
'StackOverflowSatisfaction', 'StackOverflowDescribes', 'StackOverflowDevices',
'StackOverflowFoundAnswer', 'StackOverflowCopiedCode', 'StackOverflowJobListing', 'StackOverflowCompanyPage',
'StackOverflowJobSearch','StackOverflowNewQuestion', 'StackOverflowAnswer', 'StackOverflowMetaChat',
'StackOverflowAdsRelevant', 'StackOverflowAdsDistracting', 'StackOverflowModeration',
'StackOverflowCommunity', 'StackOverflowHelpful'
]
df_rel = df[rel_col]
```
Let's look into the quantitative variables first. From the description below, it seems the only quantitative variable is StackOverflowSatisfaction, and it has null values.
```
print('Total rows:', len(df_rel))
df_rel.describe()
```
Great — it seems many people like StackOverflow. Let's also check what the categorical variables look like.
```
# Check what kinds of values do the categorical variables contain
for col in df_rel.select_dtypes(include = ['object']).columns:
print(col)
print(df_rel[col].value_counts())
print()
```
It seems some of the questions have too many irregular answers and need to be cleaned.
## Prepare Data
Let's begin cleaning the variables. For StackOverflowSatisfaction just delete the null values since it has only a small portion.
```
# delete the NaN in StackOverflowSatisfaction
df_rel = df_rel[df_rel['StackOverflowSatisfaction'].notna()]
```
Also note that for the StackOverflowDevices, almost all the users use desktop, so it might be meaningless to use StackOverflowDevices for modeling.
```
# remove StackOverflowDevices since almost all users use desktops
df_rel = df_rel.drop('StackOverflowDevices', axis=1)
# Check what kinds of values do the categorical variables contain after the cleaning
for col in df_rel.select_dtypes(include = ['object']).columns:
print(col)
print(df_rel[col].value_counts())
print()
```
Since all explanatory variables are categorical, create dummy variables and use a random forest model.
```
# Define the function to clean data: create dummies for
# catagorical variables, and return df
def add_dummies(df):
'''
INPUT
df - the dataframe to be added dummy variables
OUTPUT
df - the dataframe output with all the categorical variables
converted to dummy variables
'''
# Dummy the categorical variables
cat_vars = df.select_dtypes(include=['object']).copy().columns
for var in cat_vars:
# for each cat add dummy var, drop original column
df = pd.concat([df.drop(var, axis=1), pd.get_dummies(df[var], prefix=var, prefix_sep='_', drop_first=True)], axis=1)
return df
df_rel = add_dummies(df_rel)
```
## Data Modeling
Use a random forest instead of a linear model, since all the explanatory variables are categorical.
```
### Use randomforest instead of linear model
from sklearn.ensemble import RandomForestRegressor
### Let's see what be the best number of features to use based on the test set performance
def find_optimal_rf_mod(X, y, cutoffs, test_size=.30, random_state=42, plot=True):
    '''
    Find the cutoff (minimum dummy-column support) that gives the best
    test-set r-squared for a random forest, then refit using that cutoff.

    INPUT
    X - pandas dataframe, X matrix
    y - pandas dataframe, response variable
    cutoffs - list of ints, cutoff for number of non-zero values in dummy categorical vars
    test_size - float between 0 and 1, default 0.3, determines the proportion of data as test data
    random_state - int, default 42, controls random state for train_test_split
    plot - boolean, default True, True to plot result

    OUTPUT
    r2_scores_test - list of floats of r2 scores on the test data
    r2_scores_train - list of floats of r2 scores on the train data
    rf_model - model object from sklearn
    X_train, X_test, y_train, y_test - output from sklearn train test split used for optimal model
    '''
    r2_scores_test, r2_scores_train, num_feats, results = [], [], [], dict()
    for cutoff in cutoffs:
        # Keep only the columns with more than `cutoff` non-zero entries.
        reduce_X = X.iloc[:, np.where(X.sum() > cutoff)[0]]
        num_feats.append(reduce_X.shape[1])
        # Split the data into train and test.
        X_train, X_test, y_train, y_test = train_test_split(
            reduce_X, y, test_size=test_size, random_state=random_state)
        # Fit the model and obtain predicted responses.
        rf_model = RandomForestRegressor()  # no normalizing here, but could tune other hyperparameters
        rf_model.fit(X_train, y_train)
        y_test_preds = rf_model.predict(X_test)
        y_train_preds = rf_model.predict(X_train)
        # Record the r2 values for this cutoff.
        r2_scores_test.append(r2_score(y_test, y_test_preds))
        r2_scores_train.append(r2_score(y_train, y_train_preds))
        results[str(cutoff)] = r2_score(y_test, y_test_preds)
    if plot:
        plt.plot(num_feats, r2_scores_test, label="Test", alpha=.5)
        plt.plot(num_feats, r2_scores_train, label="Train", alpha=.5)
        plt.xlabel('Number of Features')
        plt.ylabel('Rsquared')
        plt.title('Rsquared by Number of Features')
        plt.legend(loc=1)
        plt.show()
    # Refit one final model on the cutoff with the best test-set r2.
    best_cutoff = max(results, key=results.get)
    reduce_X = X.iloc[:, np.where(X.sum() > int(best_cutoff))[0]]
    num_feats.append(reduce_X.shape[1])
    X_train, X_test, y_train, y_test = train_test_split(
        reduce_X, y, test_size=test_size, random_state=random_state)
    rf_model = RandomForestRegressor()
    rf_model.fit(X_train, y_train)
    return r2_scores_test, r2_scores_train, rf_model, X_train, X_test, y_train, y_test
```
## Evaluation
Evaluate the model performance, from the result below you can see the model is underfit and needs improvement.
```
X = df_rel.drop('StackOverflowSatisfaction', axis=1)
y = df_rel['StackOverflowSatisfaction']
cutoffs = [5000, 3500, 2500, 1000, 100, 50, 30, 20, 10, 5]
r2_test, r2_train, rf_model, X_train, X_test, y_train, y_test = find_optimal_rf_mod(X, y, cutoffs)
y_test_preds = rf_model.predict(X_test)
preds_vs_act = pd.DataFrame(np.hstack([y_test.values.reshape(y_test.size,1), y_test_preds.reshape(y_test.size,1)]))
preds_vs_act.columns = ['actual', 'preds']
preds_vs_act['diff'] = preds_vs_act['actual'] - preds_vs_act['preds']
plt.plot(preds_vs_act['preds'], preds_vs_act['diff'], 'bo');
plt.xlabel('predicted');
plt.ylabel('difference');
```
## Model improvement
Use GridSearchCV to search for optimal hyper parameters.
```
# use GridSearchCV to search for optimal hyper parameters
from sklearn.model_selection import GridSearchCV
### Let's see what be the best number of features to use based on the test set performance
def find_optimal_rf_mod(X, y, cutoffs, test_size=.30, random_state=42, plot=True, param_grid=None):
    '''
    Find the cutoff (minimum dummy-column support) that gives the best
    test-set r-squared for a random forest — optionally tuned with
    GridSearchCV — then refit using that cutoff.

    INPUT
    X - pandas dataframe, X matrix
    y - pandas dataframe, response variable
    cutoffs - list of ints, cutoff for number of non-zero values in dummy categorical vars
    test_size - float between 0 and 1, default 0.3, determines the proportion of data as test data
    random_state - int, default 42, controls random state for train_test_split
    plot - boolean, default True, True to plot result
    param_grid - dict or None, default None; hyperparameter grid handed to
                 GridSearchCV (a plain RandomForestRegressor is used when None)

    OUTPUT
    r2_scores_test - list of floats of r2 scores on the test data
    r2_scores_train - list of floats of r2 scores on the train data
    rf_model - model object from sklearn
    X_train, X_test, y_train, y_test - output from sklearn train test split used for optimal model
    '''
    def _make_model():
        # Build the estimator in one place: a plain random forest, or a
        # grid search over `param_grid` when a grid is supplied.
        if param_grid is None:
            return RandomForestRegressor()  # could tune other hyperparameters
        rf_inst = RandomForestRegressor(n_jobs=-1, verbose=1)
        return GridSearchCV(rf_inst, param_grid, n_jobs=-1)

    r2_scores_test, r2_scores_train, num_feats, results = [], [], [], dict()
    for cutoff in cutoffs:
        # Keep only the columns with more than `cutoff` non-zero entries.
        reduce_X = X.iloc[:, np.where(X.sum() > cutoff)[0]]
        num_feats.append(reduce_X.shape[1])
        # Split the data into train and test.
        X_train, X_test, y_train, y_test = train_test_split(
            reduce_X, y, test_size=test_size, random_state=random_state)
        # Fit the model and obtain predicted responses.
        rf_model = _make_model()
        rf_model.fit(X_train, y_train)
        y_test_preds = rf_model.predict(X_test)
        y_train_preds = rf_model.predict(X_train)
        # Record the r2 values for this cutoff.
        r2_scores_test.append(r2_score(y_test, y_test_preds))
        r2_scores_train.append(r2_score(y_train, y_train_preds))
        results[str(cutoff)] = r2_score(y_test, y_test_preds)
    if plot:
        plt.plot(num_feats, r2_scores_test, label="Test", alpha=.5)
        plt.plot(num_feats, r2_scores_train, label="Train", alpha=.5)
        plt.xlabel('Number of Features')
        plt.ylabel('Rsquared')
        plt.title('Rsquared by Number of Features')
        plt.legend(loc=1)
        plt.show()
    # Refit one final model on the cutoff with the best test-set r2.
    best_cutoff = max(results, key=results.get)
    reduce_X = X.iloc[:, np.where(X.sum() > int(best_cutoff))[0]]
    num_feats.append(reduce_X.shape[1])
    X_train, X_test, y_train, y_test = train_test_split(
        reduce_X, y, test_size=test_size, random_state=random_state)
    rf_model = _make_model()
    rf_model.fit(X_train, y_train)
    return r2_scores_test, r2_scores_train, rf_model, X_train, X_test, y_train, y_test
```
### Evaluation
Looks better than before.
```
cutoffs = [5000, 3500, 2500, 1000, 100, 50, 30, 20, 10, 5]
params = {'n_estimators': [10, 100, 1000], 'max_depth': [1, 5, 10, 100]}
r2_test, r2_train, rf_model, X_train, X_test, y_train, y_test = find_optimal_rf_mod(X, y, cutoffs, param_grid=params)
```
Let's check the importance of features.
```
features = X_train.columns
importances = rf_model.best_estimator_.feature_importances_
indices = np.argsort(importances)
plt.figure(figsize=(10,20))
plt.title('Feature Importances')
plt.barh(range(len(indices)), importances[indices], color='b', align='center')
plt.yticks(range(len(indices)), [features[i] for i in indices])
plt.xlabel('Relative Importance')
plt.show()
```
We can see that the top 3 factors affecting satisfaction with StackOverflow are whether it's helpful, whether the moderation is fair, and whether the community is strong — so StackOverflow should maintain or improve mainly its usability, its moderation, and its community.
| github_jupyter |
# LeetCode Algorithm Test Case 551
## [学生出勤记录 I](https://leetcode-cn.com/problems/student-attendance-record-i/)
[TOC]
给你一个字符串 s 表示一个学生的出勤记录,其中的每个字符用来标记当天的出勤情况(缺勤、迟到、到场)。记录中只含下面三种字符:
1. 'A':Absent,缺勤
2. 'L':Late,迟到
3. 'P':Present,到场
如果学生能够 同时 满足下面两个条件,则可以获得出勤奖励:
1. 按 总出勤 计,学生缺勤('A')严格 少于两天。
2. 学生 不会 存在 连续 3 天或 3 天以上的迟到('L')记录。
如果学生可以获得出勤奖励,返回 true ;否则,返回 false 。
> 示例 1:
> 输入: s = "PPALLP"
> 输出: true
> 解释: 学生缺勤次数少于 2 次,且不存在 3 天或以上的连续迟到记录。
> 示例 2:
> 输入: s = "PPALLL"
> 输出: false
> 解释: 学生最后三天连续迟到,所以不满足出勤奖励的条件。
> 提示:
> - `1 <= s.length <= 1000`
> - `s[i]` 为 `A`、`L` 或 `P`
### Type A: Violent Enumeration Solution - Scheme I
> 2021/08/17 Kevin Tang
```
from typing import List
def checkRecord_TypeA_Scheme_A(s: str) -> bool:
    """Check whether the attendance record ``s`` earns the award.

    The award requires strictly fewer than two absences ('A') and no
    run of three or more consecutive late days ('L').

    :param s: attendance record; each char is one of 'A', 'L', 'P'
    :return: True if the student qualifies for the award

    >>> ic(checkRecord_TypeA_Scheme_A(s="PPALLP"))
    True
    >>> ic(checkRecord_TypeA_Scheme_A(s="PPALLL"))
    False
    >>> ic(checkRecord_TypeA_Scheme_A(s="AA"))
    False
    """
    absent = 0
    lateContinuousCount = 0  # length of the current run of consecutive 'L's
    for day in s:
        if day == 'L':
            lateContinuousCount += 1
        elif day == 'A':
            absent += 1
            lateContinuousCount = 0  # an absence breaks a run of lates
        elif day == 'P':
            lateContinuousCount = 0  # attendance breaks a run of lates
        if absent >= 2 or lateContinuousCount >= 3:
            return False
    return True
```
### Type A: Violent Enumeration Solution - Scheme II
> 2021/08/17 Kevin Tang
```
from typing import List
def checkRecord_TypeA_Scheme_B(s: str) -> bool:
    """Check whether the attendance record ``s`` earns the award.

    Early-exit variant: returns False as soon as a second 'A' or a
    third consecutive 'L' is encountered.

    :param s: attendance record; each char is one of 'A', 'L', 'P'
    :return: True if the student qualifies for the award

    >>> ic(checkRecord_TypeA_Scheme_B(s="PPALLP"))
    True
    >>> ic(checkRecord_TypeA_Scheme_B(s="PPALLL"))
    False
    >>> ic(checkRecord_TypeA_Scheme_B(s="AA"))
    False
    """
    absent: int = 0
    lateContinuousCount = 0  # length of the current run of consecutive 'L's
    for day in s:
        if day == 'A':
            absent += 1
            if absent >= 2:
                return False
        if day == 'L':
            lateContinuousCount += 1
            if lateContinuousCount >= 3:
                return False
        else:
            # Both 'A' and 'P' break a run of consecutive lates.
            lateContinuousCount = 0
    return True
```
### Type B: Built in Function Solution - Scheme I
> 2021/08/19 Kevin Tang
```
from typing import List
def checkRecord_TypeB_Scheme_A(s: str) -> bool:
    """Check whether the attendance record ``s`` earns the award.

    Built-in-function variant: there are fewer than two 'A's iff the
    first and last occurrence of 'A' coincide (both -1 when there is
    none), and there is no 3-day late run iff 'LLL' is not a substring.

    :param s: attendance record; each char is one of 'A', 'L', 'P'
    :return: True if the student qualifies for the award

    >>> ic(checkRecord_TypeB_Scheme_A(s="PPALLP"))
    True
    >>> ic(checkRecord_TypeB_Scheme_A(s="PPALLL"))
    False
    >>> ic(checkRecord_TypeB_Scheme_A(s="AA"))
    False
    >>> ic(checkRecord_TypeB_Scheme_A(s="LPLPLPLPLPL"))
    True
    """
    return (s.find('A') == s.rfind('A')) and ('LLL' not in s)
```
### Test Script
```
import doctest
from icecream import ic
ic(doctest.testmod())
```
| github_jupyter |
# Supplemental Information
This notebook is intended to serve as a supplement to the manuscript "High-throughput workflows for determining adsorption energies on solid surfaces." It outlines basic use of the code and workflow software that has been developed for processing surface slabs and placing adsorbates according to symmetrically distinct sites on surface facets.
## Installation
To use this notebook, we recommend installing python via [Anaconda](https://www.continuum.io/downloads), which includes jupyter and the associated iPython notebook software.
The code used in this project primarily makes use of two packages, pymatgen and atomate, which are installable via pip or the matsci channel on conda (e. g. `conda install -c matsci pymatgen atomate`). Development versions with editable code may be installed by cloning the repositories and using `python setup.py develop`.
## Example 1: AdsorbateSiteFinder (pymatgen)
An example using the AdsorbateSiteFinder class in pymatgen is shown below. We begin with an import statement for the necessary modules. To use the MP RESTful interface, you must provide your own API key either in the MPRester call i.e. ```mpr=MPRester("YOUR_API_KEY")``` or provide it in your .pmgrc.yaml configuration file. API keys can be accessed at materialsproject.org under your "Dashboard."
```
# Import statements
from pymatgen import Structure, Lattice, MPRester, Molecule
from pymatgen.analysis.adsorption import *
from pymatgen.core.surface import generate_all_slabs
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from matplotlib import pyplot as plt
%matplotlib inline
# Note that you must provide your own API Key, which can
# be accessed via the Dashboard at materialsproject.org
mpr = MPRester()
```
We create a simple fcc structure, generate its distinct slabs, and select the slab with a miller index of (1, 1, 1).
```
fcc_ni = Structure.from_spacegroup("Fm-3m", Lattice.cubic(3.5), ["Ni"], [[0, 0, 0]])
slabs = generate_all_slabs(fcc_ni, max_index=1, min_slab_size=8.0,
min_vacuum_size=10.0)
ni_111 = [slab for slab in slabs if slab.miller_index==(1,1,1)][0]
```
We make an instance of the AdsorbateSiteFinder and use it to find the relevant adsorption sites.
```
asf_ni_111 = AdsorbateSiteFinder(ni_111)
ads_sites = asf_ni_111.find_adsorption_sites()
print(ads_sites)
assert len(ads_sites) == 4
```
We visualize the sites using a tool from pymatgen.
```
fig = plt.figure()
ax = fig.add_subplot(111)
plot_slab(ni_111, ax, adsorption_sites=True)
```
Use the `AdsorbateSiteFinder.generate_adsorption_structures` method to generate structures of adsorbates.
```
fig = plt.figure()
ax = fig.add_subplot(111)
adsorbate = Molecule("H", [[0, 0, 0]])
ads_structs = asf_ni_111.generate_adsorption_structures(adsorbate,
repeat=[1, 1, 1])
plot_slab(ads_structs[0], ax, adsorption_sites=False, decay=0.09)
```
## Example 2: AdsorbateSiteFinder for various surfaces
In this example, the AdsorbateSiteFinder is used to find adsorption sites on different structures and miller indices.
```
fig = plt.figure()
axes = [fig.add_subplot(2, 3, i) for i in range(1, 7)]
mats = {"mp-23":(1, 0, 0), # FCC Ni
"mp-2":(1, 1, 0), # FCC Au
"mp-13":(1, 1, 0), # BCC Fe
"mp-33":(0, 0, 1), # HCP Ru
"mp-30": (2, 1, 1),
"mp-5229":(1, 0, 0),
} # Cubic SrTiO3
#"mp-2133":(0, 1, 1)} # Wurtzite ZnO
for n, (mp_id, m_index) in enumerate(mats.items()):
struct = mpr.get_structure_by_material_id(mp_id)
struct = SpacegroupAnalyzer(struct).get_conventional_standard_structure()
slabs = generate_all_slabs(struct, 1, 5.0, 2.0, center_slab=True)
slab_dict = {slab.miller_index:slab for slab in slabs}
asf = AdsorbateSiteFinder.from_bulk_and_miller(struct, m_index, undercoord_threshold=0.10)
plot_slab(asf.slab, axes[n])
ads_sites = asf.find_adsorption_sites()
sop = get_rot(asf.slab)
ads_sites = [sop.operate(ads_site)[:2].tolist()
for ads_site in ads_sites["all"]]
axes[n].plot(*zip(*ads_sites), color='k', marker='x',
markersize=10, mew=1, linestyle='', zorder=10000)
mi_string = "".join([str(i) for i in m_index])
axes[n].set_title("{}({})".format(struct.composition.reduced_formula, mi_string))
axes[n].set_xticks([])
axes[n].set_yticks([])
axes[4].set_xlim(-2, 5)
axes[4].set_ylim(-2, 5)
fig.savefig('slabs.png', dpi=200)
!open slabs.png
```
## Example 3: Generating a workflow from atomate
In this example, we demonstrate how MatMethods may be used to generate a full workflow for the determination of DFT-energies from which adsorption energies may be calculated. Note that this requires a working instance of [FireWorks](https://pythonhosted.org/FireWorks/index.html) and its dependency, [MongoDB](https://www.mongodb.com/). Note that MongoDB can be installed via [Anaconda](https://anaconda.org/anaconda/mongodb).
```
from fireworks import LaunchPad
lpad = LaunchPad()
lpad.reset('', require_password=False)
```
Import the necessary workflow-generating function from atomate:
```
from atomate.vasp.workflows.base.adsorption import get_wf_surface, get_wf_surface_all_slabs
```
Adsorption configurations take the form of a dictionary with the miller index as a string key and a list of pymatgen Molecule instances as the values.
```
co = Molecule("CO", [[0, 0, 0], [0, 0, 1.23]])
h = Molecule("H", [[0, 0, 0]])
```
Workflows are generated using a slab and a list of molecules.
```
struct = mpr.get_structure_by_material_id("mp-23") # fcc Ni
struct = SpacegroupAnalyzer(struct).get_conventional_standard_structure()
slabs = generate_all_slabs(struct, 1, 5.0, 2.0, center_slab=True)
slab_dict = {slab.miller_index:slab for slab in slabs}
ni_slab_111 = slab_dict[(1, 1, 1)]
wf = get_wf_surface([ni_slab_111], molecules=[co, h])
lpad.add_wf(wf)
```
The workflow may be inspected as below. Note that there are 9 optimization tasks, corresponding to the slab and 4 distinct adsorption configurations for each of the 2 adsorbates. Details on running FireWorks, including [singleshot launching](https://pythonhosted.org/FireWorks/worker_tutorial.html#launch-a-rocket-on-a-worker-machine-fireworker), [queue submission](https://pythonhosted.org/FireWorks/queue_tutorial.html#), [workflow management](https://pythonhosted.org/FireWorks/defuse_tutorial.html), and more can be found in the [FireWorks documentation](https://pythonhosted.org/FireWorks/index.html).
```
lpad.get_wf_summary_dict(1)
```
Note also that running FireWorks via atomate may require system specific tuning (e. g. for VASP parameters). More information is available in the [atomate documentation](http://pythonhosted.org/atomate/).
## Example 4 - Screening of oxygen evolution electrocatalysts on binary oxides
This final example is intended to demonstrate how to use the MP API and the adsorption workflow to do an initial high-throughput study of oxygen evolution electrocatalysis on binary oxides of transition metals.
```
from pymatgen.core.periodic_table import *
from pymatgen.core.surface import get_symmetrically_distinct_miller_indices
import tqdm
lpad.reset('', require_password=False)
```
For oxygen evolution, a common metric for the catalytic activity of a given catalyst is the theoretical overpotential corresponding to the mechanism that proceeds through OH\*, O\*, and OOH\*. So we can define our adsorbates:
```
OH = Molecule("OH", [[0, 0, 0], [-0.793, 0.384, 0.422]])
O = Molecule("O", [[0, 0, 0]])
OOH = Molecule("OOH", [[0, 0, 0], [-1.067, -0.403, 0.796],
[-0.696, -0.272, 1.706]])
adsorbates = [OH, O, OOH]
```
Then we can retrieve the structures using the MP rest interface, and write a simple for loop which creates all of the workflows corresponding to every slab and every adsorption site for each material. The code below will take ~15 minutes. This could be parallelized to be more efficient, but is not for simplicity in this case.
```
elements = [Element.from_Z(i) for i in range(1, 103)]
trans_metals = [el for el in elements if el.is_transition_metal]
# tqdm adds a progress bar so we can see the progress of the for loop
for metal in tqdm.tqdm_notebook(trans_metals):
# Get relatively stable structures with small unit cells
data = mpr.get_data("{}-O".format(metal.symbol))
data = [datum for datum in data if datum["e_above_hull"] < 0.05]
data = sorted(data, key = lambda x: x["nsites"])
struct = Structure.from_str(data[0]["cif"], fmt='cif')
# Put in conventional cell settings
struct = SpacegroupAnalyzer(struct).get_conventional_standard_structure()
# Get distinct miller indices for low-index facets
wf = get_wf_surface_all_slabs(struct, adsorbates)
lpad.add_wf(wf)
print("Processed: {}".format(struct.formula))
```
Ultimately, running this code produces workflows that contain many (tens of thousands) of calculations, all of which can be managed using FireWorks and queued on supercomputing resources. Limitations on those resources might necessitate a more selective approach towards choosing surface facets or representative materials. Nevertheless, this approach provides a complete and structurally accurate way of screening materials for adsorption properties that can be managed using FireWorks.
| github_jupyter |
# Decoding specified ISS tile(s)
This notebook provides an example of how to decode an ISS tile from the mouse brain dataset used in the PoSTcode paper, which is stored in the local directory ``postcode/example-iss-tile-data/``.
```
import numpy as np
import pandas as pd
from pandas import read_csv
import matplotlib.pyplot as plt
import pickle
import os
from postcode.decoding_functions import *
from postcode.spot_detection_functions import *
from postcode.reading_data_functions import *
%load_ext autoreload
%autoreload 2
```
* Specify directory location ``data_path`` with channel_info.csv and taglist.csv
```
# Dataset to decode; its channel_info.csv, taglist.csv, tile_names.csv and
# registered tiles live under <parent-dir>/example-iss-tile-data/<dataset>/.
dataset_name = 'NT_ISS_KR0018'
data_path = os.path.dirname(os.getcwd()) + '/example-iss-tile-data/' + dataset_name + '/'
```
* Read channel_info.csv and taglist.csv files
```
# Parse the codebook. Presumably K = number of barcodes/genes, R = rounds,
# C = channels, barcodes_01 = binary barcode tensor — confirm against
# read_taglist_and_channel_info's docstring.
barcodes_01, K, R, C, gene_names, channels_info = read_taglist_and_channel_info(data_path)
```
## Spot detection
* Input parameters for spot detection via trackpy should be specified in dictionary ``spots_params``, which has to contain value for key ``'trackpy_spot_diam'`` indicating spot diameter in pixels.
```
spots_params = {'trackpy_spot_diam':5} #parameters for spot detection: spot diameter must be specified
spots_params['trackpy_prc'] = 0 #by default this parameter is set to 64; decrease it to select more spots
spots_params['trackpy_sep'] = 2 #by default this parameter is set to 'trackpy_spot_diam'+1
# Registered tile images and the list of tile names to process.
tifs_path = data_path + 'selected-tiles/'
tile_names = read_csv(data_path + 'tile_names.csv')
# Derive the X/Y tile-index range covered by the selected tile names.
x_min, x_max, y_min, y_max = find_xy_range_of_tile_names(tile_names['selected_tile_names'])
tiles_info = {'tile_size':1000, 'y_max_size':1000, 'x_max_size':1000, 'filename_prefix':'out_opt_flow_registered_', 'y_max':y_max, 'x_max':x_max}
tiles_to_load = {'y_start':1, 'y_end':1, 'x_start':12, 'x_end':12} #tile(s) to load (only 'X12_Y1' tile of size 1000x1000 is stored locally)
# Detect spots on the anchor channel of cycle 0 and extract per-spot intensities.
spots_out = load_tiles_to_extract_spots(tifs_path, channels_info, C, R, tile_names, tiles_info, tiles_to_load, spots_params,
                                        anchors_cy_ind_for_spot_detect=0, compute_also_without_tophat=False, return_anchors=True)
print('In total {} spots were detected.'.format(spots_out['spots'].shape[0]))
```
## Spot decoding
* Estimate model parameters and compute class probabilities
```
# Fit the PoSTcode decoding model and compute per-spot class probabilities.
out = decoding_function(spots_out['spots'], barcodes_01, print_training_progress=True)
```
* Create a data frame from the decoding output
```
# Class labels: gene names plus the three extra classes the decoder emits.
df_class_names = np.concatenate((gene_names,['infeasible','background','nan']))
df_class_codes = np.concatenate((channels_info['barcodes_AGCT'],['inf','0000','NA']))
# One row per spot: decoded name/code/probability joined with spot locations.
decoded_spots_df = decoding_output_to_dataframe(out, df_class_names, df_class_codes)
decoded_df = pd.concat([decoded_spots_df, spots_out['spots_loc']], axis=1)
```
## Visualizing decoding results
* Plot loss, estimated activation parameters and covariance: the loss should decrease, $\hat\alpha+\hat\beta$ should be separated from $\hat\alpha$, and the covariance matrix should have a checkerboard pattern
```
# Three diagnostic panels: training loss, activation parameters, covariance.
fig, (ax1, ax2, ax3) = plt.subplots(1, 3, gridspec_kw={'width_ratios': [1, 3, 1]}, figsize=(14, 2.5), dpi=100, facecolor='w', edgecolor='k')
# Base letter of each coding channel (used to label the R*C axis ticks).
channel_base = np.array(channels_info['channel_base'])[np.where(np.array(channels_info['coding_chs']) == True)[0]]
activation = (out['params']['codes_tr_v_star']+out['params']['codes_tr_consts_v_star'])[0,:].numpy() #corresponding to the channel activation (code=1)
no_activation = out['params']['codes_tr_consts_v_star'][0,:].numpy() # (code=0)
channel_activation=np.stack((no_activation,activation))
# Panel 1: loss per iteration, normalised by the number of spots.
ax1.plot(np.arange(0,len(out['params']['losses'])),(1/out['class_probs'].shape[0]*np.asarray(out['params']['losses'])))
ax1.annotate(np.round(1/out['class_probs'].shape[0]*out['params']['losses'][-1],4),(-2+len(out['params']['losses']),0.2+1/out['class_probs'].shape[0]*out['params']['losses'][-1]),size=6)
ax1.set_title('Loss over iterations')
# Panel 2: estimated alpha (inactive) vs alpha+beta (active) per round/channel.
ax2.scatter(np.arange(1,1+R*C),activation,c='green',label=r'$\hat{\alpha}+\hat{\beta}$ (channel active)')
ax2.scatter(np.arange(1,1+R*C),no_activation,c='orange',label=r'$\hat{\alpha}$ (channel not active)')
ax2.legend(loc=9)
# Dashed vertical separators between sequencing rounds (every C channels).
ax2.vlines(np.arange(0.5,R*C+.8,C), out['params']['codes_tr_consts_v_star'].min(), (out['params']['codes_tr_v_star']+out['params']['codes_tr_consts_v_star']).max(), linestyles='dashed')
ax2.set_xticks(np.arange(1,1+R*C))
ax2.set_xticklabels(np.tile(channel_base,R))
ax2.set_title('Parameters of the barcode transformation as activation / no activation')
# Panel 3: estimated (R*C x R*C) covariance matrix.
covim = ax3.imshow(out['params']['sigma_star'])
ax3.set_xticks(np.arange(0,R*C))
ax3.set_xticklabels(np.tile(channel_base,R))
ax3.set_yticks(np.arange(0,R*C))
ax3.set_yticklabels(np.tile(channel_base,R))
ax3.set_title('Estimated covariance')
plt.colorbar(covim, ax=ax3, fraction=0.02)
plt.show()
```
* Plot histogram of barcode assignments
```
# Count spots per gene above the probability threshold; everything at or
# below the threshold is lumped into a single 'thr' bar.
thr=0.7
df = pd.concat([decoded_df.Name[decoded_df.Probability>thr].value_counts(), decoded_df.Name[decoded_df.Probability <=thr].replace(np.unique(decoded_df.Name),'thr').value_counts()]).sort_index(axis=0)#.sort_values(ascending=False)
fig, ax = plt.subplots(1, 1, figsize=(14,3), dpi=100, facecolor='w', edgecolor='k')
df.plot(kind='bar',width=0.7,rot=90,logy=True,fontsize=6,ax=ax)
# Spots confidently decoded to a real gene (not background/infeasible/NaN).
# NOTE(review): the class list above uses 'nan' (lowercase) but this filter
# checks 'NaN' — confirm which label the dataframe actually carries.
num_decoded_barcodes = sum((decoded_df.Name!='background')&(decoded_df.Name!='infeasible')&(decoded_df.Name!='NaN')&(decoded_df.Probability>thr))
# Annotate each bar with its count.
for p in ax.patches:
    ax.annotate(str(p.get_height()), (p.get_x() * 1.005, p.get_height() * 1.005),size=6)
plt.title('Histogram of decoded barcodes afther thresholding with {}: \n in total {} spots detected while {} spots decoded ({:.02f}%)'.format(thr,decoded_df.shape[0], num_decoded_barcodes, 100*num_decoded_barcodes/decoded_df.shape[0]), fontsize=10)
plt.show()
```
* Plot spatial patterns of a few selected barcodes over the whole tile
```
# Grid-binned spatial density heatmaps for selected genes plus the
# infeasible/background classes, over the whole tile.
names = ['Cux2','Rorb','Grin3a','infeasible','background']
log_scale = True
fig, ax = plt.subplots(1, len(names), figsize=(3*len(names), 3), dpi=100, facecolor='w', edgecolor='k')
for i in range(len(names)):
    im = heatmap_pattern(decoded_df, names[i], grid=10, thr=0.7, plot_probs=True)
    # Log2 compression makes sparse patterns visible next to dense ones.
    if log_scale:
        ims = ax[i].imshow(np.log2(1+im),cmap='jet')
    else:
        ims = ax[i].imshow(im)
    ax[i].axis('off')
    plt.colorbar(ims, ax=ax[i], fraction=0.02)
    # Title each panel with the class name and its barcode string.
    ax[i].set_title('{} (barcode: {})'.format(names[i],df_class_codes[df_class_names==names[i]][0]),fontsize=8)
fig.suptitle('Spatial patterns in logaritmic scale')
plt.show()
```
* Plot all detected / decoded spots and a selected barcode over a zoom of the anchor channel
```
# Zoom window (in tile-local pixel coordinates) on the anchor image.
x00 = 600; y00 = 350 #coordinates of the zoom (between 0--1000)
delta = 200 #size of the zoom in each axis (up to 1000)
anchor_zoom = spots_out['anchors'][y00:y00+delta,x00:x00+delta] #anchor of the last loaded tile
#in case multiple tiles were used, shift the zoom origin into the global
#coordinate frame of the last tile loaded
y00 = y00+(int(decoded_df.Tile.iloc[-1][-2:].replace('Y',''))-tiles_to_load['y_start'])*tiles_info['tile_size']
x00 = x00+(int(decoded_df.Tile.iloc[-1][1:3].replace('_',''))-tiles_to_load['x_start'])*tiles_info['tile_size']
plt.figure(num=None, figsize=(12, 4), dpi=100, facecolor='w', edgecolor='k')
# Panel 1: every detected spot inside the zoom window.
plt.subplot(1,3,1)
plt.imshow(np.log(0.06+anchor_zoom/anchor_zoom.max()),cmap='gray')
y0 = np.around(decoded_df.Y.to_numpy()).astype(np.int32)-y00; x0 = np.around(decoded_df.X.to_numpy()).astype(np.int32)-x00
y = y0[(y0>=0)&(y0<delta)&(x0>=0)&(x0<delta)]; x = x0[(y0>=0)&(y0<delta)&(x0>=0)&(x0<delta)]
plt.scatter(x,y,s=13,marker='.',c='orange')
plt.title('Detected spots',fontsize=10)
plt.axis('off')
from matplotlib import cm
from matplotlib.lines import Line2D
# One (marker, colour) pair per gene for the decoded-barcodes overlay.
markers = list(Line2D.markers.keys()); markersL = markers[1:20]*(int(K/20)+1)
hsv_cols = cm.get_cmap('hsv', K+1); colL=hsv_cols(range(2*K)); colL=np.concatenate((colL[::2,],colL[1::2,]))
# Panel 2: all decoded barcodes above the probability threshold `thr`.
plt.subplot(1,3,2)
plt.imshow(np.log(0.06+anchor_zoom/anchor_zoom.max()),cmap='gray')
for name in gene_names:
    col = colL[np.where(gene_names==name)[0][0],:]; mar = markersL[np.where(gene_names==name)[0][0]]
    x0 = np.around(decoded_df.X[(decoded_df.Name == name) & (decoded_df.Probability >thr)].to_numpy()).astype(np.int32)-x00
    y0 = np.around(decoded_df.Y[(decoded_df.Name == name) & (decoded_df.Probability >thr)].to_numpy()).astype(np.int32)-y00
    # Keep only the spots that fall inside the zoom window.
    y = y0[(y0>=0)&(y0<delta)&(x0>=0)&(x0<delta)]; x = x0[(y0>=0)&(y0<delta)&(x0>=0)&(x0<delta)]
    plt.scatter(x,y,s=13,marker=mar,c=np.repeat(col.reshape((1,4)),x.shape[0],axis=0))
plt.title('Decoded barcodes',fontsize=10)
plt.axis('off')
# Panel 3: spots decoded as a single selected gene.
plt.subplot(1,3,3)
plt.imshow(np.log(0.06+anchor_zoom/anchor_zoom.max()),cmap='gray')
name = 'Cux2'; thr=0.7
x0 = np.around(decoded_df.X[(decoded_df.Name == name) & (decoded_df.Probability >thr)].to_numpy()).astype(np.int32)-x00
y0 = np.around(decoded_df.Y[(decoded_df.Name == name) & (decoded_df.Probability >thr)].to_numpy()).astype(np.int32)-y00
y = y0[(y0>=0)&(y0<delta)&(x0>=0)&(x0<delta)]; x = x0[(y0>=0)&(y0<delta)&(x0>=0)&(x0<delta)]
plt.scatter(x,y,s=13,marker='.',c='cyan')
plt.title('{} ({})'.format(name,decoded_df.Code[decoded_df.Name==name].to_numpy()[0]),fontsize=10)
plt.axis('off')
plt.suptitle('Zoomed section of the anchor channel used for spot detection')
plt.show()
```
| github_jupyter |
<a href="https://colab.research.google.com/github/Gaurav7004/NEWS_ARTICLES_DEPLOYMENT/blob/main/ALL_NEWSPAPERS_ARTICLES_EXTRACTION_13th_Jan_2022.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
from bs4 import BeautifulSoup
import requests
import pandas as pd
import re
# Base listing URL for the 'economy' section; page number is appended below.
url='https://www.financialexpress.com/economy/page/'
# Browser-like User-Agent so the site does not reject the request.
agent = {"User-Agent":'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36'}
page=requests.get(url,headers=agent)
# NOTE(review): this soup of the bare listing URL is never used later.
soup=BeautifulSoup(page.text,'html.parser')
# Build the URLs of listing pages 1..4.
page_links=[]
for page_num in range(1,5):
    page_url=url+str(page_num)
    page_links.append(page_url)
page_links[0:4]
# Collect the <a> tags of every article headline on each listing page.
str_for_search = []
# importing modules
import urllib.request
for url in page_links:
    # opening the url for reading
    html = urllib.request.urlopen(url)
    # parsing the html file
    htmlParse = BeautifulSoup(html, 'html.parser')
    # print(htmlParse)
    # each headline sits inside a div with class 'entry-title'
    for para in htmlParse.find_all('div', class_='entry-title'):
        lis = para.find_all('a')
        str_for_search.append(lis)
str(str_for_search[0])
Final_LIST = []
## Extracting required News Articles Link
# Pull the first http(s) URL out of each stringified <a> tag list.
# NOTE(review): the regex matches up to the next whitespace, so trailing
# markup characters (e.g. '">') may be included in the URL — verify output.
for i in range(len(str_for_search)):
    regex="(?P<url>https?://[^\s]+)"
    matches = re.findall(regex, str(str_for_search[i]))
    Final_LIST.append(matches[0])
Final_LIST[0]
## Lists to get dates and news articles
List_articles = []
List_date = []
List_month = []
List_year = []
# Visit each article page and scrape its body text and publication date.
for i in range(len(Final_LIST)):
    # opening the url for reading
    html = urllib.request.urlopen(str(Final_LIST[i]))
    # parsing the html file
    htmlParse = BeautifulSoup(html, 'html.parser')
    # getting all the paragraphs of articles
    for para in htmlParse.find_all(['div'], class_='entry-content wp-block-post-content'):
        # txt = para.find_all('p')
        List_articles.append(para.get_text())
    # Getting respective month, date, year the article published
    # (the date div's text is assumed to be 'Month Day Year' — TODO confirm).
    for det in htmlParse.find_all('div', class_='ie-network-post-meta-date'):
        dt = det.get_text()
        dt = dt.split(' ')
        List_month.append(dt[0])
        List_date.append(dt[1])
        List_year.append(dt[2])
# Spot checks on one scraped article.
List_articles[109]
List_date[109]
## Newspaper Name
# Constant source column, one entry per article.
Newspaper_Name = ['The Financial Express'] * len(List_articles)
# NOTE(review): assumes articles and dates stay aligned one-to-one; a page
# missing its date div would shift the columns — confirm.
df = pd.DataFrame(list(zip(Newspaper_Name, List_articles, List_year, List_month, List_date)), columns =['Newspaper Name', 'Article', 'Year', 'Month', 'Date'])
df.to_excel('News_Articles_Scraped_Data.xlsx')
df
```
| github_jupyter |
# Self DCGAN
<table class="tfo-notebook-buttons" align="left" >
<td>
<a target="_blank" href="https://colab.research.google.com/github/HighCWu/SelfGAN/blob/master/implementations/dcgan/self_dcgan.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/HighCWu/SelfGAN/blob/master/implementations/dcgan/self_dcgan.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
</table>
## Datasets
```
import glob
import random
import os
import numpy as np
from torch.utils.data import Dataset
from PIL import Image
import torchvision.transforms as transforms
class ImageDataset(Dataset):
    """Flat image-folder dataset.

    Recursively collects every file under ``root`` (sorted, so ordering is
    deterministic) and yields each one as a transformed RGB image.
    """

    def __init__(self, root, transforms_=None):
        # Compose the user-supplied transform list once up front.
        self.transform = transforms.Compose(transforms_)
        # Deterministic recursive listing of all files under root.
        self.files = sorted(glob.glob(root + '/**/*.*', recursive=True))

    def __getitem__(self, index):
        # Wrap the index so iteration never falls off the end of the list.
        path = self.files[index % len(self.files)]
        img = Image.open(path).convert('RGB')
        w, h = img.size
        return self.transform(img)

    def __len__(self):
        return len(self.files)
```
## Prepare
```
import argparse
import os
import sys
import numpy as np
import math
import torchvision.transforms as transforms
from torchvision.utils import save_image
from torch.utils.data import DataLoader
from torchvision import datasets
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import torch
# Output directories for generated samples (SelfGAN vs. normal GAN runs).
os.makedirs('images', exist_ok=True)
os.makedirs('images_normal', exist_ok=True)
# Hyperparameters; parse_known_args tolerates the extra argv entries a
# notebook kernel passes.
parser = argparse.ArgumentParser()
parser.add_argument('--n_epochs', type=int, default=200, help='number of epochs of training')
parser.add_argument('--batch_size', type=int, default=64, help='size of the batches')
parser.add_argument('--lr', type=float, default=2e-4, help='adam: learning rate')
parser.add_argument('--b1', type=float, default=0.5, help='adam: decay of first order momentum of gradient')
parser.add_argument('--b2', type=float, default=0.999, help='adam: decay of first order momentum of gradient')
parser.add_argument('--n_cpu', type=int, default=8, help='number of cpu threads to use during batch generation')
parser.add_argument('--latent_dim', type=int, default=100, help='dimensionality of the latent space')
parser.add_argument('--img_size', type=int, default=64, help='size of each image dimension')
parser.add_argument('--channels', type=int, default=3, help='number of image channels')
parser.add_argument('--sample_interval', type=int, default=200, help='interval betwen image samples')
parser.add_argument('--data_use', type=str, default='bedroom', help='datasets:[mnist]/[bedroom]')
opt, _ = parser.parse_known_args()
# MNIST is smaller: shrink the image size and switch to one channel.
if opt.data_use == 'mnist':
    opt.img_size = 32
    opt.channels = 1
print(opt)
import os, zipfile
from google.colab import files
if opt.data_use == 'bedroom':
    os.makedirs('data/bedroom', exist_ok=True)
    # Download the LSUN bedroom sample via the Kaggle API; requires the
    # user's kaggle.json to be uploaded into the Colab session.
    print('Please upload your kaggle api json.')
    files.upload()
    ! mkdir /root/.kaggle
    ! mv ./kaggle.json /root/.kaggle
    ! chmod 600 /root/.kaggle/kaggle.json
    ! kaggle datasets download -d jhoward/lsun_bedroom
    out_fname = 'lsun_bedroom.zip'
    zip_ref = zipfile.ZipFile(out_fname)
    zip_ref.extractall('./')
    zip_ref.close()
    os.remove(out_fname)
    # The outer archive contains sample.zip with the actual images.
    out_fname = 'sample.zip'
    zip_ref = zipfile.ZipFile(out_fname)
    zip_ref.extractall('data/bedroom/')
    zip_ref.close()
    os.remove(out_fname)
else:
    os.makedirs('data/mnist', exist_ok=True)
# (channels, height, width) of the training images.
img_shape = (opt.channels, opt.img_size, opt.img_size)
# Use the GPU whenever one is available.
cuda = True if torch.cuda.is_available() else False
def weights_init_normal(m):
    """DCGAN-style weight init, applied via ``module.apply``.

    Conv layers get weights ~ N(0, 0.02); BatchNorm2d layers get weights
    ~ N(1, 0.02) and zero bias. Any other module is left untouched.
    """
    name = m.__class__.__name__
    if 'Conv' in name:
        torch.nn.init.normal_(m.weight.data, 0.0, 0.02)
    elif 'BatchNorm2d' in name:
        torch.nn.init.normal_(m.weight.data, 1.0, 0.02)
        torch.nn.init.constant_(m.bias.data, 0.0)
class Generator(nn.Module):
    """DCGAN generator.

    A latent vector is projected by a linear layer onto a 128-channel
    feature map of side ``img_size // 4``, then refined by two
    upsample+conv stages into an image in [-1, 1] (Tanh output).
    """

    def __init__(self):
        super(Generator, self).__init__()
        # Two 2x upsamplings follow, so the initial map is img_size // 4.
        self.init_size = opt.img_size // 4
        self.l1 = nn.Sequential(nn.Linear(opt.latent_dim, 128 * self.init_size ** 2))
        blocks = [
            nn.BatchNorm2d(128),
            nn.Upsample(scale_factor=2),
            nn.Conv2d(128, 128, 3, stride=1, padding=1),
            nn.BatchNorm2d(128, 0.8),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Upsample(scale_factor=2),
            nn.Conv2d(128, 64, 3, stride=1, padding=1),
            nn.BatchNorm2d(64, 0.8),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(64, opt.channels, 3, stride=1, padding=1),
            nn.Tanh(),
        ]
        self.conv_blocks = nn.Sequential(*blocks)

    def forward(self, z):
        feats = self.l1(z)
        feats = feats.view(feats.shape[0], 128, self.init_size, self.init_size)
        return self.conv_blocks(feats)
class Discriminator(nn.Module):
    """DCGAN discriminator.

    Four strided conv blocks (each halving the spatial size), followed by
    a sigmoid linear head that outputs a single real/fake probability.
    """

    def __init__(self):
        super(Discriminator, self).__init__()

        def discriminator_block(in_filters, out_filters, bn=True):
            # Strided conv halves H and W; dropout regularises; BN optional.
            layers = [nn.Conv2d(in_filters, out_filters, 3, 2, 1),
                      nn.LeakyReLU(0.2, inplace=True),
                      nn.Dropout2d(0.25)]
            if bn:
                layers.append(nn.BatchNorm2d(out_filters, 0.8))
            return layers

        stages = []
        stages += discriminator_block(opt.channels, 16, bn=False)  # no BN on first block
        stages += discriminator_block(16, 32)
        stages += discriminator_block(32, 64)
        stages += discriminator_block(64, 128)
        self.model = nn.Sequential(*stages)

        # Four stride-2 convs shrink the image by 2**4 in each dimension.
        ds_size = opt.img_size // 2 ** 4
        self.adv_layer = nn.Sequential(nn.Linear(128 * ds_size ** 2, 1),
                                       nn.Sigmoid())

    def forward(self, img):
        features = self.model(img)
        features = features.view(features.shape[0], -1)
        return self.adv_layer(features)
class SelfGAN(nn.Module):
    """Bundles generator and discriminator so that a single forward pass
    returns the generated batch together with discriminator scores for
    generated, real, and (stale) fake images."""

    def __init__(self):
        super(SelfGAN, self).__init__()
        # Initialize generator and discriminator
        self.generator = Generator()
        self.discriminator = Discriminator()

    def forward(self, z, real_img, fake_img):
        gen_img = self.generator(z)
        score = self.discriminator
        return gen_img, score(gen_img), score(real_img), score(fake_img)
```
## SelfGAN Part
```
# Loss function
adversarial_loss = torch.nn.BCELoss()
# Per-element (unreduced) BCE, used by the sharded training loop below.
shard_adversarial_loss = torch.nn.BCELoss(reduction='none')
# Initialize SelfGAN model
self_gan = SelfGAN()
if cuda:
    self_gan.cuda()
    adversarial_loss.cuda()
    shard_adversarial_loss.cuda()
# Initialize weights
self_gan.apply(weights_init_normal)
# Configure data loader: LSUN bedroom folder or torchvision MNIST,
# both normalised to [-1, 1] to match the generator's Tanh output.
dataloader = torch.utils.data.DataLoader(
    ImageDataset('data/bedroom',
                 transforms_=[
                     transforms.Resize((opt.img_size, opt.img_size)),
                     transforms.ToTensor(),
                     transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
                 ]) if opt.data_use == 'bedroom' else
    datasets.MNIST('data/mnist', train=True, download=True,
                   transform=transforms.Compose([
                       transforms.Resize(opt.img_size),
                       transforms.ToTensor(),
                       transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
                   ])),
    batch_size=opt.batch_size, shuffle=True, drop_last=True)
# Optimizers: a single optimizer updates generator and discriminator jointly.
optimizer = torch.optim.Adam(self_gan.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))
Tensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
# Buffer of the previous batch's generator outputs, replayed as stale fakes.
last_imgs = Tensor(opt.batch_size, *img_shape)*0.0
```
### Standard performance on the GPU
```
# ----------
#  Training
# ----------
for epoch in range(opt.n_epochs):
    for i, imgs in enumerate(dataloader):
        # MNIST batches are (images, labels); keep only the images.
        if opt.data_use != 'bedroom':
            imgs = imgs[0]
        # Adversarial ground truths
        valid = Variable(Tensor(imgs.size(0), 1).fill_(1.0), requires_grad=False)
        fake = Variable(Tensor(imgs.size(0), 1).fill_(0.0), requires_grad=False)
        # Configure input
        real_imgs = Variable(imgs.type(Tensor))
        # -----------------
        #  Train SelfGAN
        # -----------------
        optimizer.zero_grad()
        # Sample noise as generator input
        z = Variable(Tensor(np.random.normal(0, 1, (imgs.shape[0], opt.latent_dim))))
        # Generate a batch of images and score generated/real/stale-fake batches.
        gen_imgs, validity_gen, validity_real, validity_fake = self_gan(z, real_imgs, last_imgs)
        # Loss measures generator's ability to fool the discriminator and measure discriminator's ability to classify real from generated samples at the same time
        gen_loss = adversarial_loss(validity_gen, valid)
        real_loss = adversarial_loss(validity_real, valid)
        fake_loss = adversarial_loss(validity_fake, fake)
        # v_g / v_f scale the generator and stale-fake terms by the current
        # discriminator scores before combining into one loss.
        v_g = 1 - torch.mean(validity_gen)
        v_f = torch.mean(validity_fake)
        s_loss = (real_loss + v_g*gen_loss*0.1 + v_f*fake_loss*0.9) / 2
        s_loss.backward()
        optimizer.step()
        # Keep this batch's output to replay as stale fakes next iteration.
        last_imgs = gen_imgs.detach()
        sys.stdout.flush()
        print ("\r[Epoch %d/%d] [Batch %d/%d] [S loss: %f R loss: %f F loss: %f G loss: %f]" % (epoch, opt.n_epochs, i, len(dataloader),
                                                        s_loss.item(), real_loss.item(), fake_loss.item(), gen_loss.item()),
                end='')
        batches_done = epoch * len(dataloader) + i
        if batches_done % opt.sample_interval == 0:
            save_image(gen_imgs.data[:25], 'images/%d.png' % batches_done, nrow=5, normalize=True)
```
### Running on the GPU with similar performance of running on the TPU (Maybe)
```
# ----------
#  Training
# ----------
# Same objective as the loop above, but each batch is processed in 8 shards
# with gradients accumulated before a single optimizer step.
for epoch in range(opt.n_epochs):
    for i, imgs in enumerate(dataloader):
        # MNIST batches are (images, labels); keep only the images.
        if opt.data_use != 'bedroom':
            imgs = imgs[0]
        # Adversarial ground truths
        valid = Variable(Tensor(imgs.size(0), 1).fill_(1.0), requires_grad=False)
        fake = Variable(Tensor(imgs.size(0), 1).fill_(0.0), requires_grad=False)
        # Configure input
        real_imgs = Variable(imgs.type(Tensor))
        # -----------------
        #  Train SelfGAN
        # -----------------
        optimizer.zero_grad()
        # Sample noise as generator input
        z = Variable(Tensor(np.random.normal(0, 1, (imgs.shape[0], opt.latent_dim))))
        # Shard size: the batch is split into 8 equal slices.
        s = opt.batch_size//8
        for k in range(8):
            # Generate a batch of images
            gen_imgs, validity_gen, validity_real, validity_fake = self_gan(z[k*s:k*s+s], real_imgs[k*s:k*s+s], last_imgs[k*s:k*s+s])
            # Loss measures generator's ability to fool the discriminator and measure discriminator's ability to classify real from generated samples at the same time
            gen_loss = shard_adversarial_loss(validity_gen, valid[k*s:k*s+s])
            real_loss = shard_adversarial_loss(validity_real, valid[k*s:k*s+s])
            fake_loss = shard_adversarial_loss(validity_fake, fake[k*s:k*s+s])
            # Normalised weights derived from the current discriminator scores.
            v_g = 1 - torch.mean(validity_gen)
            v_r = 1 - torch.mean(validity_real)
            v_f = torch.mean(validity_fake)
            v_sum = v_g + v_r + v_f
            s_loss = v_r*real_loss/v_sum + v_g*gen_loss/v_sum + v_f*fake_loss/v_sum
            # Reduce the per-element losses to scalars for backward/logging.
            gen_loss = torch.mean(gen_loss)
            real_loss = torch.mean(real_loss)
            fake_loss = torch.mean(fake_loss)
            s_loss = torch.mean(s_loss)
            # Accumulate gradients across shards; the step happens after the loop.
            s_loss.backward()
            last_imgs[k*s:k*s+s] = gen_imgs.detach()
        optimizer.step()
        sys.stdout.flush()
        print ("\r[Epoch %d/%d] [Batch %d/%d] [S loss: %f R loss: %f F loss: %f G loss: %f]" % (epoch, opt.n_epochs, i, len(dataloader),
                                                        s_loss.item(), real_loss.item(), fake_loss.item(), gen_loss.item()),
                end='')
        batches_done = epoch * len(dataloader) + i
        if batches_done % opt.sample_interval == 0:
            save_image(last_imgs.data[:25], 'images/%d.png' % batches_done, nrow=5, normalize=True)
```
## Normal GAN Part
```
# Loss function
adversarial_loss = torch.nn.BCELoss()
# Initialize generator and discriminator (baseline: trained separately).
generator = Generator()
discriminator = Discriminator()
if cuda:
    generator.cuda()
    discriminator.cuda()
    adversarial_loss.cuda()
# Initialize weights
generator.apply(weights_init_normal)
discriminator.apply(weights_init_normal)
# Configure data loader (same datasets/normalisation as the SelfGAN run).
dataloader = torch.utils.data.DataLoader(
    ImageDataset('data/bedroom',
                 transforms_=[
                     transforms.Resize((opt.img_size, opt.img_size)),
                     transforms.ToTensor(),
                     transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
                 ]) if opt.data_use == 'bedroom' else
    datasets.MNIST('data/mnist', train=True, download=True,
                   transform=transforms.Compose([
                       transforms.Resize(opt.img_size),
                       transforms.ToTensor(),
                       transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
                   ])),
    batch_size=opt.batch_size, shuffle=True)
# Optimizers: separate Adam optimizers for generator and discriminator.
optimizer_G = torch.optim.Adam(generator.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))
optimizer_D = torch.optim.Adam(discriminator.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))
Tensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
# ----------
#  Training
# ----------
# Standard alternating GAN training: generator step, then discriminator step.
for epoch in range(opt.n_epochs):
    for i, imgs in enumerate(dataloader):
        # MNIST batches are (images, labels); keep only the images.
        if opt.data_use != 'bedroom':
            imgs = imgs[0]
        # Adversarial ground truths
        valid = Variable(Tensor(imgs.size(0), 1).fill_(1.0), requires_grad=False)
        fake = Variable(Tensor(imgs.size(0), 1).fill_(0.0), requires_grad=False)
        # Configure input
        real_imgs = Variable(imgs.type(Tensor))
        # -----------------
        #  Train Generator
        # -----------------
        optimizer_G.zero_grad()
        # Sample noise as generator input
        z = Variable(Tensor(np.random.normal(0, 1, (imgs.shape[0], opt.latent_dim))))
        # Generate a batch of images
        gen_imgs = generator(z)
        # Loss measures generator's ability to fool the discriminator
        g_loss = adversarial_loss(discriminator(gen_imgs), valid)
        g_loss.backward()
        optimizer_G.step()
        # ---------------------
        #  Train Discriminator
        # ---------------------
        optimizer_D.zero_grad()
        # Measure discriminator's ability to classify real from generated samples
        # (gen_imgs detached so no gradients flow back into the generator).
        real_loss = adversarial_loss(discriminator(real_imgs), valid)
        fake_loss = adversarial_loss(discriminator(gen_imgs.detach()), fake)
        d_loss = (real_loss + fake_loss) / 2
        d_loss.backward()
        optimizer_D.step()
        sys.stdout.flush()
        print ("\r[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f]" % (epoch, opt.n_epochs, i, len(dataloader),
                                                        d_loss.item(), g_loss.item()),
                end='')
        batches_done = epoch * len(dataloader) + i
        if batches_done % opt.sample_interval == 0:
            save_image(gen_imgs.data[:25], 'images_normal/%d.png' % batches_done, nrow=5, normalize=True)
```
| github_jupyter |
```
import pandas as pd
import numpy as np
import gc
from time import time
import math
import random
import datetime
import pkg_resources
#import seaborn as sns
import scipy.stats as stats
import gc
import re
import operator
import sys
from sklearn import metrics
from sklearn import model_selection
import torch
import torch.nn as nn
import torch.utils.data
import torch.nn.functional as F
from torch.utils.data import TensorDataset, Subset, DataLoader
from torch.optim import Optimizer
#from nltk.stem import PorterStemmer
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import roc_auc_score
#%load_ext autoreload
#%autoreload 2
#%matplotlib inline
from tqdm import tqdm, tqdm_notebook
import os
from IPython.core.interactiveshell import InteractiveShell
# Echo every expression result in a cell, not just the last one.
InteractiveShell.ast_node_interactivity = "all"
import warnings
# Show each warning category only once.
warnings.filterwarnings(action='once')
import pickle
#from apex import amp
import shutil
# All training below assumes a CUDA device is present.
device=torch.device('cuda')
def seed_everything(seed=123):
    """Seed every RNG used in this notebook (python, numpy, torch/CUDA)
    and force deterministic cuDNN kernels, for reproducible runs."""
    os.environ['PYTHONHASHSEED'] = str(seed)
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.backends.cudnn.deterministic = True
def sigmoid(x):
    """Numerically stable logistic sigmoid, 1 / (1 + exp(-x)).

    Equivalent to the naive formula but evaluated as
    exp(-logaddexp(0, -x)), so large-magnitude inputs neither overflow
    np.exp (the naive form emits a RuntimeWarning and overflows for
    x << 0) nor lose precision. Accepts scalars or numpy arrays and
    returns the same shape.
    """
    return np.exp(-np.logaddexp(0, -x))
class AdamW(Optimizer):
    """Implements AdamW algorithm.
    It has been proposed in `Fixing Weight Decay Regularization in Adam`_.
    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 1e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
    .. Fixing Weight Decay Regularization in Adam:
    https://arxiv.org/abs/1711.05101
    """

    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
                 weight_decay=0):
        defaults = dict(lr=lr, betas=betas, eps=eps,
                        weight_decay=weight_decay)
        super(AdamW, self).__init__(params, defaults)

    def step(self, closure=None):
        """Performs a single optimization step.
        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError('AdamW does not support sparse gradients, please consider SparseAdam instead')
                state = self.state[p]
                # State initialization
                if len(state) == 0:
                    state['step'] = 0
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(p.data)
                    # Exponential moving average of squared gradient values
                    state['exp_avg_sq'] = torch.zeros_like(p.data)
                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                beta1, beta2 = group['betas']
                state['step'] += 1
                # according to the paper, this penalty should come after the bias correction
                # if group['weight_decay'] != 0:
                #     grad = grad.add(group['weight_decay'], p.data)
                # Decay the first and second moment running average coefficient.
                # NOTE(review): the positional-alpha forms add_(alpha, tensor) /
                # addcmul_(value, t1, t2) / addcdiv_(value, t1, t2) are
                # deprecated in modern PyTorch (keyword alpha=/value= required).
                exp_avg.mul_(beta1).add_(1 - beta1, grad)
                exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
                denom = exp_avg_sq.sqrt().add_(group['eps'])
                # Bias-corrected step size, as in standard Adam.
                bias_correction1 = 1 - beta1 ** state['step']
                bias_correction2 = 1 - beta2 ** state['step']
                step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1
                p.data.addcdiv_(-step_size, exp_avg, denom)
                # Decoupled weight decay applied directly to the weights.
                # NOTE(review): applied as p -= wd * p, without the
                # learning-rate factor used in the AdamW paper — confirm
                # this is intentional.
                if group['weight_decay'] != 0:
                    p.data.add_(-group['weight_decay'], p.data)
        return loss
# Global training configuration.
MAX_SEQUENCE_LENGTH = 295
SEED = 42
EPOCHS = 20
Data_dir="../job_nlp/"
WORK_DIR = "../job_nlp/working/"
#num_to_load=100000 #Train size to match time limit
#valid_size= 50000 #Validation Size
TARGET = 'smishing'
# https://www.kaggle.com/matsuik/ppbert
# Make the bundled pytorch-pretrained-bert package importable.
package_dir_a = "../job_nlp/ppbert/pytorch-pretrained-bert/pytorch-pretrained-BERT"
sys.path.insert(0, package_dir_a)
from pytorch_pretrained_bert import convert_tf_checkpoint_to_pytorch
from pytorch_pretrained_bert import BertTokenizer, BertForSequenceClassification,BertAdam
# Translate model from tensorflow to pytorch (writes pytorch_model.bin
# into the working directory).
BERT_MODEL_PATH = '../job_nlp/bert-pretrained-models/uncased_L-12_H-768_A-12/uncased_L-12_H-768_A-12/'
convert_tf_checkpoint_to_pytorch.convert_tf_checkpoint_to_pytorch(
    BERT_MODEL_PATH + 'bert_model.ckpt',
    BERT_MODEL_PATH + 'bert_config.json',
    WORK_DIR + 'pytorch_model.bin')
shutil.copyfile(BERT_MODEL_PATH + 'bert_config.json', WORK_DIR + 'bert_config.json')
# This is the Bert configuration file
from pytorch_pretrained_bert import BertConfig
bert_config = BertConfig('../job_nlp/bert-pretrained-models/uncased_L-12_H-768_A-12/uncased_L-12_H-768_A-12/'+'bert_config.json')
bert_config
# Converting the lines to BERT format
# Thanks to https://www.kaggle.com/httpwwwfszyc/bert-in-keras-taming
def convert_lines(example, max_seq_length, tokenizer):
    """Convert each text into a padded BERT input-id row.

    Each row is ids("[CLS]" + tokens + "[SEP]") zero-padded so every row
    has the same length (max_seq_length, including the two special
    tokens). Prints how many texts had to be truncated, then returns a
    2-D numpy array with one row per input text.
    """
    max_seq_length -= 2  # reserve room for [CLS] and [SEP]
    rows = []
    truncated = 0
    for text in tqdm_notebook(example):
        tokens = tokenizer.tokenize(text)
        if len(tokens) > max_seq_length:
            tokens = tokens[:max_seq_length]
            truncated += 1
        ids = tokenizer.convert_tokens_to_ids(["[CLS]"] + tokens + ["[SEP]"])
        rows.append(ids + [0] * (max_seq_length - len(tokens)))
    print(truncated)
    return np.array(rows)
BERT_MODEL_PATH = '../job_nlp/bert-pretrained-models/uncased_L-12_H-768_A-12/uncased_L-12_H-768_A-12/'
tokenizer = BertTokenizer.from_pretrained(BERT_MODEL_PATH, cache_dir=None,do_lower_case=True)
%%time
train_df = pd.read_csv(os.path.join(Data_dir,"train.csv"))
test_df = pd.read_csv(os.path.join(Data_dir,"test.csv"))
# Class balance of the labels.
train_df.label.value_counts(normalize=True)
# replace NaN content of row 2400 with a placeholder string
print(train_df.iloc[2400,:])
train_df.iloc[2400,1] = "@@@"
%%time
# Concatenate title and content into a single text column.
train_df['text'] = train_df[['title', 'content']].apply(lambda x: ' '.join(x), axis = 1)
test_df['text'] = test_df[['title', 'content']].apply(lambda x: ' '.join(x), axis = 1)
train_df.head()
len(train_df.iloc[0,0])
len(train_df.iloc[0,1])
len(train_df.iloc[0,3])
# Character-length features, used to choose MAX_SEQUENCE_LENGTH below.
train_df['t_length'] = train_df['title'].apply(lambda x: len(x))
train_df['c_length'] = train_df['content'].apply(lambda x: len(x))
train_df['text_length'] = train_df['text'].apply(lambda x: len(x))
test_df['t_length'] = test_df['title'].apply(lambda x: len(x))
test_df['c_length'] = test_df['content'].apply(lambda x: len(x))
test_df['text_length'] = test_df['text'].apply(lambda x: len(x))
train_df.describe()
test_df.describe()
# Override the earlier value (295) based on the length statistics above.
MAX_SEQUENCE_LENGTH = 400
```
### Tokenizing
```
#%%time
tokenizer = BertTokenizer.from_pretrained(BERT_MODEL_PATH, cache_dir=None,do_lower_case=True)
#train_df = pd.read_csv(os.path.join(Data_dir,"train.csv"))#.sample(num_to_load+valid_size,random_state=SEED)
print('loaded %d records' % len(train_df))
# Make sure all comment_text values are strings
train_df['content'] = train_df['content'].astype(str)
# Convert the article bodies to padded BERT token-id arrays.
x_train = convert_lines(train_df["content"].fillna("DUMMY_VALUE"),MAX_SEQUENCE_LENGTH,tokenizer)
print("X_train : {}".format(len(x_train)))
#test_df = pd.read_csv(os.path.join(Data_dir,"public_test.csv"))#.sample(num_to_load+valid_size,random_state=SEED)
print('loaded %d records' % len(test_df))
test_df['content'] = test_df['content'].astype(str)
x_test = convert_lines(test_df["content"].fillna("DUMMY_VALUE"),MAX_SEQUENCE_LENGTH,tokenizer)
print("X_test : {}".format(len(x_test)))
train_df=train_df.fillna(0)
# above not working in linux ?? these x_train & x_test are obtained from windows
#x_train = np.loadtxt('../job_nlp/x_train.csv', delimiter=',')
#x_test = np.loadtxt('../job_nlp/x_test.csv', delimiter=',')
seed_everything(SEED)
# Training hyperparameters.
output_model_file = "bert_pytorch.bin"
lr=2e-5
batch_size = 8
accumulation_steps=2
n_labels = 2
criterion = nn.CrossEntropyLoss()
# Shift labels down by one (presumably 1-based in the CSV) so they are
# 0-based for CrossEntropyLoss — TODO confirm against the data.
TARGET = 'label'
train_df[TARGET] = train_df[TARGET]-1
#x_train = train_df['text']
y_train = torch.tensor(train_df[TARGET])#.long()
y_train
y_train[:5]
def to_numpy(x):
    """Convert a (possibly GPU-resident, grad-tracking) tensor to a NumPy array."""
    # Detach from the autograd graph first, then move to host memory.
    return x.detach().cpu().numpy()
test_dataset = TensorDataset(torch.tensor(x_test, dtype = torch.long)) #TensorDataset(X_valid, valid_length, torch.tensor(Y_valid))
test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)
model = BertForSequenceClassification.from_pretrained("../job_nlp/working",cache_dir=None, num_labels=5)
%%time
best_epoch_list = []
best_val_acc_list = []
start_time = time()
n_splits = 5
splits = list(StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=SEED).split(x_train, y_train))
for fold in [0, 1, 2, 3, 4]:
print("================ ༼ つ ◕_◕ ༽つ {}/{} fold training starts!".format(fold+1, n_splits))
fold_num = str(fold + 1)
trn_index, val_index = splits[fold]
X_train, X_valid = x_train[trn_index], x_train[val_index]
#train_length, valid_length = lengths[trn_index], lengths[val_index]
Y_train, Y_valid = y_train[trn_index], y_train[val_index]
train_dataset = TensorDataset(torch.tensor(X_train, dtype = torch.long), torch.tensor(Y_train, dtype=torch.long)) #TensorDataset(X_train, train_length, torch.tensor(Y_train))
valid_dataset = TensorDataset(torch.tensor(X_valid, dtype = torch.long), torch.tensor(Y_valid, dtype=torch.long)) #TensorDataset(X_valid, valid_length, torch.tensor(Y_valid))
model = BertForSequenceClassification.from_pretrained("../job_nlp/working",cache_dir=None, num_labels=5)
model.zero_grad()
model = model.to(device)
#optimizer = BertAdam(optimizer_grouped_parameters,
# lr=lr,
# warmup=0.05,
# t_total=num_train_optimization_steps)
#scheduler = StepLR(optimizer, step_size=5, gamma=0.5)
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
#train = train_dataset
num_train_optimization_steps = int(EPOCHS*len(train_dataset)/batch_size/accumulation_steps)
#optimizer = BertAdam(optimizer_grouped_parameters,
# lr=lr,
# warmup=0.05,
# t_total=np.ceil(num_train_optimization_steps))
optimizer = AdamW(model.parameters(), lr, weight_decay=0.000025)
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
valid_loader = DataLoader(valid_dataset, batch_size=batch_size, shuffle=False)
best_valid_score = 0
best_val_acc = 0
#tq = tqdm_notebook(range(EPOCHS))
#model, optimizer = amp.initialize(model, optimizer, opt_level="O1",verbosity=0)
for epoch in range(1, EPOCHS + 1):
#start_time = time.time()
train_loss = 0
train_total_correct = 0
model.train()
optimizer.zero_grad()
#tk0 = tqdm_notebook(enumerate(train_loader),total=len(train_loader),leave=False)
for i, (x_batch, y_batch) in enumerate(train_loader):
preds = model(x_batch.to(device), attention_mask = (x_batch>0).to(device), labels=None)
loss = criterion(preds, y_batch.to(device))
loss.backward()
if (i+1) % accumulation_steps == 0: # Wait for several backward steps
optimizer.step() # Now we can do an optimizer step
optimizer.zero_grad()
else:
optimizer.step()
optimizer.zero_grad()
train_loss += loss.item()/len(train_loader)
# Validation Starts
model.eval()
val_loss = 0
valid_total_correct = 0
#valid_preds = np.zeros(len(valid_dataset),5)
#valid_targets = np.zeros(len(valid_dataset),5)
with torch.no_grad():
for i, (x_batch, y_batch) in enumerate(valid_loader):
#valid_targets[i*batch_size: (i+1)*batch_size] = y_batch.numpy().copy()
preds = model(x_batch.to(device), attention_mask = (x_batch>0).to(device), labels=None)
loss = criterion(preds, y_batch.to(device))
output_prob = F.softmax(preds, dim=1)
predict_vector = np.argmax(to_numpy(output_prob), axis=1)
label_vector = to_numpy(y_batch)
#valid_preds[i*batch_size: (i+1)*batch_size] = np.argmax(preds_prob.detach().cpu().squeeze().numpy())
bool_vector = predict_vector == label_vector
val_loss += loss.item()/len(valid_loader)
valid_total_correct += bool_vector.sum()
#val_score = roc_auc_score(valid_targets, valid_preds)
elapsed = time() - start_time
val_acc = valid_total_correct / len(valid_loader.dataset)
if val_acc > best_val_acc:
best_val_acc = val_acc
best_epoch = epoch
print("val_acc has improved !! ")
best_epoch_list.append(best_epoch)
best_val_acc_list.append(best_val_acc)
torch.save(model.state_dict(), '../job_nlp/Bert_content_20e_maxseq400_fold_{}.pt'.format(fold))
#print("================ ༼ つ ◕_◕ ༽つ BEST epoch : {}, Accuracy : {} ".format(epoch, best_val_acc))
#lr = [_['lr'] for _ in optimizer.param_g] # or optimizer
print("================ ༼ つ ◕_◕ ༽つ Epoch {} - train_loss: {:.5f} val_loss: {:.5f} val_acc: {:.5f} elapsed: {:.0f}m {:.0f}s".format(epoch, train_loss, val_loss, best_val_acc, elapsed // 60, elapsed % 60))
print("============== ༼ つ ◕_◕ ༽つ BEST epoch : {}, Accuracy : {} ====================================".format(epoch, best_val_acc))
#best_epoch_list.append(best_epoch)
#best_val_acc_list.append(best_val_acc)
#---- Inference ----
#batch_size = 8
print("========================== ༼ つ ◕_◕ ༽つ Model Load {}_th FOLD =================================".format(fold))
model.load_state_dict(torch.load('Bert_content_20e_maxseq400_fold_{}.pt'.format(fold)))
model.eval()
predictions = np.zeros((len(test_loader.dataset),5))
with torch.no_grad():
for i, (x_batch, ) in enumerate(test_loader):
preds = model(x_batch.to(device), attention_mask = (x_batch>0).to(device), labels=None)
predictions[i*batch_size: (i+1)*batch_size] = to_numpy(preds)
print("predict values check : ",predictions[0])
np.savetxt("../job_nlp/bert_raw_submission/bert_content_20e_maxseq400_fold_{}.csv".format(fold), predictions, delimiter=",")
```
| github_jupyter |
# Standard Normal N(0,1)
Generate a total of 2000 i.i.d. standard normals N(0,1) using each method. Test the normality of the standard normals obtained from each method, using the Anderson-Darling test. Which data set is closer to the normal distribution? (Consult the paper by Stephens - filename 2008 Stephens.pdf on Canvas - to find the appropriate critical points for the Anderson-Darling statistic. Clearly identify those percentiles in your solution.)
```
# imports
import random
import math
import numpy
import matplotlib.pyplot as plt
from scipy.stats import anderson
from mpl_toolkits.mplot3d import axes3d
%matplotlib notebook
# project imports
import rand
import halton
import bfs
import box_muller
import beasley_springer_moro
```
### Generate a total of 2000 i.i.d. standard normals N (0, 1) using Box Muller
```
# generate 1000 2-dim vectors, then flatten to create 2000 standard normals
N = 1000
s = 2
seq = rand.rand_seq
seq = halton.halton_seq
#seq = bfs.bfs_seq
l = box_muller.box_muller_seq(s, N, seq=seq)
# print the first 20
print(l[:10])
# flatten the sequence into 1 dimension
flattened = [item for sublist in l for item in sublist]
nums = flattened
print(nums[:20])
```
### Sort the sequence
```
nums = numpy.array(nums)
nums = sorted(nums)
print(nums[:20])
```
### Compute the sample mean and standard deviation
```
nums = numpy.array(nums)
mean = numpy.mean(nums)
var = numpy.var(nums)
std = numpy.std(nums)
print('mean = {}'.format(mean))
print('variance = {}'.format(var))
print('standard deviation = {}'.format(std))
# plot the histogram
plt.hist(nums, density=True, bins=30)
plt.ylabel('Standard Normal - Box Muller');
```
### Anderson Darling Test
reference:
https://en.wikipedia.org/wiki/Anderson%E2%80%93Darling_test#Test_for_normality
reference:
2008 Stephens.pdf pg. 4, "1.3 Modificatons for a test for normality, u, and sigma^2 unknown"
```
# Anderson-Darling normality test (scipy.stats.anderson): compare the test
# statistic against the critical value at each tabulated significance level.
result = anderson(nums)
print('Statistic: %.3f' % result.statistic)
# Iterate levels and critical values together instead of indexing by position;
# the previously assigned-but-unused `p = 0` has been removed.
for sl, cv in zip(result.significance_level, result.critical_values):
    if result.statistic < cv:
        print('%.3f: %.3f, data looks normal (fail to reject H0)' % (sl, cv))
    else:
        print('%.3f: %.3f, data does not look normal (reject H0)' % (sl, cv))
```
### Generate a total of 2000 i.i.d. standard normals N (0, 1) using Beasley-Springer-Moro
```
N=2000
s=1
l = beasley_springer_moro.beasley_springer_moro_seq(s=s, N=N, seq=seq)
# print the first 20
print(l[:20])
# flatten the sequence into 1 dimension
flattened = [item for sublist in l for item in sublist]
nums = flattened
print(nums[:20])
```
### Sort the sequence
```
nums = numpy.array(nums)
nums = sorted(nums)
print(nums[:20])
```
### Compute the sample mean and standard deviation
```
nums = numpy.array(nums)
mean = numpy.mean(nums)
var = numpy.var(nums)
std = numpy.std(nums)
print('mean = {}'.format(mean))
print('variance = {}'.format(var))
print('standard deviation = {}'.format(std))
# plot the histogram
fig = plt.figure()
ax = fig.add_subplot(111)
ax.hist(nums, density=True, bins=30)
ax.set_ylabel('Standard Normal - Beasley-Springer-Moro');
# Anderson-Darling normality test (scipy.stats.anderson): compare the test
# statistic against the critical value at each tabulated significance level.
result = anderson(nums)
print('Statistic: %.3f' % result.statistic)
# Iterate levels and critical values together instead of indexing by position;
# the previously assigned-but-unused `p = 0` has been removed.
for sl, cv in zip(result.significance_level, result.critical_values):
    if result.statistic < cv:
        print('%.3f: %.3f, data looks normal (fail to reject H0)' % (sl, cv))
    else:
        print('%.3f: %.3f, data does not look normal (reject H0)' % (sl, cv))
```
| github_jupyter |
##### Copyright 2020 The TensorFlow Authors.
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# Post-training integer quantization with int16 activations
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/lite/performance/post_training_quant_16x8"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/tensorflow/blob/master/tensorflow/lite/g3doc/performance/post_training_quant_16x8.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/g3doc/performance/post_training_quant_16x8.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/tensorflow/lite/g3doc/performance/post_training_quant_16x8.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
</td>
</table>
## Overview
[TensorFlow Lite](https://www.tensorflow.org/lite/) now supports
converting activations to 16-bit integer values and weights to 8-bit integer values during model conversion from TensorFlow to TensorFlow Lite's flat buffer format. We refer to this mode as the "16x8 quantization mode". This mode can improve accuracy of the quantized model significantly, when activations are sensitive to the quantization, while still achieving almost 3-4x reduction in model size. Moreover, this fully quantized model can be consumed by integer-only hardware accelerators.
Some examples of models that benefit from this mode of the post-training quantization include:
* super-resolution,
* audio signal processing such
as noise cancelling and beamforming,
* image de-noising,
* HDR reconstruction
from a single image
In this tutorial, you train an MNIST model from scratch, check its accuracy in TensorFlow, and then convert the model into a Tensorflow Lite flatbuffer using this mode. At the end you check the accuracy of the converted model and compare it to the original float32 model. Note that this example demonstrates the usage of this mode and doesn't show benefits over other available quantization techniques in TensorFlow Lite.
## Build an MNIST model
### Setup
```
import logging
logging.getLogger("tensorflow").setLevel(logging.DEBUG)
import tensorflow as tf
from tensorflow import keras
import numpy as np
import pathlib
```
Check that the 16x8 quantization mode is available
```
tf.lite.OpsSet.EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8
```
### Train and export the model
```
# Load MNIST dataset
mnist = keras.datasets.mnist
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
# Normalize the input image so that each pixel value is between 0 to 1.
train_images = train_images / 255.0
test_images = test_images / 255.0
# Define the model architecture
model = keras.Sequential([
keras.layers.InputLayer(input_shape=(28, 28)),
keras.layers.Reshape(target_shape=(28, 28, 1)),
keras.layers.Conv2D(filters=12, kernel_size=(3, 3), activation=tf.nn.relu),
keras.layers.MaxPooling2D(pool_size=(2, 2)),
keras.layers.Flatten(),
keras.layers.Dense(10)
])
# Train the digit classification model
model.compile(optimizer='adam',
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
model.fit(
train_images,
train_labels,
epochs=1,
validation_data=(test_images, test_labels)
)
```
For the example, you trained the model for just a single epoch, so it only trains to ~96% accuracy.
### Convert to a TensorFlow Lite model
Using the Python [TFLiteConverter](https://www.tensorflow.org/lite/convert/python_api), you can now convert the trained model into a TensorFlow Lite model.
Now, convert the model using `TFliteConverter` into default float32 format:
```
converter = tf.lite.TFLiteConverter.from_keras_model(model)
tflite_model = converter.convert()
```
Write it out to a `.tflite` file:
```
tflite_models_dir = pathlib.Path("/tmp/mnist_tflite_models/")
tflite_models_dir.mkdir(exist_ok=True, parents=True)
tflite_model_file = tflite_models_dir/"mnist_model.tflite"
tflite_model_file.write_bytes(tflite_model)
```
To instead quantize the model to 16x8 quantization mode, first set the `optimizations` flag to use default optimizations. Then specify that 16x8 quantization mode is the required supported operation in the target specification:
```
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.target_spec.supported_ops = [tf.lite.OpsSet.EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8]
```
As in the case of int8 post-training quantization, it is possible to produce a fully integer quantized model by setting converter options `inference_input(output)_type` to tf.int16.
Set the calibration data:
```
mnist_train, _ = tf.keras.datasets.mnist.load_data()
images = tf.cast(mnist_train[0], tf.float32) / 255.0
mnist_ds = tf.data.Dataset.from_tensor_slices((images)).batch(1)
def representative_data_gen():
    """Yield calibration batches for post-training quantization.

    The TFLite converter runs each yielded sample through the float model to
    record activation ranges; 100 single-image batches from `mnist_ds` are
    used here as the representative dataset.
    """
    for input_value in mnist_ds.take(100):
        # Model has only one input so each data point has one element.
        yield [input_value]
converter.representative_dataset = representative_data_gen
```
Finally, convert the model as usual. Note, by default the converted model will still use float input and outputs for invocation convenience.
```
tflite_16x8_model = converter.convert()
tflite_model_16x8_file = tflite_models_dir/"mnist_model_quant_16x8.tflite"
tflite_model_16x8_file.write_bytes(tflite_16x8_model)
```
Note how the resulting file is approximately `1/3` the size.
```
!ls -lh {tflite_models_dir}
```
## Run the TensorFlow Lite models
Run the TensorFlow Lite model using the Python TensorFlow Lite Interpreter.
### Load the model into the interpreters
```
interpreter = tf.lite.Interpreter(model_path=str(tflite_model_file))
interpreter.allocate_tensors()
interpreter_16x8 = tf.lite.Interpreter(model_path=str(tflite_model_16x8_file))
interpreter_16x8.allocate_tensors()
```
### Test the models on one image
```
test_image = np.expand_dims(test_images[0], axis=0).astype(np.float32)
input_index = interpreter.get_input_details()[0]["index"]
output_index = interpreter.get_output_details()[0]["index"]
interpreter.set_tensor(input_index, test_image)
interpreter.invoke()
predictions = interpreter.get_tensor(output_index)
import matplotlib.pylab as plt
plt.imshow(test_images[0])
template = "True:{true}, predicted:{predict}"
_ = plt.title(template.format(true= str(test_labels[0]),
predict=str(np.argmax(predictions[0]))))
plt.grid(False)
test_image = np.expand_dims(test_images[0], axis=0).astype(np.float32)
input_index = interpreter_16x8.get_input_details()[0]["index"]
output_index = interpreter_16x8.get_output_details()[0]["index"]
interpreter_16x8.set_tensor(input_index, test_image)
interpreter_16x8.invoke()
predictions = interpreter_16x8.get_tensor(output_index)
plt.imshow(test_images[0])
template = "True:{true}, predicted:{predict}"
_ = plt.title(template.format(true= str(test_labels[0]),
predict=str(np.argmax(predictions[0]))))
plt.grid(False)
```
### Evaluate the models
```
# A helper function to evaluate the TF Lite model using "test" dataset.
def evaluate_model(interpreter):
    """Return the classification accuracy of a TF Lite interpreter evaluated
    on the global "test" dataset (`test_images` / `test_labels`)."""
    in_idx = interpreter.get_input_details()[0]["index"]
    out_idx = interpreter.get_output_details()[0]["index"]
    # Predict a digit for every image in the test set.
    predicted = []
    for img in test_images:
        # Pre-processing: add the batch dimension and convert to float32 to
        # match the model's input data format.
        batch = np.expand_dims(img, axis=0).astype(np.float32)
        interpreter.set_tensor(in_idx, batch)
        # Run inference.
        interpreter.invoke()
        # Post-processing: drop the batch dimension and take the digit with
        # the highest score.
        logits = interpreter.tensor(out_idx)()[0]
        predicted.append(np.argmax(logits))
    # Accuracy = fraction of predictions matching the ground-truth labels.
    hits = sum(1 for guess, truth in zip(predicted, test_labels) if guess == truth)
    return hits * 1.0 / len(predicted)
print(evaluate_model(interpreter))
```
Repeat the evaluation on the 16x8 quantized model:
```
# NOTE: This quantization mode is an experimental post-training mode,
# it does not have any optimized kernels implementations or
# specialized machine learning hardware accelerators. Therefore,
# it could be slower than the float interpreter.
print(evaluate_model(interpreter_16x8))
```
In this example, you have quantized a model to 16x8 with no difference in the accuracy, but with the 3x reduced size.
| github_jupyter |
# Diagramas de Cortante e Momento em Vigas
Exemplo disponível em https://youtu.be/MNW1-rB46Ig
<img src="viga1.jpg">
```
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager as font_manager
from matplotlib import rc
# Set the font dictionaries (for plot title and axis titles)
rc('font', **{'family': 'serif', 'serif': ['Computer Modern'],'size': '18'})
rc('text', usetex=True)
q = 10
L = 1
N=10
# Reações de Apoio
VA=3*q*L/4
VB=q*L/4
print("Reação de Apoio em A (kN) =",VA)
print("Reação de Apoio em B (kN) =",VB)
```
Cálculo da Cortante pela integração do carregamento usando a Regra do Trapézio
```
def Cortante(q, x, V0):
    """Shear force by trapezoidal integration of the distributed load.

    Parameters:
        q  : array of load values sampled at the (equally spaced) points x
        V0 : shear force at x[0]

    Returns a numpy array with the shear force at every point of x.
    """
    V = np.zeros(len(x))  # initialize
    # Grid spacing: x[1] - x[0]. Using x[1] alone is only correct when the
    # span starts at zero; it breaks for a second span such as L..2L.
    dx = x[1] - x[0]
    V[0] = V0  # initial shear value
    # Iterate over the whole array (len(x)), not the global N, so the
    # function works for grids of any size.
    for i in range(1, len(x)):
        # Trapezoid rule: V_i = V_{i-1} + dx * (q_{i-1} + q_i) / 2
        V[i] = V[i-1] + dx * (q[i-1] + q[i]) / 2
    return np.array(V)
```
Cálculo do Momento Fletor pela integração do carregamento usando a Regra do Trapézio
```
def Momento(V, x, M0):
    """Bending moment by trapezoidal integration of the shear force.

    Parameters:
        V  : array of shear-force values sampled at the (equally spaced) points x
        M0 : bending moment at x[0]

    Returns a numpy array with the bending moment at every point of x.
    """
    M = np.zeros(len(x))  # initialize
    # Grid spacing: x[1] - x[0]. Using x[1] alone is only correct when the
    # span starts at zero; it breaks for a second span such as L..2L.
    dx = x[1] - x[0]
    M[0] = M0  # initial moment value
    # Iterate over the whole array (len(x)), not the global N, so the
    # function works for grids of any size.
    for i in range(1, len(x)):
        # Trapezoid rule. M0 must NOT be re-added here: it is already
        # accounted for in M[0]; adding it every step double-counts the
        # initial moment whenever M0 != 0.
        M[i] = M[i-1] + dx * (V[i-1] + V[i]) / 2
    return np.array(M)
carregamento1 = q*np.ones(N)
carregamento2 =0*np.ones(N)
x1=np.linspace(0,L,N)
x2=np.linspace(L,2*L,N)
# Carregamento
plt.figure(figsize=(15,5))
plt.plot(x1,carregamento1,color='r',linewidth=2)
plt.fill_between(x1,carregamento1, facecolor='b', alpha=0.5)
plt.plot(x2,carregamento2,color='r',linewidth=2)
plt.fill_between(x2,carregamento2, facecolor='b', alpha=0.5)
plt.xlabel("Comprimento (m)")
plt.ylabel("Carregamento (kN/m)")
plt.grid(which='major', axis='both')
plt.title("Carregamento")
plt.show()
# Trecho I - 0<x<L
V1=-q*x1+VA # Cortante Teórica
M1=VA*x1-q*(x1*x1)/2 # Momento Teórico
# por integração numérica
V1int = Cortante(-carregamento1,x1,VA)
M1int = Momento(V1int,x1,0)
# Trecho II - L<x<2L
V2=VA-q*np.ones(N)*L # Cortante Teórico
M2=VA*x2-q*L*(x2-L/2) # Momento Teórico
# por integração numérica
V2int=Cortante(-carregamento2,x2,V1int[N-1])
M2int=Momento(V2int,x2,M1int[N-1])
# Cortante
plt.figure(figsize=(15,5))
plt.plot(x1,V1,color='r',linewidth=2)
plt.fill_between(x1, V1, facecolor='b', alpha=0.5)
plt.plot(x2,V2,color='r',linewidth=2,label="Método das Seções")
plt.fill_between(x2, V2, facecolor='b', alpha=0.5)
plt.plot(x1,V1int,color='k',linestyle = 'dotted', linewidth=5,label="Integração")
plt.plot(x2,V2int,color='k',linestyle = 'dotted', linewidth=5)
plt.legend(loc ="upper right")
plt.xlabel("Comprimento (m)")
plt.ylabel("Cortante (kN)")
plt.grid(which='major', axis='both')
plt.title("Diagrama de Cortante")
plt.show()
# Momento Fletor
plt.figure(figsize=(15,5))
plt.plot(x1,M1,color='r',linewidth=2)
plt.fill_between(x1, M1, facecolor='b', alpha=0.5)
plt.plot(x2,M2,color='r',linewidth=2,label="Método das Seções")
plt.fill_between(x2, M2, facecolor='b', alpha=0.5)
plt.plot(x1,M1int,color='k',linestyle = 'dotted', linewidth=5,label="Integração")
plt.plot(x2,M2int,color='k',linestyle = 'dotted', linewidth=5)
plt.legend(loc ="upper right")
plt.xlabel("Comprimento (m)")
plt.ylabel("Momento (kN.m)")
plt.grid(which='major', axis='both')
plt.title("Diagrama de Momento Fletor")
plt.show()
```
| github_jupyter |
<img src="../images/Boeing_full_logo.png" alt="Boeing" style="width: 400px;"/>
<br/>
<img src="../images/aeropython_logo.png" alt="AeroPython" style="width: 200px;"/>
# NumPy: Entrada/Salida
Con E/S (I/O en inglés) entendemos leer y escribir datos de archivos. Es algo que necesitaremos hacer con relativa frecuencia, y en NumPy es muy sencillo de hacer. Para el caso de la **lectura** se usa la función `np.loadtxt`.
## Ejemplo con datos de temperaturas
Para practicar, vamos a leer el archivo `temperaturas.csv` que contiene datos diarios de temperaturas en Nueva York entre el 1 de enero de 2013 y el 1 de enero de 2014, obtenidos gratuitamente de http://ncdc.noaa.gov/. Como los hemos descargado en formato CSV habrá que tener algunas precauciones a la hora de leer el archivo.
```
!head ../data/temperaturas.csv
import numpy as np
datos = np.loadtxt("../data/temperaturas.csv",
skiprows=1, # Saltamos una línea
usecols=(1, 2, 3), # Solo columnas 2, 3 y 4
delimiter=',') # Separados por comas
datos[:9]
```
La primera columna es un entero con formato "AAAAMMDD" que vamos a ignorar. Las temperaturas están medidas en décimas de grado Celsius, así que hay que pasarlas a grados Celsius. Vamos a calcular también la temperatura media.
```
Tmax = datos[:, 1] / 10
Tmin = datos[:, 2] / 10
Tavg = (Tmax + Tmin) / 2
```
Como vamos a ignorar la columna de las fechas tenemos que crear un dominio para el eje x. Simplemente construiremos un array de enteros desde 0 hasta 365.
```
x = np.arange(366)
```
Supongamos que ahora queremos guardar nuestra tabla de datos en un archivo txt, para poder cargarlo ya modificado más adelante. Una manera fácil de hacerlo sería con otra función de NumPy: `np.savetxt`. Lo usaremos con los argumentos opcionales `fmt='%.5f', newline = '\r\n'` para obtener un fichero *bonito* que podamos entender de un vistazo.
```
matriz_datos = np.zeros([366, 4])
matriz_datos[:, 0] = x
matriz_datos[:, 1] = Tmax
matriz_datos[:, 2] = Tmin
matriz_datos[:, 3] = Tavg
print(matriz_datos[:10])
# np.savetxt('archivo_datos.txt', matriz_datos, fmt='%.5f', newline = '\r\n')
```
##### Ejercicio
Y ahora representamos la evolución de la temperatura media (por ejemplo de color negro), indicando "Daily summaries" en el título, "Days" en el eje x y "Temperature (C)" en el eje y, usando la interfaz orientada a objetos de matplotlib (función `plt.subplots`). Podemos crear una zona rellena entre la máxima y la mínima con la función `fill_between(x, max, min)` (por ejemplo de color #4f88b1). Si los límites del eje x no quedan como queremos podemos usar la función `set_xlim(xmin, xmax)`.
```
# aeropython: preserve
%matplotlib inline
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
ax.plot(x, Tavg, 'k')
ax.set_xlim(0, 366)
ax.fill_between(x, Tmin, Tmax, facecolor='#4f88b1', edgecolor='none')
ax.set_title("Resúmenes diarios")
ax.set_xlabel("Días")
ax.set_ylabel("Temperatura (°C)")
```
---
_Ya hemos aprendido a efectuar algunas operaciones útiles con NumPy e incluso hemos hecho nuestro primer ejercicio de lectura de datos. Estamos en condiciones de empezar a escribir programas más interesantes, pero aún queda lo mejor._
Si quieres saber más sobre lectura y escritura de ficheros en Python, puedes consultar al documentación oficial:
https://docs.python.org/3.5/tutorial/inputoutput.html#reading-and-writing-files
---
<br/>
#### <h4 align="right">¡Síguenos en Twitter!
<br/>
###### <a href="https://twitter.com/AeroPython" class="twitter-follow-button" data-show-count="false">Follow @AeroPython</a> <script>!function(d,s,id){var js,fjs=d.getElementsByTagName(s)[0],p=/^http:/.test(d.location)?'http':'https';if(!d.getElementById(id)){js=d.createElement(s);js.id=id;js.src=p+'://platform.twitter.com/widgets.js';fjs.parentNode.insertBefore(js,fjs);}}(document, 'script', 'twitter-wjs');</script>
<br/>
###### Este notebook ha sido realizado por: Juan Luis Cano, Mabel Delgado y Álex Sáez
<br/>
##### <a rel="license" href="http://creativecommons.org/licenses/by/4.0/deed.es"><img alt="Licencia Creative Commons" style="border-width:0" src="http://i.creativecommons.org/l/by/4.0/88x31.png" /></a><br /><span xmlns:dct="http://purl.org/dc/terms/" property="dct:title">Curso AeroPython</span> por <span xmlns:cc="http://creativecommons.org/ns#" property="cc:attributionName">Juan Luis Cano Rodriguez y Alejandro Sáez Mollejo</span> se distribuye bajo una <a rel="license" href="http://creativecommons.org/licenses/by/4.0/deed.es">Licencia Creative Commons Atribución 4.0 Internacional</a>.
---
_Las siguientes celdas contienen configuración del Notebook_
_Para visualizar y utlizar los enlaces a Twitter el notebook debe ejecutarse como [seguro](http://ipython.org/ipython-doc/dev/notebook/security.html)_
File > Trusted Notebook
```
# Esta celda da el estilo al notebook
from IPython.core.display import HTML
css_file = '../styles/aeropython.css'
HTML(open(css_file, "r").read())
```
| github_jupyter |
# EventVestor: Shareholder Meetings
In this notebook, we'll take a look at EventVestor's *Shareholder Meetings* dataset, available on the [Quantopian Store](https://www.quantopian.com/store). This dataset spans January 01, 2007 through the current day, and documents companies' annual and special shareholder meetings calendars.
### Blaze
Before we dig into the data, we want to tell you about how you generally access Quantopian Store data sets. These datasets are available through an API service known as [Blaze](http://blaze.pydata.org). Blaze provides the Quantopian user with a convenient interface to access very large datasets.
Blaze provides an important function for accessing these datasets. Some of these sets are many millions of records. Bringing that data directly into Quantopian Research directly just is not viable. So Blaze allows us to provide a simple querying interface and shift the burden over to the server side.
It is common to use Blaze to reduce your dataset in size, convert it over to Pandas and then to use Pandas for further computation, manipulation and visualization.
Helpful links:
* [Query building for Blaze](http://blaze.pydata.org/en/latest/queries.html)
* [Pandas-to-Blaze dictionary](http://blaze.pydata.org/en/latest/rosetta-pandas.html)
* [SQL-to-Blaze dictionary](http://blaze.pydata.org/en/latest/rosetta-sql.html).
Once you've limited the size of your Blaze object, you can convert it to a Pandas DataFrames using:
> `from odo import odo`
> `odo(expr, pandas.DataFrame)`
### Free samples and limits
One other key caveat: we limit the number of results returned from any given expression to 10,000 to protect against runaway memory usage. To be clear, you have access to all the data server side. We are limiting the size of the responses back from Blaze.
There is a *free* version of this dataset as well as a paid one. The free one includes about three years of historical data, though not up to the current day.
With preamble in place, let's get started:
```
# import the dataset
from quantopian.interactive.data.eventvestor import shareholder_meetings
# or if you want to import the free dataset, use:
# from quantopian.data.eventvestor import shareholder_meetings_free
# import data operations
from odo import odo
# import other libraries we will use
import pandas as pd
# Let's use blaze to understand the data a bit using Blaze dshape()
shareholder_meetings.dshape
# And how many rows are there?
# N.B. we're using a Blaze function to do this, not len()
shareholder_meetings.count()
# Let's see what the data looks like. We'll grab the first three rows.
shareholder_meetings[:3]
```
Let's go over the columns:
- **event_id**: the unique identifier for this event.
- **asof_date**: EventVestor's timestamp of event capture.
- **symbol**: stock ticker symbol of the affected company.
- **event_headline**: a brief description of the event
- **meeting_type**: types include *annual meeting, special meeting, proxy contest*.
- **record_date**: record date to be eligible for proxy vote
- **meeting_date**: shareholder meeting date
- **timestamp**: this is our timestamp on when we registered the data.
- **sid**: the equity's unique identifier. Use this instead of the symbol.
We've done much of the data processing for you. Fields like `timestamp` and `sid` are standardized across all our Store Datasets, so the datasets are easy to combine. We have standardized the `sid` across all our equity databases.
We can select columns and rows with ease. Below, we'll fetch Tesla's 2013 and 2014 meetings.
```
# get tesla's sid first
tesla_sid = symbols('TSLA').sid
meetings = shareholder_meetings[('2012-12-31' < shareholder_meetings['asof_date']) &
(shareholder_meetings['asof_date'] <'2015-01-01') &
(shareholder_meetings.sid == tesla_sid)]
# When displaying a Blaze Data Object, the printout is automatically truncated to ten rows.
meetings.sort('asof_date')
```
Now suppose we want a DataFrame of the Blaze Data Object above, but only want the `record_date, meeting_date`, and `sid`.
```
df = odo(meetings, pd.DataFrame)
df = df[['record_date','meeting_date','sid']]
df
```
| github_jupyter |
# Stack Overflow Survey 2019 Analysis
## Business Understanding
I am interested in finding the answers to the following questions related to career satisfaction.
- Are developers satisfied with their career?
- Who are the most satisfied developers?
- Is there is a significant relationship between compensation and career satisfaction?
```
#Imports
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
%matplotlib inline
```
## Data Understanding
```
data = pd.read_csv('.\data\survey_results_public.csv')
data.shape
data.head()
data.columns
```
## Q1: Are developers satisfied with their career?
### Prepare Data
Our focus is on the career satisfaction of full-time developers (people who write code), so we'll filter the data accordingly and remove all NA values in the career satisfaction column in order to get more accurate results.
```
# Keep only employed, professional developers with a recorded CareerSat
# answer; rows with missing CareerSat would distort the counts below.
careerSat_data = data[(~ data['CareerSat'].isna())
                      & (data['Employment'] == 'Employed full-time')
                      & (data['MainBranch'] == 'I am a developer by profession')]
careerSat_data.shape
```
### Model Data
```
# Bar chart of the CareerSat answer distribution, ordered from most to
# least satisfied.
fig, ax = plt.subplots(1,1,figsize =(15,6))
ax.title.set_text('Career Satisfaction')
sns.countplot(careerSat_data['CareerSat'] ,order=['Very satisfied',
                                                  'Slightly satisfied',
                                                  'Neither satisfied nor dissatisfied',
                                                  'Slightly dissatisfied',
                                                  'Very dissatisfied'], ax = ax)
# Percentage of respondents who report being at least slightly satisfied.
satisfaied_devs_perc = careerSat_data[careerSat_data['CareerSat'].isin(
    ['Very satisfied', 'Slightly satisfied'])].shape[0] / careerSat_data.shape[0] * 100
satisfaied_devs_perc
```
### Result
From the above plot we can tell that most developers are satisfied with their career path. To validate the result, we calculated the percentage of satisfied developers over the whole sample; thus we can say that most developers are satisfied with their career.
## Q2: Who are the most satisfied developers? (job titles)
### Prepare Data
We will encode the values into 0/1 encoding in order to be able to calculate the satisfaction mean. For this reason we need to deal with NA values first. The DevType column can contain multiple combinations of dev types, which means that if we impute it, more than one one-hot encoded column could be affected. Thus, we would rather drop those rows and then convert the column into multiple one-hot encoded columns.
```
# One-hot encode DevType: a single answer may list several types
# separated by ';', so rows with missing DevType are dropped first.
jobs_data = careerSat_data[~ careerSat_data['DevType'].isna()].copy()
DevTyps = ['Academic researcher', 'Data or business analyst',
           'Data scientist or machine learning specialist','Database administrator',
           'Designer', 'Developer, back-end', 'Developer, desktop or enterprise applications',
           'Developer, embedded applications or devices', 'Developer, front-end',
           'Developer, full-stack', 'Developer, game or graphics', 'Developer, mobile',
           'Developer, QA or test','DevOps specialist','Educator', 'Engineer, data',
           'Engineer, site reliability','Engineering manager', 'Marketing or sales professional',
           'Product manager', 'Scientist', 'Senior executive/VP', 'Student', 'System administrator']
# Start every indicator column at 0, then flag the types each row lists.
for devType in DevTyps:
    jobs_data[devType]=0
for i, row in jobs_data.iterrows():
    for value in row['DevType'].split(';'):
        if value != 'Other':
            jobs_data.loc[i, value] = 1
# Map the Likert answers to a -2..2 numeric scale so they can be averaged.
satisfaction_dict = {'Very satisfied':2,
                     'Slightly satisfied':1,
                     'Neither satisfied nor dissatisfied':0,
                     'Slightly dissatisfied':-1,
                     'Very dissatisfied':-2}
jobs_data['CareerSat']= jobs_data['CareerSat'].apply(lambda x : satisfaction_dict[x])
```
### Model Data
```
def get_careerSat_mean(df):
    """
    Return the average career-satisfaction score of the received dataset.

    Args:
        df: pandas dataframe that contains a CareerSat column.

    Returns:
        Career satisfaction mean.
    """
    return df["CareerSat"].mean()
# Mean satisfaction score per developer type (rows flagged 1 for that type).
# NOTE(review): pd.Series() without an explicit dtype warns on newer pandas.
sat_list = pd.Series()
for devType in DevTyps:
    sat_list[devType] = get_careerSat_mean(jobs_data[jobs_data[devType] == 1])
sat_list = sat_list.sort_values(ascending = False )
plt.subplots(1,1,figsize =(15,3))
sat_list.plot.bar()
#Top 3 most satisfied developer types
sat_list.sort_values(ascending=False)[:3]
```
### Result
By calculating the satisfaction mean, we find that most of the people working at executive and managerial levels are satisfied with their career.
### Q3: Is compensation the reason for career satisfaction?
### Prepare Data
First we'll unify compensation frequency. And since the number of NA values is small we'll drop them to make sure we don't lose the integrity of the data.
```
# Drop rows with missing compensation info, then normalize all
# compensation figures to a monthly amount.
jobs_data = jobs_data[(~jobs_data['CompFreq'].isna()) & (~jobs_data['CompTotal'].isna())]
for i, row in jobs_data.iterrows():
    if row['CompFreq'] == "Yearly":
        jobs_data.loc[i, 'MonthlyCompTotal'] = row['CompTotal']/12
    elif row['CompFreq'] == "Weekly":
        jobs_data.loc[i, 'MonthlyCompTotal'] = row['CompTotal']* 4
    else :
        # presumably CompFreq == "Monthly", already a monthly figure — TODO confirm
        jobs_data.loc[i, 'MonthlyCompTotal'] = row['CompTotal']
```
### Model Data
Null Hypothesis: There is no relationship between career satisfaction and compensation.
Alternative Hypothesis: There is a high correlation between career satisfaction and compensation.
In order to test the hypothesis, we will extract two subsets of the data based on monthly compensation: one for developers with low compensation (below the 25th percentile) and one for developers with high compensation (above the 75th percentile).
```
# Quartile cut points of monthly compensation.
q1 = jobs_data['MonthlyCompTotal'].quantile(0.25)
q3 = jobs_data['MonthlyCompTotal'].quantile(0.75)
# Bottom-quartile vs top-quartile earners.
low_comp = jobs_data[jobs_data['MonthlyCompTotal']< q1]
high_comp = jobs_data[jobs_data['MonthlyCompTotal'] > q3]
# Calculate the observed difference in average career satisfaction between the two subsets
s1_mean = get_careerSat_mean(low_comp)
s2_mean = get_careerSat_mean(high_comp)
obs_diff = s2_mean - s1_mean
obs_diff
# Bootstrapping (simulating the null distribution): differences between
# two random samples drawn from the whole population.
rand_diffs = []
size = 5000
for i in range(10000):
    sample1 = jobs_data.sample(size)
    sample2 = jobs_data.sample(size)
    sample1_mean = get_careerSat_mean(sample1)
    sample2_mean = get_careerSat_mean(sample2)
    rand_diffs.append(sample1_mean - sample2_mean)
rand_diffs = np.array(rand_diffs)
rand_diffs.mean()
# Plot the null distribution with the observed difference marked in red
plt.hist(rand_diffs, bins=50)
plt.axvline(obs_diff, c='red')
plt.title('Null distibution with the observed mean')
# p-value: share of null differences at least as large as the observed one
(rand_diffs > obs_diff).mean()
```
### Result
Since the observed difference was significantly larger than random and since the calculated p-value is smaller than 0.05, we can say that compensation is significantly related to career satisfaction.
| github_jupyter |
```
import mxnet as mx
import numpy as np
import random
import bisect
# set up logging
import logging
reload(logging)
logging.basicConfig(format='%(asctime)s %(levelname)s:%(message)s', level=logging.DEBUG, datefmt='%I:%M:%S')
```
# A Glance of LSTM structure and embedding layer
We will build an LSTM network that learns from characters only. At each time step, the input is a single character. We will see that this LSTM is able to learn words and grammar from sequences of characters.
The following figure is showing an unrolled LSTM network, and how we generate embedding of a char. The one-hot to embedding operation is a special case of fully connected network.
<img src="http://data.dmlc.ml/mxnet/data/char-rnn_1.png">
<img src="http://data.dmlc.ml/mxnet/data/char-rnn_2.png">
```
from lstm import lstm_unroll, lstm_inference_symbol
from bucket_io import BucketSentenceIter
from rnn_model import LSTMInferenceModel
# Read from doc
def read_content(path):
    """Read the whole text file at `path` and return it as one string."""
    with open(path) as source:
        return source.read()
# Build a vocabulary of what char we have in the content
def build_vocab(path):
    """Map each distinct character in the file at `path` to a numeric id.

    Ids are assigned in order of first appearance, starting at 1;
    id 0 is reserved for zero-padding.
    """
    the_vocab = {}
    next_id = 1  # 0 is left for zero-padding
    for ch in read_content(path):
        if not ch:
            continue
        if ch not in the_vocab:
            the_vocab[ch] = next_id
            next_id += 1
    return the_vocab
# We will assign each char with a special numerical id
def text2id(sentence, the_vocab):
    """Map each character of `sentence` to its vocabulary id."""
    return [the_vocab[ch] for ch in sentence if len(ch) > 0]
# Evaluation
def Perplexity(label, pred):
    """Perplexity of `pred` (rows of class probabilities) against `label` ids.

    Probabilities are floored at 1e-10 to avoid log(0).
    """
    flat_labels = label.T.reshape((-1,))
    total_loss = sum(
        -np.log(max(1e-10, pred[row][int(flat_labels[row])]))
        for row in range(pred.shape[0])
    )
    return np.exp(total_loss / label.size)
```
# Get Data
```
import os
data_url = "http://data.dmlc.ml/mxnet/data/lab_data.zip"
os.system("wget %s" % data_url)
os.system("unzip -o lab_data.zip")
```
Sample training data:
```
all to Renewal Keynote Address Call to Renewal Pt 1Call to Renewal Part 2 TOPIC: Our Past, Our Future & Vision for America June
28, 2006 Call to Renewal' Keynote Address Complete Text Good morning. I appreciate the opportunity to speak here at the Call to R
enewal's Building a Covenant for a New America conference. I've had the opportunity to take a look at your Covenant for a New Ame
rica. It is filled with outstanding policies and prescriptions for much of what ails this country. So I'd like to congratulate yo
u all on the thoughtful presentations you've given so far about poverty and justice in America, and for putting fire under the fe
et of the political leadership here in Washington.But today I'd like to talk about the connection between religion and politics a
nd perhaps offer some thoughts about how we can sort through some of the often bitter arguments that we've been seeing over the l
ast several years.I do so because, as you all know, we can affirm the importance of poverty in the Bible; and we can raise up and
pass out this Covenant for a New America. We can talk to the press, and we can discuss the religious call to address poverty and
environmental stewardship all we want, but it won't have an impact unless we tackle head-on the mutual suspicion that sometimes
```
# LSTM Hyperparameters
```
# The batch size for training
batch_size = 32
# We can support various length input
# For this problem, we cut each input sentence to length of 129
# So we only need fix length bucket
buckets = [129]
# hidden unit in LSTM cell
num_hidden = 512
# embedding dimension, which is, map a char to a 256 dim vector
num_embed = 256
# number of lstm layer
num_lstm_layer = 3
# we will show a quick demo in 2 epoch
# and we will see result by training 75 epoch
num_epoch = 2
# learning rate
learning_rate = 0.01
# we will use pure sgd without momentum
momentum = 0.0
# we can select multi-gpu for training
# for this demo we only use one
devs = [mx.context.gpu(i) for i in range(1)]
# build char vocabulary from the input text
vocab = build_vocab("./obama.txt")
# generate symbol for a length
def sym_gen(seq_len):
    """Build an unrolled LSTM symbol for sequences of length `seq_len`.

    Uses the module-level hyperparameters (num_lstm_layer, num_hidden,
    num_embed) and `vocab`; the +1 accounts for the reserved padding id 0.
    """
    return lstm_unroll(num_lstm_layer, seq_len, len(vocab) + 1,
                       num_hidden=num_hidden, num_embed=num_embed,
                       num_label=len(vocab) + 1, dropout=0.2)
# initialize states for LSTM: one (batch, hidden) cell state and one
# hidden state per layer; these name/shape pairs are fed to the iterator
init_c = [('l%d_init_c'%l, (batch_size, num_hidden)) for l in range(num_lstm_layer)]
init_h = [('l%d_init_h'%l, (batch_size, num_hidden)) for l in range(num_lstm_layer)]
init_states = init_c + init_h
# we can build an iterator for text
# ('seperate_char' is the bucket iterator's own spelling of the kwarg)
data_train = BucketSentenceIter("./obama.txt", vocab, buckets, batch_size,
                                init_states, seperate_char='\n',
                                text2id=text2id, read_content=read_content)
# the network symbol, unrolled to the single bucket length (129)
symbol = sym_gen(buckets[0])
```
# Train model
```
# Train a LSTM network as simple as feedforward network
model = mx.model.FeedForward(ctx=devs,
symbol=symbol,
num_epoch=num_epoch,
learning_rate=learning_rate,
momentum=momentum,
wd=0.0001,
initializer=mx.init.Xavier(factor_type="in", magnitude=2.34))
# Fit it
model.fit(X=data_train,
eval_metric = mx.metric.np(Perplexity),
batch_end_callback=mx.callback.Speedometer(batch_size, 50),
epoch_end_callback=mx.callback.do_checkpoint("obama"))
```
# Inference from model
```
# helper structure for prediction
def MakeRevertVocab(vocab):
    """Invert a char->id vocabulary into an id->char lookup table."""
    return {idx: ch for ch, idx in vocab.items()}
# make input from char
def MakeInput(char, vocab, arr):
    """Write the vocabulary id of `char` into the one-element array `arr` in place."""
    token = np.zeros((1,))
    token[0] = vocab[char]
    arr[:] = token
# helper function for random sample
def _cdf(weights):
total = sum(weights)
result = []
cumsum = 0
for w in weights:
cumsum += w
result.append(cumsum / total)
return result
def _choice(population, weights):
assert len(population) == len(weights)
cdf_vals = _cdf(weights)
x = random.random()
idx = bisect.bisect(cdf_vals, x)
return population[idx]
# we can use random output or fixed output by choosing largest probability
def MakeOutput(prob, vocab, sample=False, temperature=1.):
    """Pick the next character from one row of class probabilities.

    Args:
        prob: (1, vocab_size) array of class probabilities.
        vocab: id -> char lookup (the reverted vocabulary).
        sample: False takes the argmax; True draws from the
            temperature-scaled distribution.
        temperature: softening factor for sampling (only used when sampling).

    Returns:
        The chosen character, or '' for ids missing from `vocab`
        (e.g. the reserved padding id 0).
    """
    if not sample:
        idx = np.argmax(prob, axis=1)[0]
        # Narrowed from a bare `except`: only a missing vocab id should map
        # to the empty string; any other error ought to surface.
        try:
            return vocab[idx]
        except KeyError:
            return ''
    # Sampling path: index 0 maps to '' (padding), ids 1..len(vocab) to chars.
    fix_dict = [""] + [vocab[i] for i in range(1, len(vocab) + 1)]
    # Clip before log to keep the rescaling numerically stable.
    scale_prob = np.clip(prob, 1e-6, 1 - 1e-6)
    rescale = np.exp(np.log(scale_prob) / temperature)
    rescale[:] /= rescale.sum()
    return _choice(fix_dict, rescale[0, :])
# load from check-point
_, arg_params, __ = mx.model.load_checkpoint("obama", 75)
# build an inference model
model = LSTMInferenceModel(num_lstm_layer, len(vocab) + 1,
num_hidden=num_hidden, num_embed=num_embed,
num_label=len(vocab) + 1, arg_params=arg_params, ctx=mx.gpu(), dropout=0.2)
# generate a sequence of 1200 chars
seq_length = 1200
input_ndarray = mx.nd.zeros((1,))
revert_vocab = MakeRevertVocab(vocab)
# Feel free to change the starter sentence
output ='The joke'
random_sample = True
new_sentence = True
ignore_length = len(output)
for i in range(seq_length):
if i <= ignore_length - 1:
MakeInput(output[i], vocab, input_ndarray)
else:
MakeInput(output[-1], vocab, input_ndarray)
prob = model.forward(input_ndarray, new_sentence)
new_sentence = False
next_char = MakeOutput(prob, revert_vocab, random_sample)
if next_char == '':
new_sentence = True
if i >= ignore_length - 1:
output += next_char
# Let's see what we can learned from char in Obama's speech.
print(output)
```
| github_jupyter |
# Further Pre-training MobileBERT MLM with Client-side Adam (Shakespeare)
```
# Copyright 2020, The TensorFlow Federated Authors.
# Copyright 2020, Ronald Seoh
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
### Google Colab settings
```
# Use Google Colab
use_colab = True
# Is this notebook running on Colab?
# If so, then google.colab package (github.com/googlecolab/colabtools)
# should be available in this environment
# Previous version used importlib, but we could do the same thing with
# just attempting to import google.colab
try:
from google.colab import drive
colab_available = True
except:
colab_available = False
if use_colab and colab_available:
# Mount Google Drive root directory
drive.mount('/content/drive')
# cd to the appropriate working directory under my Google Drive
%cd '/content/drive/My Drive/Colab Notebooks/BERTerated'
# List the directory contents
!ls
```
### CUDA Multi GPU
```
# Use this code snippet to use specific GPUs
import os
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
# os.environ["CUDA_VISIBLE_DEVICES"]="1,2,3"
# IPython reloading magic
%load_ext autoreload
%autoreload 2
# Install required packages
# !pip install -r requirements.txt
```
## Import packages
```
import tensorflow as tf
tf_physical_devices_gpu = tf.config.list_physical_devices('GPU')
# Allow the growth of GPU memory consumption to take place incrementally
if tf_physical_devices_gpu:
for gpu in tf_physical_devices_gpu:
tf.config.experimental.set_memory_growth(gpu, True)
import os
import sys
import random
import datetime
import json
import pathlib
import itertools
import time
import numpy as np
import tensorflow_federated as tff
import tensorflow_text as tf_text
import tensorflow_addons as tfa
import transformers
import nest_asyncio
nest_asyncio.apply()
import fedavg
import fedavg_client
import datasets
import utils
# Random seed settings
random_seed = 692
random.seed(random_seed) # Python
np.random.seed(random_seed) # NumPy
tf.random.set_seed(random_seed) # TensorFlow
# Test if TFF is working
tff.federated_computation(lambda: 'Hello, World!')()
# Print version information
print("Python version: " + sys.version)
print("NumPy version: " + np.__version__)
print("TensorFlow version: " + tf.__version__)
print("TensorFlow Federated version: " + tff.__version__)
print("Transformers version: " + transformers.__version__)
!nvidia-smi
tf_logical_devices_cpu = tf.config.list_logical_devices('CPU')
tf_logical_devices_gpu = tf.config.list_logical_devices('GPU')
```
## Experiment Settings
```
EXPERIMENT_CONFIG = {}
EXPERIMENT_CONFIG['HUGGINGFACE_MODEL_NAME'] = 'google/mobilebert-uncased'
EXPERIMENT_CONFIG['HUGGINGFACE_CACHE_DIR'] = os.path.join('.', 'transformers_cache')
EXPERIMENT_CONFIG['TOTAL_ROUNDS'] = 50 # Number of total training rounds
EXPERIMENT_CONFIG['ROUNDS_PER_EVAL'] = 1 # How often to evaluate
EXPERIMENT_CONFIG['TRAIN_CLIENTS_PER_ROUND'] = 10 # How many clients to sample per round.
EXPERIMENT_CONFIG['CLIENT_EPOCHS_PER_ROUND'] = 3
EXPERIMENT_CONFIG['BATCH_SIZE'] = 8 # Batch size used on the client.
EXPERIMENT_CONFIG['TEST_BATCH_SIZE'] = 16 # Minibatch size of test data.
# Maximum length of input token sequence for BERT.
EXPERIMENT_CONFIG['BERT_MAX_SEQ_LENGTH'] = 128
# Optimizer configuration
EXPERIMENT_CONFIG['SERVER_LEARNING_RATE'] = 1.0 # Server learning rate.
EXPERIMENT_CONFIG['CLIENT_LEARNING_RATE'] = 5e-5 # Client learning rate
# Client dataset setting
EXPERIMENT_CONFIG['TRAIN_NUM_CLIENT_LIMIT'] = -1
EXPERIMENT_CONFIG['TEST_NUM_CLIENT_LIMIT'] = -1
# Path to save trained weights and logs
EXPERIMENT_CONFIG['RESULTS_DIRECTORY'] = os.path.join(
'.', 'results',
'mobilebert_mlm_shakespeare_fedadam',
datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
)
EXPERIMENT_CONFIG['RESULTS_LOG'] = os.path.join(EXPERIMENT_CONFIG['RESULTS_DIRECTORY'], "logs")
EXPERIMENT_CONFIG['RESULTS_MODEL'] = os.path.join(EXPERIMENT_CONFIG['RESULTS_DIRECTORY'], "model")
EXPERIMENT_CONFIG['RESULTS_CONFIG'] = os.path.join(EXPERIMENT_CONFIG['RESULTS_DIRECTORY'], "config")
# Dump all the configuration into a json file
pathlib.Path(EXPERIMENT_CONFIG['RESULTS_CONFIG']).mkdir(parents=True, exist_ok=True)
with open(os.path.join(EXPERIMENT_CONFIG['RESULTS_CONFIG'], "config.json"), 'w') as config_file:
json.dump(EXPERIMENT_CONFIG, config_file, indent=6)
# TFF executor factory settings
# Reference: https://www.tensorflow.org/federated/api_docs/python/tff/backends/native/set_local_execution_context
tff.backends.native.set_local_execution_context(
num_clients=EXPERIMENT_CONFIG['TRAIN_CLIENTS_PER_ROUND'],
max_fanout=100,
clients_per_thread=1,
server_tf_device=tf_logical_devices_cpu[0],
client_tf_devices=tf_logical_devices_cpu,
)
```
## Dataset
### Dataset loader
```
train_client_data, test_client_data = tff.simulation.datasets.shakespeare.load_data(cache_dir='./tff_cache')
```
### Tokenizer
```
bert_tokenizer = transformers.AutoTokenizer.from_pretrained(
EXPERIMENT_CONFIG['HUGGINGFACE_MODEL_NAME'], cache_dir=EXPERIMENT_CONFIG['HUGGINGFACE_CACHE_DIR'])
# Imitate transformers tokenizer with TF.Text Tokenizer
tokenizer_tf_text, vocab_lookup_table, special_ids_mask_table = \
datasets.preprocessing_for_bert.convert_huggingface_tokenizer(bert_tokenizer)
```
### Preprocessing
```
def check_empty_snippet(x):
    # tf.data filter predicate: keep examples whose 'snippets' string is non-empty.
    return tf.strings.length(x['snippets']) > 0
def tokenizer_and_mask_wrapped(x):
    """Tokenize one example's 'snippets' string and apply MLM masking.

    Returns a (masked_inputs, labels) pair truncated/padded to
    BERT_MAX_SEQ_LENGTH. The special-token ids come from the Huggingface
    tokenizer so the TF.Text pipeline matches the pretrained vocabulary.
    """
    masked, labels = datasets.preprocessing_for_bert.tokenize_and_mask(tf.reshape(x['snippets'], shape=[1]),
                                                                       max_seq_length=EXPERIMENT_CONFIG['BERT_MAX_SEQ_LENGTH'],
                                                                       bert_tokenizer_tf_text=tokenizer_tf_text,
                                                                       vocab_lookup_table=vocab_lookup_table,
                                                                       special_ids_mask_table=special_ids_mask_table,
                                                                       cls_token_id=bert_tokenizer.cls_token_id,
                                                                       sep_token_id=bert_tokenizer.sep_token_id,
                                                                       pad_token_id=bert_tokenizer.pad_token_id,
                                                                       mask_token_id=bert_tokenizer.mask_token_id)
    return (masked, labels)
def preprocess_for_train(train_dataset):
    """Per-client training pipeline: tokenize+mask, shuffle, batch, and
    repeat for CLIENT_EPOCHS_PER_ROUND local epochs."""
    return (
        train_dataset
        # Tokenize each sample using the MobileBERT tokenizer
        #.map(tokenizer_and_mask_wrapped, num_parallel_calls=tf.data.experimental.AUTOTUNE, deterministic=False)
        .map(tokenizer_and_mask_wrapped, num_parallel_calls=24, deterministic=False)
        # Shuffle
        .shuffle(100000)
        # Form minibatches
        # Use drop_remainder=True to force the batch size to be exactly BATCH_SIZE
        # and make the shape **exactly** (BATCH_SIZE, SEQ_LENGTH)
        .batch(EXPERIMENT_CONFIG['BATCH_SIZE'])#, drop_remainder=True)
        # Repeat to make each client train multiple epochs
        .repeat(count=EXPERIMENT_CONFIG['CLIENT_EPOCHS_PER_ROUND'])
    )
def preprocess_for_test(test_dataset):
    """Evaluation pipeline: tokenize+mask, shuffle, and batch (no repeat)."""
    return (
        test_dataset
        # Tokenize each sample using the MobileBERT tokenizer
        #.map(tokenizer_and_mask_wrapped, num_parallel_calls=tf.data.experimental.AUTOTUNE, deterministic=False)
        .map(tokenizer_and_mask_wrapped, num_parallel_calls=24, deterministic=False)
        # Shuffle
        .shuffle(100000)
        # Form minibatches
        # Use drop_remainder=True to force the batch size to be exactly TEST_BATCH_SIZE
        # and make the shape **exactly** (TEST_BATCH_SIZE, SEQ_LENGTH)
        .batch(EXPERIMENT_CONFIG['TEST_BATCH_SIZE'])
    )
```
### Training set
```
# Since the dataset is pretty large, we randomly select TRAIN_NUM_CLIENT_LIMIT number of clients.
all_train_client_ids = train_client_data.client_ids
random.shuffle(all_train_client_ids)
if EXPERIMENT_CONFIG['TRAIN_NUM_CLIENT_LIMIT'] > 0:
selected_train_client_ids = all_train_client_ids[0:EXPERIMENT_CONFIG['TRAIN_NUM_CLIENT_LIMIT']]
else:
selected_train_client_ids = all_train_client_ids
train_client_data = train_client_data.preprocess(preprocess_fn=lambda x: x.filter(check_empty_snippet))
train_client_data = train_client_data.preprocess(preprocess_fn=preprocess_for_train)
print(train_client_data.element_type_structure)
train_client_states = {}
# Initialize client states for all clients (selected for the entire simulation)
for i, client_id in enumerate(selected_train_client_ids):
train_client_states[client_id] = fedavg_client.ClientState(
client_serial=i, num_processed=0, optimizer_options=utils.OptimizerOptions())
```
### Test set
```
test_client_data_all_merged = test_client_data.create_tf_dataset_for_client(
test_client_data.client_ids[0]).filter(check_empty_snippet)
if len(test_client_data.client_ids) > 1:
for i in range(1, len(test_client_data.client_ids)):
test_client_data_all_merged = test_client_data_all_merged.concatenate(
test_client_data.create_tf_dataset_for_client(test_client_data.client_ids[i]).filter(check_empty_snippet))
test_client_data_all_merged = preprocess_for_test(test_client_data_all_merged)
test_client_data_all_merged = test_client_data_all_merged.cache()
print(test_client_data_all_merged.element_spec)
```
## Model
```
bert_model = transformers.TFAutoModelForPreTraining.from_pretrained(
EXPERIMENT_CONFIG['HUGGINGFACE_MODEL_NAME'], cache_dir=EXPERIMENT_CONFIG['HUGGINGFACE_CACHE_DIR'])
print(bert_model.config)
# Due to the limitations with Keras subclasses, we can only use the main layer part from pretrained models
# and add output heads by ourselves
bert_keras_converted = utils.convert_huggingface_mlm_to_keras(
huggingface_model=bert_model,
max_seq_length=EXPERIMENT_CONFIG['BERT_MAX_SEQ_LENGTH'],
)
# Use lists of NumPy arrays to backup pretained weights
bert_pretrained_trainable_weights = []
bert_pretrained_non_trainable_weights = []
for w in bert_keras_converted.trainable_weights:
bert_pretrained_trainable_weights.append(w.numpy())
for w in bert_keras_converted.non_trainable_weights:
bert_pretrained_non_trainable_weights.append(w.numpy())
def tff_model_fn():
    """Constructs a fully initialized model for use in federated averaging.

    Clones the converted Keras MLM model (so each call yields a fresh set of
    weights) and wraps it with KerasModelWrapper together with the masked-LM
    cross-entropy loss and the client dataset's element structure.
    """
    loss = utils.MaskedLMCrossEntropy()
    model_wrapped = utils.KerasModelWrapper(
        tf.keras.models.clone_model(bert_keras_converted),
        train_client_data.element_type_structure, loss)
    return model_wrapped
```
## Training
### Training setups
```
summary_writer = tf.summary.create_file_writer(EXPERIMENT_CONFIG['RESULTS_LOG'])
def server_optimizer_fn():
    # Plain SGD on the server; with SERVER_LEARNING_RATE = 1.0 the server
    # update is a straight application of the averaged client deltas.
    return tf.keras.optimizers.SGD(learning_rate=EXPERIMENT_CONFIG['SERVER_LEARNING_RATE'])
def client_optimizer_fn():
    # AdamW (Adam with decoupled weight decay) on each client, with the
    # learning rate taken from the experiment config.
    return transformers.AdamWeightDecay(
        learning_rate=EXPERIMENT_CONFIG['CLIENT_LEARNING_RATE'],
        weight_decay_rate=0.01,
    )
%%time
iterative_process = fedavg.build_federated_averaging_process(
model_fn=tff_model_fn,
model_input_spec=train_client_data.element_type_structure,
initial_trainable_weights=bert_pretrained_trainable_weights,
initial_non_trainable_weights=bert_pretrained_non_trainable_weights,
server_optimizer_fn=server_optimizer_fn,
client_optimizer_fn=client_optimizer_fn)
%%time
server_state = iterative_process.initialize()
metric_eval = tfa.metrics.MeanMetricWrapper(fn=utils.calculate_masked_lm_cross_entropy, name='ce')
# The model for calculating validation loss only
# (This happens outside FedAvg)
model_final = utils.KerasModelWrapper(
tf.keras.models.clone_model(bert_keras_converted),
train_client_data.element_type_structure,
utils.MaskedLMCrossEntropy(),
tf_device_identifier="/GPU:0")
```
### Training loop
```
%%time
with summary_writer.as_default():
for round_num in range(1, EXPERIMENT_CONFIG['TOTAL_ROUNDS'] + 1):
# FedAvg
print(f'Round {round_num} start!')
# Training client selection
sampled_client_serials = np.random.choice(
len(selected_train_client_ids),
size=EXPERIMENT_CONFIG['TRAIN_CLIENTS_PER_ROUND'],
replace=False)
sampled_train_data = [
train_client_data.create_tf_dataset_for_client(selected_train_client_ids[client_serial])
for client_serial in sampled_client_serials
]
sampled_client_states = [
train_client_states[selected_train_client_ids[client_serial]]
for client_serial in sampled_client_serials
]
print("Selected client serials:", sampled_client_serials)
current_round_start_time = time.time()
server_state, new_client_states, train_loss = iterative_process.next(
server_state, sampled_client_states, sampled_train_data)
current_round_end_time = time.time()
currnt_round_running_time = current_round_end_time - current_round_start_time
print(f'Round {round_num} training loss: {train_loss}')
print(f'Round {round_num} execution time: {currnt_round_running_time}')
# Record the current round's training loss to the log
tf.summary.scalar('train_loss', train_loss, step=round_num)
tf.summary.scalar('train_running_time', currnt_round_running_time, step=round_num)
print()
# Update client states
print("Updating client states.")
for state in new_client_states:
train_client_states[selected_train_client_ids[state.client_serial]] = state
print()
print("Recording client statistics:")
for client_id in selected_train_client_ids:
state = train_client_states[client_id]
tf.summary.scalar(
'client_' + str(int(state.client_serial)) + '_num_processed',
int(state.num_processed), step=round_num)
print()
# Evaluation
if round_num % EXPERIMENT_CONFIG['ROUNDS_PER_EVAL'] == 0:
model_final.from_weights(server_state.model_weights)
# Test dataset generation for this round
print("Calculating validation metric:")
current_round_validation_start_time = time.time()
current_round_validation_metric = utils.keras_evaluate(
model_final.keras_model, test_client_data_all_merged, metric_eval, "/GPU:0")
current_round_validation_end_time = time.time()
current_round_validation_runnning_time = current_round_validation_end_time - current_round_validation_start_time
print(f'Round {round_num} validation metric: {current_round_validation_metric}')
print(f'Round {round_num} validation time: {current_round_validation_runnning_time}')
# Write down train_metrics to the log
tf.summary.scalar('validation_metric', current_round_validation_metric, step=round_num)
tf.summary.scalar('validation_running_time', current_round_validation_runnning_time, step=round_num)
print()
```
### Save the trained model
```
model_final.keras_model.save(EXPERIMENT_CONFIG['RESULTS_MODEL'])
```
| github_jupyter |
### Some helper functions
```
# define helper functions
def imShow(path):
    """Display the image at `path` at 3x size via matplotlib (Colab helper).

    NOTE: the `%matplotlib inline` magic inside the body means this function
    only runs inside an IPython/Colab notebook.
    """
    import cv2
    import matplotlib.pyplot as plt
    %matplotlib inline
    image = cv2.imread(path)
    height, width = image.shape[:2]
    # Upscale 3x with cubic interpolation before displaying
    resized_image = cv2.resize(image,(3*width, 3*height), interpolation = cv2.INTER_CUBIC)
    fig = plt.gcf()
    fig.set_size_inches(18, 10)
    plt.axis("off")
    # OpenCV loads BGR; convert to RGB for matplotlib
    plt.imshow(cv2.cvtColor(resized_image, cv2.COLOR_BGR2RGB))
    plt.show()
# use this to upload files
def upload():
    """Open the Colab file-upload dialog and save each uploaded file to disk."""
    from google.colab import files
    uploaded = files.upload()
    for name, data in uploaded.items():
        with open(name, 'wb') as f:
            f.write(data)
            print ('saved file', name)
# use this to download a file
def download(path):
    """Trigger a browser download of `path` from the Colab VM."""
    from google.colab import files
    files.download(path)
```
### Cloning and building DarkNet
```
# clone darknet repo
!git clone https://github.com/AlexeyAB/darknet
# change makefile to have GPU and OPENCV enabled
%cd darknet
!sed -i 's/OPENCV=0/OPENCV=1/' Makefile
!sed -i 's/GPU=0/GPU=1/' Makefile
!sed -i 's/CUDNN=0/CUDNN=1/' Makefile
!sed -i 's/CUDNN_HALF=0/CUDNN_HALF=1/' Makefile
# verify CUDA
!/usr/local/cuda/bin/nvcc --version
# make darknet (builds darknet so that you can then use the darknet executable file to run or train object detectors)
!make
```
### Importing our own data
```
%cd ..
from google.colab import drive
drive.mount('/content/drive')
# Own stored folder
!ls drive/MyDrive/TDT17/Data/training_data
# copy over both datasets into the root directory of the Colab VM (comment out test.zip if you are not using a validation dataset)
!cp drive/MyDrive/TDT17/Data/training_data/ambient_data.zip /content/darknet/data
!cp drive/MyDrive/TDT17/Data/training_data/intensity_data.zip /content/darknet/data
!cp drive/MyDrive/TDT17/Data/training_data/test_data.zip /content/darknet/data
!ls
# unzip the datasets and their contents so that they are now in /darknet/data/ folder
!unzip /content/darknet/data/ambient_data.zip -d /content/darknet/data
!unzip /content/darknet/data/intensity_data.zip -d /content/darknet/data
!unzip /content/darknet/data/test_data.zip -d /content/darknet/data
```
### Configuring training files
- Config file
- obj.names and obj.data
- generating train.txt and test.txt
```
# download cfg to google drive and change its name
!cp content/darknet/cfg/yolov4-custom.cfg /content/drive/MyDrive/TDT17/yolov4-obj.cfg
download('/content/drive/MyDrive/TDT17/yolov4-obj.cfg')
# upload the custom .cfg back to cloud VM from Google Drive
!cp /content/drive/MyDrive/TDT17/yolov4-obj.cfg ./cfg
#%cd darknet
!ls data/
# Creating correct .data files with pointer to backup location
# Replacing .data files with the one we created earlier, which points to correct backup location
!rm -f /content/darknet/data/ambient_data/ambient.data
!rm -f /content/darknet/data/intensity_data/intensity.data
!cp /content/drive/MyDrive/TDT17/Data/training_data/ambient_data/ambient.data /content/darknet/data/ambient_data/ambient.data
!cp /content/drive/MyDrive/TDT17/Data/training_data/intensity_data/intensity.data /content/darknet/data/intensity_data/intensity.data
```
### Note: Possible error with data formatting
### Downloading pre-trained weights for convolutional-layers
```
!wget https://github.com/AlexeyAB/darknet/releases/download/darknet_yolo_v3_optimal/yolov4.conv.137
obj_data_path = "/content/darknet/data/intensity_data"
config_path = "/content/drive/MyDrive/TDT17/Data/yolov4-obj.cfg"
```
## Fixing train and test datasets..
```
# We dont need to alter the folder/data/names structure, we just have to convert ambient.txt into main.txt, with correct pointers
def fix_files(name):
    """Rewrite data/<name>_data/<name>.txt so every image path is absolute.

    Reads the relative paths listed in the txt file, prefixes them with the
    darknet data directory, backs up the original file as <name>_OLD.txt,
    and writes the fixed list back under the original filename.
    """
    import os
    base_path = f"/content/darknet/data/{name}_data"
    txt_path = f"{base_path}/{name}.txt"
    with open(txt_path, "r") as txt_file:
        filenames = txt_file.read().split("\n")
    add_str = "/content/darknet/data/"
    # Skip blank entries (the file normally ends with a newline): the old
    # code turned them into bogus lines pointing at the bare data directory.
    filenames_fixed = [f"{add_str}{line}" for line in filenames if line]
    os.rename(txt_path, f"{txt_path.replace('.txt', '')}_OLD.txt")
    with open(txt_path, "w") as txt_file:
        for line in filenames_fixed:
            txt_file.write(line + "\n")
fix_files("intensity")
```
## Starting training
- old weights: /content/drive/MyDrive/TDT17/backup/yolov4-obj_last.weights
```
# Training from Yolo weights:
#!./darknet detector train /content/darknet/data/intensity_data/intensity.data /content/drive/MyDrive/TDT17/Data/yolov4-obj.cfg yolov4.conv.137 -dont_show
# Training from backup weights (resume from the last checkpoint saved in Drive)
!./darknet detector train /content/darknet/data/intensity_data/intensity.data /content/drive/MyDrive/TDT17/Data/yolov4-obj.cfg /content/drive/MyDrive/TDT17/backup/yolov4-obj_last.weights -dont_show
# Display the loss/mAP chart that darknet writes during training.
imShow('chart.png')
```
### Checking MaP
```
# NOTE(review): `darknet detector recall` normally takes the .data file as its
# first argument, but intensity.names is passed here — verify against the
# darknet CLI before relying on this result.
!./darknet detector recall /content/darknet/data/intensity_data/intensity.names /content/drive/MyDrive/TDT17/Data/yolov4-obj_INFERENCE.cfg /content/drive/MyDrive/TDT17/backup/yolov4-obj_last.weights -dont_show
```
### Predicting on a single instance
```
# Copying config file (a separate INFERENCE copy so the training cfg stays untouched)
!cp /content/drive/MyDrive/TDT17/Data/yolov4-obj.cfg /content/drive/MyDrive/TDT17/Data/yolov4-obj_INFERENCE.cfg
# need to set our custom cfg to test mode
!sed -i 's/batch=64/batch=1/' /content/drive/MyDrive/TDT17/Data/yolov4-obj_INFERENCE.cfg
!sed -i 's/subdivisions=16/subdivisions=1/' /content/drive/MyDrive/TDT17/Data/yolov4-obj_INFERENCE.cfg
# run your custom detector with this command (upload an image to your google drive to test, thresh flag sets accuracy that detection must be in order to show it)
!./darknet detector test /content/darknet/data/intensity_data/intensity.data /content/drive/MyDrive/TDT17/Data/yolov4-obj_INFERENCE.cfg /content/drive/MyDrive/TDT17/backup/yolov4-obj_last.weights /content/darknet/data/test_data/data/ambient_video_03_frame_000000.PNG -thresh 0.1
# Display the annotated image darknet writes after detection.
imShow('predictions.jpg')
```
| github_jupyter |
## Depth Contours
### Generate depth-contours of all the Pareto-optimal data sets.
This notebook can be used to generate tradeoff values from all the Pareto-optimal data point files hard-coded in the dictionary `pfs`. Currently this notebook processes these Pareto-optimal fronts.
- DTLZ2 ($m$-Sphere) Problem
- DEBMDK (Knee) Problem
- CDEBMDK (Constrained Knee) Problem
- C0-DTLZ2 (A split $𝑚$-sphere with a small isolated cluster at $f_m$-axis)
- C2-DTLZ2 Problem
- DTLZ8 Problem (A 3-dimensional line and an 𝑚 -dimensional hypersurface)
- GAA Problem (A 10-objective and 18-constraint general aviation design problem)
**Note:** Sometimes, it might happen that the `simple_shape.depth_contours()` function does not work for data points if the points are very sparse or they lie on a fully convex surface (or on the same hyperplane). In that case, there will be only one layer. We solve this problem by approximating the depth-contours from another set of data points with similar shape and dimensions where the depth-contours are available. Please refer to the `cvhull-approximation-test.ipynb` notebook. Also note that this is not a general solution.
```
%reload_ext autoreload
%autoreload 2
import sys
import os
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
plt.rcParams.update({'figure.max_open_warning': 0})
```
### Generate and save the depth contours
In this case we are computing the depth-contours from the convex-hulls. So we are using the `tda.simple_shape` module.
```
from viz.tda import simple_shape
from viz.utils import io

# Pareto-front data sets on disk: name -> list of dimensionalities,
# each expected at ../data/<name>/<dim>/f.csv.
pfs = {'dtlz2': ['3d', '4d', '8d'], \
    'dtlz2-nbi': ['3d', '4d', '8d'], \
    'debmdk': ['3d', '4d', '8d'], \
    'debmdk-nbi': ['3d', '4d', '8d'], \
    'debmdk-all': ['3d', '4d', '8d'], \
    'debmdk-all-nbi': ['3d', '4d', '8d'], \
    'dtlz8': ['3d', '4d', '6d', '8d'], \
    'dtlz8-nbi': ['3d', '4d', '6d', '8d'], \
    'c2dtlz2': ['3d', '4d', '5d', '8d'], \
    'c2dtlz2-nbi': ['3d', '4d', '5d', '8d'], \
    'cdebmdk': ['3d', '4d', '8d'], \
    'cdebmdk-nbi': ['3d', '4d', '8d'], \
    'c0dtlz2': ['3d', '4d', '8d'], \
    'c0dtlz2-nbi': ['3d', '4d', '8d'], \
    'crash-nbi': ['3d'], \
    'crash-c1-nbi': ['3d'], \
    'crash-c2-nbi': ['3d'], \
    'carside-nbi': ['3d'], \
    'gaa': ['10d'], \
    'gaa-nbi': ['10d']}
# NOTE(review): the [-2:] slice only processes the last two entries
# ('gaa', 'gaa-nbi'); widen it to regenerate contours for all fronts.
for pf in list(pfs.keys())[-2:]:
    for dim in pfs[pf]:
        fullpathf = "../data/{0:s}/{1:s}/f.csv".format(pf, dim)
        if os.path.exists(fullpathf):
            path, filenamef = os.path.split(fullpathf)
            dirs = path.split('/')
            frontname = dirs[-2]
            F = np.loadtxt(fullpathf, delimiter=',')
            print(fullpathf, F.shape, dirs, frontname)
            # test simple_shape.depth_contour function
            # it looks like these PFs are better displayed if project_collapse=False
            if pf in ['dtlz8', 'dtlz8-nbi', 'crash-nbi', 'crash-c1-nbi', 'crash-c2-nbi']:
                L = simple_shape.depth_contours(F, project_collapse=False)
            elif pf in ['gaa', 'gaa-nbi']:
                L = simple_shape.depth_contours(F, verbose=True)
            else:
                L = simple_shape.depth_contours(F)
            # save the layers (one row of point indices per depth contour)
            io.savetxt(os.path.join(path, "depth-cont-cvhull.csv"), L, fmt='{:d}', delimiter=',')
# We are not using this since it's extremely slow and also doesn't give
# layers if all the points are on a fully convex surface.
# print("Generating depth-contours (project_collapse=False) for " + frontname)
# # test ss.depth_contour function without projection and collapse
# L = ss.depth_contours(F, project_collapse = False)
# save the layers
# io.savetxt(os.path.join(path, "depth-cont-cvhull.csv"), L, fmt = '{:d}', delimiter = ',')
### Plot and verify
```
%matplotlib inline
from viz.utils import io

# Same catalogue as the generation cell above.
# NOTE(review): 'carside-nbi' is present in the generation dict but missing
# here, so its contours are never plotted — confirm whether that is intended.
pfs = {'dtlz2': ['3d', '4d', '8d'], \
    'dtlz2-nbi': ['3d', '4d', '8d'], \
    'debmdk': ['3d', '4d', '8d'], \
    'debmdk-nbi': ['3d', '4d', '8d'], \
    'debmdk-all': ['3d', '4d', '8d'], \
    'debmdk-all-nbi': ['3d', '4d', '8d'], \
    'dtlz8': ['3d', '4d', '6d', '8d'], \
    'dtlz8-nbi': ['3d', '4d', '6d', '8d'], \
    'c2dtlz2': ['3d', '4d', '5d', '8d'], \
    'c2dtlz2-nbi': ['3d', '4d', '5d', '8d'], \
    'cdebmdk': ['3d', '4d', '8d'], \
    'cdebmdk-nbi': ['3d', '4d', '8d'], \
    'c0dtlz2': ['3d', '4d', '8d'], \
    'c0dtlz2-nbi': ['3d', '4d', '8d'], \
    'crash-nbi': ['3d'], 'crash-c1-nbi': ['3d'], 'crash-c2-nbi': ['3d'], \
    'gaa': ['10d'], \
    'gaa-nbi': ['10d']}
for pf in list(pfs.keys()):
    for dim in pfs[pf]:
        fullpathf = "../data/{0:s}/{1:s}/f.csv".format(pf, dim)
        if os.path.exists(fullpathf):
            path, filenamef = os.path.split(fullpathf)
            dirs = path.split('/')
            frontname = dirs[-2]
            F = np.loadtxt(fullpathf, delimiter = ',')
            print(fullpathf, F.shape, dirs, frontname)
            layerpathf = os.path.join(path, "depth-cont-cvhull.csv")
            if os.path.exists(layerpathf):
                # Each row of L is one depth contour: indices into F.
                L = io.loadtxt(layerpathf, dtype=int, delimiter=',')
                if F.shape[1] == 2:
                    # 2-objective front: plain 2D scatter, one colour per layer.
                    fig = plt.figure()
                    ax = fig.gca()
                    for l in L:
                        ax.scatter(F[l.astype(int),0], F[l.astype(int),1], s=1)
                    plt.show()
                else:
                    # 3+ objectives: scatter the first three coordinates in 3D.
                    fig = plt.figure()
                    ax = Axes3D(fig)
                    for l in L:
                        ax.scatter(F[l.astype(int),0], F[l.astype(int),1], F[l.astype(int),2], s=1)
                    plt.show()
```
| github_jupyter |
# p-Hacking and Multiple Comparisons Bias
By Delaney Mackenzie and Maxwell Margenot.
Part of the Quantopian Lecture Series:
* [www.quantopian.com/lectures](https://www.quantopian.com/lectures)
* [github.com/quantopian/research_public](https://github.com/quantopian/research_public)
Notebook released under the Creative Commons Attribution 4.0 License.
---
Multiple comparisons bias is a pervasive problem in statistics, data science, and in general forecasting/predictions. The short explanation is that the more tests you run, the more likely you are to get an outcome that you want/expect. If you ignore the multitude of tests that failed, you are clearly setting yourself up for failure by misinterpreting what's going on in your data.
A particularly common example of this is when looking for relationships in large data sets comprising many independent series or variables. In this case you run a test each time you evaluate whether a relationship exists between a set of variables.
## Statistics Merely Illuminates This Issue
Most folks also fall prey to multiple comparisons bias in real life. Any time you make a decision you are effectively taking an action based on an hypothesis. That hypothesis is often tested. You can end up unknowingly making many tests in your daily life.
An example might be deciding which medicine is helping cure a cold you have. Many people will take multiple medicines at once to try and get rid of symptoms. You may think that a certain medicine worked, when in reality none did and the cold just happened to start getting better at some point.
The point here is that this problem doesn't stem from statistical testing and p-values. Rather, these techniques give us much more information about the problem and when it might be occuring.
```
import numpy as np
import pandas as pd
import scipy.stats as stats
import matplotlib.pyplot as plt
```
### Refresher: Spearman Rank Correlation
Please refer to [this lecture](https://www.quantopian.com/lectures/spearman-rank-correlation) for more full info, but here is a very brief refresher on Spearman Rank Correlation.
It's a variation of correlation that takes into account the ranks of the data. This can help with weird distributions or outliers that would confuse other measures. The test also returns a p-value, which is key here.
A higher coefficient means a stronger estimated relationship.
```
# Spearman rank correlation of a series with itself: coefficient is exactly 1.
X = pd.Series(np.random.normal(0, 1, 100))
Y = X
r_s = stats.spearmanr(Y, X)
# The original used Python-2-only print statements; str.format works on both
# Python 2 and 3.
print('Spearman Rank Coefficient: {}'.format(r_s[0]))
print('p-value: {}'.format(r_s[1]))
```
If we add some noise our coefficient will drop.
```
# Adding independent noise to Y weakens the rank correlation with X.
X = pd.Series(np.random.normal(0, 1, 100))
Y = X + np.random.normal(0, 1, 100)
r_s = stats.spearmanr(Y, X)
# Python-3 compatible prints (the original used Python 2 print statements).
print('Spearman Rank Coefficient: {}'.format(r_s[0]))
print('p-value: {}'.format(r_s[1]))
```
### p-value Refresher
For more info on p-values see [this lecture](https://www.quantopian.com/lectures/hypothesis-testing). What's important to remember is they're used to test a hypothesis given some data. Here we are testing the hypothesis that a relationship exists between two series given the series values.
####IMPORTANT: p-values must be treated as binary.
A common mistake is that p-values are treated as more or less significant. This is bad practice as it allows for what's known as [p-hacking](https://en.wikipedia.org/wiki/Data_dredging) and will result in more false positives than you expect. Effectively, you will be too likely to convince yourself that relationships exist in your data.
To treat p-values as binary, a cutoff must be set in advance. Then the p-value must be compared with the cutoff and treated as significant/not significant. Here we'll show this.
### The Cutoff is our Significance Level
We can refer to the cutoff as our significance level because a lower cutoff means that results which pass it are significant at a higher level of confidence. So if you have a cutoff of 0.05, then even on random data 5% of tests will pass based on chance. A cutoff of 0.01 reduces this to 1%, which is a more stringent test. We can therefore have more confidence in our results.
```
# Setting a cutoff of 5% means that there is a 5% chance
# of us getting a significant p-value given no relationship
# in our data (false positive).
# NOTE: This is only true if the test's assumptions have been
# satisfied and the test is therefore properly calibrated.
# All tests have different assumptions.
cutoff = 0.05
X = pd.Series(np.random.normal(0, 1, 100))
Y = X + np.random.normal(0, 1, 100)
r_s = stats.spearmanr(Y, X)
# Python-3 compatible prints (the original used Python 2 print statements).
print('Spearman Rank Coefficient: {}'.format(r_s[0]))
# Treat the p-value as binary: compare against the pre-chosen cutoff.
if r_s[1] < cutoff:
    print('There is significant evidence of a relationship.')
else:
    print('There is not significant evidence of a relationship.')
```
## Experiment - Running Many Tests
We'll start by defining a data frame.
```
# Start from an empty frame; it is filled with random series in the next cell.
df = pd.DataFrame()
```
Now we'll populate it by adding `N` randomly generated timeseries of length `T`.
```
# Fill the frame with N independent white-noise series of length T,
# named X0 ... X19.
N = 20
T = 100
for idx in range(N):
    series = pd.Series(np.random.normal(0, 1, T))
    df['X%s' % idx] = series
df.head()
```
Now we'll run a test on all pairs within our data looking for instances where our p-value is below our defined cutoff of 5%.
```
# Test every unordered pair of columns and record those whose Spearman
# p-value clears the 5% cutoff. The data is pure noise, so every hit is a
# false positive.
cutoff = 0.05
significant_pairs = []
for i in range(N):
    for j in range(i + 1, N):
        _, pvalue = stats.spearmanr(df.iloc[:, i], df.iloc[:, j])
        if pvalue < cutoff:
            significant_pairs.append((i, j))
```
Before we check how many significant results we got, let's work out some math to check how many we'd expect. The formula for the number of pairs given N series is
$$\frac{N(N-1)}{2}$$
There are no relationships in our data as it's all randomly generated. If our test is properly calibrated we should expect a false positive rate of 5% given our 5% cutoff. Therefore we should expect the following number of pairs that achieved significance based on pure random chance.
```
# Expected false positives = (number of pairs) * cutoff = 190 * 0.05 = 9.5
(N * (N-1) / 2) * 0.05
```
Now let's compare to how many we actually found.
```
# Number of spuriously "significant" pairs actually found in this single run.
len(significant_pairs)
```
We shouldn't expect the numbers to match too closely here on a consistent basis as we've only run one experiment. If we run many of these experiments we should see a convergence to what we'd expect.
### Repeating the Experiment
```
def do_experiment(N, T, cutoff=0.05):
    """Generate N random series of length T, Spearman-test every unordered
    pair, and return the list of (i, j) index pairs whose p-value falls
    below *cutoff*. With random data every returned pair is a false positive.
    """
    df = pd.DataFrame()
    # Build the random data set, columns X0 ... X{N-1}.
    for col in range(N):
        df['X%s' % col] = pd.Series(np.random.normal(0, 1, T))
    hits = []
    # Scan all unordered column pairs for "significant" relationships.
    for i in range(N):
        for j in range(i + 1, N):
            _, pvalue = stats.spearmanr(df.iloc[:, i], df.iloc[:, j])
            if pvalue < cutoff:
                hits.append((i, j))
    return hits
# Repeat the all-pairs experiment 100 times and record, for each run,
# how many pairs spuriously passed the 5% cutoff.
num_experiments = 100
results = np.zeros((num_experiments,))
for i in range(num_experiments):
    # Run a single experiment
    result = do_experiment(20, 100, cutoff=0.05)
    # Count how many pairs
    n = len(result)
    # Add to array
    results[i] = n
```
The average over many experiments should be closer.
```
# Average false-positive count over the 100 runs; should be close to 9.5.
np.mean(results)
```
## Visualizing What's Going On
What's happening here is that p-values should be uniformly distributed, given no signal in the underlying data. Basically, they carry no information whatsoever and will be equally likely to be 0.01 as 0.99. Because they're popping out randomly, you will expect a certain percentage of p-values to be underneath any threshold you choose. The lower the threshold the fewer will pass your test.
Let's visualize this by making a modified function that returns p-values.
```
def get_pvalues_from_experiment(N, T):
    """Run the all-pairs Spearman test on N random series of length T and
    return the list of p-values, one per unordered column pair (N*(N-1)/2
    values in total)."""
    df = pd.DataFrame()
    # Random data set, columns X0 ... X{N-1}.
    for col in range(N):
        df['X%s' % col] = pd.Series(np.random.normal(0, 1, T))
    pvalues = []
    # Collect the p-value of every unordered pair.
    for i in range(N):
        for j in range(i + 1, N):
            pvalues.append(stats.spearmanr(df.iloc[:, i], df.iloc[:, j])[1])
    return pvalues
```
We'll now collect a bunch of p-values. As in any case we'll want to collect quite a number of p-values to start getting a sense of how the underlying distribution looks. If we only collect a few, the histogram will be noisy, like this:
```
# With only 10 series (45 pairs) the empirical p-value histogram is noisy.
pvalues = get_pvalues_from_experiment(10, 100)
plt.hist(pvalues)
plt.ylabel('Frequency')
plt.title('Observed p-value');
```
Let's dial up our `N` parameter to get a better sense. Keep in mind that the number of p-values will increase at a rate of
$$\frac{N (N-1)}{2}$$
or approximately quadratically. Therefore we don't need to increase `N` by much.
```
# 50 series -> 50*49/2 = 1225 p-values; the histogram should look roughly uniform.
pvalues = get_pvalues_from_experiment(50, 100)
plt.hist(pvalues)
plt.ylabel('Frequency')
plt.title('Observed p-value');
```
Starting to look pretty flat, as we expected. Lastly, just to visualize the process of drawing a cutoff, we'll draw two artificial lines.
```
pvalues = get_pvalues_from_experiment(50, 100)
# Mark where the two common cutoffs fall on the (roughly uniform) distribution.
plt.vlines(0.01, 0, 150, colors='r', linestyle='--', label='0.01 Cutoff')
plt.vlines(0.05, 0, 150, colors='r', label='0.05 Cutoff')
plt.hist(pvalues, label='P-Value Distribution')
plt.legend()
plt.ylabel('Frequency')
plt.title('Observed p-value');
```
We can see that with a lower cutoff we should expect to get fewer false positives. Let's check that with our above experiment.
```
# Same experiment as before, but with the stricter 1% cutoff.
num_experiments = 100
results = np.zeros((num_experiments,))
for i in range(num_experiments):
    # Run a single experiment
    result = do_experiment(20, 100, cutoff=0.01)
    # Count how many pairs
    n = len(result)
    # Add to array
    results[i] = n
np.mean(results)
```
And finally compare it to what we expected.
```
# Expected false positives at the 1% cutoff: 190 * 0.01 = 1.9
(N * (N-1) / 2) * 0.01
```
## Sensitivity / Specificity Tradeoff
As with any adjustment of p-value cutoff, we have a tradeoff. A lower cutoff decreases the rate of false positives, but also decreases the chance we find a real relationship (true positive). So you can't just decrease your cutoff to solve this problem.
https://en.wikipedia.org/wiki/Sensitivity_and_specificity
## Reducing Multiple Comparisons Bias
You can't really eliminate multiple comparisons bias, but you can reduce how much it impacts you. To do so we have two options.
### Option 1: Run fewer tests.
This is often the best option. Rather than just sweeping around hoping you hit an interesting signal, use your expert knowledge of the system to develop a great hypothesis and test that. This process of exploring the data, coming up with a hypothesis, then gathering more data and testing the hypothesis on the new data is considered the gold standard in statistical and scientific research. It's crucial that the data set on which you develop your hypothesis is not the one on which you test it. Because you found the effect while exploring, the test will likely pass and not really tell you anything. What you want to know is how consistent the effect is. Moving to new data and testing there will not only mean you only run one test, but will be an 'unbiased estimator' of whether your hypothesis is true. We discuss this a lot in other lectures.
### Option 2: Adjustment Factors and Bon Ferroni Correction
#### WARNING: This section gets a little technical. Unless you're comfortable with significance levels, we recommend looking at the code examples first and maybe reading the linked articles before fully diving into the text.
If you must run many tests, try to correct your p-values. This means applying a correction factor to the cutoff you desire to obtain the one actually used when determining whether p-values are significant. The most conservative and common correction factor is Bon Ferroni.
### Example: Bon Ferroni Correction
The concept behind Bon Ferroni is quite simple. It just says that if we run $m$ tests, and we have a significance level/cutoff of $a$, then we should use $a/m$ as our new cutoff when determining significance. The math works out because of the following.
Let's say we run $m$ tests. We should expect to see $ma$ false positives based on random chance that pass our cutoff. If we instead use $a/m$ as our cutoff, then we should expect to see $ma/m = a$ tests that pass our cutoff. Therefore we are back to our desired false positive rate of $a$.
Let's try it on our experiment above.
```
num_experiments = 100
results = np.zeros((num_experiments,))
N = 20
T = 100
desired_level = 0.05
# Bonferroni correction: divide the desired significance level by the number
# of tests, so the expected number of false positives across ALL tests is
# back at the desired level.
num_tests = N * (N - 1) / 2
new_cutoff = desired_level / num_tests
for i in range(num_experiments):
    # Run a single experiment
    result = do_experiment(20, 100, cutoff=new_cutoff)
    # Count how many pairs
    n = len(result)
    # Add to array
    results[i] = n
np.mean(results)
```
As you can see, our number of significant results is now far lower on average. Which is good because the data was random to begin with.
### These are Often Overly Conservative
Because Bon Ferroni is so stringent, you can often end up passing over real relationships. There is a good example in the following article
https://en.wikipedia.org/wiki/Multiple_comparisons_problem
Effectively, it assumes that all the tests you are running are independent, and doesn't take into account any structure in your data. You may be able to design a more finely tuned correction factor, but this is adding a layer of complexity and therefore a point of failure to your research. In general any time you relax your stringency, you need to be very careful not to make a mistake.
Because of the over-zealousness of Bon Ferroni, often running fewer tests is the better option. Or, if you must run many tests, reserve multiple sets of data so your candidate signals can undergo an out-of-sample round of testing. For example, you might have the following flow:
* Let's say there are 100,000 possible relationships.
* Run a test on each possible relationship, and pick those that passed the test.
* With these candidates, run a test on a new out-of-sample set of data. Because you have many fewer candidates, you can now apply a Bon Ferroni correction to these p-values, or if necessary repeat another round of out-of-sample testing.
# What is p-Hacking?
p-hacking is just intentional or accidental abuse of multiple comparisons bias. It is surprisingly common, even in academic literature. The excellent statistical news website FiveThirtyEight has a great visualization here:
https://fivethirtyeight.com/features/science-isnt-broken/
Wikipedia's article is also informative:
https://en.wikipedia.org/wiki/Data_dredging
In general, the concept is simple. By running many tests or experiments and then focusing only on the ones that worked, you can present false positives as real results. Keep in mind that this also applies to running many different models or different types of experiments and on different data sets. Imagine that you spend a summer researching a new model to forecast corn future prices. You try 50 different models until finally one succeeds. Is this just luck at this point? Certainly you would want to be more careful about validating that model and testing it out-of-sample on new data before believing that it works.
# Final Notes
## You can never eliminate, only reduce risk.
In general you can never completely eliminate multiple comparisons bias, you can only reduce the risk of false positives using techniques we described above. At the end of the day most ideas tried in research don't work, so you'll end up testing many different hypotheses over time. Just try to be careful and use common sense about whether there is sufficient evidence that a hypothesis is true, or that you just happened to get lucky on this iteration.
## Use Out-of-Sample Testing
As mentioned above, out-of-sample testing is one of the best ways to reduce your risk. You should always use it, no matter the circumstances. Often one of the ways that false positives make it through your workflow is a lack of an out-of-sample test at the end.
####Sources
* https://en.wikipedia.org/wiki/Multiple_comparisons_problem
* https://en.wikipedia.org/wiki/Sensitivity_and_specificity
* https://en.wikipedia.org/wiki/Bonferroni_correction
* https://fivethirtyeight.com/features/science-isnt-broken/
*This presentation is for informational purposes only and does not constitute an offer to sell, a solicitation to buy, or a recommendation for any security; nor does it constitute an offer to provide investment advisory or other services by Quantopian, Inc. ("Quantopian"). Nothing contained herein constitutes investment advice or offers any opinion with respect to the suitability of any security, and any views expressed herein should not be taken as advice to buy, sell, or hold any security or as an endorsement of any security or company. In preparing the information contained herein, Quantopian, Inc. has not taken into account the investment needs, objectives, and financial circumstances of any particular investor. Any views expressed and data illustrated herein were prepared based upon information, believed to be reliable, available to Quantopian, Inc. at the time of publication. Quantopian makes no guarantees as to their accuracy or completeness. All information is subject to change and may quickly become unreliable for various reasons, including changes in market conditions or economic circumstances.*
| github_jupyter |
# Roll decay test parameter sensitivity many
```
%matplotlib inline
%load_ext autoreload
%autoreload 2
import numpy as np
import pandas as pd
pd.set_option("display.max_rows", 200)
import matplotlib.pyplot as plt
from pylab import rcParams
rcParams['figure.figsize'] = 15, 7
import os
import copy
from scipy.optimize import curve_fit
from rolldecay.simulation import simulate
#import rolldecay.parameter_identification as parameter_identification
import rolldecay.read_funky_ascii
import inspect
from rolldecayestimators.direct_estimator import DirectEstimator
from rolldecayestimators.direct_linear_estimator import DirectLinearEstimator
from rolldecayestimators.norwegian_estimator import NorwegianEstimator
from rolldecayestimators.transformers import CutTransformer, LowpassFilterDerivatorTransformer, ScaleFactorTransformer, OffsetTransformer
#from rolldecay.equations_lambdify import calculate_acceleration, calculate_velocity
from sklearn.pipeline import Pipeline
from sklearn.linear_model import LinearRegression
# NOTE(review): stray scratch calculation (~1.02) — purpose unclear; possibly
# a frequency/period estimate. Confirm intent or remove.
122/(19*2*3.14)
# Model-test files excluded from the analysis:
exclude_files = ['20084871055k.06.asc','20084871055k.03.asc'
]
acii_directory_path = os.path.join(rolldecay.data_path,'project1','Ascii files')
file_names = []
for file_name in os.listdir(acii_directory_path):
    file_names.append(file_name)
file_names = list(set(file_names) - set(exclude_files))
# NOTE(review): the cell below is an exact duplicate of the cell above
# (it rebuilds the same file list); it is harmless but can be removed.
exclude_files = ['20084871055k.06.asc','20084871055k.03.asc'
]
acii_directory_path = os.path.join(rolldecay.data_path,'project1','Ascii files')
file_names = []
for file_name in os.listdir(acii_directory_path):
    file_names.append(file_name)
file_names = list(set(file_names) - set(exclude_files))
def fit(pipeline):
    """Fit a fresh copy of *pipeline* on every roll-decay ascii file.

    Returns a dict mapping file name -> fitted pipeline. Tests whose roll
    amplitude never exceeds 1 degree in either direction are skipped.
    """
    pipelines = {}
    for file_name in file_names:
        ascii_file_path = os.path.join(acii_directory_path, file_name)
        df_raw = rolldecay.read_funky_ascii.read(ascii_path=ascii_file_path)[['phi']]
        # Exclude tests where roll is not big enough:
        if ((df_raw['phi'].max() < np.deg2rad(1)) |
                (df_raw['phi'].min() > -np.deg2rad(1))):
            continue
        # BUG FIX: the original rebound the parameter
        # (pipeline = copy.deepcopy(pipeline)), so from the second file on it
        # deep-copied the previously *fitted* pipeline instead of the pristine
        # template. Copy into a separate name so every file starts from the
        # same unfitted template.
        fitted_pipeline = copy.deepcopy(pipeline)
        fitted_pipeline.fit(X=df_raw)
        pipelines[file_name] = fitted_pipeline
    return pipelines
def predict(pipelines):
    """Collect the fitted parameters, score and mean linear damping of every
    pipeline's final estimator into one DataFrame (rows indexed by file name).
    """
    rows = []
    for file_name, pipeline in pipelines.items():
        estimator = pipeline[-1]
        s = pd.Series(estimator.parameters, name=file_name)
        s['score'] = estimator.score(X=estimator.X)
        s['mean_damping'] = estimator.calculate_average_linear_damping()
        rows.append(s)
    # DataFrame.append was removed in pandas 2.0; building the frame from the
    # collected rows in one go is both compatible and O(n) instead of O(n^2).
    return pd.DataFrame(rows) if rows else pd.DataFrame()
```
## Direct
```
# Pre-processing steps shared by all estimator pipelines:
lowpass_filter = LowpassFilterDerivatorTransformer(cutoff=0.4)
# assumes 29.565 is the model-to-full-scale factor for this series — TODO confirm
scaler = ScaleFactorTransformer(scale_factor=29.565)
# Keep only the decay between 15 and 1 degrees of roll amplitude.
cutter = CutTransformer(phi_max=np.deg2rad(15), phi_min=np.deg2rad(1))
offset_transformer = OffsetTransformer()
bounds = {
    'zeta':(0,np.inf), # Only positive damping
    'd':(0,np.inf), # Only positive damping
}
direct_estimator = DirectEstimator(bounds=bounds)
steps = [
    ('filter',lowpass_filter),
    ('scaler',scaler),
    ('cutter', cutter),
    ('offset_transformer',offset_transformer),
    ('direct_estimator', direct_estimator)]
pipeline_direct = Pipeline(steps) # define the pipeline object.
# Fit one pipeline per ascii file and collect the fitted parameters.
pipeline_directs = fit(pipeline=pipeline_direct)
pipeline = pipeline_directs['20084871051k.01.asc']
df_direct = predict(pipelines=pipeline_directs)
```
## Norwegian
```
# Same pre-processing chain, but with the Norwegian estimator at the end.
norwegian_estimator = NorwegianEstimator(bounds=bounds)
steps = [
    ('filter',lowpass_filter),
    ('scaler',scaler),
    ('cutter', cutter),
    ('offset_transformer',offset_transformer),
    ('norwegian_estimator', norwegian_estimator)]
pipeline_norwegian = Pipeline(steps) # define the pipeline object.
pipeline_norwegians = fit(pipeline=pipeline_norwegian)
df_norwegian = predict(pipelines=pipeline_norwegians)
```
## Linear method
```
# Linear damping model: same pre-processing chain as the quadratic
# estimators, but only a linear damping term is fitted.
direct_linear_estimator = DirectLinearEstimator()
steps = [
    ('filter',lowpass_filter),
    ('scaler',scaler),
    ('cutter', cutter),
    ('offset_transformer',offset_transformer),
    # Step renamed from the copy-pasted 'norwegian_estimator' to reflect the
    # estimator it actually holds (no code accesses pipeline steps by name).
    ('direct_linear_estimator', direct_linear_estimator)]
pipeline_direct_linear = Pipeline(steps) # define the pipeline object.
pipeline_direct_linears = fit(pipeline=pipeline_direct_linear)
df_direct_linear = predict(pipelines=pipeline_direct_linears)
# The linear model has no quadratic coefficient; set d=0 so the comparison
# tables and plots share the same columns.
df_direct_linear['d']=0
# Keep only tests where the direct (quadratic) model fitted well (R^2 > 0.90);
# apply the same mask to all three methods so the rows stay comparable.
mask = df_direct['score'] > 0.90
df_direct_good = df_direct.loc[mask].copy()
df_direct_linear_good = df_direct_linear.loc[mask].copy()
df_norwegian_good = df_norwegian.loc[mask].copy()
# Take the 10 lowest-scoring of the good fits for comparison (iloc[-10:] on a
# descending sort); the commented line would take the 10 best instead.
df1 = df_direct_good.sort_values(by='score', ascending=False).iloc[-10:]
#df1 = df_direct_good.sort_values(by='score', ascending=False).iloc[0:10]
index = df1.index.copy()
df1.reset_index(inplace=True)
df1['method'] = 'direct'
df2 = df_direct_linear_good.loc[index].reset_index()
df2['method'] = 'direct linear'
df3 = df_norwegian_good.loc[index].reset_index()
df3['method'] = 'norwegian'
# Long-format frame: one row per (file, method) combination.
df_comparison = pd.concat([df1,df2,df3], sort=False)
df_comparison.isnull().any()
# One grouped bar chart per fitted quantity, comparing the three methods
# file by file.
for column in ['zeta','d','omega0','mean_damping','score']:
    fig,ax = plt.subplots()
    df = df_comparison.pivot(index='method',columns='index', values=column).transpose()
    df.plot(kind='bar', ax=ax)
    ylim = ax.get_ylim()
    # Tighten the y-axis around the actual data range.
    ax.set_ylim(df.min().min(),df.max().max())
    ax.set_title(column)
# Pick one of the well-fitted tests to inspect.
file = df_direct_good.iloc[0].name
file
#file = df_direct_good.iloc[0].name
# Overlay the fits from all three estimators on one specific model test.
file = '20084871056k.08.asc'
pipelines = [pipeline_directs, pipeline_direct_linears, pipeline_norwegians]
fig,ax = plt.subplots()
for p in pipelines:
    pipeline = p[file]
    estimator = pipeline[-1]
    estimator.plot_fit(ax=ax, model_test=False)
    print('%s:%s' % (estimator.__class__.__name__,estimator.score(X=estimator.X)))
# Add the measured roll signal for reference.
estimator.X.plot(y='phi', label='Model test', ax=ax, style='k--');
ax.legend()
pipeline = pipeline_norwegians[file]
estimator = pipeline[-1]
estimator.plot_damping()
#file = df_direct_good.iloc[0].name
#file = '20084871056k.08.asc'
# Plot the fit residual (error) of each estimator for the selected test.
pipelines = [pipeline_directs, pipeline_direct_linears, pipeline_norwegians]
fig,ax = plt.subplots()
for p in pipelines:
    pipeline = p[file]
    estimator = pipeline[-1]
    estimator.plot_error(ax=ax)
    print('%s:%s' % (estimator.__class__.__name__,estimator.score(X=estimator.X)))
estimator.X.plot(y='phi')
# Summary statistics of the fitted parameters for the good fits.
df_direct_good.describe()
df_norwegian_good.describe()
```
## The linear model is performing as good as the two quadratic for the present data
Need to investigate more ships to see if this changes...
```
df_direct_linear_good.describe()
# Estimated omega0 versus roll amplitude for one test.
pipeline = pipeline_directs['20084871056k.14.asc']
estimator = pipeline[-1]
estimator.X_amplitudes.plot(x='phi',y='omega0',style='.')
```
| github_jupyter |
# Artificial Intelligence Nanodegree
## Recurrent Neural Network Projects
Welcome to the Recurrent Neural Network Project in the Artificial Intelligence Nanodegree! In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with **'Implementation'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully!
>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. In addition, Markdown cells can be edited by typically double-clicking the cell to enter edit mode.
### Implementation TODOs in this notebook
This notebook contains two problems, cut into a variety of TODOs. Make sure to complete each section containing a TODO marker throughout the notebook. For convenience we provide links to each of these sections below.
[TODO #1: Implement a function to window time series](#TODO_1)
[TODO #2: Create a simple RNN model using keras to perform regression](#TODO_2)
[TODO #3: Finish cleaning a large text corpus](#TODO_3)
[TODO #4: Implement a function to window a large text corpus](#TODO_4)
[TODO #5: Create a simple RNN model using keras to perform multiclass classification](#TODO_5)
[TODO #6: Generate text using a fully trained RNN model and a variety of input sequences](#TODO_6)
# Problem 1: Perform time series prediction
In this project you will perform time series prediction using a Recurrent Neural Network regressor. In particular you will re-create the figure shown in the notes - where the stock price of Apple was forecasted (or predicted) 7 days in advance. In completing this exercise you will learn how to construct RNNs using Keras, which will also aid in completing the second project in this notebook.
The particular network architecture we will employ for our RNN is known as [Long Term Short Memory (LSTM)](https://en.wikipedia.org/wiki/Long_short-term_memory), which helps significantly avoid technical problems with optimization of RNNs.
## 1.1 Getting started
First we must load in our time series - a history of around 140 days of Apple's stock price. Then we need to perform a number of pre-processing steps to prepare it for use with an RNN model. First off, it is good practice to normalize time series - by normalizing its range. This helps us avoid serious numerical issues associated how common activation functions (like tanh) transform very large (positive or negative) numbers, as well as helping us to avoid related issues when computing derivatives.
Here we normalize the series to lie in the range [0,1] [using this scikit function](http://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.MinMaxScaler.html), but it is also commonplace to normalize by a series standard deviation.
```
### Load in necessary libraries for data input and normalization
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
%load_ext autoreload
%autoreload 2
from my_answers import *
# NOTE(review): the three lines below duplicate the autoreload/import block
# above and can be removed.
%load_ext autoreload
%autoreload 2
from my_answers import *
### load in and normalize the dataset
dataset = np.loadtxt('datasets/normalized_apple_prices.csv')
```
Lets take a quick look at the (normalized) time series we'll be performing predictions on.
```
# lets take a look at our time series
plt.plot(dataset)
plt.xlabel('time period')
plt.ylabel('normalized series value')
```
## 1.2 Cutting our time series into sequences
Remember, our time series is a sequence of numbers that we can represent in general mathematically as
$$s_{0},s_{1},s_{2},...,s_{P}$$
where $s_{p}$ is the numerical value of the time series at time period $p$ and where $P$ is the total length of the series. In order to apply our RNN we treat the time series prediction problem as a regression problem, and so need to use a sliding window to construct a set of associated input/output pairs to regress on. This process is animated in the gif below.
<img src="images/timeseries_windowing_training.gif" width=600 height=600/>
For example - using a window of size T = 5 (as illustrated in the gif above) we produce a set of input/output pairs like the one shown in the table below
$$\begin{array}{c|c}
\text{Input} & \text{Output}\\
\hline \color{CornflowerBlue} {\langle s_{1},s_{2},s_{3},s_{4},s_{5}\rangle} & \color{Goldenrod}{ s_{6}} \\
\ \color{CornflowerBlue} {\langle s_{2},s_{3},s_{4},s_{5},s_{6} \rangle } & \color{Goldenrod} {s_{7} } \\
\color{CornflowerBlue} {\vdots} & \color{Goldenrod} {\vdots}\\
\color{CornflowerBlue} { \langle s_{P-5},s_{P-4},s_{P-3},s_{P-2},s_{P-1} \rangle } & \color{Goldenrod} {s_{P}}
\end{array}$$
Notice here that each input is a sequence (or vector) of length 5 (and in general has length equal to the window size T) while each corresponding output is a scalar value. Notice also how given a time series of length P and window size T = 5 as shown above, we created P - 5 input/output pairs. More generally, for a window size T we create P - T such pairs.
Now it's time for you to window the input time series as described above!
<a id='TODO_1'></a>
**TODO:** Implement the function called **window_transform_series** in my_answers.py so that it runs a sliding window along the input series and creates associated input/output pairs. Note that this function should input a) the series and b) the window length, and return the input/output subsequences. Make sure to format returned input/output as generally shown in table above (where window_size = 5), and make sure your returned input is a numpy array.
-----
You can test your function on the list of odd numbers given below
```
odd_nums = np.array([1,3,5,7,9,11,13])
```
Here is a hard-coded solution for odd_nums. You can compare its results with what you get from your **window_transform_series** implementation.
```
# run a window of size 2 over the odd number sequence and display the results
window_size = 2
X = []
X.append(odd_nums[0:2])
X.append(odd_nums[1:3])
X.append(odd_nums[2:4])
X.append(odd_nums[3:5])
X.append(odd_nums[4:6])
y = odd_nums[2:]
X = np.asarray(X)
y = np.asarray(y)
y = np.reshape(y, (len(y),1)) #optional
assert(type(X).__name__ == 'ndarray')
assert(type(y).__name__ == 'ndarray')
assert(X.shape == (5,2))
assert(y.shape in [(5,1), (5,)])
# print out input/output pairs --> here input = X, corresponding output = y
print ('--- the input X will look like ----')
print (X)
print ('--- the associated output y will look like ----')
print (y)
```
Again - you can check that your completed **window_transform_series** function works correctly by trying it on the odd_nums sequence - you should get the above output.
```
### TODO: implement the function window_transform_series in the file my_answers.py
from my_answers import window_transform_series
```
With this function in place apply it to the series in the Python cell below. We use a window_size = 7 for these experiments.
```
# window the data using your windowing function
window_size = 7
X,y = window_transform_series(series = dataset,window_size = window_size)
print(X[:2])
print(y[:2])
```
## 1.3 Splitting into training and testing sets
In order to perform proper testing on our dataset we will lop off the last 1/3 of it for validation (or testing). This is so that once we train our model we have something to test it on (like any regression problem!). This splitting into training/testing sets is done in the cell below.
Note how here we are **not** splitting the dataset *randomly* as one typically would do when validating a regression model. This is because our input/output pairs *are related temporally*. We don't want to validate our model by training on a random subset of the series and then testing on another random subset, as this simulates the scenario that we receive new points *within the timeframe of our training set*.
We want to train on one solid chunk of the series (in our case, the first full 2/3 of it), and validate on a later chunk (the last 1/3) as this simulates how we would predict *future* values of a time series.
```
# split our dataset into training / testing sets
train_test_split = int(np.ceil(2*len(y)/float(3))) # set the split point
# partition the training set
X_train = X[:train_test_split,:]
y_train = y[:train_test_split]
# keep the last chunk for testing
X_test = X[train_test_split:,:]
y_test = y[train_test_split:]
# NOTE: to use keras's RNN LSTM module our input must be reshaped to [samples, window size, stepsize]
X_train = np.asarray(np.reshape(X_train, (X_train.shape[0], window_size, 1)))
X_test = np.asarray(np.reshape(X_test, (X_test.shape[0], window_size, 1)))
```
<a id='TODO_2'></a>
## 1.4 Build and run an RNN regression model
Having created input/output pairs out of our time series and cut this into training/testing sets, we can now begin setting up our RNN. We use Keras to quickly build a two hidden layer RNN of the following specifications
- layer 1 uses an LSTM module with 5 hidden units (note here the input_shape = (window_size,1))
- layer 2 uses a fully connected module with one unit
- the 'mean_squared_error' loss should be used (remember: we are performing regression here)
This can be constructed using just a few lines - see e.g., the [general Keras documentation](https://keras.io/getting-started/sequential-model-guide/) and the [LSTM documentation in particular](https://keras.io/layers/recurrent/) for examples of how to quickly use Keras to build neural network models. Make sure you are initializing your optimizer given the [keras-recommended approach for RNNs](https://keras.io/optimizers/)
(given in the cell below). (remember to copy your completed function into the script *my_answers.py* function titled *build_part1_RNN* before submitting your project)
```
### TODO: create required RNN model
# import keras network libraries
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
import keras
# given - fix random seed - so we can all reproduce the same results on our default time series
np.random.seed(0)
# TODO: implement build_part1_RNN in my_answers.py
from my_answers import build_part1_RNN
model = build_part1_RNN(window_size)
# build model using keras documentation recommended optimizer initialization
optimizer = keras.optimizers.RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0)
# compile the model
model.compile(loss='mean_squared_error', optimizer=optimizer)
```
With your model built you can now fit the model by activating the cell below! Note: the number of epochs (np_epochs) and batch_size are preset (so we can all produce the same results). You can choose to toggle the verbose parameter - which gives you regular updates on the progress of the algorithm - on and off by setting it to 1 or 0 respectively.
```
# run your model!
model.fit(X_train, y_train, epochs=1000, batch_size=50, verbose=0)
```
## 1.5 Checking model performance
With your model fit we can now make predictions on both our training and testing sets.
```
# generate predictions for training
train_predict = model.predict(X_train)
test_predict = model.predict(X_test)
```
In the next cell we compute training and testing errors using our trained model - you should be able to achieve at least
*training_error* < 0.02
and
*testing_error* < 0.02
with your fully trained model.
If either or both of your errors are larger than 0.02, re-train your model - increasing the number of epochs you take (a maximum of around 1,000 should do the job) and/or adjusting your batch_size.
```
# print out training and testing errors
training_error = model.evaluate(X_train, y_train, verbose=0)
print('training error = ' + str(training_error))
testing_error = model.evaluate(X_test, y_test, verbose=0)
print('testing error = ' + str(testing_error))
```
Activating the next cell plots the original data, as well as both predictions on the training and testing sets.
```
### Plot everything - the original series as well as predictions on training and testing sets
import matplotlib.pyplot as plt
%matplotlib inline
# plot original series
plt.plot(dataset,color = 'k')
# plot training set prediction
split_pt = train_test_split + window_size
plt.plot(np.arange(window_size,split_pt,1),train_predict,color = 'b')
# plot testing set prediction
plt.plot(np.arange(split_pt,split_pt + len(test_predict),1),test_predict,color = 'r')
# pretty up graph
plt.xlabel('day')
plt.ylabel('(normalized) price of Apple stock')
plt.legend(['original series','training fit','testing fit'],loc='center left', bbox_to_anchor=(1, 0.5))
plt.show()
```
**Note:** you can try out any time series for this exercise! If you would like to try another see e.g., [this site containing thousands of time series](https://datamarket.com/data/list/?q=provider%3Atsdl) and pick another one!
# Problem 2: Create a sequence generator
## 2.1 Getting started
In this project you will implement a popular Recurrent Neural Network (RNN) architecture to create an English language sequence generator capable of building semi-coherent English sentences from scratch by building them up character-by-character. This will require a substantial amount of parameter tuning on a large training corpus (at least 100,000 characters long). In particular for this project we will be using a complete version of Sir Arthur Conan Doyle's classic book The Adventures of Sherlock Holmes.
How can we train a machine learning model to generate text automatically, character-by-character? *By showing the model many training examples so it can learn a pattern between input and output.* With this type of text generation each input is a string of valid characters like this one
*dogs are grea*
while the corresponding output is the next character in the sentence - which here is 't' (since the complete sentence is 'dogs are great'). We need to show a model many such examples in order for it to make reasonable predictions.
**Fun note:** For those interested in how text generation is being used check out some of the following fun resources:
- [Generate wacky sentences](http://www.cs.toronto.edu/~ilya/rnn.html) with this academic RNN text generator
- Various twitter bots that tweet automatically generated text like [this one](http://tweet-generator-alex.herokuapp.com/).
- the [NanoGenMo](https://github.com/NaNoGenMo/2016) annual contest to automatically produce a 50,000+ word novel
- [Robot Shakespeare](https://github.com/genekogan/RobotShakespeare) a text generator that automatically produces Shakespear-esk sentences
## 2.2 Preprocessing a text dataset
Our first task is to get a large text corpus for use in training, and on it we perform a several light pre-processing tasks. The default corpus we will use is the classic book Sherlock Holmes, but you can use a variety of others as well - so long as they are fairly large (around 100,000 characters or more).
```
# read in the text, transforming everything to lower case
text = open('datasets/holmes.txt').read().lower()
print('our original text has ' + str(len(text)) + ' characters')
```
Next, lets examine a bit of the raw text. Because we are interested in creating sentences of English words automatically by building up each word character-by-character, we only want to train on valid English words. In other words - we need to remove all of the other characters that are not part of English words.
```
### print out the first 1000 characters of the raw text to get a sense of what we need to throw out
text[:2000]
```
Wow - there's a lot of junk here (i.e., weird uncommon character combinations - as this first character chunk contains the title and author page, as well as table of contents)! To keep things simple, we want to train our RNN on a large chunk of more typical English sentences - we don't want it to start thinking non-english words or strange characters are valid! - so lets clean up the data a bit.
First, since the dataset is so large and the first few hundred characters contain a lot of junk, lets cut it out. Lets also find-and-replace those newline tags with empty spaces.
```
### find and replace '\n' and '\r' symbols - replacing them
text = text[1302:]
text = text.replace('\n',' ') # replacing '\n' with '' simply removes the sequence
text = text.replace('\r',' ')
```
Lets see how the first 1000 characters of our text looks now!
```
### print out the first 1000 characters of the raw text to get a sense of what we need to throw out
text[:1000]
```
<a id='TODO_3'></a>
#### TODO: finish cleaning the text
Lets make sure we haven't left any other atypical characters (commas, periods, etc., are ok) lurking around in the depths of the text. You can do this by enumerating all the text's unique characters, examining them, and then replacing any unwanted characters with empty spaces! Once we find all of the text's unique characters, we can remove all of the atypical ones in the next cell. Note: don't remove the punctuation marks given in my_answers.py.
```
### TODO: implement cleaned_text in my_answers.py
from my_answers import cleaned_text
text = cleaned_text(text)
# shorten any extra dead space created above
text = text.replace(' ',' ')
```
With your chosen characters removed print out the first few hundred lines again just to double check that everything looks good.
```
### print out the first 2000 characters of the raw text to get a sense of what we need to throw out
text[:2000]
```
Now that we have thrown out a good number of non-English characters/character sequences lets print out some statistics about the dataset - including number of total characters and number of unique characters.
```
# count the number of unique characters in the text
chars = sorted(list(set(text)))
# print some of the text, as well as statistics
print ("this corpus has " + str(len(text)) + " total number of characters")
print ("this corpus has " + str(len(chars)) + " unique characters")
```
## 2.3 Cutting data into input/output pairs
Now that we have our text all cleaned up, how can we use it to train a model to generate sentences automatically? First we need to train a machine learning model - and in order to do that we need a set of input/output pairs for a model to train on. How can we create a set of input/output pairs from our text to train on?
Remember in part 1 of this notebook how we used a sliding window to extract input/output pairs from a time series? We do the same thing here! We slide a window of length $T$ along our giant text corpus - everything in the window becomes one input while the character following becomes its corresponding output. This process of extracting input/output pairs is illustrated in the gif below on a small example text using a window size of T = 5.
<img src="images/text_windowing_training.gif" width=400 height=400/>
Notice one aspect of the sliding window in this gif that does not mirror the analogous gif for time series shown in part 1 of the notebook - we do not need to slide the window along one character at a time but can move by a fixed step size $M$ greater than 1 (in the gif indeed $M = 1$). This is done with large input texts (like ours which has over 500,000 characters!) when sliding the window along one character at a time we would create far too many input/output pairs to be able to reasonably compute with.
More formally lets denote our text corpus - which is one long string of characters - as follows
$$s_{0},s_{1},s_{2},...,s_{P}$$
where $P$ is the length of the text (again for our text $P \approx 500,000!$). Sliding a window of size T = 5 with a step length of M = 1 (these are the parameters shown in the gif above) over this sequence produces the following list of input/output pairs
$$\begin{array}{c|c}
\text{Input} & \text{Output}\\
\hline \color{CornflowerBlue} {\langle s_{1},s_{2},s_{3},s_{4},s_{5}\rangle} & \color{Goldenrod}{ s_{6}} \\
\ \color{CornflowerBlue} {\langle s_{2},s_{3},s_{4},s_{5},s_{6} \rangle } & \color{Goldenrod} {s_{7} } \\
\color{CornflowerBlue} {\vdots} & \color{Goldenrod} {\vdots}\\
\color{CornflowerBlue} { \langle s_{P-5},s_{P-4},s_{P-3},s_{P-2},s_{P-1} \rangle } & \color{Goldenrod} {s_{P}}
\end{array}$$
Notice here that each input is a sequence (or vector) of 5 characters (and in general has length equal to the window size T) while each corresponding output is a single character. We created around P total number of input/output pairs (for general step size M we create around ceil(P/M) pairs).
<a id='TODO_4'></a>
Now it's time for you to window the input text as described above!
**TODO:** Create a function that runs a sliding window along the input text and creates associated input/output pairs. A skeleton function has been provided for you. Note that this function should input a) the text b) the window size and c) the step size, and return the input/output sequences. Note: the return items should be *lists* - not numpy arrays.
(remember to copy your completed function into the script *my_answers.py* function titled *window_transform_text* before submitting your project)
```
### TODO: implement window_transform_text in my_answers.py
# NOTE: this TODO concerns the *text* windowing helper used below
# (the original cell mistakenly imported window_transform_series,
# the part-1 time-series helper, which is never used in part 2)
from my_answers import window_transform_text
```
With our function complete we can now use it to produce input/output pairs! We employ the function in the next cell, where the window_size = 100 and step_size = 5.
```
# run your text window-ing function
window_size = 100
step_size = 5
inputs, outputs = window_transform_text(text,window_size,step_size)
```
Lets print out a few input/output pairs to verify that we have made the right sort of stuff!
```
# print out a few of the input/output pairs to verify that we've made the right kind of stuff to learn from
print('input = ' + inputs[2])
print('output = ' + outputs[2])
print('--------------')
print('input = ' + inputs[100])
print('output = ' + outputs[100])
```
Looks good!
## 2.4 Wait, what kind of problem is text generation again?
In part 1 of this notebook we used the same pre-processing technique - the sliding window - to produce a set of training input/output pairs to tackle the problem of time series prediction *by treating the problem as one of regression*. So what sort of problem do we have here now, with text generation? Well, the time series prediction was a regression problem because the output (one value of the time series) was a continuous value. Here - for character-by-character text generation - each output is a *single character*. This isn't a continuous value - but a distinct class - therefore **character-by-character text generation is a classification problem**.
How many classes are there in the data? Well, the number of classes is equal to the number of unique characters we have to predict! How many of those were there in our dataset again? Lets print out the value again.
```
# print out the number of unique characters in the dataset
chars = sorted(list(set(text)))
print ("this corpus has " + str(len(chars)) + " unique characters")
print ('and these characters are ')
print (chars)
```
Rockin' - so we have a multiclass classification problem on our hands!
## 2.5 One-hot encoding characters
The last issue we have to deal with is representing our text data as numerical data so that we can use it as an input to a neural network. One of the conceptually simplest ways of doing this is via a 'one-hot encoding' scheme. Here's how it works.
We transform each character in our inputs/outputs into a vector with length equal to the number of unique characters in our text. This vector is all zeros except one location where we place a 1 - and this location is unique to each character type. e.g., we transform 'a', 'b', and 'c' as follows
$$a\longleftarrow\left[\begin{array}{c}
1\\
0\\
0\\
\vdots\\
0\\
0
\end{array}\right]\,\,\,\,\,\,\,b\longleftarrow\left[\begin{array}{c}
0\\
1\\
0\\
\vdots\\
0\\
0
\end{array}\right]\,\,\,\,\,c\longleftarrow\left[\begin{array}{c}
0\\
0\\
1\\
\vdots\\
0\\
0
\end{array}\right]\cdots$$
where each vector has 32 entries (or in general: number of entries = number of unique characters in text).
The first practical step towards doing this one-hot encoding is to form a dictionary mapping each unique character to a unique integer, and one dictionary to do the reverse mapping. We can then use these dictionaries to quickly make our one-hot encodings, as well as re-translate (from integers to characters) the results of our trained RNN classification model.
```
# this dictionary is a function mapping each unique character to a unique integer
chars_to_indices = dict((c, i) for i, c in enumerate(chars)) # map each unique character to unique integer
# this dictionary is a function mapping each unique integer back to a unique character
indices_to_chars = dict((i, c) for i, c in enumerate(chars)) # map each unique integer back to unique character
```
Now we can transform our input/output pairs - consisting of characters - to equivalent input/output pairs made up of one-hot encoded vectors. In the next cell we provide a function for doing just this: it takes in the raw character input/outputs and returns their numerical versions. In particular the numerical input is given as $\bf{X}$, and numerical output is given as the $\bf{y}$
```
# transform character-based input/output into equivalent numerical versions
def encode_io_pairs(text, window_size, step_size):
    """One-hot encode sliding-window input/output pairs drawn from `text`.

    Parameters
    ----------
    text : str
        Cleaned corpus to window.
    window_size : int
        Number of characters per input sequence.
    step_size : int
        Stride between consecutive windows.

    Returns
    -------
    X : np.ndarray of bool, shape (num_pairs, window_size, num_chars)
        One-hot encoded input sequences.
    y : np.ndarray of bool, shape (num_pairs, num_chars)
        One-hot encoded output characters.
    """
    # enumerate unique characters and build the char -> index map locally,
    # so the encoding stays self-consistent even if the notebook-level
    # `chars_to_indices` was built from a different text
    chars = sorted(set(text))
    num_chars = len(chars)
    char_to_idx = {c: i for i, c in enumerate(chars)}

    # cut up text into character input/output pairs
    inputs, outputs = window_transform_text(text, window_size, step_size)

    # empty vessels for one-hot encoded input/output
    # (use builtin `bool`: the `np.bool` alias was removed in NumPy 1.24)
    X = np.zeros((len(inputs), window_size, num_chars), dtype=bool)
    y = np.zeros((len(inputs), num_chars), dtype=bool)

    # one-hot encode each input window and its single output character
    for i, sentence in enumerate(inputs):
        for t, char in enumerate(sentence):
            X[i, t, char_to_idx[char]] = 1
        y[i, char_to_idx[outputs[i]]] = 1
    return X, y
```
Now run the one-hot encoding function by activating the cell below and transform our input/output pairs!
```
# use your function
window_size = 100
step_size = 5
X,y = encode_io_pairs(text,window_size,step_size)
```
<a id='TODO_5'></a>
## 2.6 Setting up our RNN
With our dataset loaded and the input/output pairs extracted / transformed we can now begin setting up our RNN for training. Again we will use Keras to quickly build a single hidden layer RNN - where our hidden layer consists of LSTM modules.
Time to get to work: build a 3 layer RNN model of the following specification
- layer 1 should be an LSTM module with 200 hidden units --> note this should have input_shape = (window_size,len(chars)) where len(chars) = number of unique characters in your cleaned text
- layer 2 should be a linear module, fully connected, with len(chars) hidden units --> where len(chars) = number of unique characters in your cleaned text
- layer 3 should be a softmax activation ( since we are solving a *multiclass classification*)
- Use the **categorical_crossentropy** loss
This network can be constructed using just a few lines - as with the RNN network you made in part 1 of this notebook. See e.g., the [general Keras documentation](https://keras.io/getting-started/sequential-model-guide/) and the [LSTM documentation in particular](https://keras.io/layers/recurrent/) for examples of how to quickly use Keras to build neural network models.
```
### necessary functions from the keras library
from keras.models import Sequential
from keras.layers import Dense, Activation, LSTM
from keras.optimizers import RMSprop
from keras.utils.data_utils import get_file
import keras
import random
# TODO implement build_part2_RNN in my_answers.py
from my_answers import build_part2_RNN
model = build_part2_RNN(window_size, len(chars))
# initialize optimizer
optimizer = keras.optimizers.RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0)
# compile model --> make sure initialized optimizer and callbacks - as defined above - are used
model.compile(loss='categorical_crossentropy', optimizer=optimizer)
```
## 2.7 Training our RNN model for text generation
With our RNN setup we can now train it! Lets begin by trying it out on a small subset of the larger version. In the next cell we take the first 10,000 input/output pairs from our training database to learn on.
```
# a small subset of our input/output pairs
Xsmall = X[:10000,:,:]
ysmall = y[:10000,:]
```
Now lets fit our model!
```
# train the model
model.fit(Xsmall, ysmall, batch_size=500, epochs=40,verbose = 1)
# save weights
model.save_weights('model_weights/best_RNN_small_textdata_weights.hdf5')
```
How do we make a given number of predictions (characters) based on this fitted model?
First we predict the next character after following any chunk of characters in the text of length equal to our chosen window size. Then we remove the first character in our input sequence and tack our prediction onto the end. This gives us a slightly changed sequence of inputs that still has length equal to the size of our window. We then feed this updated input sequence into the model to predict another character. Together then we have two predicted characters following our original input sequence. Repeating this process N times gives us N predicted characters.
In the next Python cell we provide you with a completed function that does just this - it makes predictions when given a) a trained RNN model, b) a subset of (window_size) characters from the text, and c) a number of characters to predict (to follow our input subset).
```
# function that uses trained model to predict a desired number of future characters
def predict_next_chars(model, input_chars, num_to_predict):
    """Generate `num_to_predict` characters by repeatedly feeding the model
    its own predictions, starting from the seed string `input_chars`.

    Relies on the notebook-level globals `window_size`, `chars`,
    `chars_to_indices`, and `indices_to_chars`.
    """
    predicted_chars = ''
    for _ in range(num_to_predict):
        # one-hot encode the current window of input characters
        x_test = np.zeros((1, window_size, len(chars)))
        for position, character in enumerate(input_chars):
            x_test[0, position, chars_to_indices[character]] = 1.

        # predict, then decode the most probable class back into a character
        probabilities = model.predict(x_test, verbose=0)[0]
        next_char = indices_to_chars[np.argmax(probabilities)]

        # append the prediction and slide the window forward one character
        predicted_chars += next_char
        input_chars = (input_chars + next_char)[1:]
    return predicted_chars
```
<a id='TODO_6'></a>
With your trained model try a few subsets of the complete text as input - note the length of each must be exactly equal to the window size. For each subset use the function above to predict the next 100 characters that follow each input.
```
# TODO: choose an input sequence and use the prediction function in the previous Python cell to predict 100 characters following it
# get an appropriately sized chunk of characters from the text
start_inds = [150]
# load in weights
model.load_weights('model_weights/best_RNN_small_textdata_weights.hdf5')
for s in start_inds:
start_index = s
input_chars = text[start_index: start_index + window_size]
# use the prediction function
predict_input = predict_next_chars(model,input_chars,num_to_predict = 100)
# print out input characters
print('------------------')
input_line = 'input chars = ' + '\n' + input_chars + '"' + '\n'
print(input_line)
# print out predicted characters
line = 'predicted chars = ' + '\n' + predict_input + '"' + '\n'
print(line)
```
This looks ok, but not great. Now lets try the same experiment with a larger chunk of the data - with the first 100,000 input/output pairs.
Tuning RNNs for a typical character dataset like the one we will use here is a computationally intensive endeavour and thus timely on a typical CPU. Using a reasonably sized cloud-based GPU can speed up training by a factor of 10. Also because of the long training time it is highly recommended that you carefully write the output of each step of your process to file. This is so that all of your results are saved even if you close the web browser you're working out of, as the processes will continue processing in the background but variables/output in the notebook system will not update when you open it again.
In the next cell we show you how to create a text file in Python and record data to it. This sort of setup can be used to record your final predictions.
```
### A simple way to write output to file
f = open('my_test_output.txt', 'w') # create an output file to write too
f.write('this is only a test ' + '\n') # print some output text
x = 2
f.write('the value of x is ' + str(x) + '\n') # record a variable value
f.close()
# print out the contents of my_test_output.txt
f = open('my_test_output.txt', 'r') # create an output file to write too
f.read()
```
With this recording devices we can now more safely perform experiments on larger portions of the text. In the next cell we will use the first 100,000 input/output pairs to train our RNN model.
First we fit our model to the dataset, then generate text using the trained model in precisely the same generation method applied before on the small dataset.
**Note:** your generated words should be - by and large - more realistic than with the small dataset, but you won't be able to generate perfect English sentences even with this amount of data. A rule of thumb: your model is working well if you generate sentences that largely contain real English words.
```
# a small subset of our input/output pairs
Xlarge = X[:100000,:,:]
ylarge = y[:100000,:]
# TODO: fit to our larger dataset
model.fit(Xlarge, ylarge, batch_size=500, epochs=30, verbose=1)
# save weights
model.save_weights('model_weights/best_RNN_large_textdata_weights.hdf5')
# TODO: choose an input sequence and use the prediction function in the previous Python cell to predict 100 characters following it
# get an appropriately sized chunk of characters from the text
start_inds = [150, 200, 300, 15000]
# save output
f = open('text_gen_output/RNN_large_textdata_output.txt', 'w') # create an output file to write too
# load weights
model.load_weights('model_weights/best_RNN_large_textdata_weights.hdf5')
for s in start_inds:
start_index = s
input_chars = text[start_index: start_index + window_size]
# use the prediction function
predict_input = predict_next_chars(model,input_chars,num_to_predict = 100)
# print out input characters
line = '-------------------' + '\n'
print(line)
f.write(line)
input_line = 'input chars = ' + '\n' + input_chars + '"' + '\n'
print(input_line)
f.write(input_line)
# print out predicted characters
predict_line = 'predicted chars = ' + '\n' + predict_input + '"' + '\n'
print(predict_line)
f.write(predict_line)
f.close()
```
| github_jupyter |
# VacationPy
----
#### Note
* Keep an eye on your API usage. Use https://developers.google.com/maps/reporting/gmp-reporting as reference for how to monitor your usage and billing.
* Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.
```
# Dependencies and Setup
import matplotlib.pyplot as plt
import pandas as pd
import json
import numpy as np
import requests
import gmaps
import os
# Import API key
from api_keys import (weather_api_key, g_key)
```
### Store Part I results into DataFrame
* Load the csv exported in Part I to a DataFrame
```
# Import the weather data for the cities generated in the WeatherPy homework.
# Load the csv exported in Part I to a DataFrame.
weather_cities_data = pd.read_csv("../WeatherPy/weather_data.csv")
# Display the loaded DataFrame (notebook cell output).
weather_cities_data
```
### Humidity Heatmap
* Configure gmaps.
* Use the Lat and Lng as locations and Humidity as the weight.
* Add Heatmap layer to map.
```
# Configure gmaps with the Google API key.
gmaps.configure(api_key = g_key)
# Use latitude/longitude as locations and humidity as the heatmap weight.
locations = weather_cities_data[["Latitude", "Longitude"]]
weight = weather_cities_data["Humidity"]
# Create the heatmap layer.
fig = gmaps.figure()
heat_layer = gmaps.heatmap_layer(locations, weights = weight,
                                 dissipating=False, max_intensity=100,
                                 point_radius = 1)
# Add the layer and display the figure.
fig.add_layer(heat_layer)
fig
```
### Create new DataFrame fitting weather criteria
* Narrow down the cities to fit weather conditions.
* Drop any rows with null values.
```
# Create new DataFrame fitting weather criteria:
#   wind speed <= 10, zero cloudiness, max temperature between 70 and 80 F.
# Drop any rows with null values. (The previous version called dropna() a
# second time with inplace=True, which was redundant.)
new_city_df = weather_cities_data.loc[
    (weather_cities_data["Wind Speed"] <= 10)
    & (weather_cities_data["Clouds"] == 0)
    & (weather_cities_data["Max Temperature"] >= 70)
    & (weather_cities_data["Max Temperature"] <= 80)].dropna()
new_city_df
```
### Hotel Map
* Store into variable named `hotel_df`.
* Add a "Hotel Name" column to the DataFrame.
* Set parameters to search for hotels with 5000 meters.
* Hit the Google Places API for each city's coordinates.
* Store the first Hotel result into the DataFrame.
* Plot markers on top of the heatmap.
```
# Create hotel_df with the columns needed for the hotel search.
hotel_df = new_city_df.loc[:, ["City", "Country", "Latitude", "Longitude"]]
# Add an (initially empty) "Hotel Name" column.
hotel_df["Hotel Name"] = ""
hotel_df
# Google Places "Nearby Search" endpoint.
base_url = "https://maps.googleapis.com/maps/api/place/nearbysearch/json"
# Set up a parameters dictionary.
# BUG FIX: the Places API expects lowercase parameter names ("keyword",
# "key", "location") — the capitalized keys used before were silently
# ignored, so no API key or location was ever sent. "lodging" is the
# supported place type for hotels.
params = {"type": "lodging",
          "keyword": "hotel",
          "radius": 5000,
          "key": g_key
          }
# Loop through hotel_df and run a lat/lng search for each city.
for index, row in hotel_df.iterrows():
    # Get the city name, latitude, and longitude from the DataFrame row.
    latitude = row["Latitude"]
    longitude = row["Longitude"]
    city_name = row["City"]
    # Search around this city's coordinates.
    params["location"] = f"{latitude},{longitude}"
    print(f"Retrieving Results for Index {index}: {city_name}.")
    # Make the API call.
    response = requests.get(base_url, params=params).json()
    # Use .get() so a response without a "results" key doesn't raise outside
    # the try-block below.
    results = response.get("results", [])
    try:
        print(f"Closest hotel in {city_name} is {results[0]['name']}.")
        hotel_df.loc[index, "Hotel Name"] = results[0]['name']
    except (KeyError, IndexError):
        print("Missing field/result ..... skipping.")
    print("-----------")
print("--------End of Search ----------")
hotel_df
# NOTE: Do not change any of the code in this cell
# Using the template add the hotel marks to the heatmap
info_box_template = """
<dl>
<dt>Name</dt><dd>{Hotel Name}</dd>
<dt>City</dt><dd>{City}</dd>
<dt>Country</dt><dd>{Country}</dd>
</dl>
"""
# Store the DataFrame Row
# NOTE: be sure to update with your DataFrame name
hotel_info = [info_box_template.format(**row) for index, row in hotel_df.iterrows()]
locations = hotel_df[["Latitude", "Longitude"]]
# Add marker layer ontop of heat map
markers = gmaps.marker_layer(locations, info_box_content = hotel_info)
# Add the layer to the map
fig.add_layer(markers)
# Display figure
fig
```
| github_jupyter |
# Simple Quantum Implementation using Qiskit Aqua for Boolean satisfiability problems
This Jupyter notebook demonstrates how easy it is to use quantum algorithms from [Qiskit Aqua](https://qiskit.org/aqua) to solve Boolean satisfiability problems [(SAT)](https://en.wikipedia.org/wiki/Boolean_satisfiability_problem).
It is based on the Qiskit tutorial [Using Grover search for 3-SAT problems](https://github.com/Qiskit/qiskit-tutorials/blob/master/community/aqua/optimization/grover.ipynb) by [Jay Gambetta](https://github.com/jaygambetta) and [Richard Chen](https://github.com/chunfuchen) and a hands-on workshop by David Mesterhazy.
Implemented by [Jan-R. Lahmann](http://twitter.com/JanLahmann) using Qiskit, binder and RISE.
## Boolean Satisfiability problems (SAT)
The Boolean satisfiability problem [(SAT)](https://en.wikipedia.org/wiki/Boolean_satisfiability_problem) considers a Boolean expression with N boolean variables involving negation (NOT, $\neg$), conjunction (AND, $\wedge$) and disjunction (OR, $\vee$), as in the following (simple) example:
$$ f(x_1, x_2) = (x_1 \vee x_2) \wedge (x_1 \vee \neg x_2) . $$
The problem is to determine whether there is any assignment of values (TRUE, FALSE) to the Boolean variables which makes the formula true.
It's something like trying to flip a bunch of switches to find the setting that makes a light bulb turn on.
SAT is of central importance in many areas of computer science, including complexity theory, algorithmics, cryptography, artificial intelligence, circuit design, and automatic theorem proving.
SAT was the first problem proven to be NP-complete.
This means that all problems in the [complexity class NP](https://en.wikipedia.org/wiki/NP_(complexity)) are at most as difficult to solve as SAT.
There is no known classical algorithm that efficiently solves each SAT problem, and it is generally believed that no such algorithm exists.
Whether Boolean satisfiability problems have a classical algorithm that is polynomial in time is equivalent to the [P vs. NP problem](https://en.wikipedia.org/wiki/P_versus_NP_problem).
While [Grover's quantum search algorithm](https://en.wikipedia.org/wiki/Grover's_algorithm) does not provide exponential speed-up to this problem, it may nevertheless provide some speed-up in contrast to classical black-box search strategies.
### Basic definitions and terminology
A *literal* is either a variable, or the negation of a variable.
A *clause* is a disjunction (OR, $\vee$) of literals, or a single literal.
A formula is in *conjunctive normal form* [(CNF)](https://en.wikipedia.org/wiki/Conjunctive_normal_form) if it is a conjunction (AND, $\wedge$) of clauses, or a single clause.
A problem in conjunctive normal form is called *3-SAT* if each clause is limited to at most three literals.
3-SAT is also NP-complete.
Example for 3-SAT: $ (x_1 ∨ ¬x_2) ∧ (¬x_1 ∨ x_2 ∨ x_3) ∧ ¬x_1 $.
## Solving 3-SAT using Qiskit Aqua
We will show how to solve a 3-SAT problem using quantum algorithms from [Qiskit Aqua](https://qiskit.org/aqua).
Let us consider three Boolean variables $x_1, x_2, x_3$ and a Boolean function $f$ given by:
\begin{align*}
f(x_1, x_2, x_3) \;= &\;\;\;\;
\;(\neg x_1 \vee \neg x_2 \vee \neg x_3) \\
&\;\; \wedge \; ( x_1 \vee \neg x_2 \vee x_3) \\
&\;\; \wedge \;( x_1 \vee x_2 \vee \neg x_3) \\
&\;\; \wedge \;( x_1 \vee \neg x_2 \vee \neg x_3) \\
&\;\; \wedge \;(\neg x_1 \vee x_2 \vee x_3)
\end{align*}
It is common to state 3-SAT problems in [DIMACS CNF format](https://people.sc.fsu.edu/~jburkardt/data/cnf/cnf.html):
1. The file may begin with comment lines.
* The "problem" line begins with "p", followed by the problem type "cnf", the number of variables and the number of clauses.
* The remainder of the file contains lines defining the clauses.
* A clause is defined by listing the index of each positive literal, and the negative index of each negative literal.
```
# Import the problem in DIMACS CNF format; download it on first use.
import os
import wget
if not '3sat3-5.cnf' in os.listdir():
    wget.download('https://raw.githubusercontent.com/Qiskit/qiskit-tutorials/master/community/aqua/optimization/3sat3-5.cnf')
with open('3sat3-5.cnf', 'r') as f:
    sat_cnf = f.read()
# Show the raw CNF problem text.
print(sat_cnf)
```
To apply a quantum algorithm from Qiskit Aqua to this problem, we simply need to import the Qiskit libraries and run the algorithm with the appropriate parameters.
```
# Import Qiskit quantum libraries.
from qiskit import BasicAer
from qiskit.visualization import plot_histogram
from qiskit.aqua import QuantumInstance
from qiskit.aqua.algorithms import Grover
from qiskit.aqua.components.oracles import LogicalExpressionOracle, TruthTableOracle

# Build a Grover-search oracle directly from the DIMACS CNF string.
oracle = LogicalExpressionOracle(sat_cnf)
grover = Grover(oracle)
# Run on the local QASM simulator with 200 measurement shots.
backend = BasicAer.get_backend('qasm_simulator')
quantum_instance = QuantumInstance(backend, shots=200)
result = grover.run(quantum_instance)
# Histogram peaks mark the candidate satisfying assignments.
plot_histogram(result['measurement'])
```
The result shows that the assignments $000, 101, 110$ for $x_1 x_2 x_3$ are potential solutions to the problem.
Whether or not these are correct solutions to the problem can be verified efficiently, as 3-SAT is in NP.
Note that the variables in the histogram are in reverse order: $x_3, x_2, x_1$ instead of $x_1, x_2, x_3$.
## Classical brute force algorithm
The solutions to the problem can also be derived with a classical (non-quantum) algorithm by simply trying every possible combination of input values $x_1, x_2, x_3$ of $f$.
We find again, that the solutions for the given 3-SAT problem are the assignments $000, 101, 110$ for $x_1 x_2 x_3$.
```
from IPython.display import HTML, display
import tabulate

def _eval_3sat(x1, x2, x3):
    """Evaluates the example 3-SAT formula for one Boolean assignment."""
    # One list per clause; the comments give the DIMACS encoding.
    clauses = [
        [not x1, not x2, not x3],  # -1 -2 -3
        [x1, not x2, x3],          #  1 -2  3
        [x1, x2, not x3],          #  1  2 -3
        [x1, not x2, not x3],      #  1 -2 -3
        [not x1, x2, x3],          # -1  2  3
    ]
    # CNF: conjunction (all) of disjunctions (any).
    return all(any(clause) for clause in clauses)

nbr = 3  # number of Boolean variables in the Boolean function
table = []
for i in range(2**nbr):
    # Enumerate all assignments via the binary representation of i.
    x1, x2, x3 = [int(x) for x in '{0:03b}'.format(i)]
    table.append([x1, x2, x3, _eval_3sat(x1, x2, x3)])
display(HTML(tabulate.tabulate(table, tablefmt = 'html',
                               headers = ['$x_1$', '$x_2$', '$x_3$', '$f$'])))
```
Remark: this is obviously not the most efficient classical algorithm that exists. Heuristic SAT-algorithms are able to solve problem instances involving tens of thousands of variables and formulas consisting of millions of symbols, which is sufficient for many practical SAT problems.
| github_jupyter |
Copyright 2020 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
```
# Let us explicitly ask for TensorFlow2.
# This installs a lot of stuff - and will take a while.
!pip3 install tensorflow==2.0.1
import base64
import collections
import dataclasses
import hashlib
import itertools
import math
import pprint
import sys

import numpy
import scipy.linalg
import scipy.optimize
import tensorflow as tf
print('TF version is:', tf.__version__)
print('NumPy version is:', numpy.__version__)
@dataclasses.dataclass(frozen=True)
class Solution(object):
    """One numerically-found critical point of the scalar potential."""
    # Value of the scalar potential at the critical point.
    potential: float
    # Residual stationarity-violation (near zero for a genuine solution).
    stationarity: float
    # The 70 scalar coordinates of the critical point.
    pos: numpy.ndarray
def np_esum(spec, *arrays, optimize='greedy'):
    """numpy.einsum wrapper using greedy contraction-path optimization by default."""
    contracted = numpy.einsum(spec, *arrays, optimize=optimize)
    return contracted
def get_onb_transform(k_ab):
    """Builds an orthonormalizing transform for the real-symmetric Gramian k_ab.

    Returns a pair (transform, inverse_transform) such that
    transform @ k_ab @ transform.T equals +/- identity.
    Raises ValueError if k_ab is not real-symmetric or not definite.
    """
    is_real = numpy.allclose(k_ab, k_ab.real)
    is_symmetric = numpy.allclose(k_ab, k_ab.T)
    if not (is_real and is_symmetric):
        raise ValueError('Bad Gramian.')
    eigvals, eigvecsT = numpy.linalg.eigh(k_ab)
    # Definiteness: every eigenvalue must share the sign of the first one.
    if any(v * eigvals[0] <= 0 for v in eigvals):
        raise ValueError('Non-definite Gramian.')
    onb_transform = numpy.einsum('a,na->an', eigvals**(-.5), eigvecsT)
    # Sanity-check that the transform really orthonormalizes the Gramian.
    g = numpy.einsum('ab,Aa,Bb->AB', k_ab, onb_transform, onb_transform,
                     optimize='greedy')
    expected_sign = (-1, 1)[int(eigvals[0] > 0)]
    assert numpy.allclose(
        g, numpy.eye(g.shape[0]) * expected_sign), 'Bad ONB-transform.'
    return onb_transform, numpy.linalg.inv(onb_transform)
def numpy_signature(a, digits=3):
    """Produces a signature-fingerprint of a numpy array."""
    # Adding this tiny complex constant maps -0.0 to 0.0 so both values
    # print identically in the fingerprint.
    minus_zero_hack = 1e-100+1e-100j
    rounded = numpy.round(a + minus_zero_hack, digits)
    elems = ','.join(repr(x) for x in rounded.flat)
    digest = hashlib.sha256(str((a.shape, elems)).encode('utf-8')).digest()
    return base64.b64encode(digest).decode('utf-8').strip('\n=')
def tformat(array,
            name=None,
            elem_filter=lambda x: abs(x) > 1e-8,
            fmt='%s',
            max_rows=numpy.inf,
            cols=120):
    """Formats a numpy-array in human readable table form.

    Args:
      array: the numpy array to format.
      name: optional name; if given, a header line with shape and
        entry-count statistics is prepended.
      elem_filter: predicate selecting which entries to show (by default,
        entries that are not numerically small).
      fmt: %-format applied to each entry value.
      max_rows: maximum number of entries to show.
      cols: target line width for the multi-column layout; None disables it.

    Returns:
      The formatted table as a single string.
    """
    # Width of each index column: decimal digits of the dimension size.
    dim_widths = [
        max(1, int(math.ceil(math.log(dim + 1e-100, 10))))
        for dim in array.shape]
    format_str = '%s: %s' % (' '.join('%%%dd' % w for w in dim_widths), fmt)
    # One row per entry passing the filter.
    rows = []
    for indices in itertools.product(*[range(dim) for dim in array.shape]):
        v = array[indices]
        if elem_filter(v):
            rows.append(format_str % (indices + (v, )))
    num_entries = len(rows)
    if num_entries > max_rows:
        rows = rows[:max_rows]
    # BUG FIX: guard on `rows` — max() over an empty sequence raised a
    # ValueError whenever every entry was filtered out and cols was set.
    if cols is not None and rows:
        # Lay the rows out over several equal-width columns.
        width = max(map(len, rows))
        num_cols = max(1, cols // (3 + width))
        num_xrows = int(math.ceil(len(rows) / num_cols))
        padded = [('%%-%ds' % width) % s
                  for s in rows + [''] * (num_cols * num_xrows - len(rows))]
        table = numpy.array(padded, dtype=object).reshape(num_cols, num_xrows).T
        xrows = [' | '.join(row) for row in table]
    else:
        xrows = rows
    if name is not None:
        # BUG FIX: the '(%d shown)' annotation previously reported the total
        # entry count instead of the number actually shown after truncation.
        return '\n'.join(
            ['=== %s, shape=%r, %d%s / %d non-small entries ===' % (
                name, array.shape,
                num_entries,
                '' if num_entries == len(rows) else '(%d shown)' % len(rows),
                array.size)] +
            [r.strip() for r in xrows])
    return '\n'.join(xrows)
def tprint(array, sep=' ', end='\n', file=sys.stdout, **tformat_kwargs):
    """Prints a numpy array in human readable table form (see tformat)."""
    text = tformat(array, **tformat_kwargs)
    print(text, sep=sep, end=end, file=file)
### Lie Algebra definitions for Spin(8), SU(8), E7.
def permutation_sign(p):
    """Returns +1 for an even, -1 for an odd permutation p of 0..len(p)-1."""
    remaining = list(p)  # Work on a copy; never mutate the input.
    sign = 1
    for i in range(len(remaining)):
        # Swap until slot i holds value i; every swap flips the parity.
        while remaining[i] != i:
            j = remaining[i]
            remaining[i], remaining[j] = remaining[j], remaining[i]
            sign = -sign
    return sign
def asymm2(a, einsum_spec):
    """Antisymmetrizes `a` over the index pair swapped by `einsum_spec`."""
    swapped = numpy.einsum(einsum_spec, a)
    return 0.5 * (a - swapped)
class Spin8(object):
    """Container class for Spin(8) tensor invariants.

    Attributes:
      gamma_vsc: [8, 8, 8] gamma matrices, indices (vector, spinor, cospinor).
      gamma_vvss: gamma^{ab}_{alpha beta}, translating antisymmetric vector
        index pairs [ij] to antisymmetric spinor index pairs [sS].
      gamma_vvcc: as gamma_vvss, but for cospinor index pairs [cC].
      gamma_vvvvss: gamma^{ijkl}_{alpha beta}, translating antisymmetric
        4-forms [ijkl] to symmetric traceless spinor matrices (sS).
      gamma_vvvvcc: as gamma_vvvvss, but for cospinors.
    """

    def __init__(self):
        r8 = range(8)  # NOTE(review): unused local; left untouched here.
        self.gamma_vsc = gamma_vsc = self._get_gamma_vsc()
        #
        # The gamma^{ab}_{alpha beta} tensor that translates between antisymmetric
        # matrices over vectors [ij] and antisymmetric matrices over spinors [sS].
        self.gamma_vvss = asymm2(
            numpy.einsum('isc,jSc->ijsS', gamma_vsc, gamma_vsc), 'ijsS->jisS')
        # The gamma^{ab}_{alpha* beta*} tensor that translates between antisymmetric
        # matrices over vectors [ij] and antisymmetric matrices over cospinors [cC].
        self.gamma_vvcc = asymm2(
            numpy.einsum('isc,jsC->ijcC', gamma_vsc, gamma_vsc), 'ijcC->jicC')
        #
        # The gamma^{ijkl}_{alpha beta} tensor that translates between antisymmetric
        # 4-forms [ijkl] and symmetric traceless matrices over the spinors (sS).
        g_ijsS = numpy.einsum('isc,jSc->ijsS', self.gamma_vsc, self.gamma_vsc)
        g_ijcC = numpy.einsum('isc,jsC->ijcC', self.gamma_vsc, self.gamma_vsc)
        g_ijklsS = numpy.einsum('ijst,kltS->ijklsS', g_ijsS, g_ijsS)
        g_ijklcC = numpy.einsum('ijcd,kldC->ijklcC', g_ijcC, g_ijcC)
        gamma_vvvvss = numpy.zeros([8] * 6)
        gamma_vvvvcc = numpy.zeros([8] * 6)
        # Antisymmetrize over the four vector indices: sum all 4! index
        # permutations weighted by their parity sign.
        for perm in itertools.permutations(range(4)):
            perm_ijkl = ''.join('ijkl'[p] for p in perm)
            sign = permutation_sign(perm)
            gamma_vvvvss += sign * numpy.einsum(perm_ijkl + 'sS->ijklsS', g_ijklsS)
            gamma_vvvvcc += sign * numpy.einsum(perm_ijkl + 'cC->ijklcC', g_ijklcC)
        # Normalize by 4! = 24, the number of permutations summed above.
        self.gamma_vvvvss = gamma_vvvvss / 24.0
        self.gamma_vvvvcc = gamma_vvvvcc / 24.0

    def _get_gamma_vsc(self):
        """Computes SO(8) gamma-matrices."""
        # Conventions match Green, Schwarz, Witten's, but with index-counting
        # starting at zero.
        # Each 4-char token 'ijk+'/'ijk-' encodes gamma[i, j, k] = +1 / -1;
        # all other entries are zero.
        entries = (
            "007+ 016- 025- 034+ 043- 052+ 061+ 070- "
            "101+ 110- 123- 132+ 145+ 154- 167- 176+ "
            "204+ 215- 226+ 237- 240- 251+ 262- 273+ "
            "302+ 313+ 320- 331- 346- 357- 364+ 375+ "
            "403+ 412- 421+ 430- 447+ 456- 465+ 474- "
            "505+ 514+ 527+ 536+ 541- 550- 563- 572- "
            "606+ 617+ 624- 635- 642+ 653+ 660- 671- "
            "700+ 711+ 722+ 733+ 744+ 755+ 766+ 777+")
        ret = numpy.zeros([8, 8, 8])
        for ijkc in entries.split():
            ijk = tuple(map(int, ijkc[:-1]))
            ret[ijk] = +1 if ijkc[-1] == '+' else -1
        return ret
class SU8(object):
    """Container class for su(8) tensor invariants.

    Attributes:
      ij_map: the 28 index pairs (i, j) with i < j, enumerating the
        antisymmetric [ij] basis.
      m_35_8_8: [35, 8, 8] map between the 35-representation and symmetric
        traceless 8x8 matrices.
      m_28_8_8: [28, 8, 8] map between the 28-representation and
        antisymmetric 8x8 matrices.
      t_aij: [63, 8, 8] su(8) generator matrices.
    """

    def __init__(self):
        # Tensor that translates between adjoint indices 'a' and
        # (vector) x (vector) indices 'ij'.
        ij_map = [(i, j) for i in range(8) for j in range(8) if i < j]
        #
        # We also need the mapping between 8 x 8 and 35 representations, using
        # common conventions for a basis of the 35-representation, and likewise
        # for 8 x 8 and 28.
        m_35_8_8 = numpy.zeros([35, 8, 8], dtype=numpy.complex128)
        m_28_8_8 = numpy.zeros([28, 8, 8], dtype=numpy.complex128)
        # First 7 basis elements of the 35: traceless diagonal differences.
        for n in range(7):
            m_35_8_8[n, n, n] = +1.0
            m_35_8_8[n, n + 1, n + 1] = -1.0
        # Remaining 28 of the 35: symmetric off-diagonal pairs; the 28:
        # antisymmetric pairs.
        for a, (m, n) in enumerate(ij_map):
            m_35_8_8[a + 7, m, n] = m_35_8_8[a + 7, n, m] = 1.0
            m_28_8_8[a, m, n] = 1.0
            m_28_8_8[a, n, m] = -1.0
        #
        # The su8 'Generator Matrices': 35 imaginary-symmetric plus
        # 28 real-antisymmetric generators, 63 in total.
        t_aij = numpy.zeros([63, 8, 8], dtype=numpy.complex128)
        t_aij[:35, :, :] = 1.0j * m_35_8_8
        for a, (i, j) in enumerate(ij_map):
            t_aij[a + 35, i, j] = -1.0
            t_aij[a + 35, j, i] = 1.0
        self.ij_map = ij_map
        self.m_35_8_8 = m_35_8_8
        self.m_28_8_8 = m_28_8_8
        self.t_aij = t_aij
class E7(object):
    """Container class for e7 tensor invariants.

    Attributes:
      t_a_ij_kl: [133, 56, 56] e7 generators acting on the 56-representation.
      k_ab: [133, 133] Gramian of the generators.
      v70_as_sc8x8: [70, 2, 8, 8] view of the 70 scalars as two sets of
        35 symmetric-traceless 8x8 blocks.
      v70_onb_onbinv: (transform, inverse) pair that orthonormalizes the
        70x70 scalar Gramian (see get_onb_transform).
    """

    def __init__(self, spin8, su8):
        self._spin8 = spin8
        self._su8 = su8
        ij_map = su8.ij_map
        t_a_ij_kl = numpy.zeros([133, 56, 56], dtype=numpy.complex128)
        # Generators 0..34: built from the spinor-sector 4-form gammas.
        t_a_ij_kl[:35, 28:, :28] = (1 / 8.0) * (
            np_esum('ijklsS,qsS,Iij,Kkl->qIK',
                    spin8.gamma_vvvvss, su8.m_35_8_8, su8.m_28_8_8, su8.m_28_8_8))
        t_a_ij_kl[:35, :28, 28:] = t_a_ij_kl[:35, 28:, :28]
        # Generators 35..69: built from the cospinor-sector 4-form gammas,
        # entering with an extra factor i and an antisymmetric off-diagonal.
        t_a_ij_kl[35:70, 28:, :28] = (1.0j / 8.0) * (
            np_esum('ijklcC,qcC,Iij,Kkl->qIK',
                    spin8.gamma_vvvvcc, su8.m_35_8_8, su8.m_28_8_8, su8.m_28_8_8))
        t_a_ij_kl[35:70, :28, 28:] = -t_a_ij_kl[35:70, 28:, :28]
        #
        # We need to find the action of the su(8) algebra on the
        # 28-representation.
        su8_28 = 2 * np_esum('aij,mn,Iim,Jjn->aIJ',
                             su8.t_aij,
                             numpy.eye(8, dtype=numpy.complex128),
                             su8.m_28_8_8, su8.m_28_8_8)
        t_a_ij_kl[70:, :28, :28] = su8_28
        t_a_ij_kl[70:, 28:, 28:] = su8_28.conjugate()
        self.t_a_ij_kl = t_a_ij_kl
        #
        # Gramian of the generators (trace-form over the 56-representation).
        self.k_ab = numpy.einsum('aMN,bNM->ab', t_a_ij_kl, t_a_ij_kl)
        self.v70_as_sc8x8 = numpy.einsum('sc,xab->sxcab',
                                         numpy.eye(2),
                                         su8.m_35_8_8).reshape(70, 2, 8, 8)
        # For e7, there actually is a better orthonormal basis:
        # the sd/asd 4-forms. The approach used here however readily generalizes
        # to all other groups.
        self.v70_onb_onbinv = get_onb_transform(self.k_ab[:70, :70])
def get_proj_35_8888(want_selfdual=True):
    """Computes the (35, 8, 8, 8, 8)-projector to the (anti)self-dual 4-forms."""
    # We first need some basis for the 35 self-dual 4-forms.
    # Our convention is that we lexicographically list those 8-choose-4
    # combinations that contain the index 0.
    sign_selfdual = 1 if want_selfdual else -1
    ret = numpy.zeros([35, 8, 8, 8, 8], dtype=numpy.float64)
    #
    def get_selfdual(ijkl):
        # Complementary index quadruple, plus the relative sign that makes
        # the (ijkl, mnpq) combination (anti)self-dual.
        mnpq = tuple(n for n in range(8) if n not in ijkl)
        return (sign_selfdual * permutation_sign(ijkl + mnpq),
                ijkl, mnpq)
    selfduals = [get_selfdual(ijkl)
                 for ijkl in itertools.combinations(range(8), 4)
                 if 0 in ijkl]
    for num_sd, (sign_sd, ijkl, mnpq) in enumerate(selfduals):
        # Antisymmetrize over the four form-indices, both for the quadruple
        # and for its complement (the latter weighted by the duality sign).
        for abcd in itertools.permutations(range(4)):
            sign_abcd = permutation_sign(abcd)
            ret[num_sd,
                ijkl[abcd[0]],
                ijkl[abcd[1]],
                ijkl[abcd[2]],
                ijkl[abcd[3]]] = sign_abcd
            ret[num_sd,
                mnpq[abcd[0]],
                mnpq[abcd[1]],
                mnpq[abcd[2]],
                mnpq[abcd[3]]] = sign_abcd * sign_sd
    # Normalize by 4! = 24.
    return ret / 24.0
# Instantiate the group-theory invariants once, at module level.
spin8 = Spin8()
su8 = SU8()
e7 = E7(spin8, su8)
# Guard against accidental changes to the conventions above: the e7
# generators must reproduce this known fingerprint.
assert (numpy_signature(e7.t_a_ij_kl) ==
        'MMExYjC6Qr6gunZIYfRLLgM2PDtwUDYujBNzAIukAVY'), 'Bad E7(7) definitions.'
### SO(p, 8-p) gaugings
def get_so_pq_E(p=8):
    """Returns the [56, 56] complex matrix selecting the SO(p, 8-p) gauging.

    Args:
      p: number of compact directions; p=8 or p=0 yields the identity,
        i.e. the standard SO(8) gauging.

    Returns:
      A complex [56, 56] numpy matrix.
    """
    # NOTE: uses scipy.linalg.expm; scipy.linalg is now imported explicitly
    # at the top of the file (previously this only worked because
    # `import scipy.optimize` pulls scipy.linalg in as a side effect).
    if p == 8 or p == 0:
        return numpy.eye(56, dtype=complex)
    q = 8 - p
    pq_ratio = p / q
    # Diagonal generator: -1 on the first p directions, p/q on the rest.
    x88 = numpy.diag([-1.0] * p + [1.0 * pq_ratio] * q)
    t = 0.25j * numpy.pi / (1 + pq_ratio)
    # Express x88 in the 35-basis (contract with the inverse Gramian).
    k_ab = numpy.einsum('aij,bij->ab', su8.m_35_8_8, su8.m_35_8_8)
    v35 = numpy.einsum('mab,ab,mM->M', su8.m_35_8_8, x88, numpy.linalg.inv(k_ab))
    # Pad the 35-vector into the 133-dimensional adjoint and contract with
    # the e7 generators, then exponentiate.
    gen_E = numpy.einsum(
        'aMN,a->NM',
        e7.t_a_ij_kl,
        numpy.pad(v35, [(0, 133 - 35)], 'constant'))
    return scipy.linalg.expm(-t * gen_E)
### Supergravity.
@dataclasses.dataclass(frozen=True)
class SUGRATensors(object):
    """Bundle of the tensors computed by tf_sugra_tensors."""
    # [70] scalar coordinates used as input.
    v70: tf.Tensor
    # [56, 56] complex vielbein.
    vielbein: tf.Tensor
    # The T-tensor.
    tee_tensor: tf.Tensor
    # The A1 tensor.
    a1: tf.Tensor
    # The A2 tensor.
    a2: tf.Tensor
    # Scalar potential value.
    potential: tf.Tensor
def get_tf_stationarity(fn_potential, **fn_potential_kwargs):
    """Returns a @tf.function that computes |grad potential|^2."""
    @tf.function
    def stationarity(pos):
        # Differentiate the potential w.r.t. the scalar coordinates and
        # return the squared gradient-norm (zero at a stationary point).
        tape = tf.GradientTape()
        with tape:
            tape.watch(pos)
            potential = fn_potential(pos, **fn_potential_kwargs)
        grad_potential = tape.gradient(potential, pos)
        return tf.reduce_sum(grad_potential * grad_potential)
    return stationarity
@tf.function
def dwn_stationarity(t_a1, t_a2):
    """Computes the de Wit-Nicolai stationarity-condition tensor."""
    # See: https://arxiv.org/pdf/1302.6219.pdf, text after (3.2).
    t_x0 = (
        +4.0 * tf.einsum('mi,mjkl->ijkl', t_a1, t_a2)
        -3.0 * tf.einsum('mnij,nklm->ijkl', t_a2, t_a2))
    t_x0_real = tf.math.real(t_x0)
    t_x0_imag = tf.math.imag(t_x0)
    # Project the real part onto self-dual and the imaginary part onto
    # anti-self-dual 4-forms; stationarity requires both projections to vanish.
    tc_sd = tf.constant(get_proj_35_8888(True))
    tc_asd = tf.constant(get_proj_35_8888(False))
    t_x_real_sd = tf.einsum('aijkl,ijkl->a', tc_sd, t_x0_real)
    t_x_imag_asd = tf.einsum('aijkl,ijkl->a', tc_asd, t_x0_imag)
    # Return the squared norm of the violation (0 at a critical point).
    return (tf.einsum('a,a->', t_x_real_sd, t_x_real_sd) +
            tf.einsum('a,a->', t_x_imag_asd, t_x_imag_asd))
def tf_sugra_tensors(t_v70, compute_masses, t_lhs_vielbein, t_rhs_E):
    """Returns key tensors for D=4 supergravity.

    Args:
      t_v70: [70] float64 tensor of scalar coordinates.
      compute_masses: whether to left-multiply the vielbein by
        `t_lhs_vielbein` (used for mass-matrix perturbations).
      t_lhs_vielbein: [56, 56] complex left factor (ignored unless
        `compute_masses` is truthy).
      t_rhs_E: [56, 56] complex right factor selecting the gauging.

    Returns:
      Tuple (t_v70, complex vielbein, T-tensor, A1, A2, potential).
    """
    tc_28_8_8 = tf.constant(su8.m_28_8_8)
    # Contract the 70 scalars with the e7 generators and exponentiate.
    t_e7_generator_v70 = tf.einsum(
        'v,vIJ->JI',
        tf.complex(t_v70, tf.constant([0.0] * 70, dtype=tf.float64)),
        tf.constant(e7.t_a_ij_kl[:70, :, :], dtype=tf.complex128))
    t_complex_vielbein0 = tf.linalg.expm(t_e7_generator_v70) @ t_rhs_E
    if compute_masses:
        t_complex_vielbein = t_lhs_vielbein @ t_complex_vielbein0
    else:
        t_complex_vielbein = t_complex_vielbein0

    @tf.function
    def expand_ijkl(t_ab):
        # Expands a [28, 28] block into index-pair form [8, 8, 8, 8].
        return 0.5 * tf.einsum(
            'ijB,BIJ->ijIJ',
            tf.einsum('AB,Aij->ijB', t_ab, tc_28_8_8),
            tc_28_8_8)
    #
    # The four 28x28 blocks of the 56-bein in index-pair form.
    t_u_ijIJ = expand_ijkl(t_complex_vielbein[:28, :28])
    t_u_klKL = expand_ijkl(t_complex_vielbein[28:, 28:])
    t_v_ijKL = expand_ijkl(t_complex_vielbein[:28, 28:])
    t_v_klIJ = expand_ijkl(t_complex_vielbein[28:, :28])
    #
    t_uv = t_u_klKL + t_v_klIJ
    t_uuvv = (tf.einsum('lmJK,kmKI->lkIJ', t_u_ijIJ, t_u_klKL) -
              tf.einsum('lmJK,kmKI->lkIJ', t_v_ijKL, t_v_klIJ))
    # T-tensor, and the A1/A2 tensors derived from it.
    t_T = tf.einsum('ijIJ,lkIJ->lkij', t_uv, t_uuvv)
    t_A1 = (-4.0 / 21.0) * tf.linalg.trace(tf.einsum('mijn->ijmn', t_T))
    t_A2 = (-4.0 / (3 * 3)) * (
        # Antisymmetrize in last 3 indices, taking into account antisymmetry
        # in last two indices.
        t_T
        + tf.einsum('lijk->ljki', t_T)
        + tf.einsum('lijk->lkij', t_T))
    t_A1_real = tf.math.real(t_A1)
    t_A1_imag = tf.math.imag(t_A1)
    t_A2_real = tf.math.real(t_A2)
    t_A2_imag = tf.math.imag(t_A2)
    # Potential: -3/4 |A1|^2 + 1/24 |A2|^2 (squared norms taken over
    # real and imaginary parts separately).
    t_A1_potential = (-3.0 / 4) * (
        tf.einsum('ij,ij->', t_A1_real, t_A1_real) +
        tf.einsum('ij,ij->', t_A1_imag, t_A1_imag))
    t_A2_potential = (1.0 / 24) * (
        tf.einsum('ijkl,ijkl->', t_A2_real, t_A2_real) +
        tf.einsum('ijkl,ijkl->', t_A2_imag, t_A2_imag))
    t_potential = t_A1_potential + t_A2_potential
    #
    return t_v70, t_complex_vielbein, t_T, t_A1, t_A2, t_potential
def so8_sugra_tensors(t_v70, tc_rhs_E):
    """Computes the supergravity tensors for scalars t_v70 and wraps them up."""
    raw = tf_sugra_tensors(t_v70, False, 0.0, tc_rhs_E)
    return SUGRATensors(v70=raw[0],
                        vielbein=raw[1],
                        tee_tensor=raw[2],
                        a1=raw[3],
                        a2=raw[4],
                        potential=raw[5])
def so8_sugra_scalar_masses(v70, so_pq_p):
    """Computes the (normalized) scalar mass matrix at the point `v70`.

    Note: In some situations, small deviations in the input give quite
    noticeable deviations in the scalar mass-spectrum.
    Getting reliable numbers here really requires satisfying
    the stationarity-condition to high accuracy.
    """
    tc_rhs_E = tf.constant(get_so_pq_E(so_pq_p), dtype=tf.complex128)
    tc_e7_onb = tf.constant(e7.v70_onb_onbinv[0], dtype=tf.complex128)
    tc_e7_taMN = tf.constant(e7.t_a_ij_kl[:70, :, :], dtype=tf.complex128)
    t_v70 = tf.constant(v70, dtype=tf.float64)
    #
    def tf_grad_potential_lhs_onb(t_d_v70_onb):
        # Gradient of the potential w.r.t. an orthonormal-basis perturbation
        # applied as a left factor on the vielbein.
        tape = tf.GradientTape()
        with tape:
            tape.watch(t_d_v70_onb)
            t_d_gen_e7 = tf.einsum(
                'a,aMN->NM',
                tf.einsum('Aa,A->a',
                          tc_e7_onb,
                          tf.complex(t_d_v70_onb, tf.zeros_like(t_d_v70_onb))),
                tc_e7_taMN)
            # Second-order expansion of expm() for the small perturbation.
            t_lhs_vielbein = (tf.eye(56, dtype=tf.complex128) +
                              t_d_gen_e7 + 0.5 * t_d_gen_e7 @ t_d_gen_e7)
            t_potential = (
                tf_sugra_tensors(t_v70,
                                 tf.constant(True),
                                 t_lhs_vielbein,
                                 tc_rhs_E))[-1]
        return tape.gradient(t_potential, t_d_v70_onb)
    #
    # Second derivative (Hessian) of the potential at zero perturbation,
    # assembled column-by-column from gradients of the gradient.
    t_d_v70_onb = tf.Variable(numpy.zeros(70), dtype=tf.float64)
    tape = tf.GradientTape(persistent=True)
    with tape:
        tape.watch(t_d_v70_onb)
        grad_potential = tf.unstack(tf_grad_potential_lhs_onb(t_d_v70_onb))
    t_mm = tf.stack([tape.gradient(grad_potential[k], t_d_v70_onb)
                     for k in range(70)], axis=1)
    stensors = so8_sugra_tensors(t_v70, tc_rhs_E)
    # Normalize by the factor 36 / |V| at this point.
    return (t_mm * (36.0 / tf.abs(stensors.potential))).numpy()
### Scanning
def scanner(
        use_dwn_stationarity=True,
        so_pq_p=8,
        seed=1,
        scale=0.15,
        stationarity_threshold=1e-4,
        relu_coordinate_threshold=3.0,
        gtol=1e-4,
        f_squashed=tf.math.asinh):
    """Scans for critical points in the scalar potential.

    Args:
      use_dwn_stationarity: Whether to use the explicit stationarity condition
        from `dwn_stationarity`.
      so_pq_p: SO(p, 8-p) non-compact form of the gauge group to use.
      seed: Random number generator seed for generating starting points.
      scale: Scale for normal-distributed search starting point coordinates.
      stationarity_threshold: Upper bound on permissible post-optimization
        stationarity for a solution to be considered good.
      relu_coordinate_threshold: Threshold for any coordinate-value at which
        a ReLU-term kicks in, in order to move coordinates back to near zero.
        (This is relevant for noncompact gaugings with flat directions,
        where solutions can move 'very far out'.)
      gtol: `gtol` parameter for scipy.optimize.fmin_bfgs.
      f_squashed: Squashing-function for stationarity.
        Should be approximately linear near zero, monotonic, and not growing
        faster than logarithmic.

    Yields:
      `Solution` numerical solutions.
    """
    # Use a seeded random number generator for better reproducibility
    # (but note that scipy's optimizers may themselves use independent
    # and not-easily-controllable random state).
    rng = numpy.random.RandomState(seed=seed)

    def get_x0():
        # Random starting point for one optimization attempt.
        return rng.normal(scale=scale, size=70)
    #
    tc_rhs_E = tf.constant(get_so_pq_E(so_pq_p), dtype=tf.complex128)

    def f_potential(scalars):
        # Potential as a plain float-valued function of a numpy vector.
        return so8_sugra_tensors(tf.constant(scalars), tc_rhs_E).potential.numpy()
    #
    # |grad V|^2-based stationarity; only built if the explicit
    # de Wit-Nicolai condition is not used.
    f_grad_pot_sq_stationarity = (
        None if use_dwn_stationarity
        else get_tf_stationarity(
            lambda t_pos: so8_sugra_tensors(t_pos, tc_rhs_E).potential))
    #
    def f_t_stationarity(t_pos):
        # Stationarity measure plus a ReLU penalty that pulls runaway
        # coordinates back towards the origin.
        if use_dwn_stationarity:
            stensors = so8_sugra_tensors(t_pos, tc_rhs_E)
            stationarity = dwn_stationarity(stensors.a1, stensors.a2)
        else:
            stationarity = f_grad_pot_sq_stationarity(t_pos)
        eff_stationarity = stationarity + tf.reduce_sum(
            tf.nn.relu(abs(t_pos) - relu_coordinate_threshold))
        return eff_stationarity
    #
    def f_opt(pos):
        # Objective for the numpy-level optimizer (squashed stationarity).
        t_pos = tf.constant(pos)
        t_stationarity = f_squashed(f_t_stationarity(t_pos))
        return t_stationarity.numpy()
    #
    def fprime_opt(pos):
        # Gradient of the objective via TF automatic differentiation.
        t_pos = tf.constant(pos)
        tape = tf.GradientTape()
        with tape:
            tape.watch(t_pos)
            t_stationarity = f_squashed(f_t_stationarity(t_pos))
        t_grad_opt = tape.gradient(t_stationarity, t_pos)
        return t_grad_opt.numpy()
    #
    # Repeatedly BFGS-minimize the squashed stationarity from random starting
    # points and yield every sufficiently-stationary minimum found.
    while True:
        opt = scipy.optimize.fmin_bfgs(
            f_opt, get_x0(), fprime=fprime_opt, gtol=gtol, maxiter=10**4, disp=0)
        opt_pot = f_potential(opt)
        opt_stat = f_opt(opt)
        if numpy.isnan(opt_pot) or not opt_stat < stationarity_threshold:
            continue  # Optimization ran into a bad solution.
        solution = Solution(potential=opt_pot,
                            stationarity=opt_stat,
                            pos=opt)
        yield solution
### Demo.
def demo(seed=0,
         scale=0.2,
         use_dwn_stationarity=True,
         so_pq_p=8,
         num_solutions=5,
         f_squashed=tf.math.asinh):
    """Runs the scanner and prints solutions with their scalar mass spectra."""
    solution_stream = scanner(scale=scale, seed=seed,
                              use_dwn_stationarity=use_dwn_stationarity,
                              so_pq_p=so_pq_p, f_squashed=f_squashed)
    shown = 0
    while shown < num_solutions:
        solution = next(solution_stream)
        shown += 1
        print('=== Solution ===')
        pprint.pprint(solution)
        mass_matrix = so8_sugra_scalar_masses(solution.pos, so_pq_p)
        print('\nScalar Masses for: V/g^2=%s:' % solution.potential)
        rounded_eigenvalues = numpy.round(numpy.linalg.eigh(mass_matrix)[0], 3)
        print(sorted(collections.Counter(rounded_eigenvalues).items()))

demo()
```
| github_jupyter |
$
\DeclareMathOperator{\E}{\mathbb{E}}
\DeclareMathOperator{\R}{\mathcal{R}}
\DeclareMathOperator{\wv}{\mathbf{w}}
\newcommand{\bm}{\boldsymbol}
$
# ITCS 6010: Assignment #3 (V1)
<font color="red">(Due: 11 pm on Dec 3rd) </font>
### 1. The value of an action, $Q^\pi(s,a)$, depends on the expected next reward and the expected sum of the remaining rewards. Again we can think of this in terms of a small backup diagram, this one rooted at an action (state–action pair) and branching to the possible next states:

### Give the equation corresponding to this intuition and diagram for the value at the root node, $V^\pi(s)$, in terms of the value at the expected leaf node, $Q^\pi(s,a)$, given $s_t = s$. This expectation depends on the policy, $\pi$. Then give a second equation in which the expected value is written out explicitly in terms of $\pi(a|s)$ such that no expected value notation appears in the equation.
$$V^{\pi}(s)=\mathbb{E}_{\pi}[Q^{\pi}(s, a)~|~s_t=s]$$<br>
$$V^{\pi}(s)=\sum_{a} \pi(a | s)Q^{\pi}(s, a)$$
### 2. The compatible function approximation theorem states that the value function approximator is compatible to the policy, i.e., $\nabla_{\wv} Q_{\wv}(s,a) = \nabla_{\bm\theta} \log \pi_{\bm\theta}(s,a)$, and its parameter $\wv$ minimizes the mean-square error, $\E_{\pi_{\bm\theta}} \big[\big(Q^{\pi_\theta}(s,a) - Q_{\wv}(s,a) \big)^2 \big]$. Then the policy gradient is exact, $ \nabla_{\bm\theta} J(\bm\theta) = \E_{\pi_{\bm\theta}} \big[ \nabla_{\bm\theta} \log \pi(a | s, \bm\theta) Q_{\wv} (s, a) \big]$. Show your proof.
If we minimize parameters $\bf{w}$ with repect to the mean squared error:
$$m = \E_{\pi_{\bm\theta}} \big[\big(Q^{\pi_\theta}(s,a) - Q_{\wv}(s,a) \big)^2 \big]$$
Then we are moving in the direction where:
$$\nabla_{\bf{w}} m=0$$
As such, we rewrite the expectation:
$$\mathbb{E}_{\pi_{\theta}}\left[\left(Q^{\pi_\theta}(s, a)-Q_{\bf{w}}(s, a)\right) \nabla_{\bf{w}} Q_{\bf{w}}(s, a)\right]=0$$
$$\mathbb{E}_{\pi_{\theta}}\left[\left(Q^{\pi_\theta}(s, a)-Q_{\bf{w}}(s, a)\right) \nabla_{\theta} \log \pi_{\theta}(s, a)\right]=0$$
$$\mathbb{E}_{\pi_{\theta}}\left[Q^{\pi_\theta}(s, a) \nabla_{\theta} \log \pi_{\theta}(s, a)\right]=\mathbb{E}_{\pi_{\theta}}\left[Q_{\bf{w}}(s, a) \nabla_{\theta} \log \pi_{\theta}(s, a)\right]$$
<br>Therefore, we can then substitute $Q^{\pi_\theta}(s, a)$ with $Q_{\wv} (s, a)$ in the policy gradient:
$$\nabla_{\bm\theta} J(\bm\theta) = \E_{\pi_{\bm\theta}} \big[ \nabla_{\bm\theta} \log \pi(a | s, \bm\theta) Q_{\wv} (s, a) \big]$$
### 3. (Programming)
1) Implement REINFORCE with Baseline (Value function for advantage) with neural network policy approximation to solve the Maze problem.
2) Now, implement AC Methods with NN approximators for both actor and critic to solve the Maze problem. How are the solutions different each other?
#### Maze Problem (Practice)
```
import collections
import numpy as np
import matplotlib
from matplotlib import pyplot as plt
import sys
from collections import defaultdict
import random
import math
import torch
from statistics import mean
import torch.nn.functional as F
```
### Sample Grid Environment File
Simple text file with three characters, 'O', 'H', and 'G'.
- 'O': open space
- 'H': Wall or obstacles
- 'G': Goal location
```
%%bash
cat ../grid.txt
```
### GridWorld Class
```
# maze example
#This environment is from the course material at:
#https://nbviewer.jupyter.org/url/webpages.uncc.edu/mlee173/teach/itcs6010/notebooks/assign/Assign3.ipynb
class GridWorld:
    """ Grid World environment
        there are four actions (left, right, up, and down) to move an agent.
        In a grid, if it reaches a goal, it gets 30 points of reward.
        If it falls in a hole or moves out of the grid world, it gets -5.
        Each step costs -1 point.

        to test GridWorld, run the following sample codes:

            env = GridWorld('grid.txt')
            env.print_map()
            print([2, 3], env.check_state([2, 3]))
            env.init([0, 0])
            print(env.next(1))  # right
            print(env.next(3))  # down

        Parameters
        ==========
        _map        ndarray
                    string array read from a file input
        _size       1d array
                    the size of _map in ndarray
        goal_pos    tuple
                    the index for the goal location
        _actions    list
                    row/column offsets for the 4 actions
                    (left, right, up, and down respectively)
        _s          1d array
                    current state
    """
    def __init__(self, fn):
        """Read a map from the text file *fn* and initialise the environment."""
        self._map = self.read_map(fn)
        self._size = np.asarray(self._map.shape)
        self.goal_pos = np.where(self._map == 'G')
        # definition of actions (left, right, up, and down respectively)
        self._actions = [[0, -1], [0, 1], [-1, 0], [1, 0]]
        self._s = None

    def get_cur_state(self):
        """Return the agent's current (row, col) state."""
        return self._s

    def get_size(self):
        """Return the grid size as a (rows, cols) array."""
        return self._size

    def read_map(self, fn):
        """Load the grid file into a 2-D character array.

        Blank lines (e.g. a trailing newline) are skipped so that all rows
        keep the same length and np.asarray yields a rectangular array.
        """
        grid = []
        with open(fn) as f:
            for line in f:
                row = list(line.strip())
                if row:
                    grid.append(row)
        return np.asarray(grid)

    def print_map(self):
        """Print the raw character map."""
        print( self._map )

    def check_state(self, s):
        """Classify state *s*.

        Returns 'O' (open), 'H' (hole), 'G' (goal), 'N' (out of bounds),
        or 'F' for malformed input that is not a length-2 sequence.
        """
        # collections.abc.Iterable: collections.Iterable was removed in Python 3.10
        if isinstance(s, collections.abc.Iterable) and len(s) == 2:
            if s[0] < 0 or s[1] < 0 or\
               s[0] >= self._size[0] or s[1] >= self._size[1]:
                return 'N'
            return self._map[tuple(s)].upper()
        else:
            return 'F' # wrong input

    def init(self, state=None):
        """Place the agent at *state* (default [0, 0]).

        Raises ValueError when the requested cell is not an open space.
        """
        if state is None:
            s = [0, 0]
        else:
            s = state
        if self.check_state(s) == 'O':
            # bug fix: use the resolved s, not the raw argument, which may be None
            self._s = np.asarray(s)
        else:
            raise ValueError("Invalid state for init")

    def next(self, a):
        """Apply action index *a* and return the reward.

        The state only changes when the move lands on an open or goal cell;
        illegal moves (hole / off-grid) cost -5 and leave the agent in place.
        """
        s1 = self._s + self._actions[a]
        # state transition
        curr = self.check_state(s1)
        if curr == 'H' or curr == 'N':
            return -5
        elif curr == 'F':
            warnings.warn("invalid state " + str(s1))
            return -5
        elif curr == 'G':
            self._s = s1
            return 30
        else:
            self._s = s1
            return -1

    def is_goal(self):
        """Return True when the current state is the goal cell."""
        return self.check_state(self._s) == 'G'

    def get_actions(self):
        """Return the list of action offset vectors."""
        return self._actions
def coord_convert(s, sz):
    """Convert a top-left-origin (row, col) index into bottom-left-origin
    (x, y) plotting coordinates for a grid of size *sz*."""
    row, col = s[0], s[1]
    height = sz[0]
    return [col, height - row - 1]
class Policy(torch.nn.Module):
    """Policy network: maps a state vector to a probability distribution
    over the available actions via one hidden ReLU layer and a softmax."""

    def __init__(self, input_size, hidden_size, output_size):
        super(Policy, self).__init__()
        # input: state vector
        self.l1 = torch.nn.Linear(input_size, hidden_size)
        self.relu = torch.nn.ReLU()
        self.l3 = torch.nn.Linear(hidden_size, output_size)
        self.out = torch.nn.Softmax(dim=0)
        # output: action probabilities

    def forward(self, x):
        """Evaluate the network on a numpy state and return action probabilities."""
        state = torch.from_numpy(x).float()
        hidden = self.relu(self.l1(state))
        return self.out(self.l3(hidden))

    def update(self, advantage, action_prob, optimizer):
        """One policy-gradient step: maximise the advantage-weighted log-probability
        of the taken actions (so minimise its negation)."""
        optimizer.zero_grad()
        loss = -(torch.log(action_prob) * advantage).mean()
        loss.backward()
        optimizer.step()
def policy_init(input_size, hidden_size, output_size, lr):
    """Construct a Policy network together with an Adam optimizer
    (learning rate *lr*, weight decay 5e-4) over its parameters."""
    net = Policy(input_size, hidden_size, output_size)
    opt = torch.optim.Adam(net.parameters(), lr=lr, weight_decay=5e-4)
    return net, opt
class Value(torch.nn.Module):
    """State-value network: maps a state vector to a scalar value estimate
    through one hidden ReLU layer."""

    def __init__(self, input_size, hidden_size, output_size):
        super(Value, self).__init__()
        # input: state vector
        self.l1 = torch.nn.Linear(input_size, hidden_size)
        self.relu = torch.nn.ReLU()
        self.l3 = torch.nn.Linear(hidden_size, output_size)
        # output: value estimate

    def forward(self, x):
        """Evaluate the network on a numpy state and return its value estimate."""
        state = torch.from_numpy(x).float()
        return self.l3(self.relu(self.l1(state)))

    def update(self, advantage, optimizer):
        """Fit the value net by minimising the mean squared advantage."""
        optimizer.zero_grad()
        loss = advantage.pow(2).mean()
        # retain_graph: the same computation graph is reused by the policy update
        loss.backward(retain_graph=True)
        optimizer.step()
def value_init(input_size, hidden_size, output_size, lr):
    """Construct a Value network together with an Adam optimizer
    (learning rate *lr*, weight decay 5e-4) over its parameters."""
    net = Value(input_size, hidden_size, output_size)
    opt = torch.optim.Adam(net.parameters(), lr=lr, weight_decay=5e-4)
    return net, opt
def policy_gradient(num_episodes=20000, epsilon=1, final_epsilon=0.1, gamma=0.9, lr=0.002, algo='rf'):
    """Train a policy (plus a value baseline/critic) on the module-level ``env``.

    algo='rf' uses the full discounted Monte-Carlo return (REINFORCE with
    baseline); algo='ac' uses the one-step bootstrapped TD target
    (actor-critic).  Returns (policy_net, per-episode returns, running
    averages of the last 50 episode returns).
    """
    terminal_state_indicators = ["H","N","F","G"]
    # actor: state (2 coords) -> action probabilities; critic: state -> scalar value
    policy_net, pol_opt = policy_init(2,20,len(env._actions),lr)
    policy_net.train()
    value_net, val_opt = value_init(2,20,1,lr)
    value_net.train()
    # exponential decay so epsilon reaches final_epsilon after num_episodes episodes
    epsilon_decay = np.exp(np.log(final_epsilon) / num_episodes)
    all_Gt = []   # total (undiscounted) reward per episode
    all_avg = []  # running average of recent episode returns
    for ep in range(0, num_episodes):
        reward_sum = 0
        episode = []
        #Randon Starts
        # NOTE(review): randint's upper bound is inclusive, so start_row/col can
        # be one past the grid edge; check_state then returns 'N' and the
        # episode falls back to the fixed [0,0] start.
        start_row = random.randint(0,env._size[0])
        start_col = random.randint(0,env._size[1])
        # with probability epsilon, start from a random open cell (exploring starts)
        if(env.check_state([start_row,start_col]) == 'O' and (np.random.rand() < epsilon)):
            env.init([start_row,start_col])
        else:
            env.init([0,0])
        done = 0
        for steps in range(0,100):  # cap episodes at 100 steps
            state = env.get_cur_state()
            action_probs = policy_net.forward(state)
            # sample an action from the policy's probability distribution
            action = np.random.choice(np.arange(len(action_probs)), p=action_probs.detach().numpy())
            reward = env.next(action)
            reward_sum += reward
            next_state = env._s
            curr = env.check_state(next_state)
            if(curr in terminal_state_indicators):
                done = 1
            #store experience: (state, action, reward, pi(a|s), next_state)
            episode.append((state, action, reward, action_probs[action], next_state))
            #if done, break
            if done:
                break
            state = next_state  # redundant: state is re-read from env at loop top
        all_Gt.append(reward_sum)
        step_count = 0
        advantages = []
        picked_actp = []
        # second pass over the stored trajectory to compute per-step targets
        for traj in episode:
            state = traj[0]
            action = traj[1]
            action_prob = traj[3]
            next_state = traj[4]
            if(algo=='rf'):
                # REINFORCE: full discounted return from this step to episode end
                G_t = 0
                for i in range(step_count, len(episode)):
                    reward = episode[i][2]
                    G_t += reward*(gamma**(i-step_count))
            elif(algo=='ac'):
                # actor-critic: one-step TD target r + gamma * V(s')
                reward = traj[2]
                G_t = reward + gamma*value_net.forward(next_state).detach()
            else:
                print("Invalid algorithm: Use 'rf' or 'ac'")
            baseline_value = value_net.forward(state)
            advantage = G_t - baseline_value
            advantages.append(advantage)
            picked_actp.append(action_prob)
            step_count += 1
        # batch update over the whole episode
        value_net.update(torch.stack(advantages), val_opt)
        policy_net.update(torch.stack(advantages), torch.stack(picked_actp), pol_opt)
        epsilon *= epsilon_decay
        # running average over (up to) the last 50 episodes
        avg = mean(all_Gt[max(-50,-len(all_Gt)):])
        all_avg.append(avg)
        if ep>50 and avg > 20:  # early stop once the running average is high enough
            print('Converged in episode '+str(ep))
            break
    return policy_net, all_Gt, all_avg
def print_policy(policy):
    """Print an action map for every cell of the module-level ``env``.

    NOTE(review): the action is *sampled* from the policy's probabilities
    rather than taken as the argmax, so repeated calls can print different
    maps — confirm whether greedy output was intended.
    """
    print_value = np.zeros((env._size[0],env._size[1]))
    bad_state_indicators = ["H","N","F"]
    policy.eval()
    for row in range(0,env._size[0]):
        for col in range(0,env._size[1]):
            state = np.asarray([row,col])
            action_probs = policy.forward(state)
            action = np.random.choice(np.arange(len(action_probs)), p=action_probs.detach().numpy())
            curr = env.check_state(state)
            if(curr in bad_state_indicators):
                # holes / invalid cells print as 0
                print_value[tuple(state)] = 0
            elif(curr in "G"):
                # goal prints as 9
                print_value[tuple(state)] = 9
            else:
                # open cells print the sampled action, shifted to 1..4
                print_value[tuple(state)] = int(action)+1
    print("0: Hole 1: Left 2: Right 3: Up 4: Down 9: Goal")
    print(print_value)
np.set_printoptions(suppress=True)  # print floats without scientific notation
# module-level environment used by policy_gradient and print_policy
env = GridWorld("../grid.txt")
env.print_map()
#******************* REINFORCE with Baseline ********************************
# NOTE(review): final_epsilon=0.8 keeps random restarts frequent throughout
# training, unlike the 0.1 used for the actor-critic run below — confirm
# this difference is intentional.
policy, all_reward, avg_reward = policy_gradient(num_episodes=20000, epsilon=1,
                                                 final_epsilon=0.8,gamma=0.99, lr=0.002, algo='rf')
print_policy(policy)
plt.plot(avg_reward)
plt.title('REINFORCE with Baseline', fontsize=24)
plt.ylabel('Running Average Reward', fontsize=18)
plt.xlabel('Episodes', fontsize=18)
print("Final Average Reward: " + str(avg_reward[-1]))
plt.show()
#******************* Actor-Critic ********************************
policy, all_reward, avg_reward = policy_gradient(num_episodes=20000, epsilon=0.5,
                                                 final_epsilon=0.1, gamma=0.99, lr=0.002, algo='ac')
print_policy(policy)
plt.plot(avg_reward)
plt.title('Actor-Critic', fontsize=24)
plt.ylabel('Running Average Reward', fontsize=18)
plt.xlabel('Episodes', fontsize=18)
print("Final Average Reward: " + str(avg_reward[-1]))
plt.show()
```
The generic policy update for REINFORCE with Baseline is shown in the following equation:<br><br>
$$\theta_{t+1}= \theta_t + \alpha\left(G_{t}-\hat{V}\left(s_{t}\right)\right) \nabla_{\theta} \log \pi_{\theta}\left(s_{t}, a_{t}\right)$$
Where $\alpha$ is the learning rate, and $\theta$ represents the policy function parameters.
Here, we see that REINFORCE with Baseline takes the true return $G_t$, and subtracts the approximated value $\hat{V}\left(s_{t}\right)$ as the baseline. This forms its advantage, which is used to push the gradient with respect to the difference in the true return and approximated state value.<br><br>
Basic Actor-Critic is different than REINFORCE with Baseline in its advantage, as shown in the following equation:<br>
$$\theta_{t+1}= \theta_t + \alpha\left(r + \gamma \hat{V}\left(s_{t+1}\right)-\hat{V}\left(s_{t}\right)\right) \nabla_{\theta} \log \pi_{\theta}\left(s_{t}, a_{t}\right)$$
Here, we can see that the advantage is calculated with the TD error using the value function approximate for the next state, rather than the full return used by REINFORCE.
| github_jupyter |
Copyright (c) Microsoft Corporation. All rights reserved.
Licensed under the MIT License.

# Deploy models to Azure Kubernetes Service (AKS) using controlled roll out
This notebook will show you how to deploy multiple AKS webservices with the same scoring endpoint and how to roll out your models in a controlled manner by configuring % of scoring traffic going to each webservice. If you are using a Notebook VM, you are all set. Otherwise, go through the [configuration notebook](../../../configuration.ipynb) to install the Azure Machine Learning Python SDK and create an Azure ML Workspace.
```
# Check for latest version
import azureml.core
# print the installed Azure ML SDK version to confirm the environment setup
print(azureml.core.VERSION)
```
## Initialize workspace
Create a [Workspace](https://docs.microsoft.com/python/api/azureml-core/azureml.core.workspace%28class%29?view=azure-ml-py) object from your persisted configuration.
```
from azureml.core.workspace import Workspace
# load the workspace from the local config.json written by the configuration notebook
ws = Workspace.from_config()
print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep = '\n')
```
## Register the model
Register a file or folder as a model by calling [Model.register()](https://docs.microsoft.com/python/api/azureml-core/azureml.core.model.model?view=azure-ml-py#register-workspace--model-path--model-name--tags-none--properties-none--description-none--datasets-none--model-framework-none--model-framework-version-none--child-paths-none-).
In addition to the content of the model file itself, your registered model will also store model metadata -- model description, tags, and framework information -- that will be useful when managing and deploying models in your workspace. Using tags, for instance, you can categorize your models and apply filters when listing models in your workspace.
```
from azureml.core import Model

# Upload the local pickle file and register it in the workspace, attaching
# framework metadata and tags used later for filtering/management.
model = Model.register(workspace=ws,
                       model_name='sklearn_regression_model.pkl',  # Name of the registered model in your workspace.
                       model_path='./sklearn_regression_model.pkl', # Local file to upload and register as a model.
                       model_framework=Model.Framework.SCIKITLEARN,  # Framework used to create the model.
                       model_framework_version='0.19.1',             # Version of scikit-learn used to create the model.
                       description='Ridge regression model to predict diabetes progression.',
                       tags={'area': 'diabetes', 'type': 'regression'})

print('Name:', model.name)
print('Version:', model.version)
```
## Register an environment (for all models)
If you want control over how your model is run, or if it has special runtime requirements, you can specify your own environment and scoring method.
Specify the model's runtime environment by creating an [Environment](https://docs.microsoft.com/python/api/azureml-core/azureml.core.environment%28class%29?view=azure-ml-py) object and providing the [CondaDependencies](https://docs.microsoft.com/python/api/azureml-core/azureml.core.conda_dependencies.condadependencies?view=azure-ml-py) needed by your model.
```
from azureml.core import Environment
from azureml.core.conda_dependencies import CondaDependencies

# Custom inference environment; scikit-learn is pinned to the same version
# that was used to train/register the model above.
environment=Environment('my-sklearn-environment')
environment.python.conda_dependencies = CondaDependencies.create(pip_packages=[
    'azureml-defaults',
    'inference-schema[numpy-support]',
    'numpy',
    'scikit-learn==0.19.1',
    'scipy'
])
```
When using a custom environment, you must also provide Python code for initializing and running your model. An example script is included with this notebook.
```
# display the entry (scoring) script that will run inside the web service
with open('score.py') as f:
    print(f.read())
```
## Create the InferenceConfig
Create the inference configuration to reference your environment and entry script during deployment
```
from azureml.core.model import InferenceConfig
# bundle the entry script with the custom environment for deployment
inference_config = InferenceConfig(entry_script='score.py',
                                   source_directory='.',
                                   environment=environment)
```
## Provision the AKS Cluster
If you already have an AKS cluster attached to this workspace, skip the step below and provide the name of the cluster.
```
from azureml.core.compute import AksCompute
from azureml.core.compute import ComputeTarget

# Use the default configuration (can also provide parameters to customize)
prov_config = AksCompute.provisioning_configuration()

aks_name = 'my-aks'
# Create the cluster (skip this cell if an AKS cluster is already attached)
aks_target = ComputeTarget.create(workspace = ws,
                                  name = aks_name,
                                  provisioning_configuration = prov_config)

# block until provisioning finishes
aks_target.wait_for_completion(show_output=True)
```
## Create an Endpoint and add a version (AKS service)
This creates a new endpoint and adds a version behind it. By default the first version added is the default version. You can specify the traffic percentile a version takes behind an endpoint.
```
# deploying the model and create a new endpoint
from azureml.core.webservice import AksEndpoint
# from azureml.core.compute import ComputeTarget

#select a created compute
compute = ComputeTarget(ws, 'my-aks')
namespace_name="endpointnamespace"
# define the endpoint name
endpoint_name = "myendpoint2"
# define the service name
version_name= "versiona"

# First version behind the endpoint takes 40% of the scoring traffic; the
# first version added is also the default, so it receives the remainder.
endpoint_deployment_config = AksEndpoint.deploy_configuration(tags = {'modelVersion':'firstversion', 'department':'finance'},
                                                              description = "my first version", namespace = namespace_name,
                                                              version_name = version_name, traffic_percentile = 40)
endpoint = Model.deploy(ws, endpoint_name, [model], inference_config, endpoint_deployment_config, compute)
endpoint.wait_for_deployment(True)
endpoint.get_logs()
```
## Add another version of the service to an existing endpoint
This adds another version behind an existing endpoint. You can specify the traffic percentile the new version takes. If no traffic_percentile is specified then it defaults to 0. All the unspecified traffic percentile (in this example the remaining 10, since 40 + 50 are allocated) goes to the default version.
```
# Adding a new version to an existing Endpoint; this one takes 50% of traffic,
# the unspecified remainder goes to the default version.
version_name_add="versionb"

endpoint.create_version(version_name = version_name_add, inference_config=inference_config, models=[model], tags = {'modelVersion':'secondversion', 'department':'finance'},
                        description = "my second version", traffic_percentile = 50)
endpoint.wait_for_deployment(True)
```
## Update an existing version in an endpoint
There are two types of versions: control and treatment. An endpoint contains one or more treatment versions but only one control version. This categorization helps compare the different versions against the defined control version.
```
# promote the second version to default and mark it as the control version
endpoint.update_version(version_name=endpoint.versions[version_name_add].name, description="my second version update", traffic_percentile=40, is_default=True, is_control_version_type=True)
endpoint.wait_for_deployment(True)
```
## Test the web service using run method
Test the web service by passing in data. The run() method retrieves API keys behind the scenes to make sure that the call is authenticated.
```
# Scoring on endpoint
import json
# two 10-feature sample rows, JSON-encoded then sent as raw bytes;
# run() handles authentication (API keys) behind the scenes
test_sample = json.dumps({'data': [
    [1,2,3,4,5,6,7,8,9,10],
    [10,9,8,7,6,5,4,3,2,1]
]})
test_sample_encoded = bytes(test_sample, encoding='utf8')

prediction = endpoint.run(input_data=test_sample_encoded)
print(prediction)
```
## Delete Resources
```
# deleting a version in an endpoint
endpoint.delete_version(version_name=version_name)
endpoint.wait_for_deployment(True)
# deleting an endpoint, this will delete all versions in the endpoint and the endpoint itself
#endpoint.delete()
```
| github_jupyter |
# Examining Racial Discrimination in the US Job Market
### Background
Racial discrimination continues to be pervasive in cultures throughout the world. Researchers examined the level of racial discrimination in the United States labor market by randomly assigning identical résumés to black-sounding or white-sounding names and observing the impact on requests for interviews from employers.
### Data
In the dataset provided, each row represents a resume. The 'race' column has two values, 'b' and 'w', indicating black-sounding and white-sounding. The column 'call' has two values, 1 and 0, indicating whether the resume received a call from employers or not.
Note that the 'b' and 'w' values in race are assigned randomly to the resumes when presented to the employer.
<div class="span5 alert alert-info">
### Exercises
You will perform a statistical analysis to establish whether race has a significant impact on the rate of callbacks for resumes.
Answer the following questions **in this notebook below and submit to your Github account**.
1. What test is appropriate for this problem? Does CLT apply?
2. What are the null and alternate hypotheses?
3. Compute margin of error, confidence interval, and p-value.
4. Write a story describing the statistical significance in the context or the original problem.
5. Does your analysis mean that race/name is the most important factor in callback success? Why or why not? If not, how would you amend your analysis?
You can include written notes in notebook cells using Markdown:
- In the control panel at the top, choose Cell > Cell Type > Markdown
- Markdown syntax: http://nestacms.com/docs/creating-content/markdown-cheat-sheet
#### Resources
+ Experiment information and data source: http://www.povertyactionlab.org/evaluation/discrimination-job-market-united-states
+ Scipy statistical methods: http://docs.scipy.org/doc/scipy/reference/stats.html
+ Markdown syntax: http://nestacms.com/docs/creating-content/markdown-cheat-sheet
</div>
****
```
%matplotlib inline
import pandas as pd
import numpy as np
from scipy import stats
import seaborn as sns
import matplotlib.pyplot as plt
from statsmodels.stats.proportion import proportions_ztest
# read the data (Stata file from the Bertrand & Mullainathan resume study)
data = pd.io.stata.read_stata('data/us_job_market_discrimination.dta')
# split data into black and nonblack sounding names
dfblack = data[data.race=='b']
dfnonblack = data[data.race!='b']
# display some basic statistics
# NOTE(review): describe() only renders when this is the last expression of a
# notebook cell; in a script its result is discarded
data.describe()
# count the number of blacks and nonblack sounding names and calls / noncalls
black_call=sum(dfblack.call)
black_nocall=len(dfblack)-black_call
nonblack_call=sum(dfnonblack.call)
nonblack_nocall=len(dfnonblack)-nonblack_call
# number of callbacks for black and non black-sounding names
print("callbacks for black-sounding names", black_call)
print("noncallbacks for black-sounding names", black_nocall)
print("callbacks for non black-sounding names", nonblack_call)
print("noncallbacks for non black-sounding names", nonblack_nocall)
#
# create bar chart: calls vs non-calls, grouped by race of the name
#
call = (black_call, nonblack_call)
noncall = (black_nocall, nonblack_nocall)

fig, ax = plt.subplots()
index = np.arange(2)   # one group per race category
bar_width = 0.35
opacity = 0.4
error_config = {'ecolor': '0.3'}

rects1 = plt.bar(index, call, bar_width,
                 alpha=opacity,
                 color='b',
                 error_kw=error_config,
                 label='call')

rects2 = plt.bar(index + bar_width, noncall, bar_width,
                 alpha=opacity,
                 color='r',
                 error_kw=error_config,
                 label='noncall')

# put labels to bar chart
plt.xlabel('Race')
plt.ylabel('Calls')
plt.title('Number of calls by race')
plt.xticks(index + bar_width / 2, ('black sounding name', 'nonblack sounding name'))
plt.legend()
plt.tight_layout()

#
# create pie chart: share of all callbacks by race of the name
#
labels = 'Black sounding name', 'nonBlack sounding name'
sizes = [black_call, nonblack_call]
fig1, ax1 = plt.subplots()
ax1.pie(sizes, labels=labels, autopct='%1.0f%%')
ax1.axis('equal')
plt.show()
# Two-sample test on callback proportions (black vs non-black sounding names).
n1 = len(dfblack)
n2 = len(dfnonblack)
p1 = black_call / n1     # callback rate for black-sounding names
p2 = nonblack_call / n2  # callback rate for non-black sounding names

count_call = np.array([black_call, nonblack_call])
nobs_array = np.array([n1, n2])
# NOTE(review): value=ls tests H0: p1 - p2 == 0.05, not H0: p1 == p2 as
# stated in the write-up — confirm whether value=0 was intended.
ls = .05
stat, pval = proportions_ztest(count=count_call, nobs=nobs_array, value=ls)

# standard error of the difference in proportions
# (bug fix: the second term previously used p1*(1-p2)/n2 instead of p2*(1-p2)/n2)
se = np.sqrt(p1*(1-p1)/n1 + p2*(1-p2)/n2)
# margin of error at the 95% confidence level is 1.96 standard errors
moe = 1.96 * se
print('margin of error=', moe)
print('conf interval=', (p1-p2-moe, p1-p2+moe))
print('p-value=', pval)

# chi-square test of independence on the 2x2 callback contingency table
chi_value = stats.chi2_contingency(np.array([[black_call, black_nocall],[nonblack_call, nonblack_nocall]]))
print('chi_sq p-value=', chi_value[1])
#t-test (Welch, unequal variances) on the continuous covariates:
#education, ofjobs, yearsexp, occupspecific and occupbroad
print('education p-value=', stats.ttest_ind(dfblack['education'], dfnonblack['education'], equal_var = False)[1])
print('ofjobs p-value=', stats.ttest_ind(dfblack['ofjobs'], dfnonblack['ofjobs'], equal_var = False)[1])
print('yearsexp p-value=', stats.ttest_ind(dfblack['yearsexp'], dfnonblack['yearsexp'], equal_var = False)[1])
print('occupspecific p-value=', stats.ttest_ind(dfblack['occupspecific'], dfnonblack['occupspecific'], equal_var = False)[1])
print('occupbroad p-value=', stats.ttest_ind(dfblack['occupbroad'], dfnonblack['occupbroad'], equal_var = False)[1])

#proportion test on the binary covariates: honors, volunteer, military,
#empholes, workinschool and computerskills
# NOTE(review): value=ls (0.05) tests for a 5-point difference rather than
# equality of proportions — confirm intended.
print('honors p-value=', proportions_ztest(count=np.array([sum(dfblack.honors), \
    sum(dfnonblack.honors)]),nobs=np.array([n1, n2]), value=ls)[1])
print('volunteer p-value=', proportions_ztest(count=np.array([sum(dfblack.volunteer), \
    sum(dfnonblack.volunteer)]),nobs=np.array([n1, n2]), value=ls)[1])
print('military p-value=', proportions_ztest(count=np.array([sum(dfblack.military), \
    sum(dfnonblack.military)]),nobs=np.array([n1, n2]), value=ls)[1])
print('empholes p-value=', proportions_ztest(count=np.array([sum(dfblack.empholes), \
    sum(dfnonblack.empholes)]),nobs=np.array([n1, n2]), value=ls)[1])
print('workinschool p-value=', proportions_ztest(count=np.array([sum(dfblack.workinschool), \
    sum(dfnonblack.workinschool)]),nobs=np.array([n1, n2]), value=ls)[1])
print('computerskills p-value=', proportions_ztest(count=np.array([sum(dfblack.computerskills), \
    sum(dfnonblack.computerskills)]),nobs=np.array([n1, n2]), value=ls)[1])
# pairwise correlations between all numeric columns of the dataset
corrmat = data.corr()
# Set up the matplotlib figure
f, ax = plt.subplots(figsize=(12, 9))
# Draw the heatmap using seaborn
sns.heatmap(corrmat, vmax=.8, square=True)
f.tight_layout()
<div class="span5 alert alert-info">
### ANSWERS:
1. What test is appropriate for this problem? <b> Comparison of two proportions </b>
Does CLT apply? <b> Yes: since np and n(1-p), where n is the number of samples and p is the probability that an applicant is called, are both greater than 10, the sampling distribution can be approximated by the normal distribution.</b>
2. What are the null and alternate hypotheses? <b> H<sub>o</sub>= the call back for black and non-blacks are the same while H<sub>a</sub>= the call back for black and non-blacks are not the same </b>
3. Compute margin of error, confidence interval, and p-value. <b> margin of error= 0.00697820016119
conf interval= (-0.045710126525379105, -0.018355581893512069)
p-value= 2.36721263361e-25 </b>
4. Write a story describing the statistical significance in the context or the original problem.
<h3> Discrimination in Job Market on Black sounding names </h3>
> Black sounding names have 2% to 4% average less callbacks compared to non-black sounding names.
> Education, years experience and number of previous jobs have no significant difference.
> However, differences are found in honors achieved, military and volunteer work, employment holes, work in school and computer skills.
There is a discrimination in the job market for black sounding names. A study of 4870 job applicants in 2000 to 2002 shows that there is a difference between the number of callbacks for black sounding names compared to non-black sounding names. The study also shows that education, years experience and number of previous jobs are the same between the two groups. Meanwhile, there is a difference shown in honors achieved, military and volunteer work, employment holes, work in school and computer skills.
5. Does your analysis mean that race/name is the most important factor in callback success? Why or why not? If not, how would you amend your analysis?
<b> Race is not the most important factor in callback success. In fact there are differences between black and nonblack sounding names in terms of honors achieved, military and volunteer work, employment holes, work in school and computer skills. These are the reasons why there is a difference on the callbacks between the two groups</b>
#### Resources
+ Experiment information and data source: http://www.povertyactionlab.org/evaluation/discrimination-job-market-united-states
+ Scipy statistical methods: http://docs.scipy.org/doc/scipy/reference/stats.html
</div>
****
| github_jupyter |
<img src="images/JHI_STRAP_Web.png" style="width: 150px; float: right;">
# Supplementary Information: Holmes *et al.* 2020
# 2. Full model fitting
This notebook describes fitting of a Bayesian hierarchical model of the effects of control (growth) and treatment (passage) on individual genes from *E. coli* DH10B (carrier) and Sakai (BAC load), to data obtained using a multi-*E. coli* microarray.
Much of the code for the visualisation, analysis and data manipulation of the fitting results is found in the associated Python module `tools.py`, which should also be present in this directory.
The model fit can be downloaded directly from the [Zenodo](https://zenodo.org) repository, for use in this notebook:
[](https://doi.org/10.5281/zenodo.269638)
A code cell in the notebook below will attempt to make this download for you if the file does not already exist.
## Table of Contents
1. [Experiment summary and interpretation](#summary)
2. [Building the model](#building)
1. [Stan model construction](#build_stan)
2. [Define and fit the Stan model](#fit_stan)
3. [Extract the fit](#extract_stan)
3. [Inspecting the fit](#inspect_fit)
1. [Median parameter estimates](#median_estimates)
4. [Identifying locus tags that confer an advantage under treatment](#locus_tags)
1. [Plotting distribution of effects](#plot_effects)
2. [Identifying candidates](#candidates)
5. [Manuscript Figure 1](#figure_1)
## Experiment summary and interpretation <a id="summary"></a>
The experiment involves measuring changes in microarray probe intensity before and after a pool of bacteria is subjected to one of two processes:
1. a sample from the pool is grown in media to a defined OD, then subsampled. This growth/subsample process is repeated *n* times. [*control*]
2. a sample from the pool is applied to plant leaves, subsampled, and that subsample grown in media to a defined OD, then subsampled. This passage/subsample/growth/subsample process is repeated *n* times. [*treatment*]
In a single replicate, the microarray is exposed to genomic DNA extracted from the pool (i) before the experiment begins, and (ii) after the experiment concludes. Three replicates are performed.
<br /><div class="alert-success">
<b>All genes in all samples go through the *growth and subsampling* part of the experiment, and we wish to estimate the effect of *passage and subsampling* on individual genes.</b>
</div>
The pool of bacteria comprises *E. coli* DH10B as a carrier organism. The pool is heterogeneous, in that individual cells also contain BACs encoding random stretches of the *E. coli* Sakai chromosome. We therefore expect carrier organism genes to be unaffected by passage (treatment), and for any effects to be detectable only for genes that originate from *E. coli* Sakai.
<br /><div class="alert-success">
<b>We expect that genes conferring a phenotypic/selective advantage only for association with the plant should be enriched at the end of the treatment experiment, but not at the end of the control experiment. Sakai genes that are enriched in both treatment and control experiments may be generally advantageous for growth, but those giving a selective advantage on passage through the plant could be specifically adaptive in an environmental context.</b>
</div>
<br /><div class="alert-danger">
<b>As the BACs describe contiguous regions of the *E. coli* Sakai genome, there is the possibility that linkage disequilibrium could result in some genes that do not confer an advantage by themselves apparently displaying enrichment after treatment.</b>
</div>
If the biological function conferring an advantage during passage is encoded by a suite of coregulated genes in an operon, we might expect all members of this suite to show evidence of enrichment after passage. It is likely that clusters of enrichment for operons or regulons post-passage will be seen in the results. Although we are not accounting for this clustering or association by operon directly in this model, it is a possible additional hierarchical term in future iterations of the model.
We should expect there to be a selective burden to the carriage of additional non-functional gDNA as BACs, so we might also anticipate a slightly negative effect on recovery under *control* conditions.
## Python imports
```
%pylab inline
import os
import pickle
import warnings; warnings.filterwarnings('ignore')
import numpy as np
import pandas as pd
import pystan
import scipy
import seaborn as sns; sns.set_context('notebook')
from Bio import SeqIO
import tools
```
## Building the model <a id="building"></a>
We assume that each array probe $i$ (array probes take unique values of $i$ in the context of the entire experiment; that is, $i$ is unique for probe X replicate X treatment) measures hybridisation of genomic DNA (gDNA) in the sample that corresponds to a single gene $j[i]$, and that the measured intensity of probe $i$ relates directly to the corresponding amount of gDNA in the sample. There may be multiple probes relating to a single gene, so it is possible that $j[p] = j[q], p \neq q$.
<div class="alert-success">
<b>This establishes a basis for pooling probe-level effects as samples of the gene-level effect.</b>
</div>
We define the (input) measurement of a probe before an experiment as $x_i$, and the (output) measurement of that probe after the experiment as $y_i$. We assume that the measurement of each probe is subject to random experimental/measurement error that is normally-distributed with mean zero and variance $\sigma_y^2$. The actual quantity of DNA measured after the experiment can then be represented as $\hat{y}$, and the irreducible error in this experiment as $\epsilon$ ($\epsilon_i$ serves to include the irreducible errors in measuring both $x_i$ and $y_i$; all errors are assumed to be Normal, so their linear combinations are also Normal).
$$y_i = \hat{y_i} + \epsilon_i$$
$$\epsilon_i \sim N(0, \sigma_y^2) \implies y_i \sim N(\hat{y_i}, \sigma_y^2)$$
The relationship between the input and output DNA quantities measured by a single probe can be represented as $\hat{y_i} = f(x_i)$. That is to say, that the measured input DNA quantity $x_i$ is a *predictor* of the output quantity. This relationship will be modelled as the sum of two linear effects:
$$\textrm{control effect} = \alpha + \beta x$$
$$\textrm{treatment effect} = \gamma + \delta x$$
$$\hat{y_i} = \textrm{control effect}(x_i) + \textrm{treatment effect}(x_i) = \alpha + \beta x_i + \gamma + \delta x_i$$
As these are linear effects, we have intercept/offset parameters ($\alpha$, $\gamma$) and gradient/slope parameters ($\beta$, $\delta$).
<div class="alert-success">
<b>Where $\beta$ or $\delta$ are large, they would indicate large $x_i$-dependent effects of the control (growth) and treatment (passage) parts of the experiment respectively.</b>
</div>
As formulated above, the four parameters would be identical for all probes, but we are interested in estimating the control and treatment effects for individual genes, so we require a set of parameters for each gene (as it corresponds to probe $i$): $j[i]$. This is appropriate for the effects of growth/treatment that are specific to the levels of a single gene: $\beta$ and $\delta$.
The remaining parameters $\alpha$ and $\gamma$, the offsets from zero for each probe, could be considered to be constant across each replicate of both control and treatment experiments. They are possibly more realistically considered to be different for each array (i.e. each combination of replicate and treatment).
<div class="alert-success">
<b>The offset for any particular array can be hierarchically modelled as being drawn from a distribution representing all arrays, and we require one parameter for each of the arrays, so that for probe $i$ the corresponding array for that experiment is $k[i]$.</b>
</div>
As a result, we estimate $\alpha_{k[i]}$, $\beta_{j[i]}$, $\gamma_{k[i]}$, $\delta_{j[i]}$, and the relationship for each probe is modelled as:
$$\hat{y_i} = \textrm{control effect}_{j[i]}(x_i) + \textrm{treatment effect}_{j[i]}(x_i) = \alpha_{k[i]} + \beta_{j[i]} x_i + \gamma_{k[i]} + \delta_{j[i]} x_i$$
The parameters $\alpha_{k[i]}$, $\beta_{j[i]}$, $\gamma_{k[i]}$, $\delta_{j[i]}$ (and $\epsilon_i$) are to be estimated by the model fit.
<br /><div class="alert-success">
<b>We assume that the values of each parameter, e.g. $\alpha_{k[i]}$, are drawn from a single *pooled distribution* for that parameter, $\alpha \sim \textrm{some distribution}$.</b>
</div>
This pooling ensures that our fits are not completely pooled as a single estimate $\alpha_{k[i]} = \alpha$, which would imply that all parameter estimates are constant for all genes/arrays, a situation that would be completely uninformative for our goal to identify gene-level effects, and which would *underfit* our model. It also means that our estimates are not completely unpooled, which would allow all parameter estimates to vary independently. That situation would be equivalent to simultaneously fitting independent linear relationships to each gene, and so risk *overfitting* our model to the measured data.
<br /><div class="alert-warning">
<b>NOTE: By using a *pooled distribution*, we allow a parameter estimate for each gene to influence the estimates of that parameter for all other genes in the experiment, constrained by an expected distribution of that parameter's values. To do this, we define a *prior distribution* for each parameter, but we do not specify its mean or variance, allowing the parameters of these *pooled distributions* also to be estimated when fitting our model.</b>
</div>
For each parameter's *prior* we choose a Cauchy distribution, because it has fat tails and infinite variance. This does not constrain outlying and extreme values (those we are interested in) so much as other distributions (e.g. Normal or Student's *t*):
$$\alpha_{k[i]} \sim Cauchy(\mu_{\alpha}, \sigma_{\alpha}^2)$$
$$\beta_{j[i]} \sim Cauchy(\mu_{\beta}, \sigma_{\beta}^2)$$
$$\gamma_{k[i]} \sim Cauchy(\mu_{\gamma}, \sigma_{\gamma}^2)$$
$$\delta_{j[i]} \sim Cauchy(\mu_{\delta}, \sigma_{\delta}^2)$$
Each parameter's prior distribution requires a fit of both its mean and variance, and these also become parameters in our model. The means are free to vary, but we assume that the variance of each parameter's prior can be drawn from a Uniform distribution on the range (0, 100):
$$\sigma_{\alpha} \sim U(0, 100)$$
$$\sigma_{\beta} \sim U(0, 100)$$
$$\sigma_{\gamma} \sim U(0, 100)$$
$$\sigma_{\delta} \sim U(0, 100)$$
<div class="alert-success">
<b>We therefore construct the following model of the experiment:</b>
$$\hat{y_i} = \alpha_{k[i]} + \beta_{j[i]} x_i + \gamma_{k[i]} t_i + \delta_{j[i]} t_i x_i$$
$$y_i \sim N(\hat{y_i}, \sigma_y^2)$$
$$\alpha_{k[i]} \sim Cauchy(\mu_{\alpha}, \sigma_{\alpha}^2)$$
$$\beta_{j[i]} \sim Cauchy(\mu_{\beta}, \sigma_{\beta}^2)$$
$$\gamma_{k[i]} \sim Cauchy(\mu_{\gamma}, \sigma_{\gamma}^2)$$
$$\delta_{j[i]} \sim Cauchy(\mu_{\delta}, \sigma_{\delta}^2)$$
$$\sigma_{\alpha} \sim U(0, 100)$$
$$\sigma_{\beta} \sim U(0, 100)$$
$$\sigma_{\gamma} \sim U(0, 100)$$
$$\sigma_{\delta} \sim U(0, 100)$$
$$\sigma_y \sim U(0, \infty)$$
<ul>
<li> $y_i$: measured intensity output on the array for probe $i$ (specific to each replicate)
<li> $\hat{y_i}$: actual probe intensity for probe $i$ (specific to each replicate)
<li> $x_i$: measured intensity input on the array for probe $i$ (specific to each replicate)
<li> $t_i$: 0/1 pseudovariable indicating whether the probe $i$ was measured in a control (0) or treatment (1) experiment
<li> $\alpha_{k[i]}$: control effect offset for treatment X replicate $k$ (used for probe $i$)
<li> $\mu_{\alpha}$: mean control effect offset for all arrays
<li> $\sigma_{\alpha}$: control effect offset variance for all arrays
<li> $\beta_{j[i]}$: control effect slope for gene $j[i]$
<li> $\mu_{\beta}$: mean control effect slope for all genes
<li> $\sigma_{\beta}$: control effect slope variance for all genes
<li> $\gamma_{k[i]}$: treatment effect offset for treatment X replicate $k$ (used for probe $i$)
<li> $\mu_{\gamma}$: mean treatment effect offset for all arrays
<li> $\sigma_{\gamma}$: treatment effect offset variance for all arrays
<li> $\delta_{j[i]}$: treatment effect slope for gene $j[i]$
<li> $\mu_{\delta}$: mean treatment effect slope for all genes
<li> $\sigma_{\delta}$: treatment effect slope variance for all genes
<li> $\sigma_y$: variance in measurement due to irreducible error
</ul>
</div>
### Load input data for fit
In the cells below we load in the data to be fit, and define useful variables for inspecting/analysing the data later:
* `locus_tags`: the unique locus tags represented in the dataset
* `ntags`: the number of unique locus tags
* `arrays`: the arrays (combinations of replicate X treatment) used in the experiment
* `narrays`: the number of arrays used
* `outdir`: path to the directory in which to place model fit output
* `outfile`: path to the model fit output file (pickled dataframe)
```
# Load the clean, normalised, indexed array data (tab-separated)
data = pd.read_csv(os.path.join("datasets", "normalised_array_data.tab"), sep="\t")  # full dataset
#data = pd.read_csv("datasets/reduced_locus_data.tab", sep="\t") # reduced dataset
#data = data[:100] # uncomment this for debugging

# Convenience values: the distinct locus tags and arrays (replicate X treatment)
locus_tags = data['locus_tag'].unique()
arrays = data['repXtrt'].unique()
ntags, narrays = len(locus_tags), len(arrays)

# Output directory and filename that will hold the fitted model (pickled dataframe)
outdir = "model_fits"
outfile = os.path.join(outdir, 'full_model_fit.pkl')
os.makedirs(outdir, exist_ok=True)
```
### Stan model construction <a id="build_stan"></a>
We need to define `data`, `parameters` and our `model` for [`Stan`](http://mc-stan.org/).
<div class="alert-success">
In the `data` block, we have:
<ul>
<li> `N`: `int`, the number of data points
<li> `J`: `int`, the number of unique locus tags (`J` < `N`)
<li> `K`: `int`, the number of unique treatment X replicate combinations (arrays)
<li> `array`: `int[N]`, an index list of arrays
<li> `tag`: `int[N]`, an index list of locus tags
<li> `t`: `vector[N]`, 0/1 control/treatment values for each probe
<li> `x`: `vector[N]`, the input log(intensity) values
<li> `y`: `vector[N]`, the output log(intensity) values
</ul>
In the `parameter` block, we have:
<ul>
<li> `a`: `real vector[K]`, estimated offset effect on log(intensity) of the *control* for each array
<li> `mu_a`: `real`, an unconstrained value to be fit that represents the mean of the Cauchy distribution for the *control* effect offset, for all arrays
<li> `sigma_a`: `real<lower=0,upper=100>`, standard deviation of the Cauchy distribution for the *control* effect offset, for all arrays
<li> `b`: `real vector[J]`, estimated slope effect on log(intensity) of the *control* for each locus tag/gene
<li> `mu_b`: `real`, an unconstrained value to be fit that represents the mean of the Cauchy distribution for the *control* effect slope, for all locus tags
<li> `sigma_b`: `real<lower=0,upper=100>`, standard deviation of the Cauchy distribution for the *control* effect slope, for all locus tags
<li> `g`: `real vector[K]`, estimate of the influence of treatment on the output measured intensity (offset) for array
<li> `mu_g`: `real`, an unconstrained value to be fit that represents the mean of the Cauchy distribution for the offset for all arrays due to *treatment*
<li> `sigma_g`: `real<lower=0,upper=100>`, standard deviation of the Cauchy distribution for the offset for all arrays due to *treatment*
<li> `d`: `real vector[J]`, estimate of the influence of treatment on the output measured intensity (slope) for each locus tag/gene
<li> `mu_d`: `real`, an unconstrained value to be fit that represents the mean of the Cauchy distribution for the slope for all locus tags due to *treatment*
<li> `sigma_d`: `real<lower=0,upper=100>`, standard deviation of the Cauchy distribution for the slope for all locus tags due to *treatment*
<li> `sigma`: `real<lower=0>`, the irreducible error in the experiment/model
</ul>
We also define a `transformed parameter`:
<ul>
<li> `y_hat[i] <- a[array[i]] + b[tag[i]] * x[i] + g[array[i]] * t[i] + d[tag[i]] * t[i] * x[i]`: the linear relationship describing $\hat{y}$, our estimate of experimental output intensity, which is subject to variance `sigma`.
</ul>
</div>
### Define and fit the Stan model <a id="fit_stan"></a>
In the cells below we define the model to be fit, in the Stan language, conduct the fit, and save the fit out to a pickled dataframe (or load it in from one, depending on which code is commented out).
```
# Define the Stan model.  NOTE: despite the original "unpooled" label, this is
# a partially-pooled (hierarchical) model: array-level offsets (a, g) and
# gene-level slopes (b, d) are each drawn from a shared Cauchy prior whose
# location (mu_*) and scale (sigma_*) are themselves estimated -- matching the
# mathematical model described in the "Building the model" section above.
treatment_model = """
data {
int<lower=0> N;
int<lower=0> J;
int<lower=0> K;
int<lower=1, upper=J> tag[N];
int<lower=1, upper=K> array[N];
vector[N] t;
vector[N] x;
vector[N] y;
}
parameters {
vector[K] a;
vector[J] b;
vector[K] g;
vector[J] d;
real mu_a;
real mu_b;
real mu_g;
real mu_d;
real<lower=0> sigma;
real<lower=0,upper=100> sigma_a;
real<lower=0,upper=100> sigma_b;
real<lower=0,upper=100> sigma_g;
real<lower=0,upper=100> sigma_d;
}
transformed parameters{
vector[N] y_hat;
for (i in 1:N)
y_hat[i] = a[array[i]] + b[tag[i]] * x[i] + g[array[i]] * t[i] + d[tag[i]] * t[i] * x[i];
}
model {
sigma_a ~ uniform(0, 100);
a ~ cauchy(mu_a, sigma_a);
sigma_b ~ uniform(0, 100);
b ~ cauchy(mu_b, sigma_b);
sigma_g ~ uniform(0, 100);
g ~ cauchy(mu_g, sigma_g);
sigma_d ~ uniform(0, 100);
d ~ cauchy(mu_d, sigma_d);
y ~ normal(y_hat, sigma);
}
"""
# Relate python variables to stan variables.
# Stan indexes arrays from 1, hence the "+ 1" on the 0-based pandas index columns.
treatment_data_dict = {'N': len(data),
'J': ntags,
'K': narrays,
'tag': data['locus_tag_index'] + 1,
'array': data['repXtrt_index'] + 1,
't': data['treatment'],
'x': data['log_input'],
'y': data['log_output']}
```
<div class="alert-danger">
<b>At this point, you have two options to obtain the model fit data</b>
</div>
1. Run the model fit 'live' in the notebook. This may take several hours. **USE CELL (1)**
1. (optionally) save the newly-generated model fit to a local file. **USE CELL (2)**
2. Load the model fit from a local file. **USE CELL (4)**
1. If you have not generated the data locally, then you can download it from Zenodo **USE CELL (3) FIRST**.
It may be quicker to download the data from Zenodo using the button below, than to use cell (3), but be sure to place the downloaded file in the correct location as specified in the variable `outfile`.
[](https://doi.org/10.5281/zenodo.269638)
```
# (1) USE THIS CELL TO RUN THE STAN FIT - takes a few hours on my laptop
#treatment_fit = pystan.stan(model_code=treatment_model,
#                            data=treatment_data_dict,
#                            iter=1000, chains=2,
#                            seed=tools.SEED)

# (2) USE THIS CELL TO SAVE THE STAN FIT TO A PICKLE FILE
#unpermutedChains = treatment_fit.extract()
#unpermutedChains_df = pd.DataFrame([dict(unpermutedChains)])
#pickle.dump(unpermutedChains_df, open(outfile, 'wb'))

# (3) USE THIS CELL TO DOWNLOAD THE STAN FIT FROM ZENODO: DOI:10.5281/zenodo.269638
# The file will not be downloaded if it already exists locally.
# The file is 0.5GB in size, so may take some time to download
import urllib.request
if not os.path.isfile(outfile):
    zenodo_url = "https://zenodo.org/record/269638/files/full_model_fit.pkl"
    response = urllib.request.urlretrieve(zenodo_url, outfile)

# (4) USE THIS CELL TO LOAD THE STAN FIT FROM A PICKLE FILE
# pd.read_pickle accepts a path directly; the previous open(outfile, 'rb')
# handed over a file handle that was never closed (leaked descriptor)
treatment_fit = pd.read_pickle(outfile)
```
### Extract the fit <a id="extract_stan"></a>
<br /><div class="alert-warning">
<b>In the cells below we load in the contents of the pickled output (if the fit has already been run), and then extract useful summary information about mean, median, variance, and credibility intervals for the parameter estimates.</b>
</div>
<div class="alert-success">
<ul>
<li> parameters $\alpha$, $\beta$, $\gamma$ and $\delta$ are represented by their Roman letter equivalents `a`, `b`, `g` and `d`.
<li> `*_mean` and `*_median` are the mean and median estimates for the parameter over the ensemble
<li> `*_sem` is the standard deviation for the parameter estimate over the ensemble
<li> `*_Npc` is the *N*th percentile for the parameter estimate, over the ensemble. These can be combined to obtain credibility intervals (e.g. the range `a_25pc`..`a_75pc` constitutes the 50% CI for $\alpha_{k[i]}$).
</div>
```
# Get summary data (mean/median/sd/percentiles) for the parameter estimates.
# Use 'fit' for a live model fit object, and 'df' for loaded pickled data.
(estimates_by_probe, estimates) = tools.extract_variable_summaries(treatment_fit, 'df',
                                                                   ['a', 'b', 'g', 'd'],
                                                                   [arrays, locus_tags, arrays, locus_tags],
                                                                   data)
# Inspect the data, one row per experiment probe (array-level parameters a, g)
estimates_by_probe.head()
# Inspect the data, one row per locus tag (gene-level parameters b, d)
estimates.head()
# Separate estimates for Sakai and DH10B into two different dataframes
sakai_estimates = tools.split_estimates(estimates, 'sakai')
dh10b_estimates = tools.split_estimates(estimates, 'dh10b')
```
## Inspecting the fit <a id="inspect_fit"></a>
In the cells below, we visualise the fitted estimates for each of the parameters $\alpha$, $\beta$, $\gamma$, and $\delta$ as:
* box plots of median estimates for each locus tag
* relationship between control and treatment effects in Sakai
* plots of 50% credibility interval range and median estimate for each locus tag to identify locus tags with a possible selective advantage
### Median parameter estimates <a id="median_estimates"></a>
We first inspect the range of fitted estimates to get an overview of the relationships for the data as a whole, and then examine whether this relationship varies by *E. coli* isolate.
Making boxplots for the full set of fitted parameter estimates, for both isolates:
```
# Visualise median values for parameter estimates of alpha and gamma
# (array-level offsets, hence plotted per probe)
tools.boxplot_medians(estimates_by_probe, ['a', 'g'])
# Visualise median values for parameter estimates of beta and delta
# (gene-level slopes, hence plotted per locus tag)
tools.boxplot_medians(estimates, ['b', 'd'])
```
<div class="alert-success">
For this fit we can see that the estimates are all, in the main, tightly-distributed. Most estimated (median) values of $\alpha$ (control intercept), $\gamma$ (treatment intercept), and $\delta$ (treatment slope) are close to zero. Most estimated values of $\beta$ are close to (but slightly less than) unity.
<b>This implies that:</b>
<ul>
<li> <b>The linear relationship between input and output intensity due to the control effects (growth only) is, for most genes in the experiment, a slight reduction of output intensity with respect to input intensity value, and on the whole the effect of the control/growth is neutral [median $\alpha$ ≈ 0, median $\beta$ ≈ 1]</b>
<li> <b>For most genes in the experiment there is no treatment effect due to exposure to the plant [median $\gamma$ ≈ 0, median $\delta$ ≈ 0]</b>
</ul>
</div>
<br /><div class="alert-warning">
<b>There are, however, a considerable number of outlying median values for each parameter, which suggests that a number of genes have associated parameter values that are affected by either control (growth) or treatment (passage).</b>
</div>
#### DH10B
Considering boxplots of estimated $\beta_{j[i]}$ and $\delta_{j[i]}$ for the DH10B (carrier) isolate only:
```
# Visualise median values for DH10B (carrier isolate) parameter estimates
tools.boxplot_medians(dh10b_estimates, ['b', 'd'])
```
it is clear that the median parameter estimates for DH10B are extremely restricted in their range:
* $0.93 < \beta < 0.98$
* $-0.065 < \delta < 0.045$
<div class="alert-success">
The control effect appears to be essentially *neutral*, in that the output intensity is almost a 1:1 linear relationship with the input intensity, but it is striking that the median estimates of $\gamma$ and $\delta$ are very close to zero, suggesting that passage (treatment) has almost no effect on this relationship, for any DH10B locus tag.
<b>This is exactly what would be expected for DH10B as the carrier isolate.</b>
</div>
#### Sakai
Considering the Sakai isolate parameter estimates for $\beta_{j[i]}$ and $\delta_{j[i]}$ only:
```
# Visualise median values for Sakai parameter estimates (b = beta, d = delta)
tools.boxplot_medians(sakai_estimates, ['b', 'd'])
```
By contrast to the results for DH10B, the median parameter estimates for Sakai have many large value outliers, though the bulk of estimates are close to the values seen for DH10B:
* $0.2 < \beta < 1.4$
* $-1.5 < \delta < 0.5$
<div class="alert-success">
This indicates that we see the expected result, that strong variability of control and treatment effects are effectively confined to the Sakai BAC fragments.
<b>It is expected that some genes/operons may be relatively advantageous in either growth (control) or passage (treatment) conditions, or both.</b>
</div>
We can visualise the relationships between parameter estimates for control and treatment effects in a scatterplot of control effect ($\beta$) against treatment effect ($\delta$) for each locus tag. This plot can be considered in four quadrants, which are delineated by the bulk of the data which describes orthogonal effects of locus tags on growth and treatment:
<br /><div class="alert-success">
<b>(i.e. for most locus tags, there is *either* an effect on treatment or control, but *not both*)</b>
</div>
* (upper left) positive effect on growth, negative effect for treatment: may be related to ability to use growth medium more efficiently
* (upper right) positive effect on both growth and treatment: no locus tags display this characteristic
* (lower right) positive effect on treatment, negative effect for control: may be related to ability to use/exploit the plant, that is suppressive in the medium
* (lower left) negative effect on both growth and treatment: most locus tags that display an interaction lie in this group
```
# Plot estimated parameters for treatment effects against control effects for Sakai.
# x-axis: delta (treatment slope); y-axis: beta (control slope).  The cross-like
# shape shows most genes affect either control or treatment, not both.
fig, ax = plt.subplots(1, 1, figsize=(6,6))
ax.scatter(sakai_estimates['d_median'], sakai_estimates['b_median'], alpha=0.2)
ax.set_xlabel('delta (median)')
ax.set_ylabel('beta (median)');
```
<br /><div class="alert-warning">
The strong cross-like distribution indicates that most parameter estimates of $\beta$ or $\delta$ that vary from those of the bulk do so orthogonally in either *treatment* or *control* conditions, but not both.
<br /><br />
<b>Where Sakai genes have an estimated effect under both conditions, this is typically negative for both treatment and control (lower left quadrant).</b>
</div>
## Identifying locus tags that confer an advantage under treatment and/or control <a id="locus_tags"></a>
We use a 50% credibility interval to determine whether the effect of a gene on passage is likely to be positive. Under this assumption, we identify locus tags for which the median estimate of $\delta$ is positive, and the central 50% of the parameter estimates for $\delta$ (the 50% credibility interval) does not include zero. We label these locus tags as `trt_pos` in the dataframe.
<br /><div class="alert-success">
These locus tags correspond to the genes that we should believe confer a selective advantage in passage/*treatment* (i.e. we require our estimate to be credibly positive).
</div>
Likewise, we use a 50% credibility interval to determine whether the effect of a gene on surviving growth (control) is positive. If the 50% CI for $\beta$ does not include the 97.5 percentile for all estimates of $\beta$ (as an upper estimate of overall dataset centrality for this dataset), and the median value of $\beta$ is greater than this value, we consider that the effect of the gene on surviving growth conditions is positive. We label these locus tags as `ctl_pos` in the dataframe.
```
# Label locus tags with positive effects for control ('ctl_pos') and
# treatment ('trt_pos'), based on 50% credibility intervals
sakai_estimates = tools.label_positive_effects(sakai_estimates)
```
We can count the number of locus_tags in each of the groups:
```
# Tally the locus tags flagged in each positive-effect category
counts = [sakai_estimates[group].sum() for group in ('trt_pos', 'ctl_pos', 'combined')]
print("treatment positive: {0}\ncontrol positive: {1}\nboth: {2}".format(*counts))
```
which indicates, with these assumptions, that:
<div class="alert-success">
<ul>
<b>
<li> 115 genes have a credible positive effect on passage (treatment)
<li> 65 genes have a credible positive effect in the growth (control) step
<li> no genes have a credible positive effect for both growth and treatment.
</b>
</ul>
</div>
(this confirms our observation in the earlier scatterplot)
### Plotting distribution of effects on the Sakai genome <a id="plot_effects"></a>
We can show the estimated effects, and our confidence in those estimates, on a rough representation of the genome by plotting those values for each locus tag, sorted in order on the genome.
In the plots that follow, parameter estimates for each locus tag are rendered as points (the median estimate), with the 50% credibility interval for the estimate indicated as a vertical line. If the 50% CI includes a threshold value - the median value for the bulk parameter estimate of $\beta$ or $\delta$ - then we consider that there is not strong evidence of an effect on survival due to that gene (compared to the bulk), and the interval is coloured blue.
If the interval does not include the corresponding threshold value, then it is coloured either green for a *positive* effect, or magenta for a *negative* effect.
#### Sakai
We split the Sakai estimates into groups: one for the chromosome, and one for each plasmid pOSAK and pO157, on the basis of the locus tag prefixes, annotating them with their start position on the parent molecule.
```
# Partition the Sakai estimates by parent molecule, using locus tag prefixes
sakai_chromosome = sakai_estimates.loc[sakai_estimates['locus_tag'].str.startswith('ECs')]
sakai_pOSAK = sakai_estimates.loc[sakai_estimates['locus_tag'].str.startswith('pOSAK1')]
sakai_pO157 = sakai_estimates.loc[(sakai_estimates['locus_tag'].str.startswith('pO157')) |
                                  (sakai_estimates['locus_tag'].str.startswith('ECp'))]

# All three molecules are annotated from the same Sakai GenBank file
sakai_gbff = os.path.join('..', 'data', 'Sakai', 'GCF_000008865.1_ASM886v1_genomic.gbff')

# Sakai chromosome: annotate with start positions, then sort into genome order
sakai_chromosome_annotated = tools.annotate_locus_tags(sakai_chromosome, sakai_gbff)
sakai_chromosome_annotated.sort_values('startpos', inplace=True)
#sakai_chromosome_annotated.head(15)

# Plasmid pOSAK1
sakai_pOSAK_annotated = tools.annotate_locus_tags(sakai_pOSAK, sakai_gbff)
sakai_pOSAK_annotated.sort_values('startpos', inplace=True)
#sakai_pOSAK_annotated.head(15)

# Plasmid pO157 (pECp)
sakai_pO157_annotated = tools.annotate_locus_tags(sakai_pO157, sakai_gbff)
sakai_pO157_annotated.sort_values('startpos', inplace=True)
#sakai_pO157_annotated.head(15)
# Regions of interest: (label, first locus tag, last locus tag, label height)
regions = [('S-loop 71', 'ECs1276', 'ECs1288', 1.3),
           ('SpLE1', 'ECs1299', 'ECs1410', 1.5),
           ('S-loop 225', 'ECs4325', 'ECs4341', 1.5),
           ('S-loop 231', 'ECs4379', 'ECs4387', 1.3)]
annotations = {name: (tools.get_lt_index(start, sakai_chromosome_annotated),
                      tools.get_lt_index(end, sakai_chromosome_annotated), height)
               for name, start, end, height in regions}

# Genome-wide estimates of beta for the Sakai chromosome, marking locus tags
# whose 50% CI excludes the median beta (the threshold)
beta_thresh = np.median(sakai_chromosome_annotated['b_median'])
fig = plt.figure(figsize=(20, 8))
ax = fig.add_subplot(1, 1, 1)
title = 'Estimates of beta for Sakai chromosome'
plt.title(f"{title} [threshold: {beta_thresh:.2f}]")
tools.plot_parameter(sakai_chromosome_annotated, ax, 'b', beta_thresh, annotations=annotations);
# Regions of interest: (label, first locus tag, last locus tag, label height)
regions = [('S-loop 71', 'ECs1276', 'ECs1288', 1),
           ('SpLE1', 'ECs1299', 'ECs1410', 1.8),
           ('S-loop 225', 'ECs4325', 'ECs4341', 1.8),
           ('S-loop 231', 'ECs4379', 'ECs4387', 1)]
annotations = {name: (tools.get_lt_index(start, sakai_chromosome_annotated),
                      tools.get_lt_index(end, sakai_chromosome_annotated), height)
               for name, start, end, height in regions}

# Genome-wide estimates of delta for the Sakai chromosome, marking locus tags
# whose 50% CI excludes the median delta (the threshold)
delta_thresh = np.median(sakai_chromosome_annotated['d_median'])
fig = plt.figure(figsize=(20, 8))
ax = fig.add_subplot(1, 1, 1)
title = 'Estimates of delta for Sakai chromosome'
plt.title(f"{title} [threshold: {delta_thresh:.2f}]")
tools.plot_parameter(sakai_chromosome_annotated, ax, 'd', delta_thresh, annotations=annotations)
# Genome-wide estimates of beta for plasmid pOSAK1, marking locus tags whose
# 50% CI excludes the median beta (the threshold)
beta_thresh = np.median(sakai_pOSAK_annotated['b_median'])
fig = plt.figure(figsize=(20, 8))
ax = fig.add_subplot(1, 1, 1)
title = 'Estimates of beta for Sakai plasmid pOSAK'
plt.title(f"{title} [threshold: {beta_thresh:.2f}]")
tools.plot_parameter(sakai_pOSAK_annotated, ax, 'b', beta_thresh)
# Plot genome-wide estimates of delta for plasmid pOSAK1, marking locus tags
# whose 50% CI excludes the median delta (the threshold)
delta_thresh = np.median(sakai_pOSAK_annotated['d_median'])
# Create figure with title to hold the plotted axis
fig = plt.figure(figsize=(20, 8))
ax = fig.add_subplot(1, 1, 1)
title = 'Estimates of delta for Sakai plasmid pOSAK'
# BUG FIX: the title previously reported beta_thresh for this delta plot
plt.title("{0} [threshold: {1:.2f}]".format(title, delta_thresh))
tools.plot_parameter(sakai_pOSAK_annotated, ax, 'd', delta_thresh)
# Regions of interest on plasmid pO157: (label, first/last locus tag, height)
regions = [('StcE', 'pO157p01', 'pO157p01', 0.98),
           ('etp T2SS', 'pO157p02', 'pO157p14', 1)]
annotations = {k: (tools.get_lt_index(v0, sakai_pO157_annotated),
                   tools.get_lt_index(v1, sakai_pO157_annotated), v2) for
               k, v0, v1, v2 in regions}
# Plot genome-wide estimates of beta for pO157 and mark values whose 50% CI
# excludes the median beta (the threshold)
beta_thresh = np.median(sakai_pO157_annotated['b_median'])
# Create figure with title to hold the plotted axis
fig = plt.figure(figsize=(20, 8))
ax = fig.add_subplot(1, 1, 1)
title = 'Estimates of beta for Sakai plasmid pO157'  # FIX: title said 'p0157' (zero)
plt.title("{0} [threshold: {1:.2f}]".format(title, beta_thresh))
tools.plot_parameter(sakai_pO157_annotated, ax, 'b', beta_thresh, annotations=annotations)
# Regions of interest on plasmid pO157: (label, first/last locus tag, height)
regions = [('StcE', 'pO157p01', 'pO157p01', 0.13),
           ('etp T2SS', 'pO157p02', 'pO157p14', 0.19)]
annotations = {k: (tools.get_lt_index(v0, sakai_pO157_annotated),
                   tools.get_lt_index(v1, sakai_pO157_annotated), v2) for
               k, v0, v1, v2 in regions}
# Plot genome-wide estimates of delta for pO157 and mark values whose 50% CI
# excludes the median delta (the threshold)
delta_thresh = np.median(sakai_pO157_annotated['d_median'])
# Create figure with title to hold the plotted axis
fig = plt.figure(figsize=(20, 8))
ax = fig.add_subplot(1, 1, 1)
title = 'Estimates of delta for Sakai plasmid pO157'
# BUG FIX: the title previously reported beta_thresh for this delta plot
plt.title("{0} [threshold: {1:.2f}]".format(title, delta_thresh))
tools.plot_parameter(sakai_pO157_annotated, ax, 'd', delta_thresh, annotations=annotations)
```
<div class="alert-success">
These plots indicate that most Sakai genes do not produce parameter estimates that are indicative of credible effects in the control or treatment, in either direction.
<br /><br />
Where effects are seen they tend to cluster on the genome, which is as would be expected if operons or gene clusters with common function were responsible for producing an effect. This is suggestive that we are measuring a biological effect, rather than noise.
<br /><br />
<b>In general, several clusters of both positive and negative effects appear in the chromosome and pO157 plots for effects due to control ($\beta$) and treatment ($\delta$).</b>
</div>
#### DH10B
We plot similar representations for the DH10B isolate as a control, and see that all parameter estimates for this isolate's locus tags are very similar.
<br /><div class="alert-warning">
There is a weak sinusoidal pattern of fitted estimates. As no gene ordering information is available to the model fit, and there is an apparent symmetry to this pattern, it may reflect a real underlying biological process or structure.
</div>
```
# Annotate the DH10B results with genome start positions and sort into genome order
dh10b_annotated = tools.annotate_locus_tags(dh10b_estimates,
                                            os.path.join('..', 'data', 'DH10B',
                                                         'GCF_000019425.1_ASM1942v1_genomic.gbff'))
dh10b_annotated.sort_values('startpos', inplace=True)

# Plot genome-wide estimates of beta for DH10B
beta_thresh = np.median(dh10b_estimates['b_median'])
# Create figure with title to hold the plotted axis
fig = plt.figure(figsize=(20, 8))
ax = fig.add_subplot(1, 1, 1)
# BUG FIX: a trailing comma previously made `title` a 1-tuple, so the plot
# title rendered as a tuple repr
title = 'Estimates of beta for DH10B'
plt.title("{0} [threshold: {1:.2f}]".format(title, beta_thresh))
# Plot the position-sorted, annotated frame; the unsorted `dh10b_estimates`
# was plotted before, discarding the sort above.  NOTE(review): the Sakai
# cells all pass the *_annotated frame -- confirm this is intended here too.
tools.plot_parameter(dh10b_annotated, ax, 'b', beta_thresh)

# Plot genome-wide estimates of delta for DH10B
delta_thresh = np.median(dh10b_estimates['d_median'])
# Create figure with title to hold the plotted axis
fig = plt.figure(figsize=(20, 8))
ax = fig.add_subplot(1, 1, 1)
title = 'Estimates of delta for DH10B'
# BUG FIX: the title previously reported beta_thresh for this delta plot
plt.title("{0} [threshold: {1:.2f}]".format(title, delta_thresh))
tools.plot_parameter(dh10b_annotated, ax, 'd', delta_thresh)
```
### Identifying Sakai candidates <a id="candidates"></a>
From the information above, we can list the 180 Sakai genes/locus tags that appear to impart a positive selective effect on treatment/passage (the green points/bars in the plots immediately above).
```
# Build the list of candidates credibly positive under control or treatment
positive_mask = sakai_estimates['ctl_pos'] | sakai_estimates['trt_pos']
keep_cols = ['locus_tag', 'b_median', 'ctl_pos', 'd_median', 'trt_pos']
candidates = (sakai_estimates.loc[positive_mask, keep_cols]
                             .sort_values(['ctl_pos', 'trt_pos', 'locus_tag']))
candidates.shape
# Inspect the data
candidates.head()
```
We restrict this set to those genes that only have a credible effect on treatment/passage, identifying 115 genes with positive $\delta$ where the 50% CI does not include zero:
```
# Keep only candidates with a credible positive effect under treatment/passage
# but not under control
trt_only_positive = candidates[candidates['trt_pos'] & ~candidates['ctl_pos']]
trt_only_positive.shape
```
We add a column with the functional annotation of each of the candidates that appear to have a positive selective effect under treatment conditions:
```
# Annotate locus tags with functional descriptions from the NCBI GenBank file
annotated = tools.annotate_locus_tags(trt_only_positive,
                                      os.path.join('..', 'data', 'Sakai',
                                                   'GCF_000008865.1_ASM886v1_genomic.gbff'))
pd.options.display.max_rows = 115  # force the notebook to show all rows
annotated
```
Finally, we write this data out in tab-separated format
```
# Write the annotated treatment-positive candidates to file, tab-separated
outfile_annotated = os.path.join('datasets', 'trt_positive.tab')
annotated.to_csv(outfile_annotated, sep="\t")
```
<a id="figure_1"></a>
## Manuscript Figure 1
The code in the cell below will reproduce figure 1 from the manuscript.
```
# Create figure with no title or xticks to hold the three plotted axes
fig, (ax1, ax2, ax3) = plt.subplots(3, 1, figsize=(20, 26))

def _region_annotations(regions, annotated_df):
    # Map region name -> (start index, end index, label height) for plot_parameter
    return {name: (tools.get_lt_index(start, annotated_df),
                   tools.get_lt_index(end, annotated_df), height)
            for name, start, end, height in regions}

# 1) Sakai chromosome
regions = [('S-loop 71', 'ECs1276', 'ECs1288', 1),
           ('SpLE1', 'ECs1299', 'ECs1410', 1.8),
           ('S-loop 225', 'ECs4325', 'ECs4341', 1.8),
           ('S-loop 231', 'ECs4379', 'ECs4387', 1)]
annotations = _region_annotations(regions, sakai_chromosome_annotated)
delta_thresh = np.median(sakai_chromosome_annotated['d_median'])
tools.plot_parameter(sakai_chromosome_annotated, ax1, 'd', delta_thresh, annotations=annotations,
                     label="a) Sakai chromosome")

# 2) pO157 plasmid
regions = [('StcE', 'pO157p01', 'pO157p01', 0.13),
           ('etp T2SS', 'pO157p02', 'pO157p14', 0.19)]
annotations = _region_annotations(regions, sakai_pO157_annotated)
delta_thresh = np.median(sakai_pO157_annotated['d_median'])
tools.plot_parameter(sakai_pO157_annotated, ax2, 'd', delta_thresh, annotations=annotations,
                     label="b) Sakai pO157")

# 3) DH10B chromosome
delta_thresh = np.median(dh10b_estimates['d_median'])
tools.plot_parameter(dh10b_estimates, ax3, 'd', delta_thresh, label="c) DH10B chromosome")

# Save figure as pdf
plt.savefig("figure_1.pdf");
```
| github_jupyter |
```
# Facial-expression-recognition data preparation (ICML 2013 / FER-style dataset).
import pandas as pd
import cv2
import numpy as np
import matplotlib.pyplot as plt
# NOTE(review): hard-coded local Windows path — adjust when running elsewhere.
expression_df=pd.read_csv("C:/Users/user/Desktop/New folder/icml_face_data.csv")
expression_df.head()
# The CSV column names carry a leading space (' Usage', ' pixels') — keep it.
expression_df[' Usage'].unique()
expression_df['emotion'].unique()
import collections
# class frequencies of the integer emotion labels
collections.Counter(np.array(expression_df['emotion']))
expression_df[' pixels'][0]
# One sample: the ' pixels' column is a space-separated string of 48*48 grey values.
img=expression_df[' pixels'][920]
img=np.array(img.split(' ')).reshape(48,48,1).astype('float32')
img.shape
plt.imshow(img.squeeze(),cmap='gray')
# Decode every row into a (48, 48, 1) float image.
images_list=np.zeros((len(expression_df),48,48,1))
images_list.shape
# One-hot encode the emotion labels for categorical cross-entropy training.
images_label=pd.get_dummies(expression_df['emotion'])
images_label
for idx in range(len(expression_df)):
single_pic=np.array(expression_df[' pixels'][idx].split(' ')).reshape(48,48,1)
images_list[idx]=single_pic
images_list.shape
images_list[35886].shape
expression_df['emotion'].value_counts()
expression_df['emotion'].value_counts().index
import seaborn as sns
# class-balance bar chart (the dataset is imbalanced across emotions)
sns.barplot(x=expression_df['emotion'].value_counts().index,y=expression_df['emotion'].value_counts())
plt.title('Number of images per emotion')
from sklearn.model_selection import train_test_split
# 80% train; the remaining 20% is split evenly into validation and test.
X_train,X_Test,y_train,y_Test=train_test_split(images_list,images_label,test_size=0.20,shuffle=True)
X_val,X_Test,y_val,y_Test=train_test_split(X_Test,y_Test,test_size=0.5, shuffle=True)
X_train.shape
X_Test.shape
X_val.shape
X_train
```
# Normalizing
```
# Scale pixel intensities from [0, 255] to [0, 1] for stable training.
X_train = X_train / 255
X_val = X_val / 255
X_Test = X_Test / 255

# Use the tensorflow.keras namespace consistently (mixing `keras` and
# `tensorflow.keras` imports breaks on modern TensorFlow installs).
from tensorflow.keras.preprocessing.image import ImageDataGenerator

# Augmentation pipeline.  NOTE(review): it is fitted below but never passed to
# model.fit(); to actually train with augmentation use
# model.fit(datagen.flow(X_train, y_train, batch_size=batch_size), ...).
datagen = ImageDataGenerator(
    featurewise_center=False,
    samplewise_std_normalization=False,
    zca_whitening=False,
    featurewise_std_normalization=True,
    rotation_range=30,
    width_shift_range=0.1,
    height_shift_range=0.1,
    zoom_range=0.2)
datagen.fit(X_train)

# Hyperparameters
input_reshape = (48, 48, 1)   # 48x48 single-channel (grayscale) input
epochs = 10
batch_size = 128
hidden_num_units = 256
output_num_units = 7          # seven emotion classes
pool_size = (2, 2)

import tensorflow.keras as keras
from tensorflow.keras.models import Sequential
# BatchNormalization lives in tensorflow.keras.layers; the old
# `keras.layers.normalization` module path no longer exists.
from tensorflow.keras.layers import (Dense, Activation, Dropout, Convolution2D,
                                     Flatten, MaxPooling2D, Reshape, InputLayer,
                                     BatchNormalization)
from tensorflow.keras.preprocessing.image import load_img

# Simple CNN: three conv blocks followed by a small dense classifier.
model = Sequential([
    Convolution2D(32, (3, 3), activation='relu', input_shape=input_reshape),
    Convolution2D(64, (3, 3), activation='relu'),
    MaxPooling2D((2, 2)),
    Convolution2D(64, (3, 3), activation='relu'),
    MaxPooling2D((2, 2)),
    Convolution2D(64, (3, 3), activation='relu'),
    Flatten(),
    Dense(64, 'relu'),
    Dense(7, 'softmax')
])
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
trained_model_cov = model.fit(X_train, y_train, epochs=epochs, batch_size=batch_size,
                              validation_data=(X_val, y_val))

# Sequential.predict_classes() was removed in TF 2.6; take the argmax of the
# predicted class probabilities instead.
np.argmax(model.predict(X_Test[1000].reshape(1, 48, 48, 1)), axis=1)
# Show the image that was actually classified: X_Test comes from a shuffled
# split, so images_list[1000] would be a *different* picture.
plt.imshow(X_Test[1000].squeeze(), cmap='gray')
```
| github_jupyter |
```
import course;course.header()
```
# The csv module
There are several ways to interact with files that contain data in a "comma separated value" format.
We cover the [basic csv module](https://docs.python.org/3/library/csv.html), as it is sometimes really helpful to retain only a fraction of the information of a csv to avoid memory overflow.
```
import csv
with open("../data/amino_acid_properties.csv") as aap:
aap_reader = csv.DictReader(aap, delimiter=",")
for line_dict in aap_reader:
print(line_dict)
break
```
`print` output is not always very readable — use pretty print! :)
```
import pprint
pprint.pprint(line_dict)
```
The hydropathy index is the energy released or required to transfer the amino acid from water to a hydrophobic environment.
- Arg: +4.5 kcal/mol
- Ile: -4.5 kcal/mol
We can also use the csv module to write csvs, or tab separated value files if we change the delimiter to "\t"
```
# NOTE: this cell raises a ValueError on purpose — the row dict contains the
# key "1-letter code", which is not listed in `fieldnames`.  The "Fix it!"
# section below shows how DictWriter's extrasaction option handles this.
with open("../data/test.csv", "w") as output:
aap_writer = csv.DictWriter(output, fieldnames=["Name", "3-letter code"])
# write the header line ("Name,3-letter code") first
aap_writer.writeheader()
aap_writer.writerow({"Name": "Alanine", "3-letter code": "Ala", "1-letter code": "A"})
# Jupyter shell escape (not Python): show the file we just wrote
!cat ../data/test.csv
```
## Fix it!
```
# fix it
# Write to the same file as the broken cell above ("c" was a placeholder
# filename).  extrasaction='ignore' makes DictWriter silently drop dict keys
# (here "1-letter code") that are not listed in `fieldnames` instead of
# raising a ValueError.
with open("../data/test.csv", "w") as output:
    aap_writer = csv.DictWriter(output, fieldnames=["Name", "3-letter code"], extrasaction='ignore')
    aap_writer.writeheader()
    aap_writer.writerow({"Name": "Alanine", "3-letter code": "Ala", "1-letter code": "A"})
```
# Collections - high performance containers ... sorta
## [collections.Counter](https://docs.python.org/3.7/library/collections.html#counter-objects)
A counter tool is provided to support convenient and rapid tallies. For example
```
from collections import Counter
s = """
MQRLMMLLATSGACLGLLAVAAVAAAGANPAQRDTHSLLPTHRRQKRDWIWNQMHIDEEK
NTSLPHHVGKIKSSVSRKNAKYLLKGEYVGKVFRVDAETGDVFAIERLDRENISEYHLTA
VIVDKDTGENLETPSSFTIKVHDVNDNWPVFTHRLFNASVPESSAVGTSVISVTAVDADD
PTVGDHASVMYQILKGKEYFAIDNSGRIITITKSLDREKQARYEIVVEARDAQGLRGDSG
TATVLVTLQDINDNFPFFTQTKYTFVVPEDTRVGTSVGSLFVEDPDEPQNRMTKYSILRG
DYQDAFTIETNPAHNEGIIKPMKPLDYEYIQQYSFIVEATDPTIDL RYMSPPAGNRAQVI
"""
Counter(s)
# Counter objects can be added together
Counter("AABB") + Counter("BBCC")
# Works with any type of object that are comparable
Counter([(1, 1), (1, 2), (2, 1), (1, 1)])
```
## [collections.deque](https://docs.python.org/3.7/library/collections.html#deque-objects)
Deque \[deck\] or double-ended queue can be used for many tasks, e.g. building a sliding window
```
from collections import deque
s = """MQRLMMLLATSGACLGLLAVAAVAAAGANPAQRDTHSLLPTHRRQKRDWIWNQMHIDEEKNTSLPHHVGKIKSSVSRKNAKYLLKGEYVGKVFRVDAETGDVFAIERLDRENISEYHLTA"""
window = deque([], maxlen=5)
for pos, aa in enumerate(s):
window.append(aa)
print(window)
if pos > 7:
break
Counter(window)
```
## [collections.defaultdicts](https://docs.python.org/3.7/library/collections.html#defaultdict-objects)
Defaultdicts are like dicts, yet they handle missing keys without raising an error, so testing whether a key exists is not necessary — which makes life easier :) Of course, one needs to define the default value that is used when a key does not exist.
I use it a lot for counting
```python
counter["error"] += 1
```
or collecting elements in lists
```python
sorter["typeA"].append({"name": "John"})
```
No more checking whether the key exists and initializing it if it doesn't — the defaultdict takes care of that.
```
from collections import defaultdict
ddict_int = defaultdict(int)
# ^---- default factory
ddict_list = defaultdict(list)
ddict_int[10] += 10
ddict_int
ddict_int[0]
def default_factory_with_prefilled_dictionary():
return {"__name": "our custom dict", "errors": 0}
ddict_custom = defaultdict(default_factory_with_prefilled_dictionary)
```
Does that work?
```
ddict_custom[10] += 10
ddict_custom["what_ever_key"]
ddict_custom[10]['errors'] += 10
ddict_custom
```
| github_jupyter |
```
import time
import psutil
import os
%run BurrowsWheelerTransformImproved.ipynb
class FMIndexImproved():
    """FM-index over the Burrows-Wheeler transform (BWT) of a text.

    Supports O(|pattern|) backward search for substring queries and, via a
    sampled suffix array, mapping matching BWT rows back to text offsets.
    Rank queries are answered from cumulative tallies checkpointed every
    ``checkpointStep`` rows, trading a short linear scan per query for a
    much smaller memory footprint.

    Relies on ``SuffixArrayImprovedSort`` and ``BWTViaSAImprovedDict`` from
    the companion notebook (loaded via %run) to build the transform.
    """

    @staticmethod
    def SampleSuffixArray(suffixArray, step=32):
        """Keep only suffix-array entries whose text offset is a multiple of
        ``step``.  Returns a dict mapping BWT row index -> text offset."""
        sampledSA = {}
        for index, suffix in enumerate(suffixArray):
            if suffix % step == 0:
                sampledSA[index] = suffix
        return sampledSA

    def __init__(self, seq, suffixArray=None, checkpointStep=128, sampledSAStep=32):
        # Ensure the text carries the '$' sentinel the BWT machinery expects.
        if seq[-1] != '$':
            seq += '$'
        if suffixArray is None:
            suffixArray = SuffixArrayImprovedSort(seq)
        self.bwt = BWTViaSAImprovedDict(seq, suffixArray)
        self.sampledSA = self.SampleSuffixArray(suffixArray, sampledSAStep)
        self.length = len(self.bwt)
        self.CreateCheckpoints(checkpointStep)
        # Tally character frequencies, then build `first`: for each character,
        # the row index where its block starts in the sorted first column.
        tots = dict()
        for c in self.bwt:
            tots[c] = tots.get(c, 0) + 1
        self.first = {}
        totc = 0
        for c, count in sorted(tots.items()):
            self.first[c] = totc
            totc += count

    def CreateCheckpoints(self, checkpointStep=128):
        """Precompute cumulative per-character tallies every
        ``checkpointStep`` BWT rows (used to answer Rank queries quickly)."""
        self.checkpoints = {}
        self.checkpointStep = checkpointStep
        tally = {}
        # Initialise a running tally (and a checkpoint list) for every
        # character of the alphabet before the scan starts.
        for char in self.bwt:
            if char not in tally:
                tally[char] = 0
                self.checkpoints[char] = []
        for index, char in enumerate(self.bwt):
            tally[char] += 1
            if index % checkpointStep == 0:
                for c in tally.keys():
                    self.checkpoints[c].append(tally[c])

    def Rank(self, bwt, char, row):
        """Number of occurrences of ``char`` in bwt[0..row] (inclusive)."""
        if row < 0 or char not in self.checkpoints:
            return 0
        # Walk down to the nearest checkpoint, counting occurrences on the way;
        # the checkpoint already includes the row it was taken at.
        index, numOccurences = row, 0
        while index % self.checkpointStep != 0:
            if bwt[index] == char:
                numOccurences += 1
            index -= 1
        return self.checkpoints[char][index // self.checkpointStep] + numOccurences

    def Range(self, pattern):
        """Backward search: half-open row interval [left, right) of BWT rows
        whose suffixes start with ``pattern`` (empty when right <= left)."""
        left, right = 0, self.length - 1
        for i in range(len(pattern) - 1, -1, -1):
            left = self.Rank(self.bwt, pattern[i], left - 1) + self.Count(pattern[i])
            right = self.Rank(self.bwt, pattern[i], right) + self.Count(pattern[i]) - 1
            if right < left:
                break
        return left, right + 1

    def Resolve(self, row):
        """Map a BWT row to its text offset by LF-stepping left until a
        sampled suffix-array row is reached."""
        def StepLeft(row):
            char = self.bwt[row]
            return self.Rank(self.bwt, char, row - 1) + self.Count(char)

        numSteps = 0
        while row not in self.sampledSA:
            row = StepLeft(row)
            numSteps += 1
        return self.sampledSA[row] + numSteps

    def Count(self, char):
        """Row index where ``char``'s block starts in the first column.

        For a character absent from the alphabet, return the start of the
        smallest larger character's block, or ``self.length`` when ``char``
        sorts after every alphabet character (empty range past the end).
        """
        if char in self.first:
            return self.first[char]
        for cc in sorted(self.first):
            if char < cc:
                return self.first[cc]
        # Fix: the original returned self.first[cc] here (the start of the
        # *last* character's block), which could yield bogus non-empty
        # ranges for characters sorting after the whole alphabet.
        return self.length

    def HasSubstring(self, pattern):
        """True if ``pattern`` occurs anywhere in the indexed text."""
        left, right = self.Range(pattern)
        return right > left

    def HasSuffix(self, pattern):
        """True if the indexed text (excluding the '$' sentinel) ends with
        ``pattern``."""
        left, right = self.Range(pattern)
        if left >= self.length:
            return False
        offset = self.Resolve(left)
        return right > left and offset + len(pattern) == self.length - 1

    def Search(self, pattern):
        """Text offsets of every occurrence of ``pattern``."""
        left, right = self.Range(pattern)
        return [self.Resolve(x) for x in range(left, right)]
# Benchmark: for each genome FASTA and each query pattern, time building the
# FM-index plus one Search() call.  NOTE: the index is rebuilt inside the
# pattern loop, so construction time dominates every measurement.
# GetWholeGenomeFromFile comes from the %run'd BurrowsWheelerTransformImproved notebook.
dataSet = [
{"file" : "./data/13443_ref_Cara_1.0_chr1c.fa",
"patterns" : [
"ATGCATG",
"TCTCTCTA",
"TTCACTACTCTCA"
]},
{"file" : "./data/10093_ref_PAHARI_EIJ_v1.1_chrX.fa",
"patterns" : [
"ATGATG",
"CTCTCTA",
"TCACTACTCTCA"
]},
{"file" : "./data/144034_ref_Pbar_UMD_V03_chrUn.fa",
"patterns": [
"CGCGAG",
"GTCGAAT",
"GGGCGTCATCGCGCG"
]}
]
for data in dataSet:
file = data.get("file")
genome = GetWholeGenomeFromFile(file)
patterns = data.get("patterns")
for pattern in patterns:
startTime = time.time()
fm = FMIndexImproved(genome)
fm.Search(pattern)
endTime = time.time()
duration = endTime - startTime
print(f"{file} : {pattern} executed in {duration}")
# free the (large) index before building the next one
del fm
del file
del genome
del patterns
# Single timed run on the first genome with its second pattern only.
file = dataSet[0].get("file")
genome = GetWholeGenomeFromFile(file)
pattern = dataSet[0].get("patterns")[1]
startTime = time.time()
fm = FMIndexImproved(genome)
for f in fm.first:
print()
fm.Search(pattern)
endTime = time.time()
duration = endTime - startTime
print(f"{file} : {pattern} executed in {duration}")
del fm
del file
del genome
del pattern
# Memory-frugal BWT construction: build the transform one alphabet character
# at a time, so only the rotations starting with that character are held in
# memory simultaneously (trades time for a smaller peak footprint).
seq = GetWholeGenomeFromFile(dataSet[0].get("file"))
# character frequencies of the text
tots = dict()
for c in seq:
tots[c] = tots.get(c, 0) + 1
bwt = ""
#with open("./data/bwt" + str(1) + ".txt", "a") as f:
for char, count in sorted(tots.items()):
index = 0
toAdd = list()
# collect every rotation of seq that starts with `char`
while count > 0:
if seq[index] == char:
toAdd.append(seq[index:] + seq[:index])
count -= 1
index += 1
# sort this character's rotations and keep each rotation's last symbol
toWrite = ''.join(map(lambda x: x[-1], sorted(toAdd)))
# for rotation in sorted(toAdd):
# toWrite += rotation[-1]
bwt += toWrite
# f.write(toWrite)
print(toWrite)
del toWrite
del toAdd
# f.close()
del tots
# Peak-memory measurements in separate processes (checkpointStep=128, sampledSAStep=32).
!python "./memTest/FMIndexImprovedFirstFile.py" 128 32 "ATGCATG"
!python "./memTest/FMIndexImprovedFirstFile.py" 128 32 "TCTCTCTA"
!python "./memTest/FMIndexImprovedFirstFile.py" 128 32 "TTCACTACTCTCA"
```
| github_jupyter |
# Programming with Python
## Episode 3 - Storing Multiple Values in Lists
Teaching: 30 min,
Exercises: 30 min
## Objectives
- Explain what a list is.
- Create and index lists of simple values.
- Change the values of individual elements
- Append values to an existing list
- Reorder and slice list elements
- Create and manipulate nested lists
#### How can I store many values together?
Just as a `for loop` is a way to do operations many times, a list is a way to store many values. Unlike NumPy arrays, lists are built into the language (so we don't have to load a library to use them). We create a list by putting values inside square brackets and separating the values with commas:
```
odds = [1, 3, 5, 7]
print('odds are:', odds)
```
```
odds = [1, 3, 5, 7]
print('odds are:', odds)
print(odds[0])
# lists are mutable
odds[1] = 'a'
print(odds[1])
```
We can access elements of a list using indices – numbered positions of elements in the list. These positions are numbered starting at 0, so the first element has an index of 0.
```
print('first element:', odds[0])
print('last element:', odds[3])
print('"-1" element:', odds[-1])
```
```
print('first element:', odds[0])
print('last element:', odds[3])
# negatives count from the back, -2 element is 5, -1 element is 7
print('"-1" element:', odds[-1])
```
Yes, we can use negative numbers as indices in Python. When we do so, the index `-1` gives us the last element in the list, `-2` the second to last, and so on.
Because of this, `odds[3]` and `odds[-1]` point to the same element here.
If we loop over a list, the loop variable is assigned elements one at a time:
for number in odds:
print(number)
```
odds = [1, 3, 5, 7]
for number in odds:
print(number)
```
There is one important difference between lists and strings: we can change the values in a list, but we cannot change individual characters in a string. For example:
```
names = ['Curie', 'Darwing', 'Turing'] # typo in Darwin's name
print('names is originally:', names)
names[1] = 'Darwin' # correct the name
print('final value of names:', names)
```
```
names = ['Curie', 'Darwing', 'Turing'] # typo in Darwin's name
print('names is originally:', names)
names[1] = 'Darwin' # correct the name
print('final value of names:', names)
```
works, but:
```
name = 'Darwin'
name[0] = 'd'
```
doesn't.
### Ch-Ch-Ch-Ch-Changes
Data which can be modified in place is called *mutable*, while data which cannot be modified is called *immutable*. Strings and numbers are immutable. This does not mean that variables with string or number values are constants, but when we want to change the value of a string or number variable, we can only replace the old value with a completely new value.
Lists and arrays, on the other hand, are mutable: we can modify them after they have been created. We can change individual elements, append new elements, or reorder the whole list. For some operations, like sorting, we can choose whether to use a function that modifies the data in-place or a function that returns a modified copy and leaves the original unchanged.
Be careful when modifying data in-place. If two variables refer to the same list, and you modify the list value, it will change for both variables!
```
salsa = ['peppers', 'onions', 'cilantro', 'tomatoes']
my_salsa = salsa # <-- my_salsa and salsa point to the *same* list data in memory
salsa[0] = 'hot peppers'
print('Ingredients in salsa:', salsa)
print('Ingredients in my salsa:', my_salsa)
```
```
salsa = ['peppers', 'onions', 'cilantro', 'tomatoes']
my_salsa = salsa # <-- my_salsa and salsa point to the *same* list data in memory
salsa[0] = 'hot peppers'
print('Ingredients in salsa:', salsa)
print('Ingredients in my salsa:', my_salsa)
```
If you want variables with mutable values to be independent, you must make a copy of the value when you assign it.
```
salsa = ['peppers', 'onions', 'cilantro', 'tomatoes']
my_salsa = list(salsa) # <-- makes a *copy* of the list
salsa[0] = 'hot peppers'
print('Ingredients in salsa:', salsa)
print('Ingredients in my salsa:', my_salsa)
```
```
salsa = ['peppers', 'onions', 'cilantro', 'tomatoes']
my_salsa = list(salsa) # <-- makes a *copy* of the list
salsa[0] = 'hot peppers'
print('Ingredients in salsa:', salsa)
print('Ingredients in my salsa:', my_salsa)
```
Because of pitfalls like this, code which modifies data in place can be more difficult to understand. However, it is often far more efficient to modify a large data structure in place than to create a modified copy for every small change. You should consider both of these aspects when writing your code.
### Nested Lists
Since lists can contain any Python variable type, it can even contain other lists.
For example, we could represent the products in the shelves of a small grocery shop:
```
shop = [['pepper', 'zucchini', 'onion'],
['cabbage', 'lettuce', 'garlic'],
['apple', 'pear', 'banana']]
```
```
shop = [['pepper', 'zucchini', 'onion'],
['cabbage', 'lettuce', 'garlic'],
['apple', 'pear', 'banana']]
print(shop)
```
Here is an example of how indexing a list of lists works:
The first element of our list is another list representing the first shelf:
```
print(shop[0])
```
```
print(shop[0])
```
to reference a particular item on a particular shelf (e.g. the third item on the second shelf - i.e. the `garlic`) we'd use extra `[` `]`'s
```
print(shop[1][2])
```
don't forget the zero index thing ...
```
# these indexes are calles subscripts
print(shop[1][2])
# list of items of different types
another_shop = [['pepper', 'zucchini', 'onion'],
['cabbage', 'lettuce', 'garlic'],
['apple', 'pear', 'banana'],
42.5, 45.6, 39.8, 3]
print(another_shop)
print(another_shop[3]) # print(another_shop[3][0]) is not possible
print(another_shop[5])
```
### Heterogeneous Lists
Lists in Python can contain elements of different types. Example:
```
sample_ages = [10, 12.5, 'Unknown']
```
```
sample_ages = [10, 12.5, 'Unknown']
```
There are many ways to change the contents of lists besides assigning new values to individual elements:
```
odds.append(11)
print('odds after adding a value:', odds)
del odds[0]
print('odds after removing the first element:', odds)
odds.reverse()
print('odds after reversing:', odds)
```
```
odds.append(11)
print('odds after adding a value:', odds)
del odds[0]
print('odds after removing the first element:', odds)
odds.reverse()
print('odds after reversing:', odds)
# gives info about the object "odds"
odds?
# do odds. and press tab > shows a documentation for all possible functions that can be used with this variable
odds.
```
While modifying in place, it is useful to remember that Python treats lists in a slightly counter-intuitive way.
If we make a list and (attempt to) copy it then modify in place, we can cause all sorts of trouble:
```
odds = [1, 3, 5, 7]
primes = odds
primes.append(2)
print('primes:', primes)
print('odds:', odds)
primes: [1, 3, 5, 7, 2]
odds: [1, 3, 5, 7, 2]
```
```
odds = [1, 3, 5, 7]
primes = odds
primes.append(2)
print('primes:', primes)
print('odds:', odds)
# primes: [1, 3, 5, 7, 2]
# odds: [1, 3, 5, 7, 2]
```
This is because Python stores a list in memory, and then can use multiple names to refer to the same list. If all we want to do is copy a (simple) list, we can use the list function, so we do not modify a list we did not mean to:
```
odds = [1, 3, 5, 7]
primes = list(odds)
primes.append(2)
print('primes:', primes)
print('odds:', odds)
primes: [1, 3, 5, 7, 2]
odds: [1, 3, 5, 7]
```
```
odds = [1, 3, 5, 7]
primes = list(odds)
primes.append(2)
print('primes:', primes)
print('odds:', odds)
# primes: [1, 3, 5, 7, 2]
# odds: [1, 3, 5, 7]
# another way to copy is to create a new slice
# here, we slice from the beginning to the end
primes = odds[:]
# another option
primes = odds.copy()
shop = [['pepper', 'zucchini', 'onion'],
['cabbage', 'lettuce', 'garlic'],
['apple', 'pear', 'banana']]
print(shop)
# a copy only makes a copy of the top level. this is calles a shallow copy
another_shop = shop.copy()
print(another_shop)
del shop[2]
print(shop)
print(another_shop)
# the deeper structure is not copied, it's still stored in one place and is referenced by two variables
shop[0][1] = 'carrot'
print(shop)
print(another_shop)
# create a deep copy
import copy
shop = [['pepper', 'zucchini', 'onion'],
['cabbage', 'lettuce', 'garlic'],
['apple', 'pear', 'banana']]
third_shop = copy.deepcopy(shop)
print('original shop:', shop)
del shop[2]
print('original shop minus element 2:', shop)
print('deep copy of shop:', third_shop)
shop[0][1] = 'carrot'
print('original shop with carrot and minus element 2:', shop)
print('deep copy of shop:', third_shop)
```
### Turn a String Into a List
Use a `for loop` to convert the string "hello" into a list of letters: `["h", "e", "l", "l", "o"]`
Hint: You can create an empty list like this:
my_list = []
```
my_list = []
for letter in 'hello':
my_list.append(letter)
print(my_list)
# if you want to put a new item in a specific location, use insert
odds = [1,3,5,7]
primes = odds.copy()
primes.insert(1,2)
print(primes)
# if you insert it past the last position, it inserts it in the end
primes.insert(10,2)
print(primes)
```
Subsets of lists and strings can be accessed by specifying ranges of values in brackets, similar to how we accessed ranges of positions in a NumPy array. This is commonly referred to as *slicing* the list/string.
```
binomial_name = "Drosophila melanogaster"
group = binomial_name[0:10]
print("group:", group)
species = binomial_name[11:24]
print("species:", species)
chromosomes = ["X", "Y", "2", "3", "4"]
autosomes = chromosomes[2:5]
print("autosomes:", autosomes)
last = chromosomes[-1]
print("last:", last)
```
```
binomial_name = "Drosophila melanogaster"
# the last character (10) is not included
group = binomial_name[0:10]
print("group:", group)
species = binomial_name[11:24]
print("species:", species)
# or
print("species:", binomial_name[11:])
# to reverse the string
print(binomial_name[::-1])
# to leave out some specific characters
print(binomial_name[:-5])
# find an index for a specific element
print(binomial_name.index(' '))
# break up by a specific character
print(binomial_name.split(' '))
# split after every letter
print(list(binomial_name))
chromosomes = ["X", "Y", "2", "3", "4"]
autosomes = chromosomes[2:5]
print("autosomes:", autosomes)
```
### Slicing From the End
Use slicing to access only the last four characters of a string or entries of a list.
```
string_for_slicing = "Observation date: 02-Feb-2013"
list_for_slicing = [["fluorine", "F"],
["chlorine", "Cl"],
["bromine", "Br"],
["iodine", "I"],
["astatine", "At"]]
```
Would your solution work regardless of whether you knew beforehand the length of the string or list (e.g. if you wanted to apply the solution to a set of lists of different lengths)? If not, try to change your approach to make it more robust.
Hint: Remember that indices can be negative as well as positive
```
string_for_slicing = "Observation date: 02-Feb-2013"
list_for_slicing = [["fluorine", "F"],
["chlorine", "Cl"],
["bromine", "Br"],
["iodine", "I"],
["astatine", "At"]]
print(string_for_slicing[-4:])
print(list_for_slicing[-4:])
print(list_for_slicing[-1][1:])
```
### Non-Continuous Slices
So far we've seen how to use slicing to take single blocks of successive entries from a sequence. But what if we want to take a subset of entries that aren't next to each other in the sequence?
You can achieve this by providing a third argument to the range within the brackets, called the step size. The example below shows how you can take every third entry in a list:
```
primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37]
subset = primes[0:12:3]
print("subset", subset)
```
```
primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37]
subset = primes[0:12:3]
print("subset", subset)
```
Notice that the slice taken begins with the first entry in the range, followed by entries taken at equally-spaced intervals (the steps) thereafter. If you wanted to begin the subset with the third entry, you would need to specify that as the starting point of the sliced range:
```
primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37]
subset = primes[2:12:3]
print("subset", subset)
```
```
primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37]
subset = primes[2:12:3]
print("subset", subset)
```
Use the step size argument to create a new string that contains only every second character in the string "In an octopus's garden in the shade"
Start with:
```
beatles = "In an octopus's garden in the shade"
```
and print:
```
I notpssgre ntesae
```
```
beatles = "In an octopus's garden in the shade"
new = beatles[0:len(beatles):2]
print("new string:", new)
# or easier:
new = beatles[0::2]
```
If you want to take a slice from the beginning of a sequence, you can omit the first index in the range:
```
date = "Monday 4 January 2016"
day = date[0:6]
print("Using 0 to begin range:", day)
day = date[:6]
print("Omitting beginning index:", day)
```
```
date = "Monday 4 January 2016"
day = date[0:6]
print("Using 0 to begin range:", day)
day = date[:6]
print("Omitting beginning index:", day)
```
And similarly, you can omit the ending index in the range to take a slice to the end of the sequence:
```
months = ["jan", "feb", "mar", "apr", "may", "jun", "jul", "aug", "sep", "oct", "nov", "dec"]
q4 = months[8:12]
print("With specified start and end position:", q4)
q4 = months[8:len(months)]
print("Using len() to get last entry:", q4)
q4 = months[8:]
print("Omitting ending index:", q4)
```
```
months = ["jan", "feb", "mar", "apr", "may", "jun", "jul", "aug", "sep", "oct", "nov", "dec"]
q4 = months[8:12]
print("With specified start and end position:", q4)
q4 = months[8:len(months)]
print("Using len() to get last entry:", q4)
q4 = months[8:]
print("Omitting ending index:", q4)
```
### Overloading
`+` usually means addition, but when used on strings or lists, it means "concatenate". Given that, what do you think the multiplication operator * does on lists? In particular, what will be the output of the following code?
```
counts = [2, 4, 6, 8, 10]
repeats = counts * 2
print(repeats)
```
The technical term for this is operator overloading. A single operator, like `+` or `*`, can do different things depending on what it's applied to.
```
counts = [2, 4, 6, 8, 10]
repeats = counts * 2
print(repeats)
```
is this the same as:
```
counts + counts
```
and what might:
```
counts / 2
```
mean ?
```
# lists can't be divided, need slice if you want a half
double_counts = counts*2
double_counts[:int(len(double_counts)/2)]
```
## Key Points
- [value1, value2, value3, ...] creates a list.
- Lists can contain any Python object, including lists (i.e., list of lists).
- Lists are indexed and sliced with square brackets (e.g., list[0] and list[2:9]), in the same way as strings and arrays.
- Lists are mutable (i.e., their values can be changed in place).
- Strings are immutable (i.e., the characters in them cannot be changed).
### Save, and version control your changes
- save your work: `File -> Save`
- add all your changes to your local repository: `Terminal -> git add .`
- commit your updates a new Git version: `Terminal -> git commit -m "End of Episode 3"`
- push your latest commits to GitHub: `Terminal -> git push`
| github_jupyter |
# Tutorial 1 for R
## Solve Dantzig's Transport Problem using the *ix modeling platform* (ixmp)
<img style="float: right; height: 80px;" src="_static/R_logo.png">
### Aim and scope of the tutorial
This tutorial takes you through the steps to import the data for a very simple optimization model
and solve it using the ``ixmp``-GAMS interface.
We use Dantzig's transport problem, which is also used as the standard GAMS tutorial.
This problem finds a least cost shipping schedule that meets requirements at markets and supplies at factories.
If you are not familiar with GAMS, please take a minute to look at the [transport.gms](transport.gms) code.
For reference of the transport problem, see:
> Dantzig, G B, Chapter 3.3. In Linear Programming and Extensions.
> Princeton University Press, Princeton, New Jersey, 1963.
> This formulation is described in detail in:
> Rosenthal, R E, Chapter 2: A GAMS Tutorial.
> In GAMS: A User's Guide. The Scientific Press, Redwood City, California, 1988.
> see http://www.gams.com/mccarl/trnsport.gms
The steps in the tutorial are the following:
0. Launch an ixmp.Platform instance and initialize a new ixmp.Scenario.
0. Define the sets and parameters in the scenario, and commit the data to the platform
0. Check out the scenario and initialize variables and equations (necessary for ``ixmp`` to import the solution)
0. Solve the model (export to GAMS input gdx, execute, read solution from output gdx)
0. Display the solution (variables and equation)
### Launching the platform and initializing a new scenario
We launch a platform instance and initialize a new scenario. This will be used to store all data required to solve Dantzig's transport problem as well as the solution after solving it in GAMS.
```
# load the rixmp package source code
library("retixmp")
ixmp <- import('ixmp')
# launch the ix modeling platform using a local HSQL database instance
mp <- ixmp$Platform(dbtype="HSQLDB")
# details for creating a new scenario in the ix modeling platform
model <- "transport problem"
scenario <- "standard"
annot <- "Dantzig's transportation problem for illustration and testing"
# initialize a new ixmp.Scenario
# the parameter version='new' indicates that this is a new scenario instance
scen <- mp$Scenario(model, scenario, "new", annotation=annot)
```
### Defining the sets in the scenario
Below, we first show the data as they would be written in the GAMS tutorial ([transport.gms](transport.gms) in this folder).
Then, we show how this can be implemented in the R ``ixmp`` notation, and display the elements of set ``i`` as an R list.
```
# define the sets of locations of canning plants
scen$init_set("i")
i.set = c("seattle","san-diego")
scen$add_set("i", i.set )
### markets set
scen$init_set("j")
j.set = c("new-york","chicago","topeka")
scen$add_set("j", j.set )
# display the set 'i'
scen$set('i')
```
### Defining parameters in the scenario
Next, we define the production capacity and demand parameters, and display the demand parameter as a DataFrame.
Then, we add the two-dimensional distance parameter and the transport cost scalar.
```
# capacity of plant i in cases
scen$init_par("a", c("i"))
a.df = data.frame( i = i.set, value = c(350 , 600) , unit = 'cases')
scen$add_par("a", adapt_to_ret(a.df))
#scen$add_par("a", "san-diego", 600, "cases")
# demand at market j in cases
scen$init_par("b", c("j"))
b.df = data.frame( j = j.set, value = c(325 , 300, 275) , unit = 'cases')
scen$add_par("b", adapt_to_ret(b.df))
# display the parameter 'b'
scen$par('b')
```
```
# distance in thousands of miles
scen$init_par("d", c("i","j"))
d.df = data.frame(expand.grid(i = i.set,j = j.set), value = c(2.5,2.5,1.7,1.8,1.8,1.4), unit = 'km')
scen$add_par("d", adapt_to_ret(d.df))
```
Scalar f freight in dollars per case per thousand miles /90/ ;
```
# cost per case per 1000 miles
# initialize scalar with a value and a unit (and optionally a comment)
scen$init_scalar("f", 90.0, "USD/km")
```
### Committing the scenario to the ixmp database instance
```
# commit new scenario to the database
# no changes can then be made to the scenario data until a check-out is performed
comment = "importing Dantzig's transport problem for illustration of the R interface"
scen$commit(comment)
# set this new scenario as the default version for the model/scenario name
scen$set_as_default()
```
### Defining variables and equations in the scenario
The levels and marginals of these variables and equations will be imported to the scenario when reading the gdx solution file.
```
# perform a check_out to make further changes
scen$check_out()
# initialize the decision variables and equations
scen$init_var("z", NULL, NULL)
scen$init_var("x", idx_sets=c("i", "j"))
scen$init_equ("demand", idx_sets=c("j"))
# commit changes to the scenario (save changes in ixmp database instance)
change_comment = "inialize the model variables and equations"
scen$commit(change_comment)
```
### Solve the scenario
The ``solve()`` function exports the scenario to a GAMS gdx file, executes GAMS, and then imports the solution from an output GAMS gdx file to the database.
For the model equations and the GAMS workflow (reading the data from gdx, solving the model, writing the results to gdx), see ``transport_ixmp.gms``.
```
scen$solve(model="transport_ixmp")
```
### Display and analyze the results
```
# display the objective value of the solution
scen$var("z")
# display the quantities transported from canning plants to demand locations
scen$var("x")
# display the quantities and marginals (=shadow prices) of the demand balance constraints
scen$equ("demand")
```
### Close the database connection of the ix modeling platform
Closing the database connection is recommended when working with the local file-based database, i.e., ``dbtype='HSQLDB'``. This command closes the database files and removes temporary data. This is necessary so that other notebooks or ``ixmp`` instances can access the database file, or so that the database files can be copied to a different folder or drive.
```
# close the connection of the platform instance to the local database files
mp$close_db()
```
| github_jupyter |
# Unsupervised Graph Learning with GraphSage
GraphScope provides the capability to process learning tasks. In this tutorial, we demonstrate how GraphScope trains a model with GraphSage.
The task is link prediction, which estimates the probability of links between nodes in a graph.
In this task, we use our implementation of GraphSAGE algorithm to build a model that predicts protein-protein links in the [PPI](https://humgenomics.biomedcentral.com/articles/10.1186/1479-7364-3-3-291) dataset. In which every node represents a protein. The task can be treated as a unsupervised link prediction on a homogeneous link network.
In this task, GraphSage algorithm would compress both structural and attribute information in the graph into low-dimensional embedding vectors on each node. These embeddings can be further used to predict links between nodes.
This tutorial has following steps:
- Creating session and loading graph
- Launching the learning engine and attaching to loaded graph.
- Defining train process with builtin GraphSage model and hyperparameters
- Training and evaluating
First, let's create a session and load the dataset as a graph.
```
import os
import graphscope
# volume spec mounting the host's /testingdata directory into the pods at
# /home/jovyan/datasets (read-only), so the PPI CSV files are visible
k8s_volumes = {
    "data": {
        "type": "hostPath",
        "field": {
            "path": "/testingdata",
            "type": "Directory"
        },
        "mounts": {
            "mountPath": "/home/jovyan/datasets",
            "readOnly": True
        }
    }
}
# create session
graphscope.set_option(show_log=True)  # echo engine logs for easier debugging
sess = graphscope.session(k8s_volumes=k8s_volumes)
# loading ppi graph: 'protein' vertices and 'link' edges from CSV files
graph = graphscope.Graph(sess)
graph = graph.add_vertices("/home/jovyan/datasets/ppi/node.csv", "protein")
graph = graph.add_edges("/home/jovyan/datasets/ppi/edge.csv", "link")
```
## Launch learning engine
Then, we need to define a feature list for training. The training feature list should be selected from the vertex properties. In this case, we choose all the properties prefixed with "feat-" as the training features.
With the feature list, next we launch a learning engine with the learning method of session. (You may find the details of the method on [Session](https://graphscope.io/docs/reference/session.html).)
In this case, we specify the GraphSage training over `protein` nodes and `link` edges.
With gen_labels, we take protein nodes as training set.
```
# define the features for learning: vertex properties "feat-0" .. "feat-49"
paper_features = []
for i in range(50):
    paper_features.append("feat-" + str(i))
# launch a learning engine over 'protein' vertices (with the 50 features)
# and homogeneous 'link' edges
lg = sess.learning(graph, nodes=[("protein", paper_features)],
                   edges=[("protein", "link", "protein")],
                   gen_labels=[
                       # label 100% of the protein vertices as the training split
                       ("train", "protein", 100, (0, 100)),
                   ])
```
We use the builtin GraphSage model to define the training process. You can find more detail about all the builtin learning models on [Graph Learning Model](https://graphscope.io/docs/learning_engine.html#data-model)
In the example, we use tensorflow as NN backend trainer.
```
import numpy as np
from graphscope.learning.examples import GraphSage
from graphscope.learning.graphlearn.python.model.tf.trainer import LocalTFTrainer
from graphscope.learning.graphlearn.python.model.tf.optimizer import get_tf_optimizer
# unsupervised GraphSage.
def train(config, graph):
    """Run unsupervised GraphSage training on ``graph`` and save node embeddings.

    ``config`` supplies the model hyperparameters, the optimizer settings and
    ``emb_save_dir``, the path the learned embeddings are written to (numpy).
    """

    def build_model():
        # Assemble the GraphSage model from the hyperparameter dictionary.
        return GraphSage(
            graph,
            config["class_num"],
            config["features_num"],
            config["batch_size"],
            categorical_attrs_desc=config["categorical_attrs_desc"],
            hidden_dim=config["hidden_dim"],
            in_drop_rate=config["in_drop_rate"],
            neighs_num=config["neighs_num"],
            hops_num=config["hops_num"],
            node_type=config["node_type"],
            edge_type=config["edge_type"],
            full_graph_mode=config["full_graph_mode"],
            unsupervised=config["unsupervised"],
        )

    optimizer = get_tf_optimizer(
        config["learning_algo"], config["learning_rate"], config["weight_decay"]
    )
    trainer = LocalTFTrainer(build_model, epoch=config["epoch"], optimizer=optimizer)
    trainer.train()
    # Export the learned per-node embeddings and persist them to disk.
    np.save(config["emb_save_dir"], trainer.get_node_embedding())
# define hyperparameters
config = {
"class_num": 128, # output dimension
"features_num": 50,
"batch_size": 512,
"categorical_attrs_desc": "",
"hidden_dim": 128,
"in_drop_rate": 0.5,
"hops_num": 2,
"neighs_num": [5, 5],
"full_graph_mode": False,
"agg_type": "gcn", # mean, sum
"learning_algo": "adam",
"learning_rate": 0.01,
"weight_decay": 0.0005,
'unsupervised': True,
"epoch": 1,
'emb_save_dir': './id_emb',
"node_type": "protein",
"edge_type": "link",
}
```
## Run training process
After defining the training process and hyperparameters,
we can now start the training process with the learning engine `lg` and the hyperparameter configuration.
```
# run training with the learning engine and the hyperparameter configuration
train(config, lg)
```
Finally, don't forget to close the session.
```
# release the session and its cluster resources
sess.close()
```
| github_jupyter |
Lambda School Data Science
*Unit 4, Sprint 3, Module 2*
---
# Convolutional Neural Networks (Prepare)
> Convolutional networks are simply neural networks that use convolution in place of general matrix multiplication in at least one of their layers. *Goodfellow, et al.*
## Learning Objectives
- <a href="#p1">Part 1: </a>Describe convolution and pooling
- <a href="#p2">Part 2: </a>Apply a convolutional neural network to a classification task
- <a href="#p3">Part 3: </a>Use a pre-trained convolution neural network for object detection
Modern __computer vision__ approaches rely heavily on convolutions as both a dimensionality reduction and feature extraction method. Before we dive into convolutions, let's talk about some of the common computer vision applications:
* Classification [(Hot Dog or Not Dog)](https://www.youtube.com/watch?v=ACmydtFDTGs)
* Object Detection [(YOLO)](https://www.youtube.com/watch?v=MPU2HistivI)
* Pose Estimation [(PoseNet)](https://ai.googleblog.com/2019/08/on-device-real-time-hand-tracking-with.html)
* Facial Recognition [Emotion Detection](https://www.cbronline.com/wp-content/uploads/2018/05/Mona-lIsa-test-570x300.jpg)
* and *countless* more
We are going to focus on classification and pre-trained object detection today. What are some of the applications of object detection?
```
# embed the YOLO object-detection demo video (same id as the link above)
from IPython.display import YouTubeVideo
YouTubeVideo('MPU2HistivI', width=600, height=400)
```
# Convolution & Pooling (Learn)
<a id="p1"></a>
## Overview
Like neural networks themselves, CNNs are inspired by biology - specifically, the receptive fields of the visual cortex.
Put roughly, in a real brain the neurons in the visual cortex *specialize* to be receptive to certain regions, shapes, colors, orientations, and other common visual features. In a sense, the very structure of our cognitive system transforms raw visual input, and sends it to neurons that specialize in handling particular subsets of it.
CNNs imitate this approach by applying a convolution. A convolution is an operation on two functions that produces a third function, showing how one function modifies another. Convolutions have a [variety of nice mathematical properties](https://en.wikipedia.org/wiki/Convolution#Properties) - commutativity, associativity, distributivity, and more. Applying a convolution effectively transforms the "shape" of the input.
One common confusion - the term "convolution" is used to refer to both the process of computing the third (joint) function and the process of applying it. In our context, it's more useful to think of it as an application, again loosely analogous to the mapping from visual field to receptive areas of the cortex in a real animal.
```
# embed the referenced explainer video on convolutions
from IPython.display import YouTubeVideo
YouTubeVideo('IOHayh06LJ4', width=600, height=400)
```
## Follow Along
Let's try to do some convolutions in `Keras`.
### Convolution - an example
Consider blurring an image - assume the image is represented as a matrix of numbers, where each number corresponds to the color value of a pixel.
```
import imageio
import matplotlib.pyplot as plt
from skimage import color, io
from skimage.exposure import rescale_intensity
austen = io.imread('https://dl.airtable.com/S1InFmIhQBypHBL0BICi_austen.jpg')
austen_grayscale = rescale_intensity(color.rgb2gray(austen))
austen_grayscale
plt.imshow(austen_grayscale, cmap="gray");
austen_grayscale.shape
import numpy as np
import scipy.ndimage as nd
horizontal_edge_convolution = np.array([[1,1,1,1,1],
[0,0,0,0,0],
[0,0,0,0,0],
[0,0,0,0,0],
[-1,-1,-1,-1,-1]])
vertical_edge_convolution = np.array([[1, 0, 0, 0, -1],
[1, 0, 0, 0, -1],
[1, 0, 0, 0, -1],
[1, 0, 0, 0, -1],
[1, 0, 0, 0, -1]])
austen_edges = nd.convolve(austen_grayscale, horizontal_edge_convolution)
austen_edges.shape
plt.imshow(austen_edges, cmap="gray");
```
## Challenge
You will be expected to be able to describe convolution.
# CNNs for Classification (Learn)
## Overview
### Typical CNN Architecture

The first stage of a CNN is, unsurprisingly, a convolution - specifically, a transformation that maps regions of the input image to neurons responsible for receiving them. The convolutional layer can be visualized as follows:

The red represents the original input image, and the blue the neurons that correspond.
As shown in the first image, a CNN can have multiple rounds of convolutions, [downsampling](https://en.wikipedia.org/wiki/Downsampling_(signal_processing)) (a digital signal processing technique that effectively reduces the information by passing through a filter), and then eventually a fully connected neural network and output layer. Typical output layers for a CNN would be oriented towards classification or detection problems - e.g. "does this picture contain a cat, a dog, or some other animal?"
Why are CNNs so popular?
1. Compared to prior image learning techniques, they require relatively little image preprocessing (cropping/centering, normalizing, etc.)
2. Relatedly, they are *robust* to all sorts of common problems in images (shifts, lighting, etc.)
Actually training a cutting edge image classification CNN is nontrivial computationally - the good news is, with transfer learning, we can get one "off-the-shelf"!
## Follow Along
```
from tensorflow.keras import datasets
from tensorflow.keras.models import Sequential, Model # <- May Use
from tensorflow.keras.layers import Dense, Conv2D, MaxPooling2D, Flatten
# Load CIFAR-10 once (the original cell loaded it twice, which re-reads the
# whole dataset for no benefit).
(train_images, train_labels), (test_images, test_labels) = datasets.cifar10.load_data()
train_images.max()  # raw pixel values before normalization
# Normalize pixel values to be between 0 and 1
train_images, test_images = train_images / 255.0, test_images / 255.0
class_names = ['airplane', 'automobile', 'bird', 'cat', 'deer',
               'dog', 'frog', 'horse', 'ship', 'truck']
# Preview the first 25 training images with their class names.
plt.figure(figsize=(10,10))
for i in range(25):
    plt.subplot(5,5,i+1)
    plt.xticks([])
    plt.yticks([])
    plt.grid(False)
    plt.imshow(train_images[i], cmap=plt.cm.binary)
    # The CIFAR labels happen to be arrays,
    # which is why you need the extra index
    plt.xlabel(class_names[train_labels[i][0]])
plt.show()
train_images[0].shape
32*32*3  # values per image: 32x32 pixels, 3 color channels
# Setup Architecture: three conv blocks, then a small dense classifier.
model = Sequential()
model.add(Conv2D(32, (3,3), activation='relu', input_shape=(32,32,3)))
model.add(MaxPooling2D((2,2)))
model.add(Conv2D(64, (3,3), activation='relu'))
model.add(MaxPooling2D((2,2)))
model.add(Conv2D(64, (3,3), activation='relu'))
model.add(Flatten())
model.add(Dense(64, activation='relu'))
model.add(Dense(10, activation='softmax'))
model.summary()
# Compile Model: integer class ids -> sparse categorical crossentropy
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
print(type(test_labels))
# Fit Model
model.fit(train_images, train_labels, epochs=10, validation_data=(test_images, test_labels))
# Evaluate Model
test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
```
## Challenge
You will apply CNNs to a classification task in the module project.
# CNNs for Object Detection (Learn)
## Overview
### Transfer Learning - TensorFlow Hub
"A library for reusable machine learning modules"
This lets you quickly take advantage of a model that was trained with thousands of GPU hours. It also enables transfer learning - reusing a part of a trained model (called a module) that includes weights and assets, but also training the overall model some yourself with your own data. The advantages are fairly clear - you can use less training data, have faster training, and have a model that generalizes better.
https://www.tensorflow.org/hub/
**WARNING** - Dragons ahead!

TensorFlow Hub is very bleeding edge, and while there's a good amount of documentation out there, it's not always updated or consistent. You'll have to use your problem-solving skills if you want to use it!
## Follow Along
```
import numpy as np
from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.resnet50 import preprocess_input, decode_predictions
def process_img_path(img_path):
    """Load an image from disk resized to the 224x224 input ResNet50 expects."""
    return image.load_img(img_path, target_size=(224, 224))

# Cache the model so repeated calls to img_contains_banana() do not rebuild
# (and potentially re-download) ResNet50 on every invocation.
_resnet50_model = None

def _get_resnet50():
    """Return a lazily-constructed, shared ResNet50 (imagenet weights)."""
    global _resnet50_model
    if _resnet50_model is None:
        _resnet50_model = ResNet50(weights='imagenet')
    return _resnet50_model

def img_contains_banana(img):
    """Return the model's confidence that ``img`` contains a banana.

    ``img`` is a PIL image as returned by :func:`process_img_path`.
    Returns 0.0 when 'banana' is not among the top-3 predictions.
    """
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)  # add the batch dimension
    x = preprocess_input(x)
    model = _get_resnet50()
    features = model.predict(x)
    results = decode_predictions(features, top=3)[0]
    print(results)
    for entry in results:
        if entry[1] == 'banana':
            return entry[2]  # prediction confidence
    return 0.0
import requests

image_urls = ["https://github.com/LambdaSchool/ML-YouOnlyLookOnce/raw/master/sample_data/negative_examples/example11.jpeg",
              "https://github.com/LambdaSchool/ML-YouOnlyLookOnce/raw/master/sample_data/positive_examples/example0.jpeg"]

# Download each sample image to example<i>.jpg. Fail loudly on an HTTP error
# so a 404/500 response body is not silently written to disk as a "jpg".
for _id, img in enumerate(image_urls):
    r = requests.get(img)
    r.raise_for_status()
    with open(f'example{_id}.jpg', 'wb') as f:
        f.write(r.content)
from IPython.display import Image
Image(filename='./example0.jpg', width=600)
img_contains_banana(process_img_path('example0.jpg'))
Image(filename='example1.jpg', width=600)
img_contains_banana(process_img_path('example1.jpg'))
```
Notice that, while it gets it right, the confidence for the banana image is fairly low. That's because so much of the image is "not-banana"! How can this be improved? Bounding boxes to center on items of interest.
## Challenge
You will be expected to apply a pretrained model to a classification problem today.
# Review
- <a href="#p1">Part 1: </a>Describe convolution and pooling
* A Convolution is a function applied to another function to produce a third function
* Convolutional Kernels are typically 'learned' during the process of training a Convolution Neural Network
* Pooling is a dimensionality reduction technique that uses either Max or Average of a feature map region to downsample data
- <a href="#p2">Part 2: </a>Apply a convolutional neural network to a classification task
* Keras has layers for convolutions :)
- <a href="#p3">Part 3: </a>Use a pre-trained convolution neural network for object detection
* Check out the pretrained models available in Keras & TensorFlow Hub
# Sources
- *_Deep Learning_*. Goodfellow *et al.*
- [Keras CNN Tutorial](https://www.tensorflow.org/tutorials/images/cnn)
- [Tensorflow + Keras](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Conv2D)
- [Convolution Wiki](https://en.wikipedia.org/wiki/Convolution)
- [Keras Conv2D: Working with CNN 2D Convolutions in Keras](https://missinglink.ai/guides/keras/keras-conv2d-working-cnn-2d-convolutions-keras/)
| github_jupyter |
This notebook is designed to run in an IBM Watson Studio default runtime (NOT the Watson Studio Apache Spark Runtime, as the default runtime with 1 vCPU is free of charge). Therefore, we install Apache Spark in local mode for test purposes only. Please don't use it in production.
In case you are facing issues, please read the following two documents first:
https://github.com/IBM/skillsnetwork/wiki/Environment-Setup
https://github.com/IBM/skillsnetwork/wiki/FAQ
Then, please feel free to ask:
https://coursera.org/learn/machine-learning-big-data-apache-spark/discussions/all
Please make sure to follow the guidelines before asking a question:
https://github.com/IBM/skillsnetwork/wiki/FAQ#im-feeling-lost-and-confused-please-help-me
If running outside Watson Studio, this should work as well. In case you are running in an Apache Spark context outside Watson Studio, please remove the Apache Spark setup in the first notebook cells.
```
from IPython.display import Markdown, display
def printmd(string):
    """Render *string* as red, header-sized Markdown inside the notebook."""
    display(Markdown('# <span style="color:red">'+string+'</span>'))
# warn if a SparkContext already exists, i.e. the notebook is running in a
# Watson Studio *Spark* runtime instead of the default runtime
if ('sc' in locals() or 'sc' in globals()):
    printmd('<<<<<!!!!! It seems that you are running in a IBM Watson Studio Apache Spark Notebook. Please run it in an IBM Watson Studio Default Runtime (without Apache Spark) !!!!!>>>>>')
!pip install pyspark==2.4.5
try:
    from pyspark import SparkContext, SparkConf
    from pyspark.sql import SparkSession
except ImportError as e:
    printmd('<<<<<!!!!! Please restart your kernel after installing Apache Spark !!!!!>>>>>')
# create (or reuse) a local SparkContext using all available cores
sc = SparkContext.getOrCreate(SparkConf().setMaster("local[*]"))
spark = SparkSession \
    .builder \
    .getOrCreate()
```
# Exercise 3.2
Welcome to the last exercise of this course. This is also the most advanced one because it somehow glues everything together you've learned.
These are the steps you will do:
- load a data frame from cloudant/ApacheCouchDB
- perform feature transformation by calculating minimal and maximal values of different properties on time windows (we'll explain what a time windows is later in here)
- reduce these now twelve dimensions to three using the PCA (Principal Component Analysis) algorithm of SparkML (Spark Machine Learning) => We'll actually make use of SparkML a lot more in the next course
- plot the dimensionality reduced data set
Now it is time to grab a PARQUET file and create a dataframe out of it. Using SparkSQL you can handle it like a database.
```
# download the sample sensor data as a parquet file and strip the query suffix
!wget https://github.com/IBM/coursera/blob/master/coursera_ds/washing.parquet?raw=true
!mv washing.parquet?raw=true washing.parquet
# read the parquet file and register it as the SQL temp view 'washing'
df = spark.read.parquet('washing.parquet')
df.createOrReplaceTempView('washing')
df.show()
```
This is the feature transformation part of this exercise. Since our table is mixing schemas from different sensor data sources we are creating new features. In other words, we use existing columns to calculate new ones. We only use min and max for now, but using more advanced aggregations as we've learned in week three may improve the results. We are calculating those aggregations over a sliding window "w". This window is defined in the SQL statement and basically reads the table with a one-by-one stride in the direction of increasing timestamp. Whenever a row leaves the window a new one is included. Therefore this window is called a sliding window (in contrast to tumbling, time or count windows). More on this can be found here: https://flink.apache.org/news/2015/12/04/Introducing-windows.html
```
# Compute min/max of each sensor reading over a sliding window of the current
# row plus the 10 following rows (ordered by timestamp), then drop windows
# whose aggregates are null. Fix: the original WHERE clause checked
# min_hardness but omitted "max_hardness is not null", inconsistent with
# every other min/max column pair.
result = spark.sql("""
SELECT * from (
    SELECT
    min(temperature) over w as min_temperature,
    max(temperature) over w as max_temperature,
    min(voltage) over w as min_voltage,
    max(voltage) over w as max_voltage,
    min(flowrate) over w as min_flowrate,
    max(flowrate) over w as max_flowrate,
    min(frequency) over w as min_frequency,
    max(frequency) over w as max_frequency,
    min(hardness) over w as min_hardness,
    max(hardness) over w as max_hardness,
    min(speed) over w as min_speed,
    max(speed) over w as max_speed
    FROM washing
    WINDOW w AS (ORDER BY ts ROWS BETWEEN CURRENT ROW AND 10 FOLLOWING)
)
WHERE min_temperature is not null
AND max_temperature is not null
AND min_voltage is not null
AND max_voltage is not null
AND min_flowrate is not null
AND max_flowrate is not null
AND min_frequency is not null
AND max_frequency is not null
AND min_hardness is not null
AND max_hardness is not null
AND min_speed is not null
AND max_speed is not null
""")
```
Since this table contains null values, our windows might contain them as well. In case all values of a certain feature within a window are null, the aggregate is also null. As we can see here (in my dataset) this is the case for 9 rows.
```
# number of rows dropped by the null filter above
df.count()-result.count()
```
Now we import some classes from SparkML. PCA for the actual algorithm. Vectors for the data structure expected by PCA and VectorAssembler to transform data into these vector structures.
```
from pyspark.ml.feature import PCA
from pyspark.ml.linalg import Vectors
from pyspark.ml.feature import VectorAssembler
```
Let's define a vector transformation helper class which takes all our input features (result.columns) and created one additional column called "features" which contains all our input features as one single column wrapped in "DenseVector" objects
```
assembler = VectorAssembler(inputCols=result.columns, outputCol="features") ### combines the n feature columns into one n-dimensional vector column
```
Now we actually transform the data, note that this is highly optimized code and runs really fast in contrast if we had implemented it.
```
# apply the assembler: adds the 'features' DenseVector column
features = assembler.transform(result)
```
Let's have a look at how this new additional column "features" looks like:
```
# inspect the first 10 assembled feature vectors
features.rdd.map(lambda r : r.features).take(10)
```
Since the source data set has been prepared as a list of DenseVectors we can now apply PCA. Note that the first line again only prepares the algorithm by finding the transformation matrices (fit method)
```
pca = PCA(k=3, inputCol="features", outputCol="pcaFeatures") ### reduce 12 dims to 3; fit() computes the transformation matrix
model = pca.fit(features)
```
Now we can actually transform the data. Let's have a look at the first 20 rows
```
result_pca = model.transform(features).select("pcaFeatures") ### performs the transformation
result_pca.show(truncate=False)
```
So we obtained three completely new columns which we can plot now. Let run a final check if the number of rows is the same.
```
# sanity check: row count must match the filtered input
result_pca.count()
```
Cool, this works as expected. Now we obtain a sample and read each of the three columns into a python list
```
# take a ~80% random sample (without replacement) and split the three PCA
# components into separate python lists for plotting
rdd = result_pca.rdd.sample(False,0.8)
x = rdd.map(lambda a : a.pcaFeatures).map(lambda a : a[0]).collect()
y = rdd.map(lambda a : a.pcaFeatures).map(lambda a : a[1]).collect()
z = rdd.map(lambda a : a.pcaFeatures).map(lambda a : a[2]).collect()
```
Finally we plot the three lists and name each of them as dimension 1-3 in the plot
```
%matplotlib inline
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# 3D scatter plot of the three principal components
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(x,y,z, c='r', marker='o')
ax.set_xlabel('dimension1')
ax.set_ylabel('dimension2')
ax.set_zlabel('dimension3')
plt.show()
```
Congratulations, we are done! We can see two clusters in the data set. We can also see a third cluster which either can be outliers or a real cluster. In the next course we will actually learn how to compute clusters automatically. For now we know that the data indicates that there are two semi-stable states of the machine and sometime we see some anomalies since those data points don't fit into one of the two clusters.
| github_jupyter |
```
# embed the referenced YouTube video in the notebook
from IPython.display import YouTubeVideo
YouTubeVideo('FPgo-hI7OiE')
```
# 如何使用和开发微信聊天机器人的系列教程
# A workshop to develop & use an intelligent and interactive chat-bot in WeChat
### WeChat is a popular social media app, which has more than 800 million monthly active users.
<img src='http://www.kudosdata.com/wp-content/uploads/2016/11/cropped-KudosLogo1.png' width=30% style="float: right;">
<img src='reference/WeChat_SamGu_QR.png' width=10% style="float: right;">
### http://www.KudosData.com
by: Sam.Gu@KudosData.com
May 2017 ========== Scan the QR code to become trainer's friend in WeChat ========>>
### 第二课:图像识别和处理
### Lesson 2: Image Recognition & Processing
* 识别图片消息中的物体名字 (Recognize objects in image)
[1] 物体名 (General Object)
[2] 地标名 (Landmark Object)
[3] 商标名 (Logo Object)
* 识别图片消息中的文字 (OCR: Extract text from image)
包含简单文本翻译 (Call text translation API)
* 识别人脸 (Recognize human face)
基于人脸的表情来识别喜怒哀乐等情绪 (Identify sentiment and emotion from human face)
* 不良内容识别 (Explicit Content Detection)
### Using Google Cloud Platform's Machine Learning APIs
From the same API console, choose "Dashboard" on the left-hand menu and "Enable API".
Enable the following APIs for your project (search for them) if they are not already enabled:
<ol>
<li> Google Translate API </li>
<li> Google Cloud Vision API </li>
<li> Google Natural Language API </li>
<li> Google Cloud Speech API </li>
</ol>
Finally, because we are calling the APIs from Python (clients in many other languages are available), let's install the Python package (it's not installed by default on Datalab)
```
# Copyright 2016 Google Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# !pip install --upgrade google-api-python-client
```
### 导入需要用到的一些功能程序库:
```
import io, os, subprocess, sys, time, datetime, requests, itchat
from itchat.content import *
from googleapiclient.discovery import build
```
### Using Google Cloud Platform's Machine Learning APIs
First, visit <a href="http://console.cloud.google.com/apis">API console</a>, choose "Credentials" on the left-hand menu. Choose "Create Credentials" and generate an API key for your application. You should probably restrict it by IP address to prevent abuse, but for now, just leave that field blank and delete the API key after trying out this demo.
Copy-paste your API Key here:
```
# Here I read in my own API_KEY from a file, which is not shared in Github repository:
# with io.open('../../API_KEY.txt') as fp:
#     for line in fp: APIKEY = line
# You need to un-comment below line and replace 'APIKEY' variable with your own GCP API key:
APIKEY='AIzaSyCvxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'  # placeholder -- replace with a real key
# Below is for GCP Language Translation API
service = build('translate', 'v2', developerKey=APIKEY)
```
### 图片二进制base64码转换 (Define image pre-processing functions)
```
# Import the base64 encoding library.
import base64

# Pass the image data to an encoding function.
def encode_image(image_file):
    """Return the base64-encoded content (bytes) of the file at path ``image_file``."""
    # use a distinct name for the handle so the path parameter is not shadowed
    with open(image_file, "rb") as handle:
        image_content = handle.read()
    return base64.b64encode(image_content)
```
### 机器智能API接口控制参数 (Define control parameters for API)
```
# control parameter for Image API:
parm_image_maxResults = 10 # max objects or faces to be extracted from image analysis
# control parameter for Language Translation API:
parm_translation_origin_language = '' # original language in text: to be overwritten by TEXT_DETECTION
parm_translation_target_language = 'zh' # target language for translation: Chinese
```
### * 识别图片消息中的物体名字 (Recognize objects in image)
[1] 物体名 (General Object)
```
# Running Vision API
# 'LABEL_DETECTION'
def KudosData_LABEL_DETECTION(image_base64, API_type, maxResults):
    """Run object-label detection on a base64-encoded image via the GCP
    Vision API and format each detected label with its confidence score."""
    vservice = build('vision', 'v1', developerKey=APIKEY)
    body = {
        'requests': [{
            'image': {
                "content": image_base64
            },
            'features': [{
                'type': API_type,
                'maxResults': maxResults,
            }]
        }],
    }
    request = vservice.images().annotate(body=body)
    responses = request.execute(num_retries=3)
    reply_parts = [u'\n[ ' + API_type + u' 物体识别 ]\n']
    annotation = responses['responses'][0]
    if annotation != {}:
        # one line per label, each followed by its confidence score
        for label in annotation['labelAnnotations']:
            reply_parts.append(label['description']
                               + '\n( confidence ' + str(label['score']) + ' )\n')
    else:
        reply_parts.append(u'[ Nill 无结果 ]\n')
    return ''.join(reply_parts)
```
### * 识别图片消息中的物体名字 (Recognize objects in image)
[2] 地标名 (Landmark Object)
```
# Running Vision API
# 'LANDMARK_DETECTION'
def KudosData_LANDMARK_DETECTION(image_base64, API_type, maxResults):
    """Detect landmarks in a base64-encoded image via the GCP Vision API and
    format each landmark name with its confidence score as a reply string."""
    vservice = build('vision', 'v1', developerKey=APIKEY)
    request = vservice.images().annotate(body={
        'requests': [{
            'image': {
                # 'source': {
                #     'gcs_image_uri': IMAGE
                # }
                "content": image_base64
            },
            'features': [{
                'type': API_type,
                'maxResults': maxResults,
            }]
        }],
    })
    responses = request.execute(num_retries=3)
    image_analysis_reply = u'\n[ ' + API_type + u' 地标识别 ]\n'
    # 'LANDMARK_DETECTION'
    if responses['responses'][0] != {}:
        # one line per landmark, each followed by its confidence score
        for i in range(len(responses['responses'][0]['landmarkAnnotations'])):
            image_analysis_reply += responses['responses'][0]['landmarkAnnotations'][i]['description'] \
                + '\n( confidence ' + str(responses['responses'][0]['landmarkAnnotations'][i]['score']) + ' )\n'
    else:
        image_analysis_reply += u'[ Nill 无结果 ]\n'
    return image_analysis_reply
```
### * 识别图片消息中的物体名字 (Recognize objects in image)
[3] 商标名 (Logo Object)
```
# Running Vision API
# 'LOGO_DETECTION'
def KudosData_LOGO_DETECTION(image_base64, API_type, maxResults):
    """Detect brand logos in a base64-encoded image via the GCP Vision API
    and format each logo name with its confidence score as a reply string."""
    vservice = build('vision', 'v1', developerKey=APIKEY)
    request = vservice.images().annotate(body={
        'requests': [{
            'image': {
                # 'source': {
                #     'gcs_image_uri': IMAGE
                # }
                "content": image_base64
            },
            'features': [{
                'type': API_type,
                'maxResults': maxResults,
            }]
        }],
    })
    responses = request.execute(num_retries=3)
    image_analysis_reply = u'\n[ ' + API_type + u' 商标识别 ]\n'
    # 'LOGO_DETECTION'
    if responses['responses'][0] != {}:
        # one line per logo, each followed by its confidence score
        for i in range(len(responses['responses'][0]['logoAnnotations'])):
            image_analysis_reply += responses['responses'][0]['logoAnnotations'][i]['description'] \
                + '\n( confidence ' + str(responses['responses'][0]['logoAnnotations'][i]['score']) + ' )\n'
    else:
        image_analysis_reply += u'[ Nill 无结果 ]\n'
    return image_analysis_reply
```
### * 识别图片消息中的文字 (OCR: Extract text from image)
```
# Running Vision API
# 'TEXT_DETECTION'
def KudosData_TEXT_DETECTION(image_base64, API_type, maxResults):
    """OCR: extract text from a base64-encoded image via the GCP Vision API,
    then translate it when the detected language differs from the target.

    Returns a formatted reply string containing the original text and (when
    translation was performed) the translated text.
    """
    vservice = build('vision', 'v1', developerKey=APIKEY)
    request = vservice.images().annotate(body={
        'requests': [{
            'image': {
                # 'source': {
                #     'gcs_image_uri': IMAGE
                # }
                "content": image_base64
            },
            'features': [{
                'type': API_type,
                'maxResults': maxResults,
            }]
        }],
    })
    responses = request.execute(num_retries=3)
    image_analysis_reply = u'\n[ ' + API_type + u' 文字提取 ]\n'
    # 'TEXT_DETECTION'
    if responses['responses'][0] != {}:
        image_analysis_reply += u'----- Start Original Text -----\n'
        image_analysis_reply += u'( Original Language 原文: ' + responses['responses'][0]['textAnnotations'][0]['locale'] \
            + ' )\n'
        image_analysis_reply += responses['responses'][0]['textAnnotations'][0]['description'] + '----- End Original Text -----\n'
        ##############################################################################################################
        #                                     translation of detected text                                           #
        ##############################################################################################################
        # NOTE(review): this binds a local that shadows the module-level
        # parameter of the same name; the global is not updated.
        parm_translation_origin_language = responses['responses'][0]['textAnnotations'][0]['locale']
        # Call translation only if the detected language differs from the target
        if parm_translation_origin_language != parm_translation_target_language:
            inputs=[responses['responses'][0]['textAnnotations'][0]['description']] # TEXT_DETECTION OCR results only
            outputs = service.translations().list(source=parm_translation_origin_language,
                                                  target=parm_translation_target_language, q=inputs).execute()
            image_analysis_reply += u'\n----- Start Translation -----\n'
            image_analysis_reply += u'( Target Language 译文: ' + parm_translation_target_language + ' )\n'
            image_analysis_reply += outputs['translations'][0]['translatedText'] + '\n' + '----- End Translation -----\n'
            print('Completed: Translation API ...')  # fixed typo in log message ("Compeleted")
        ##############################################################################################################
    else:
        image_analysis_reply += u'[ Nill 无结果 ]\n'
    return image_analysis_reply
```
### * 识别人脸 (Recognize human face)
### * 基于人脸的表情来识别喜怒哀乐等情绪 (Identify sentiment and emotion from human face)
```
# Running Vision API
# 'FACE_DETECTION'
def KudosData_FACE_DETECTION(image_base64, API_type, maxResults):
    """Detect faces in a base64-encoded image via the GCP Vision API and
    report per-face emotion/attribute likelihoods as a formatted string.

    image_base64: base64-encoded image content
    API_type: Vision API feature name (expected 'FACE_DETECTION')
    maxResults: maximum number of faces to analyze
    """
    vservice = build('vision', 'v1', developerKey=APIKEY)
    request = vservice.images().annotate(body={
        'requests': [{
            'image': {
                # 'source': {
                #     'gcs_image_uri': IMAGE
                # }
                "content": image_base64
            },
            'features': [{
                'type': API_type,
                'maxResults': maxResults,
            }]
        }],
    })
    responses = request.execute(num_retries=3)
    image_analysis_reply = u'\n[ ' + API_type + u' 面部表情 ]\n'
    # 'FACE_DETECTION'
    if responses['responses'][0] != {}:
        # one section per detected face, listing the API's likelihood strings
        for i in range(len(responses['responses'][0]['faceAnnotations'])):
            image_analysis_reply += u'----- No.' + str(i+1) + ' Face -----\n'
            image_analysis_reply += u'>>> Joy 喜悦: \n' \
                + responses['responses'][0]['faceAnnotations'][i][u'joyLikelihood'] + '\n'
            image_analysis_reply += u'>>> Anger 愤怒: \n' \
                + responses['responses'][0]['faceAnnotations'][i][u'angerLikelihood'] + '\n'
            image_analysis_reply += u'>>> Sorrow 悲伤: \n' \
                + responses['responses'][0]['faceAnnotations'][i][u'sorrowLikelihood'] + '\n'
            image_analysis_reply += u'>>> Surprise 惊奇: \n' \
                + responses['responses'][0]['faceAnnotations'][i][u'surpriseLikelihood'] + '\n'
            image_analysis_reply += u'>>> Headwear 头饰: \n' \
                + responses['responses'][0]['faceAnnotations'][i][u'headwearLikelihood'] + '\n'
            image_analysis_reply += u'>>> Blurred 模糊: \n' \
                + responses['responses'][0]['faceAnnotations'][i][u'blurredLikelihood'] + '\n'
            image_analysis_reply += u'>>> UnderExposed 欠曝光: \n' \
                + responses['responses'][0]['faceAnnotations'][i][u'underExposedLikelihood'] + '\n'
    else:
        image_analysis_reply += u'[ Nill 无结果 ]\n'
    return image_analysis_reply
```
### * 不良内容识别 (Explicit Content Detection)
Detect explicit content like adult content or violent content within an image.
```
# Running Vision API
# 'SAFE_SEARCH_DETECTION'
def KudosData_SAFE_SEARCH_DETECTION(image_base64, API_type, maxResults):
    """Run Google Cloud Vision SAFE_SEARCH_DETECTION on a base64 image.

    Parameters:
        image_base64: base64-encoded image content.
        API_type: Vision feature type; expected 'SAFE_SEARCH_DETECTION'
            (also used verbatim in the reply header).
        maxResults: maximum number of annotations requested per feature.

    Returns:
        A human-readable (Chinese/English) report with the likelihood
        string of each explicit-content category, or a "no result"
        marker when the API response is empty.
    """
    vservice = build('vision', 'v1', developerKey=APIKEY)
    request = vservice.images().annotate(body={
        'requests': [{
            'image': {
                # Image content is sent inline rather than via a GCS URI.
                "content": image_base64
            },
            'features': [{
                'type': API_type,
                'maxResults': maxResults,
            }]
        }],
    })
    responses = request.execute(num_retries=3)
    image_analysis_reply = u'\n[ ' + API_type + u' 不良内容 ]\n'
    # (label shown to the user, key in the safeSearchAnnotation payload).
    categories = [
        (u'Adult 成人', u'adult'),
        (u'Violence 暴力', u'violence'),
        (u'Spoof 欺骗', u'spoof'),
        (u'Medical 医疗', u'medical'),
    ]
    if responses['responses'][0] != {}:
        annotation = responses['responses'][0]['safeSearchAnnotation']
        for label, key in categories:
            image_analysis_reply += u'>>> ' + label + u': \n' + annotation[key] + '\n'
    else:
        image_analysis_reply += u'[ Nill 无结果 ]\n'
    return image_analysis_reply
```
### 用微信App扫QR码图片来自动登录
```
# hotReload=True: cache the login state on disk, so restarting the program
# within a short window does not require scanning the QR code again.
itchat.auto_login(hotReload=True) # hotReload=True: 退出程序后暂存登陆状态。即使程序关闭,一定时间内重新开启也可以不用重新扫码。
# enableCmdQR=-2 would render the QR code directly in the terminal instead.
# itchat.auto_login(enableCmdQR=-2) # enableCmdQR=-2: 命令行显示QR图片
# Register for group chats as well by adding isGroupChat=True:
# @itchat.msg_register([PICTURE], isGroupChat=True)
@itchat.msg_register([PICTURE])
def download_files(msg):
    # Handle an incoming picture message: download it, run all six Vision
    # analyses on it, and return the combined report (itchat sends the
    # returned string back to the sender as the chat reply).
    # NOTE(review): this local appears unused in this handler — presumably
    # consumed inside KudosData_TEXT_DETECTION via a global; verify.
    parm_translation_origin_language = 'zh' # will be overwritten by TEXT_DETECTION
    # NOTE(review): attribute access (msg.fileName) and key access
    # (msg['FileName']) are mixed below — itchat supports both; confirm.
    msg.download(msg.fileName)
    print('\nDownloaded image file name is: %s' % msg['FileName'])
    image_base64 = encode_image(msg['FileName'])
    ##############################################################################################################
    # call image analysis APIs #
    ##############################################################################################################
    image_analysis_reply = u'[ Image Analysis 图像分析结果 ]\n'
    # parm_image_maxResults is a module-level setting defined elsewhere.
    # 1. LABEL_DETECTION:
    image_analysis_reply += KudosData_LABEL_DETECTION(image_base64, 'LABEL_DETECTION', parm_image_maxResults)
    # 2. LANDMARK_DETECTION:
    image_analysis_reply += KudosData_LANDMARK_DETECTION(image_base64, 'LANDMARK_DETECTION', parm_image_maxResults)
    # 3. LOGO_DETECTION:
    image_analysis_reply += KudosData_LOGO_DETECTION(image_base64, 'LOGO_DETECTION', parm_image_maxResults)
    # 4. TEXT_DETECTION:
    image_analysis_reply += KudosData_TEXT_DETECTION(image_base64, 'TEXT_DETECTION', parm_image_maxResults)
    # 5. FACE_DETECTION:
    image_analysis_reply += KudosData_FACE_DETECTION(image_base64, 'FACE_DETECTION', parm_image_maxResults)
    # 6. SAFE_SEARCH_DETECTION:
    image_analysis_reply += KudosData_SAFE_SEARCH_DETECTION(image_base64, 'SAFE_SEARCH_DETECTION', parm_image_maxResults)
    # NOTE(review): typo 'Compeleted' in the log message below — runtime
    # string, deliberately left unchanged here.
    print('Compeleted: Image Analysis API ...')
    return image_analysis_reply
# Start the message loop (blocks until interrupted).
itchat.run()
# interrupt kernel, then logout
itchat.logout() # safe logout
```
### 恭喜您!已经完成了:
### 第二课:图像识别和处理
### Lesson 2: Image Recognition & Processing
* 识别图片消息中的物体名字 (Recognize objects in image)
[1] 物体名 (General Object)
[2] 地标名 (Landmark Object)
[3] 商标名 (Logo Object)
* 识别图片消息中的文字 (OCR: Extract text from image)
包含简单文本翻译 (Call text translation API)
* 识别人脸 (Recognize human face)
基于人脸的表情来识别喜怒哀乐等情绪 (Identify sentiment and emotion from human face)
* 不良内容识别 (Explicit Content Detection)
### 下一课是:
### 第三课:自然语言处理:语音合成和识别
### Lesson 3: Natural Language Processing 1
* 消息文字转成语音 (Speech synthesis: text to voice)
* 语音转换成消息文字 (Speech recognition: voice to text)
* 消息文字的多语言互译 (Text based language translation)
<img src='http://www.kudosdata.com/wp-content/uploads/2016/11/cropped-KudosLogo1.png' width=30% style="float: right;">
<img src='reference/WeChat_SamGu_QR.png' width=10% style="float: left;">
| github_jupyter |
# Monte-Carlo Method
## Install External Libraries
```
!pip install PyPortfolioOpt
!pip install yfinance
```
## Import Dependencies
```
import matplotlib
import pypfopt
import datetime
import math
import pandas as pd
import numpy as np
import yfinance as yf
import matplotlib.pyplot as plt
```
## Set Local Variables
```
# Global assumptions for the portfolio analysis.
# NOTE(review): risk_free_rate is declared but the Sharpe ratio computed
# later omits it (return/stddev only) — confirm that is intentional.
risk_free_rate = 0.06501                  # Risk-free rate of return, 10Yr Treasury Bond Yield
trading_days = 240                        # No of trading days in a year
start_date = datetime.date(2000, 1, 1)    # Oldest date to be considered for price analysis
investment_date = datetime.date(2019, 1, 1)   # Date when funds are invested
investment_end = datetime.date(2020, 5, 1)    # Date of end of investment period
# The 30 BSE Sensex constituents ('.BO' = Bombay Stock Exchange suffix
# used by Yahoo Finance).
asset_universe = ['HDFC.BO', 'BAJFINANCE.BO', 'SBIN.BO', 'TITAN.BO', 'HDFCBANK.BO',
'HEROMOTOCO.BO', 'INFY.BO', 'KOTAKBANK.BO', 'ONGC.BO', 'RELIANCE.BO',
'TATASTEEL.BO', 'LT.BO', 'M&M.BO', 'HINDUNILVR.BO', 'NESTLEIND.BO',
'ASIANPAINT.BO', 'ITC.BO', 'SUNPHARMA.BO', 'ICICIBANK.BO', 'INDUSINDBK.BO',
'AXISBANK.BO', 'HCLTECH.BO', 'BHARTIARTL.BO', 'MARUTI.BO','ULTRACEMCO.BO',
'TCS.BO', 'NTPC.BO', 'TECHM.BO', 'POWERGRID.BO','BAJAJ-AUTO.BO'
]
```
## Process Historical Data
```
# Pull adjusted close prices for the 30 constituents and for the Sensex
# index ('^BSESN') over the full analysis window.
sensex30_history = yf.download(asset_universe, start=start_date, end=investment_end, auto_adjust=True).loc[:, 'Close']
sensex_history = yf.download('^BSESN', start=start_date, end=investment_end, auto_adjust=True).loc[:, 'Close']
# Strip the trailing '.BO' exchange suffix from every ticker column name.
sensex30_history = sensex30_history.rename(
    columns={ticker: ticker[:-3] for ticker in sensex30_history.columns})
```
## Compute and Plot Expected Returns
```
# Compute expected returns
# Restrict prices to the estimation window [start_date, investment_date].
sensex30_prices = sensex30_history.loc[start_date:investment_date , :].copy()
# Compute annualised mean daily returns of 30 stocks using data since 2009
# NOTE(review): start_date above is 2000-01-01, not 2009 — confirm which
# estimation window the comment intended.
mean_daily_returns = sensex30_prices.pct_change().dropna(how="all").mean()
# Annualise mean daily returns via compounding
expected_returns = (1 + mean_daily_returns) ** trading_days - 1
# print(type(expected_returns))
# Plot Expected Returns
# NOTE(review): the 'seaborn-whitegrid' style was renamed
# 'seaborn-v0_8-whitegrid' in matplotlib >= 3.6 — confirm pinned version.
plt.style.use("seaborn-whitegrid")
ax = expected_returns.plot(kind="barh")
start, end = ax.get_xlim()
# Tick the return axis every 0.5 across the plotted range.
ax.xaxis.set_ticks(np.arange(start, end, 0.5))
plt.grid(True)
plt.ylabel("Stock Tickers")
plt.xlabel("Expected Return")
plt.show()
```
## Compute and Plot Co-variance Matrix
```
# Compute Covariance Matrix
# Ledoit-Wolf shrinkage towards a constant-correlation target, annualised
# with `trading_days` periods per year.
cov_matrix = pypfopt.risk_models.CovarianceShrinkage(sensex30_prices, frequency=trading_days).ledoit_wolf(shrinkage_target="constant_correlation")
# Convert Covariance Matrix to Correlation Matrix
# corr = D^-1 . cov . D^-1 where D is the diagonal of standard deviations.
inv_diag = np.diag(1 / np.sqrt(np.diag(cov_matrix)))
corr_matrix = pd.DataFrame(np.dot(inv_diag, np.dot(cov_matrix, inv_diag)), index=cov_matrix.index, columns=cov_matrix.columns)
# Plot Covariance Matrix Heatmap
# (the heatmap actually shows the derived correlation matrix)
plt.style.use("default")
fig, ax = plt.subplots()
corr_ax = ax.imshow(corr_matrix, cmap='magma')
fig.colorbar(corr_ax)
# Label both axes with every ticker name.
ax.set_xticks(np.arange(0, corr_matrix.shape[0], 1))
ax.set_xticklabels(corr_matrix.index)
ax.set_yticks(np.arange(0, corr_matrix.shape[0], 1))
ax.set_yticklabels(corr_matrix.index)
plt.xticks(rotation=90)
plt.show()
```
## Monte-Carlo Simulations
```
num_iterations = 1000000 # Number of simulations to be run

# Draw every candidate weight vector in one shot and normalise each row so
# that every portfolio's weights sum to 1. This replaces the original
# million-iteration pure-Python loop with vectorised NumPy, which is
# orders of magnitude faster while drawing from the same distribution.
# (Generalised: the asset count follows expected_returns instead of a
# hard-coded 30.)
n_assets = len(expected_returns)
weights = np.random.random((num_iterations, n_assets))
weights /= weights.sum(axis=1, keepdims=True)

# Vectorised portfolio statistics for all simulations at once.
# Assumes expected_returns, cov_matrix and sensex30_history share the same
# column order, as guaranteed by how they were built upstream.
portfolio_returns = weights @ expected_returns.to_numpy()
# stddev_i = sqrt(w_i . C . w_i^T) for every row w_i of the weight matrix.
cov = cov_matrix.to_numpy()
portfolio_stddevs = np.sqrt(np.einsum('ij,jk,ik->i', weights, cov, weights))
# Sharpe ratio without the risk-free rate, exactly as in the original loop.
sharpe_ratios = portfolio_returns / portfolio_stddevs

stock_list = list(sensex30_history.columns)
columns = ['return','stdev','sharpe'] + stock_list
# Assemble the result table: three statistics columns followed by one
# weight column per stock (same layout as before).
simulation_df = pd.DataFrame(
    np.column_stack([portfolio_returns, portfolio_stddevs, sharpe_ratios, weights]),
    columns=columns)
```
## Optimal Portfolio: Maximum Sharpe Ratio
```
# Locate the simulated portfolio with the highest Sharpe ratio.
max_sharpe_portfolio = simulation_df.iloc[simulation_df['sharpe'].idxmax()]
# Keep only the stocks that actually received weight: skip the three
# leading statistics columns, then filter out zero weights.
max_sharpe_nonzero_wts = {
    stock: weight
    for stock, weight in max_sharpe_portfolio.iloc[3:].items()
    if weight != 0
}
print(max_sharpe_nonzero_wts)
# Pie chart of the maximum-Sharpe portfolio's composition.
plt.style.use("default")
plt.pie(max_sharpe_nonzero_wts.values(), labels=max_sharpe_nonzero_wts.keys(), autopct="%.2f%%", pctdistance=0.8)
plt.show()
```
## Optimal Portfolio: Min Volatility
```
# Locate the simulated portfolio with the lowest standard deviation.
min_volatility_portfolio = simulation_df.iloc[simulation_df['stdev'].idxmin()]
# Keep only the stocks that actually received weight: skip the three
# leading statistics columns, then filter out zero weights.
min_volatility_nonzero_wts = {
    stock: weight
    for stock, weight in min_volatility_portfolio.iloc[3:].items()
    if weight != 0
}
print(min_volatility_nonzero_wts)
# Pie chart of the minimum-volatility portfolio's composition.
plt.style.use("default")
plt.pie(min_volatility_nonzero_wts.values(), labels=min_volatility_nonzero_wts.keys(), autopct="%.2f%%", pctdistance=0.8)
plt.show()
```
## Scatter Plot For Simulated Portfolios
```
# Plot Scatter Plot For Monte-Carlo Simulated Portfolio
plt.style.use("seaborn-whitegrid")
plt.xlabel('Standard Deviation')
plt.ylabel('Returns')
# NOTE(review): the x values are stdev**2 (variance), although the axis is
# labelled "Standard Deviation"/"Risk" — confirm which was intended.
plt.scatter(simulation_df['stdev']**2, simulation_df['return'], c=simulation_df['sharpe'], cmap="gray")
# Highlight the two optimal portfolios (index 1 = stdev, index 0 = return).
# NOTE(review): positional Series indexing (portfolio[1]) is deprecated in
# recent pandas — prefer .iloc[1] there.
max_sharpe_pt = plt.scatter(max_sharpe_portfolio[1]**2, max_sharpe_portfolio[0],color='r', s=100)
min_volatility_pt = plt.scatter(min_volatility_portfolio[1]**2, min_volatility_portfolio[0], color='b', s=100)
plt.grid(True)
plt.legend((max_sharpe_pt, min_volatility_pt), ('Max Sharpe', 'Min Volatility'), loc='best')
plt.ylabel("Expected Returns")
plt.xlabel("Risk")
plt.show()
```
## Performance Analysis of Monte Carlo Method
```
# Slice price datasets to required time-ranges
sensex_prices = sensex_history.loc[investment_date:investment_end].copy()
sensex30_prices = sensex30_history.loc[investment_date:investment_end].copy()
# Create dataframe to analyse portfolio performance
portfolio_performance = pd.DataFrame(index=sensex_prices.index)
# Import Sensex benchmark price value data
portfolio_performance['Sensex'] = sensex_prices
# Import Max Sharpe portfolio price value
# (a cached literal copy of max_sharpe_nonzero_wts from a previous run was
# kept here as a comment; removed for clarity — regenerate by rerunning
# the simulation cell above.)
# Portfolio value per day = weighted sum of constituent prices.
# NOTE(review): pd.Series(index=...) without data creates an all-NaN
# series and warns about dtype in recent pandas — consider dtype=float.
max_sharpe_val = pd.Series(index=sensex30_prices.index)
for index in sensex30_prices.index:
    val = 0
    for stock, wt in max_sharpe_nonzero_wts.items():
        val = val + wt * sensex30_prices.loc[index, stock]
    max_sharpe_val[index] = val
portfolio_performance['Max Sharpe'] = max_sharpe_val
# Import Min Volatility portfolio price value
# (cached literal copy of min_volatility_nonzero_wts removed for clarity.)
min_volatility_val = pd.Series(index=sensex30_prices.index)
for index in sensex30_prices.index:
    val = 0
    for stock, wt in min_volatility_nonzero_wts.items():
        val = val + wt * sensex30_prices.loc[index, stock]
    min_volatility_val[index] = val
portfolio_performance['Min Volatility'] = min_volatility_val
# Compute and Plot Relative Portfolio Performance
portfolio_performance = portfolio_performance.pct_change()
# Normalise data to measure relative percentage change over time:
# the first (NaN) row becomes the base value 100, and every later row is
# the previous row's value compounded by that day's return. The frame is
# overwritten in place, so prev_index always points at the already
# converted row — the statement order here is load-bearing.
# NOTE(review): DataFrame.iteritems/Series.iteritems were removed in
# pandas 2.0 — use .items() on newer pandas.
for label, content in portfolio_performance.iteritems():
    for index, value in content.iteritems():
        if math.isnan(value):
            portfolio_performance.loc[index, label] = 100
            prev_index = index
        else:
            portfolio_performance.loc[index, label] = portfolio_performance.loc[prev_index, label] * (1 + value)
            prev_index = index
# Plot Relative Performance Graph
plt.style.use('seaborn-whitegrid')
portfolio_performance.plot(grid=True)
plt.ylabel("Relative Value")
plt.show()
# Print LaTeX-friendly portfolio composition (one table row per ticker).
# BUG FIX: the original wrote `else 0.00` (a float) inside a string
# concatenation, raising TypeError for any ticker absent from a portfolio;
# the fallback must be the string '0.00'. The '\%' escapes are now written
# as '\\%' to avoid the invalid-escape-sequence warning.
def _latex_pct(weights, ticker):
    # Weight as a percent string rounded to 2 decimals; '0.00' when absent.
    return str(round(weights[ticker] * 100, 2)) if ticker in weights else '0.00'

for ticker in sensex30_prices.columns.values:
    print(ticker + ' & ' + _latex_pct(max_sharpe_nonzero_wts, ticker)
          + '\\% & ' + _latex_pct(min_volatility_nonzero_wts, ticker)
          + '\\% \\\\')
```
| github_jupyter |
```
import pandas as pd
import json
import numpy as np
# Map Hungarian county (megye) names to the uppercase Romanian county
# codes used by the Romanian school registry.
megye={'Fehér':'ALBA', 'Arad':'ARAD', 'Bukarest':'B', 'Bákó':'BACAU', 'Bihar':'BIHOR', 'Beszterce-Naszód':'BISTRITA-NASAUD',
'Brassó':'BRASOV', 'Kolozs':'CLUJ', 'Kovászna':'COVASNA', 'Krassó-Szörény':'CARAS-SEVERIN', 'Hunyad':'HUNEDOARA',
'Hargita':'HARGHITA', 'Máramaros':'MARAMURES', 'Maros':'MURES', 'Szeben':'SIBIU', 'Szatmár':'SATU MARE', 'Szilágy':'SALAJ',
'Temes':'TIMIS'}
# Replacement table that strips Romanian diacritics to ASCII and drops a
# couple of boilerplate substrings ('SC.GEN.', 'I VIII').
# NOTE(review): 'Â' and 'Ă' appear to occur twice — they may be distinct
# Unicode codepoints (precomposed vs combining forms); verify.
ro={'Ă':'A','Ş':'S','Â':'A','Ș':'S','Ț':'T','Â':'A','Î':'I','Ă':'A','Ţ':'T','-':' ','SC.GEN.':'','I VIII':''}
def roman(s):
    # Normalise a Romanian string using the `ro` table via `replacer`
    # (replacer is defined elsewhere in the notebook).
    return replacer(s,ro)
```
Load processed geocoded db
```
# Load the cleaned school dataset; drop the spurious index column saved
# by a previous to_excel call.
data=pd.read_excel('data/clean/erdely6.xlsx').drop('Unnamed: 0',axis=1)
# Guess scores: take absolute values; a score of 0 is treated as 50.
data['guess_scores']=abs(data['guess_scores']).replace(0,50)
# Build a full-address string index (name + locality + street + postcode
# + county). The postcode's trailing '.0' (float artefact) is sliced off.
data.index=data['Denumire'].astype(str)+' '+data['Localitate'].astype(str)+', '+data['Localitate superioară'].astype(str)+', '+\
data['Stradă'].astype(str)+' nr. '+data['Număr'].astype(str)+', '+data['Cod poștal'].astype(str).str[:-2]+', '+\
data['Judet'].astype(str)+', ROMANIA'
# Join in the previously geocoded coordinates, keeping the geocoder's
# settlement name under 'telepules_g' to avoid a column clash.
geo=pd.read_excel('data/clean/geo.xlsx').drop('Unnamed: 0',axis=1).set_index('index')
geo['telepules_g']=geo['telepules']
geo=geo.drop('telepules',axis=1)
data=data.join(geo)
# Identity mapping of city names, exported as JSON for manual editing.
# NOTE(review): the file handle from open() is never closed — rely on GC
# here, or switch to a `with` block.
hun_city={i:i for i in np.sort(list(data['varos'].unique()))}
open('data/geo/hun_city.json','w',encoding='utf8').write(json.dumps(hun_city,ensure_ascii=False))
pd.DataFrame(data['varos'].unique()).to_excel('data/geo/geo.xlsx')
# pd.DataFrame(data['varos'].unique()).to_excel('data/geo/geo_manual.xlsx')
```
Manually edit and fix, then load back
```
# Load the manually corrected city list back in.
geom=list(pd.read_excel('data/geo/geo_manual.xlsx').drop('Unnamed: 0',axis=1)[0].unique())
# Map each corrected city name back to its settlement key.
geom=data[['telepules','varos']].set_index('varos').loc[geom].reset_index().set_index('telepules')
geom.columns=['varos_geo']
#can't join, no judet
```
Geocode from Szekelydata DB
```
# Hungarian settlement-name lookup from the Szekelydata project
# (county -> {Romanian name: Hungarian name}).
hun=json.loads(open('data/geo/hun2.json','r').read())
# Flatten the nested dict into rows of (settlement, Hungarian name) and
# attach the Hungarian county label via the `megye` mapping.
hdf=pd.DataFrame(hun).stack().reset_index().set_index('level_1').join(pd.DataFrame(megye,index=['level_1']).T.reset_index().reset_index().set_index('level_1').drop('level_0',axis=1))
hdf.columns=['telepules','telepules_hun','Megye']
# Key both frames by 'county+settlement' so the join is unambiguous.
hdf.index=hdf['Megye']+'+'+hdf['telepules']
data.index=data['Megye']+'+'+data['telepules']
data=data.join(hdf['telepules_hun'])
# Where no Szekelydata match exists, fall back to the geocoded city name
# (row-wise forward-fill across the two columns).
data['telepules_hun']=data[['varos','telepules_hun']].T.ffill().T['telepules_hun']
# Select the output columns in their final order: identifiers, per-grade
# enrolment statistics, the Romanian school registry (RSH) fields, and
# the Google-geocoded fields.
gata=data[['ID','Év','Megye', 'telepules','telepules_hun','guessed_names2', 'guess_scores','Név','Típus', 'Profil',
'Óvodás csoportok összesen',
'Óvodások összesen', 'Kiscsoportok száma', 'Kiscsoportosok',
'Középcsoportok száma', 'Középcsoportosok', 'Nagycsoportok száma',
'Nagycsoportosok', 'Vegyes csoportok száma', 'Vegyescsoportosok',
'Tanítók összesen', 'Képzett tanítók', 'Képzetlen tanítók',
'Elemi osztályok összesen', 'Elemisek összesen',
'Előkészítő osztályok száma', 'Előkészítő osztályosok',
'1. osztályok száma', '1. osztályosok', '2. osztályok száma',
'2. osztályosok', '3. osztályok száma', '3. osztályosok',
'4. osztályok száma', '4. osztályosok', 'Általános osztályok összesen',
'Általánososok összesen', '5. osztályok száma', '5. osztályosok',
'6. osztályok száma', '6. osztályosok', '7. osztályok száma',
'7. osztályosok', '8. osztályok száma', '8. osztályosok',
'Középiskolai osztályok összesen', 'Középiskolások összesen',
'9. osztályok száma', '9. osztályosok', '10. osztályok száma',
'10. osztályosok', '11. osztályok száma', '11. osztályosok',
'12. osztályok száma', '12. osztályosok', '13. osztályok száma',
'13. osztályosok', '14. osztályok száma', '14. osztályosok','Továbbtanulás', 'Iskolabusz',
'Cod SIIIR', 'Cod SIRUES', 'Denumire scurtă', 'Denumire', 'Localitate',
'Localitate superioară', 'Stradă', 'Număr', 'Cod poștal', 'Statut',
'Tip unitate', 'Unitate PJ', 'Mod funcționare', 'Formă de finanțare',
'Formă de proprietate', 'Cod fiscal', 'Judet', 'Data modificării',
'Data acreditării', 'Data intrării în vigoare', 'Data închiderii',
'Telefon', 'Fax', 'Adresa email', 'nev', 'telepules_g','varos','cim', 'koordinata', 'telefon', 'web', 'maps', 'kep',
]]
# Rename everything to the Hungarian output labels; this list is strictly
# positional and must stay in lock-step with the selection above
# ('RSH ' prefix = Romanian registry fields, 'GOOGLE ' = geocoded fields).
gata.columns=['ID','Év','Megye', 'Település (eredeti)','Település (magyar VÁZLAT)','Név (normalizált)', 'Adatok megbízhatósága',
'Név (eredeti)','Típus (VÁZLAT)', 'Profil (VÁZLAT)',
'Óvodás csoportok összesen',
'Óvodások összesen', 'Kiscsoportok száma', 'Kiscsoportosok',
'Középcsoportok száma', 'Középcsoportosok', 'Nagycsoportok száma',
'Nagycsoportosok', 'Vegyes csoportok száma', 'Vegyescsoportosok',
'Tanítók összesen', 'Képzett tanítók', 'Képzetlen tanítók',
'Elemi osztályok összesen', 'Elemisek összesen',
'Előkészítő osztályok száma', 'Előkészítő osztályosok',
'1. osztályok száma', '1. osztályosok', '2. osztályok száma',
'2. osztályosok', '3. osztályok száma', '3. osztályosok',
'4. osztályok száma', '4. osztályosok', 'Általános osztályok összesen',
'Általánososok összesen', '5. osztályok száma', '5. osztályosok',
'6. osztályok száma', '6. osztályosok', '7. osztályok száma',
'7. osztályosok', '8. osztályok száma', '8. osztályosok',
'Középiskolai osztályok összesen', 'Középiskolások összesen',
'9. osztályok száma', '9. osztályosok', '10. osztályok száma',
'10. osztályosok', '11. osztályok száma', '11. osztályosok',
'12. osztályok száma', '12. osztályosok', '13. osztályok száma',
'13. osztályosok', '14. osztályok száma', '14. osztályosok','Továbbtanulás (VÁZLAT)', 'Iskolabusz (VÁZLAT)',
'RSH SIIIR kód', 'RSH SIRUES kód', 'RSH Rövid név', 'RSH Név', 'RSH Település',
'RSH Község', 'RSH Cím/Utca', 'RSH Cím/Szám', 'RSH Cím/Irányítószám', 'RSH Jogi forma',
'RSH Egység típusa', 'RSH Anyaintézmény', 'RSH Működési forma', 'RSH Finanszírozás',
'RSH Tulajdonviszony', 'RSH Adószám', 'RSH Megye', 'RSH Módosítva',
'RSH Akkreditálva', 'RSH Működés kezdete', 'RSH Bezárás ideje',
'RSH Telefon', 'RSH Fax', 'RSH Email', 'GOOGLE Név', 'GOOGLE Település', 'GOOGLE Község', 'GOOGLE Cím',
'GOOGLE koordináta', 'GOOGLE Telefon', 'GOOGLE weboldal', 'GOOGLE térkép', 'GOOGLE fénykép',
]
# Expand the Romanian school-type abbreviations in the normalised name
# column to their Hungarian equivalents. The replacements run in the
# listed order, exactly as the original chained .str.replace calls did.
_type_names = {
    'SGMZ': 'ÁLTALÁNOS ISKOLA',
    'SPRM': 'ELEMI ISKOLA',
    'SPSTL': 'POSZTLÍCEUM',
    'LICTEH': 'SZAKLÍCEUM',
    'LISPRT': 'SPORTISKOLA',
    'CLBCOP': 'GYEREK-KLUB',
    'LITEOR': 'ELMÉLETI LÍCEUM',
    'LIPDGA': 'TANÍTÓKÉPZŐ',
    'LITOLX': 'TEOLÓGIAI LÍCEUM',
    'LIARTE': 'MŰVÉSZETI LÍCEUM',
    'COLGNAT': 'NEMZETI KOLLÉGIUM',
    'GRDNRM': 'ÓVODA',
    'GRDPLG': 'NAPKÖZI-OTTHON',
    'INSPSCJ': 'TANFELÜGYELŐSÉG',
    'SCSPC': 'SPECIÁLIS ISKOLA',
}
for abbrev, full_name in _type_names.items():
    gata['Név (normalizált)'] = gata['Név (normalizált)'].str.replace(abbrev, full_name)
# Sort by year, county, settlement and name, then export the draft table.
hata = gata.set_index('ID').sort_values(['Év', 'Megye', 'Település (eredeti)', 'Név (normalizált)'])
hata.to_excel('data/output/Erdely_draft_output.xlsx')
```
DEPRECATED - Google Translate can't translate place names reliably
Finish up the rest with `googletrans`
```
# !pip install googletrans
from googletrans import Translator
# Unofficial Google Translate client; performs network requests.
translator = Translator()
# Smoke test: a single Romanian -> Hungarian translation.
t=translator.translate('scoala',src='ro',dest='hu')
# NOTE(review): this first assignment is dead code — it is immediately
# overwritten by the assignment below.
to_translate=list((data['telepules']+', judetul '+data['Judet']+', ROMANIA').unique())[:20]
# Build full-address strings (first 10 unique) to batch-translate.
to_translate=list((data['Denumire'].astype(str)+' '+data['Localitate'].astype(str)+', '+data['Localitate superioară'].astype(str)+', '+\
data['Stradă'].astype(str)+' nr. '+data['Număr'].astype(str)+', '+data['Cod poștal'].astype(str).str[:-2]+', '+\
data['Judet'].astype(str)+', ROMANIA').unique())[:10]
translated={}
translations = translator.translate(to_translate, src='ro', dest='hu')
# Map each original string to its translation.
for translation in translations:
    translated[translation.origin]=translation.text
translated
```
| github_jupyter |
# 1次元のデータの整理
## データの中心の指標
```
import numpy as np
import pandas as pd
# Jupyter Notebookの出力を小数点以下3桁に抑える
%precision 3
# Dataframeの出力を小数点以下3桁に抑える
pd.set_option('precision', 3)
df = pd.read_csv('../data/ch2_scores_em.csv',
index_col='生徒番号')
# dfの最初の5行を表示。df.head(3) にすれば3行。
df.head()
# df['英語'] は Pamdas の Series だった。
scores = np.array(df['英語'])[:10]
scores
# scores に、「生徒」という名前のインデックスをつけて、また DataFrame を作る。
scores_df = pd.DataFrame({'点数':scores},
index=pd.Index(['A', 'B', 'C', 'D', 'E',
'F', 'G', 'H', 'I', 'J'],
name='生徒'))
scores_df
```
### 平均値
```
sum(scores) / len(scores)
np.mean(scores)
scores_df.mean()
```
### 中央値
```
# Compute the median by hand: sort, then take the middle element (or the
# mean of the two middle elements when the count is even).
sorted_scores = np.sort(scores)
sorted_scores
n = len(sorted_scores)
mid = n // 2
if n % 2 == 0:
    median = (sorted_scores[mid - 1] + sorted_scores[mid]) / 2
else:
    median = sorted_scores[mid]
median
# With an even number of observations the median is the mean of the two
# central values (56 and 57 here), i.e. 56.5.
np.median(scores)
scores_df.median()
```
### 最頻値
```
pd.Series([1, 1, 1, 2, 2, 3]).mode()
pd.Series([1, 2, 3, 4, 5]).mode()
```
## データのばらつきの指標
### 分散と標準偏差
#### 偏差
```
mean = np.mean(scores)
# np.array から値を引き算している(!)。NumPy の「ブロードキャスト」という機能で実現されている。
deviation = scores - mean
deviation
another_scores = [50, 60, 58, 54, 51, 56, 57, 53, 52, 59]
another_mean = np.mean(another_scores)
another_deviation = another_scores - another_mean
another_deviation
np.mean(deviation)
np.mean(another_deviation)
summary_df = scores_df.copy()
summary_df['偏差'] = deviation
summary_df
summary_df.mean()
```
#### 分散
```
# Population variance: the mean of the squared deviations.
np.mean(deviation ** 2)
# np.var defaults to ddof=0, i.e. the population (sample) variance.
np.var(scores)
# pandas defaults to the unbiased variance (ddof=1); passing ddof=0 would
# give the population variance instead.
scores_df.var()
# Add the squared deviations as a column ('偏差二乗' = squared deviation).
summary_df['偏差二乗'] = np.square(deviation)
summary_df
# Averaging the squared deviations reveals the spread of the data.
summary_df.mean()
```
#### 標準偏差
```
np.sqrt(np.var(scores, ddof=0))
np.std(scores, ddof=0)
```
### 範囲と四分位範囲
#### 範囲
```
np.max(scores) - np.min(scores)
```
#### 四分位範囲
```
scores_Q1 = np.percentile(scores, 25)
scores_Q3 = np.percentile(scores, 75)
scores_IQR = scores_Q3 - scores_Q1
scores_IQR
```
### データの指標のまとめ
```
pd.Series(scores).describe()
```
## データの正規化
### 標準化
```
z = (scores - np.mean(scores)) / np.std(scores)
z
np.mean(z), np.std(z, ddof=0)
```
### 偏差値
```
z = 50 + 10 * (scores - np.mean(scores)) / np.std(scores)
scores_df['偏差値'] = z
scores_df
```
## データの視覚化
```
# 50人分の英語の点数のarray
english_scores = np.array(df['英語'])
# Seriesに変換してdescribeを表示
pd.Series(english_scores).describe()
```
### 度数分布表
```
# Bin the 50 scores into 10 equal classes over [0, 100]; keep only the
# counts (the second return value is the bin edges).
freq, _ = np.histogram(english_scores, bins=10, range=(0, 100))
freq
# Build the class labels '0~10', '10~20', ... as strings.
freq_class = [f'{i}~{i+10}' for i in range(0, 100, 10)]
# Build a DataFrame of the counts, indexed by class label
# ('度数' = frequency, '階級' = class).
freq_dist_df = pd.DataFrame({'度数':freq},
                            index=pd.Index(freq_class,
                                           name='階級'))
freq_dist_df
# Class midpoints: 5, 15, ..., 95.
class_value = [(i+(i+10))//2 for i in range(0, 100, 10)]
class_value
# Relative frequency of each class.
rel_freq = freq / freq.sum()
rel_freq
# Cumulative relative frequency.
cum_rel_freq = np.cumsum(rel_freq)
cum_rel_freq
# Assemble the full frequency-distribution table and order its columns
# (midpoint, frequency, relative freq, cumulative relative freq).
freq_dist_df['階級値'] = class_value
freq_dist_df['相対度数'] = rel_freq
freq_dist_df['累積相対度数'] = cum_rel_freq
freq_dist_df = freq_dist_df[['階級値', '度数',
                             '相対度数', '累積相対度数']]
freq_dist_df
```
#### 最頻値ふたたび
```
freq_dist_df.loc[freq_dist_df['度数'].idxmax(), '階級値']
```
### ヒストグラム
```
# Matplotlibのpyplotモジュールをpltという名前でインポート
import matplotlib.pyplot as plt
# グラフがnotebook上に表示されるようにする
%matplotlib inline
# キャンバスを作る
# figsizeで横・縦の大きさを指定
fig = plt.figure(figsize=(10, 6))
# キャンバス上にグラフを描画するための領域を作る
# 引数は領域を1×1個作り、1つめの領域に描画することを意味する
ax = fig.add_subplot(111)
# 階級数を10にしてヒストグラムを描画
freq, _, _ = ax.hist(english_scores, bins=10, range=(0, 100))
# X軸にラベルをつける
#ax.set_xlabel('点数')
ax.set_xlabel('点数')
# Y軸にラベルをつける
ax.set_ylabel('人数')
# X軸に0, 10, 20, ..., 100の目盛りをふる
ax.set_xticks(np.linspace(0, 100, 10+1))
# Y軸に0, 1, 2, ...の目盛りをふる
ax.set_yticks(np.arange(0, freq.max()+1))
# グラフの表示
plt.show()
fig = plt.figure(figsize=(10, 6))
ax = fig.add_subplot(111)
freq, _ , _ = ax.hist(english_scores, bins=25, range=(0, 100))
ax.set_xlabel('点数')
ax.set_ylabel('人数')
ax.set_xticks(np.linspace(0, 100, 25+1))
ax.set_yticks(np.arange(0, freq.max()+1))
plt.show()
fig = plt.figure(figsize=(10, 6))
ax1 = fig.add_subplot(111)
# Y軸のスケールが違うグラフをax1と同じ領域上に書けるようにする
ax2 = ax1.twinx()
# 相対度数のヒストグラムにするためには、度数をデータの数で割る必要がある
# これはhistの引数weightを指定することで実現できる
weights = np.ones_like(english_scores) / len(english_scores)
rel_freq, _, _ = ax1.hist(english_scores, bins=25,
range=(0, 100), weights=weights)
cum_rel_freq = np.cumsum(rel_freq)
class_value = [(i+(i+4))//2 for i in range(0, 100, 4)]
# 折れ線グラフの描画
# 引数lsを'--'にすることで線が点線に
# 引数markerを'o'にすることでデータ点を丸に
# 引数colorを'gray'にすることで灰色に
ax2.plot(class_value, cum_rel_freq,
ls='--', marker='o', color='gray')
# 折れ線グラフの罫線を消去
ax2.grid(visible=False)
ax1.set_xlabel('点数')
ax1.set_ylabel('相対度数')
ax2.set_ylabel('累積相対度数')
ax1.set_xticks(np.linspace(0, 100, 25+1))
plt.show()
```
### 箱ひげ図
```
fig = plt.figure(figsize=(5, 6))
ax = fig.add_subplot(111)
ax.boxplot(english_scores, labels=['英語'])
plt.show()
```
| github_jupyter |
## RIHAD VARIAWA, Data Scientist - Who has fun LEARNING, EXPLORING & GROWING
<h1 align="center"><font size="5">COLLABORATIVE FILTERING</font></h1>
Recommendation systems are a collection of algorithms used to recommend items to users based on information taken from the user. These systems have become ubiquitous and can be commonly seen in online stores, movie databases and job finders. In this notebook, we will explore recommendation systems based on Collaborative Filtering and implement a simple version of one using Python and the Pandas library.
<h1>Table of contents</h1>
<div class="alert alert-block alert-info" style="margin-top: 20px">
<ol>
<li><a href="#ref1">Acquiring the Data</a></li>
<li><a href="#ref2">Preprocessing</a></li>
<li><a href="#ref3">Collaborative Filtering</a></li>
</ol>
</div>
<br>
<hr>
<a id="ref1"></a>
# Acquiring the Data
To acquire and extract the data, simply run the following Bash scripts:
Dataset acquired from [GroupLens](http://grouplens.org/datasets/movielens/). Lets download the dataset. To download the data, we will use **`!wget`** to download it from IBM Object Storage.
__Did you know?__ When it comes to Machine Learning, you will likely be working with large datasets. As a business, where can you host your data? IBM is offering a unique opportunity for businesses, with 10 Tb of IBM Cloud Object Storage: [Sign up now for free](http://cocl.us/ML0101EN-IBM-Offer-CC)
```
!wget -O moviedataset.zip https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/ML0101ENv3/labs/moviedataset.zip
print('unziping ...')
!unzip -o -j moviedataset.zip
```
Now you're ready to start working with the data!
<hr>
<a id="ref2"></a>
# Preprocessing
First, let's get all of the imports out of the way:
```
#Dataframe manipulation library
import pandas as pd
#Math functions, we'll only need the sqrt function so let's import only that
from math import sqrt
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
```
Now let's read each file into their Dataframes:
```
#Storing the movie information into a pandas dataframe
movies_df = pd.read_csv('_datasets/movies.csv')
#Storing the user information into a pandas dataframe
ratings_df = pd.read_csv('_datasets/ratings.csv')
```
Let's also take a peek at how each of them are organized:
```
#Head is a function that gets the first N rows of a dataframe. N's default is 5.
movies_df.head()
```
So each movie has a unique ID, a title with its release year along with it (Which may contain unicode characters) and several different genres in the same field. Let's remove the year from the title column and place it into its own one by using the handy [extract](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.Series.str.extract.html#pandas.Series.str.extract) function that Pandas has.
Let's remove the year from the __title__ column by using pandas' replace function and store in a new __year__ column.
```
#Using regular expressions to find a year stored between parentheses
#We specify the parentheses so we don't conflict with movies that have years in their titles
movies_df['year'] = movies_df.title.str.extract(r'(\(\d\d\d\d\))',expand=False)
#Removing the parentheses, keeping just the four digits
movies_df['year'] = movies_df.year.str.extract(r'(\d\d\d\d)',expand=False)
#Removing the years from the 'title' column.
#FIX: str.replace stopped treating the pattern as a regex by default in
#pandas 2.0, which silently left the '(yyyy)' suffix in place; regex=True
#restores the intended behaviour on all versions. Raw strings avoid the
#invalid '\d' escape-sequence warning.
movies_df['title'] = movies_df.title.str.replace(r'(\(\d\d\d\d\))', '', regex=True)
#Applying the strip function to get rid of any ending whitespace characters that may have appeared
movies_df['title'] = movies_df['title'].apply(lambda x: x.strip())
```
Let's look at the result!
```
movies_df.head()
```
With that, let's also drop the genres column since we won't need it for this particular recommendation system.
```
#Dropping the genres column.
#FIX: the positional axis argument (drop('genres', 1)) was removed in
#pandas 2.0 — name the column explicitly via the columns= keyword.
movies_df = movies_df.drop(columns='genres')
```
Here's the final movies dataframe:
```
movies_df.head()
```
<br>
Next, let's look at the ratings dataframe.
```
ratings_df.head()
```
Every row in the ratings dataframe has a user id associated with at least one movie, a rating and a timestamp showing when they reviewed it. We won't be needing the timestamp column, so let's drop it to save on memory.
```
#Drop the timestamp column by name: positional axis arguments were removed in pandas 2.0
ratings_df = ratings_df.drop(columns='timestamp')
```
Here's what the final ratings DataFrame looks like:
```
#Final ratings DataFrame: userId, movieId, rating
ratings_df.head()
```
<hr>
<a id="ref3"></a>
# Collaborative Filtering
Now, time to start our work on recommendation systems.
The first technique we're going to take a look at is called __Collaborative Filtering__, which is also known as __User-User Filtering__. As hinted by its alternate name, this technique uses other users to recommend items to the input user. It attempts to find users that have similar preferences and opinions as the input and then recommends items that they have liked to the input. There are several methods of finding similar users (Even some making use of Machine Learning), and the one we will be using here is going to be based on the __Pearson Correlation Function__.
<img src="https://ibm.box.com/shared/static/1ql8cbwhtkmbr6nge5e706ikzm5mua5w.png" width=800px>
The process for creating a User Based recommendation system is as follows:
- Select a user with the movies the user has watched
- Based on their ratings of movies, find the top X neighbours
- Get the watched movie record of the user for each neighbour.
- Calculate a similarity score using some formula
- Recommend the items with the highest score
Let's begin by creating an input user to recommend movies to:
Notice: To add more movies, simply increase the amount of elements in the userInput. Feel free to add more in! Just be sure to write it in with capital letters and if a movie starts with a "The", like "The Matrix" then write it in like this: 'Matrix, The' .
```
#Ratings supplied by the hypothetical input user.
#Titles must match movies_df exactly, e.g. "Matrix, The" rather than "The Matrix".
_titles = ['Breakfast Club, The', 'Toy Story', 'Jumanji', 'Pulp Fiction', 'Akira']
_scores = [5, 3.5, 2, 5, 4.5]
userInput = [{'title': t, 'rating': r} for t, r in zip(_titles, _scores)]
inputMovies = pd.DataFrame(userInput)
inputMovies
```
#### Add movieId to input user
With the input complete, let's extract the input movies's ID's from the movies dataframe and add them into it.
We can achieve this by first filtering out the rows that contain the input movies' title and then merging this subset with the input dataframe. We also drop unnecessary columns for the input to save memory space.
```
#Filter movies_df down to rows whose title matches one of the input titles
inputId = movies_df[movies_df['title'].isin(inputMovies['title'].tolist())]
#Merge to pull in the movieId; pandas joins implicitly on the shared 'title' column
inputMovies = pd.merge(inputId, inputMovies)
#Drop the year column by name: positional axis arguments were removed in pandas 2.0
inputMovies = inputMovies.drop(columns='year')
#Final input dataframe
#If a movie you added above isn't here, it might not be in the original
#dataframe or it might be spelled differently; please check capitalisation.
inputMovies
```
#### The users who have seen the same movies
Now with the movie ID's in our input, we can now get the subset of users that have watched and reviewed the movies in our input.
```
#Filtering out users that have watched movies that the input has watched and storing it
userSubset = ratings_df[ratings_df['movieId'].isin(inputMovies['movieId'].tolist())]
userSubset.head()
```
We now group up the rows by user ID.
```
#Groupby creates several sub dataframes where they all have the same value in the column specified as the parameter
userSubsetGroup = userSubset.groupby(['userId'])
```
Let's look at one of the users, e.g. the one with userID=1130.
```
#Inspect a single user's group, e.g. userId 1130
userSubsetGroup.get_group(1130)
```
Let's also sort these groups so the users that share the most movies in common with the input have higher priority. This provides a richer recommendation since we won't go through every single user.
```
#Sort so users sharing the most movies with the input come first;
#this lets later steps consider only the most promising neighbours
userSubsetGroup = sorted(userSubsetGroup, key=lambda x: len(x[1]), reverse=True)
```
Now lets look at the first user
```
#The three users with the most movies in common with the input
userSubsetGroup[0:3]
```
#### Similarity of users to input user
Next, we are going to compare a limited subset of users (rather than every user) to our specified user and find the one that is most similar.
we're going to find out how similar each user is to the input through the __Pearson Correlation Coefficient__. It is used to measure the strength of a linear association between two variables. The formula for finding this coefficient between sets X and Y with N values can be seen in the image below.
Why Pearson Correlation?
Pearson correlation is invariant to scaling, i.e. multiplying all elements by a nonzero constant or adding any constant to all elements. For example, if you have two vectors X and Y,then, pearson(X, Y) == pearson(X, 2 * Y + 3). This is a pretty important property in recommendation systems because for example two users might rate two series of items totally different in terms of absolute rates, but they would be similar users (i.e. with similar ideas) with similar rates in various scales .

The values given by the formula vary from r = -1 to r = 1, where 1 forms a direct correlation between the two entities (it means a perfect positive correlation) and -1 forms a perfect negative correlation.
In our case, a 1 means that the two users have similar tastes while a -1 means the opposite.
We will select a subset of users to iterate through. This limit is imposed because we don't want to waste too much time going through every single user.
```
#Limit the comparison to the 100 most-overlapping users to keep the loop cheap
userSubsetGroup = userSubsetGroup[0:100]
```
Now, we calculate the Pearson Correlation between input user and subset group, and store it in a dictionary, where the key is the user Id and the value is the coefficient
```
#Pearson correlation of each candidate user with the input user,
#stored in a dictionary keyed by userId
pearsonCorrelationDict = {}
#For every (userId, ratings) group in our subset
for name, group in userSubsetGroup:
    #Sort both frames by movieId so the rating lists below line up row-for-row
    group = group.sort_values(by='movieId')
    inputMovies = inputMovies.sort_values(by='movieId')
    #N in the Pearson formula: the number of movies this user shares with the input
    nRatings = len(group)
    #The input user's scores for exactly the movies this user also rated
    temp_df = inputMovies[inputMovies['movieId'].isin(group['movieId'].tolist())]
    #Plain Python lists make the sums below straightforward
    tempRatingList = temp_df['rating'].tolist()
    #The candidate user's scores for the same movies, in the same order
    tempGroupList = group['rating'].tolist()
    #Pearson components: Sxx/Syy are centred sums of squares, Sxy the centred cross sum
    Sxx = sum([i**2 for i in tempRatingList]) - pow(sum(tempRatingList),2)/float(nRatings)
    Syy = sum([i**2 for i in tempGroupList]) - pow(sum(tempGroupList),2)/float(nRatings)
    Sxy = sum( i*j for i, j in zip(tempRatingList, tempGroupList)) - sum(tempRatingList)*sum(tempGroupList)/float(nRatings)
    #Zero denominator (a constant rating vector) is defined as correlation 0
    #NOTE(review): sqrt must be in scope from an earlier notebook cell (e.g. from math import sqrt) — confirm
    if Sxx != 0 and Syy != 0:
        pearsonCorrelationDict[name] = Sxy/sqrt(Sxx*Syy)
    else:
        pearsonCorrelationDict[name] = 0
pearsonCorrelationDict.items()
#Reshape the dict into a DataFrame with columns [similarityIndex, userId]
pearsonDF = pd.DataFrame.from_dict(pearsonCorrelationDict, orient='index')
pearsonDF.columns = ['similarityIndex']
pearsonDF['userId'] = pearsonDF.index
pearsonDF.index = range(len(pearsonDF))
pearsonDF.head()
```
#### The top x similar users to input user
Now let's get the top 50 users that are most similar to the input.
```
#Top 50 users by Pearson similarity to the input user
topUsers=pearsonDF.sort_values(by='similarityIndex', ascending=False)[0:50]
topUsers.head()
```
Now, let's start recommending movies to the input user.
#### Rating of selected users to all movies
We're going to do this by taking the weighted average of the ratings of the movies using the Pearson Correlation as the weight. But to do this, we first need to get the movies watched by the users in our __pearsonDF__ from the ratings dataframe and then store their correlation in a new column called __similarityIndex__. This is achieved below by merging these two tables.
```
#Attach each top user's full rating history, keeping their similarityIndex on every row
topUsersRating=topUsers.merge(ratings_df, left_on='userId', right_on='userId', how='inner')
topUsersRating.head()
```
Now all we need to do is simply multiply the movie rating by its weight (The similarity index), then sum up the new ratings and divide it by the sum of the weights.
We can easily do this by simply multiplying two columns, then grouping up the dataframe by movieId and then dividing two columns:
It shows the idea of all similar users to candidate movies for the input user:
```
#Weight each neighbour's rating by their similarity to the input user
topUsersRating['weightedRating'] = topUsersRating['similarityIndex']*topUsersRating['rating']
topUsersRating.head()
#Per movie: sum of weights and sum of weighted ratings over the top users
tempTopUsersRating = topUsersRating.groupby('movieId').sum()[['similarityIndex','weightedRating']]
tempTopUsersRating.columns = ['sum_similarityIndex','sum_weightedRating']
tempTopUsersRating.head()
#Build the recommendation table
recommendation_df = pd.DataFrame()
#Weighted average = sum(weight * rating) / sum(weight) per movie
recommendation_df['weighted average recommendation score'] = tempTopUsersRating['sum_weightedRating']/tempTopUsersRating['sum_similarityIndex']
recommendation_df['movieId'] = tempTopUsersRating.index
recommendation_df.head()
```
Now let's sort it and see the top 20 movies that the algorithm recommended!
```
#Highest weighted-average scores first
recommendation_df = recommendation_df.sort_values(by='weighted average recommendation score', ascending=False)
recommendation_df.head(10)
#Map the recommended movieIds back to their titles
movies_df.loc[movies_df['movieId'].isin(recommendation_df.head(10)['movieId'].tolist())]
```
### Advantages and Disadvantages of Collaborative Filtering
##### Advantages
* Takes other user's ratings into consideration
* Doesn't need to study or extract information from the recommended item
* Adapts to the user's interests which might change over time
##### Disadvantages
* Approximation function can be slow
* There might be a low amount of users to approximate
* Privacy issues when trying to learn the user's preferences
<h2>Want to learn more?</h2>
IBM SPSS Modeler is a comprehensive analytics platform that has many machine learning algorithms. It has been designed to bring predictive intelligence to decisions made by individuals, by groups, by systems – by your enterprise as a whole. A free trial is available through this course, available here: <a href="http://cocl.us/ML0101EN-SPSSModeler">SPSS Modeler</a>
Also, you can use Watson Studio to run these notebooks faster with bigger datasets. Watson Studio is IBM's leading cloud solution for data scientists, built by data scientists. With Jupyter notebooks, RStudio, Apache Spark and popular libraries pre-packaged in the cloud, Watson Studio enables data scientists to collaborate on their projects without having to install anything. Join the fast-growing community of Watson Studio users today with a free account at <a href="https://cocl.us/ML0101EN_DSX">Watson Studio</a>
<h3>Thanks for completing this lesson!</h3>
<h4>Author: <a href="https://ca.linkedin.com/in/saeedaghabozorgi">Saeed Aghabozorgi</a></h4>
<p><a href="https://ca.linkedin.com/in/saeedaghabozorgi">Saeed Aghabozorgi</a>, PhD is a Data Scientist in IBM with a track record of developing enterprise level applications that substantially increases clients’ ability to turn data into actionable knowledge. He is a researcher in data mining field and expert in developing advanced analytic methods like machine learning and statistical modelling on large datasets.</p>
<hr>
<p>Copyright © 2018 <a href="https://cocl.us/DX0108EN_CC">Cognitive Class</a>. This notebook and its source code are released under the terms of the <a href="https://bigdatauniversity.com/mit-license/">MIT License</a>.</p>
| github_jupyter |
# Melanoma analysis with fractal neural networks
This notebook shows how good is [Fractal neural network](#Fractal-neural-network) for [melanoma](#Melanoma) analysis.
```
import os
import datetime
import numpy as np
import tensorflow as tf
import tensorflow_hub as hub
import tensorflow_addons as tfa
import matplotlib.pyplot as plt
from sklearn.metrics import roc_curve, auc
```
Check if a GPU is available.
```
# List GPUs visible to TensorFlow (an empty list means CPU-only).
tf.config.list_physical_devices('GPU')
```
Remove excessive logging.
```
# Silence TensorFlow log messages below ERROR level.
tf.get_logger().setLevel('ERROR')
```
# Melanoma
__Melanoma__, also redundantly known as __malignant melanoma__, is a type of skin cancer that develops from the pigment-producing cells known as melanocytes. Melanomas typically occur in the skin, but may rarely occur in the mouth, intestines, or eye (uveal melanoma). In women, they most commonly occur on the legs, while in men, they most commonly occur on the back. About 25% of melanomas develop from moles. Changes in a mole that can indicate melanoma include an increase in size, irregular edges, change in color, itchiness, or skin breakdown.

<div style="text-align: center; font-weight: bold">Pic.1. A melanoma of approximately 2.5 cm (1 in) by 1.5 cm (0.6 in)</div>
The primary cause of melanoma is ultraviolet light (UV) exposure in those with low levels of the skin pigment melanin. The UV light may be from the sun or other sources, such as tanning devices. Those with many moles, a history of affected family members, and poor immune function are at greater risk. A number of rare genetic conditions, such as xeroderma pigmentosum, also increase the risk. Diagnosis is by biopsy and analysis of any skin lesion that has signs of being potentially cancerous.
Melanoma is the most dangerous type of skin cancer. Globally, in 2012, it newly occurred in 232,000 people. In 2015, 3.1 million people had active disease, which resulted in 59,800 deaths. Australia and New Zealand have the highest rates of melanoma in the world. High rates also occur in Northern Europe and North America, while it is less common in Asia, Africa, and Latin America. In the United States, melanoma occurs about 1.6 times more often in men than women. Melanoma has become more common since the 1960s in areas mostly populated by people of European descent.
# Fractal neural network
We propose an ensemble model based on handcrafted fractal features and deep learning that consists of combining the classification of two CNNs by applying the sum rule. We apply feature extraction to obtain 300 fractal features from different
dermoscopy datasets. These features are reshaped into a 10 × 10 × 3 matrix to compose an artificial image that
is given as input to the first CNN. The second CNN model receives as input the correspondent original image.

<div style="text-align: center; font-weight: bold">Pic.2. Overview of the proposed FNN model.</div>
If you want to learn more about fractal neural networks, read [here](https://www.sciencedirect.com/science/article/abs/pii/S0957417420308563).
## Dividing images into patches.
According to the article:
> One of the approaches available in the literature for multiscale analysis is the gliding-box algorithm (Ivanovici & Richard, 2011). The main advantage of this approach is that it can be applied on datasets containing images with different resolutions since the output features are given in relation to the scale instead of being absolute values. This algorithm consists in placing a box $\beta_{i}$ sized $𝐿 × 𝐿$ on the left superior corner of the image, wherein 𝐿 is given in pixels. This box glides through the image, one column and then one row at a time. After reaching the end of the image, the box is repositioned at the starting point and the value of 𝐿 is increased by 2.
The gliding-box method will not be used since it consumes too much RAM. We'll employ a box-counting approach, which basically means we'll partition the images into non-overlapping chunks.
```
class Patchify(tf.keras.layers.Layer):
    """Splits a batch of images into non-overlapping square patches (box counting)."""

    def __init__(self, patch_size):
        super(Patchify, self).__init__()
        # Edge length L (in pixels) of each square box.
        self.patch_size = patch_size

    def call(self, inputs):
        side = self.patch_size
        window = (1, side, side, 1)
        # Equal sizes and strides yield non-overlapping boxes.
        patches = tf.image.extract_patches(
            inputs,
            sizes=window,
            strides=window,
            rates=(1, 1, 1, 1),
            padding='SAME'
        )
        _, n_rows, n_cols, _ = tf.unstack(tf.shape(patches))
        # -> (batch, rows*cols boxes, L, L, 3 channels)
        return tf.reshape(patches, shape=(-1, n_rows * n_cols, side, side, 3))
```
## Creating an array of binary values from image patches using the Chebyshev colour distance function applied to the patch centre and each pixel.
According to the article:
> For each time the box $\beta_{i}$ is moved, a multidimensional analysis of colour similarity is performed for every pixel inside it. This is done by assigning the centre pixel to a vector $𝑓_{c} = 𝑟_{c}, 𝑔_{c}, 𝑏_{c}$, where $𝑟_{c}, 𝑔_{c}$ and $𝑏_{c}$ correspond to the colour intensities for each of the RGB colour channels of given pixel. The other pixels in the box are assigned to a vector $𝑓_{i} = 𝑟_{i}, 𝑔_{i}, 𝑏_{i}$ and compared to the centre pixel by calculating a colour distance $\Delta$. On the proposed approach, the Chebyshev ($\Delta_{h}$) ...
The following equation is used to compute the Chebyshev distance.
$$
\Delta_{h} = max(|f_{i}(k_{i}) - f_{c}(k_{c})|), k \in r, g, b.
$$
```
class Chebyshev(tf.keras.layers.Layer):
    """Binarises image patches by Chebyshev colour distance to a centre pixel.

    A pixel is labelled 1 when max_k |f_i(k) - f_c(k)| <= L (the patch size),
    following the colour-similarity criterion described in the article.
    """

    def __init__(self):
        super(Chebyshev, self).__init__()

    def call(self, inputs):
        # inputs: (batch, patch_number, L, L, channels) patch batches.
        batch_size, patch_number, patch_size, patch_size, channels = tf.unstack(tf.shape(inputs))
        # NOTE(review): this reshape folds one spatial axis into the batch axis
        # ((B, P, L, L, C) -> (B*L, P, L, C)), so the 1x1 crop below picks the
        # centre of a (P, L) plane rather than of each LxL patch — confirm this
        # is the intended "centre pixel".
        outputs = tf.reshape(inputs, shape=(-1, patch_number, patch_size, channels))
        centers = tf.image.resize_with_crop_or_pad(outputs, 1, 1)
        # Chebyshev distance: maximum over channels of |pixel - centre|.
        outputs = tf.math.subtract(outputs, centers)
        outputs = tf.math.abs(outputs)
        outputs = tf.math.reduce_max(outputs, axis=3)
        # Label a pixel 1 when the distance is within the scale L.
        outputs = tf.math.less_equal(outputs, tf.cast(patch_size, dtype=tf.float32))
        outputs = tf.cast(outputs, dtype=tf.int32)
        outputs = tf.reshape(outputs, shape=(-1, patch_number, patch_size, patch_size))
        return outputs
```
## Creating an array of binary values from image patches using the Euclidean colour distance function applied to the patch centre and each pixel.
According to the article:
> For each time the box $\beta_{i}$ is moved, a multidimensional analysis of colour similarity is performed for every pixel inside it. This is done by assigning the centre pixel to a vector $𝑓_{c} = 𝑟_{c}, 𝑔_{c}, 𝑏_{c}$, where $𝑟_{c}, 𝑔_{c}$ and $𝑏_{c}$ correspond to the colour intensities for each of the RGB colour channels of given pixel. The other pixels in the box are assigned to a vector $𝑓_{i} = 𝑟_{i}, 𝑔_{i}, 𝑏_{i}$ and compared to the centre pixel by calculating a colour distance $\Delta$. On the proposed approach, ... the Euclidean ($\Delta_{e}$) ...
$$
\Delta_{e} = \sqrt{\sum_{k} (f_{i}(k_{i}) - f_{c}(k_{c}))^2}, k \in r, g, b
$$
```
class Euclidean(tf.keras.layers.Layer):
    """Binarises image patches by Euclidean colour distance to a centre pixel.

    A pixel is labelled 1 when sqrt(sum_k (f_i(k) - f_c(k))^2) <= L.
    """

    def __init__(self):
        super(Euclidean, self).__init__()

    def call(self, inputs):
        # inputs: (batch, patch_number, L, L, channels) patch batches.
        batch_size, patch_number, patch_size, patch_size, channels = tf.unstack(tf.shape(inputs))
        # NOTE(review): folds one spatial axis into the batch axis
        # ((B, P, L, L, C) -> (B*L, P, L, C)); the 1x1 crop below therefore takes
        # the centre of a (P, L) plane, not of each LxL patch — confirm intended.
        outputs = tf.reshape(inputs, shape=(-1, patch_number, patch_size, channels))
        centers = tf.image.resize_with_crop_or_pad(outputs, 1, 1)
        # Euclidean distance across channels: sqrt of summed squared differences.
        outputs = tf.math.subtract(outputs, centers)
        outputs = tf.math.pow(outputs, 2)
        outputs = tf.math.reduce_sum(outputs, axis=3)
        outputs = tf.math.pow(outputs, 0.5)
        # Label a pixel 1 when the distance is within the scale L.
        outputs = tf.math.less_equal(outputs, tf.cast(patch_size, dtype=tf.float32))
        outputs = tf.cast(outputs, dtype=tf.int32)
        outputs = tf.reshape(outputs, shape=(-1, patch_number, patch_size, patch_size))
        return outputs
```
## Creating an array of binary values from image patches using the Manhattan colour distance function applied to the patch centre and each pixel.
According to the article:
> For each time the box $\beta_{i}$ is moved, a multidimensional analysis of colour similarity is performed for every pixel inside it. This is done by assigning the centre pixel to a vector $𝑓_{c} = 𝑟_{c}, 𝑔_{c}, 𝑏_{c}$, where $𝑟_{c}, 𝑔_{c}$ and $𝑏_{c}$ correspond to the colour intensities for each of the RGB colour channels of given pixel. The other pixels in the box are assigned to a vector $𝑓_{i} = 𝑟_{i}, 𝑔_{i}, 𝑏_{i}$ and compared to the centre pixel by calculating a colour distance $\Delta$. On the proposed approach, ... the Manhattan ($\Delta_{m}$) ...
$$
\Delta_{m} = \sum_{k} |f_{i}(k_{i}) - f_{c}(k_{c})|, k \in r, g, b
$$
```
class Manhattan(tf.keras.layers.Layer):
    """Binarises image patches by Manhattan colour distance to a centre pixel.

    A pixel is labelled 1 when sum_k |f_i(k) - f_c(k)| <= L.
    """

    def __init__(self):
        super(Manhattan, self).__init__()

    def call(self, inputs):
        # inputs: (batch, patch_number, L, L, channels) patch batches.
        batch_size, patch_number, patch_size, patch_size, channels = tf.unstack(tf.shape(inputs))
        # NOTE(review): folds one spatial axis into the batch axis
        # ((B, P, L, L, C) -> (B*L, P, L, C)); the 1x1 crop below therefore takes
        # the centre of a (P, L) plane, not of each LxL patch — confirm intended.
        outputs = tf.reshape(inputs, shape=(-1, patch_number, patch_size, channels))
        centers = tf.image.resize_with_crop_or_pad(outputs, 1, 1)
        # Manhattan distance across channels: sum of absolute differences.
        outputs = tf.math.subtract(outputs, centers)
        outputs = tf.math.abs(outputs)
        outputs = tf.math.reduce_sum(outputs, axis=3)
        # Label a pixel 1 when the distance is within the scale L.
        outputs = tf.math.less_equal(outputs, tf.cast(patch_size, dtype=tf.float32))
        outputs = tf.cast(outputs, dtype=tf.int32)
        outputs = tf.reshape(outputs, shape=(-1, patch_number, patch_size, patch_size))
        return outputs
```
## Calculating probability matrices
According to the article:
> After performing this conversion for every box of every given 𝐿 scale, a structure known as probability matrix is generated. Each element of the matrix corresponds to the probability 𝑃 that 𝑚 pixels on a scale 𝐿 are labelled as 1 on each box. ... The matrix is normalized in a way that the sum of the elements in a column is equal to 1, as showed here:
$$
\sum_{m=1}^{L^2} P(m, L) = 1, \forall L
$$
```
class Probability(tf.keras.layers.Layer):
    """Builds P(m, L): the probability that m pixels of a box at scale L are labelled 1."""

    def __init__(self):
        super(Probability, self).__init__()

    def call(self, inputs):
        # inputs: (batch, patch_number, L, L) binary labels.
        batch_size, patch_number, patch_size, patch_size = tf.unstack(tf.shape(inputs))
        # m per box: count of pixels labelled 1 inside each LxL patch.
        outputs = tf.math.reduce_sum(inputs, axis=(2, 3))
        # Histogram of m over boxes; minlength = L^2 + 1 covers all possible counts.
        outputs = tf.vectorized_map(lambda image: tf.math.bincount(image, minlength=patch_size ** 2 + 1), outputs)
        # Normalise by the number of boxes so each row sums to 1.
        outputs = tf.math.divide(outputs, patch_number)
        return outputs
```
## Calculating fractal dimensions
According to the article:
> FD is the most common technique to evaluate the fractal properties of an image. This is a measure for evaluating the irregularity and the complexity of a fractal. To obtain local FD features from the probability
matrix, for each value of 𝐿, the FD denominated 𝐷(𝐿) is calculated according to
$$
D(L) = \sum_{m=1}^{L^2} \frac{P(m, L)}{m}
$$
```
class FractalDimension(tf.keras.layers.Layer):
    """Computes the fractal dimension D(L) = sum_m P(m, L) / m per sample."""

    def __init__(self):
        super(FractalDimension, self).__init__()

    def call(self, inputs):
        # inputs: (batch, L^2 + 1) probability vector indexed by m = 0..L^2.
        batch_size, _len = tf.unstack(tf.shape(inputs))
        # Divisors [1, 1, 2, ..., _len-1]: index m=0 is mapped to divisor 1 so the
        # element-wise division is defined (the article's sum starts at m=1).
        numbers = tf.reshape(
            tf.concat(
                [tf.constant([1], dtype=tf.float32), tf.range(1, _len, dtype=tf.float32)],
                axis=0
            ),
            shape=(1, -1)
        )
        # NOTE(review): Probability emits the result of tf.math.divide on integer
        # counts (float64); dividing by float32 'numbers' may require matching
        # dtypes — confirm against the runtime TF version.
        outputs = tf.math.divide(inputs, numbers)
        outputs = tf.math.reduce_sum(outputs, axis=1)
        return outputs
```
## Calculating lacunarity
According to the article:
> LAC is a measure complementary to FD and allows to evaluate how the space of a fractal is filled (Ivanovici & Richard, 2009). From the probability matrix, first and second-order moments are calculated with
$$
\mu(L) = \sum_{m=1}^{L^2} mP(m, L)
$$
$$
\mu^2(L) = \sum_{m=1}^{L^2} m^{2}P(m, L)
$$
> The LAC value for a scale $L$ is given by $\Lambda$(𝐿), which is obtained according to
$$
\Lambda(L) = \frac{\mu^{2}(L) - (\mu(L))^{2}}{(\mu(L))^{2}}
$$
```
class Lacunarity(tf.keras.layers.Layer):
    """Computes lacunarity L(L) = (mu2(L) - mu(L)^2) / mu(L)^2 from P(m, L)."""

    def __init__(self):
        super(Lacunarity, self).__init__()

    def call(self, inputs):
        # inputs: (batch, L^2 + 1) probability vector indexed by m = 0..L^2.
        batch_size, _len = tf.unstack(tf.shape(inputs))
        # Weights [1, 1, 2, ..., _len-1]; index m=0 is mapped to 1 (the article's
        # moments start at m=1).
        numbers = tf.reshape(
            tf.concat(
                [tf.constant([1], dtype=tf.float32), tf.range(1, _len, dtype=tf.float32)],
                axis=0
            ),
            shape=(1, -1)
        )
        # Squared first-order moment: (sum_m m * P(m, L))^2.
        mu_first_2 = tf.math.multiply(inputs, numbers)
        mu_first_2 = tf.math.reduce_sum(mu_first_2, axis=1)
        mu_first_2 = tf.math.pow(mu_first_2, 2)
        # Second-order moment: sum_m m^2 * P(m, L).
        mu_second = tf.math.pow(numbers, 2)
        mu_second = tf.math.multiply(inputs, mu_second)
        mu_second = tf.math.reduce_sum(mu_second, axis=1)
        # Lacunarity: (mu2 - mu^2) / mu^2.
        outputs = tf.math.divide(
            tf.math.subtract(mu_second, mu_first_2),
            mu_first_2
        )
        return outputs
```
## Calculating percolation Q - the average occurrence of percolation on a scale L
According to the article:
> We can also verify whether a box $\beta_{i}$ is percolating. This can be achieved due to a property that states a percolation threshold for different types of structures. In squared matrices (digital images), this threshold has the value of $p = 0.59275$, which means that if the ratio between pixels labelled as 1 and pixels labelled as 0 is greater or equal than $p$, the matrix is considered as percolating. Let $\Omega_{i}$ be the number of pixels labelled as 1 in a box $\beta_{i}$ with size $L \times L $ , we determine whether such box is percolating according to
$$
q_{i} =
\begin{cases}
1, & \frac{\Omega_{i}}{L^2} \ge 0.59275 \\
0, & \frac{\Omega_{i}}{L^2} < 0.59275
\end{cases}
$$
> This results in a binary value for $q_{i}$, wherein 1 indicates that thebox is percolating. The feature $Q(L)$ regards the average occurrence of percolation on a scale $L$ and can be obtained by
$$
Q(L) = \frac{\sum_{i=1}^{T(L)} q_{i}}{T(L)}
$$
```
class PercolationQ(tf.keras.layers.Layer):
    """Average percolation occurrence Q(L): the share of boxes whose fill ratio
    (pixels labelled 1 over L^2) reaches the percolation threshold."""

    def __init__(self, threshold=0.59275):
        super(PercolationQ, self).__init__()
        # Percolation threshold p for square matrices.
        self.threshold = threshold

    def call(self, inputs):
        dims = tf.unstack(tf.shape(inputs))
        side = dims[2]
        # Omega_i: pixels labelled 1 per box; ratio = Omega_i / L^2.
        filled = tf.math.reduce_sum(inputs, axis=(2, 3))
        ratio = tf.math.divide(filled, side ** 2)
        # q_i = 1 where the box percolates; Q(L) is the mean of q_i over boxes.
        percolating = tf.cast(tf.math.greater_equal(ratio, self.threshold), dtype=tf.float32)
        return tf.math.reduce_mean(percolating, axis=1)
```
## Clustering values in binarized patches
The next two layers, which calculate percolation C and M, work with value clusters. We clusterize values in a separate layer to speed up calculations.
```
class Clusterize(tf.keras.layers.Layer):
    """Labels connected components of 1-pixels within each binarised patch."""

    def __init__(self):
        super(Clusterize, self).__init__()

    def call(self, inputs):
        dims = tf.unstack(tf.shape(inputs))
        n_patches, side = dims[1], dims[2]
        # connected_components expects a batch of 2-D images, so flatten the
        # (batch, patch) axes together first.
        flat = tf.reshape(inputs, shape=(-1, side, side))
        labelled = tfa.image.connected_components(flat)
        return tf.reshape(labelled, shape=(-1, n_patches, side, side))
```
## Calculating percolation C - the average number of clusters per box on a scale L
According to the article:
> Let $c_{i}$ be the number of clusters on a box $\beta_{i}$, the feature $C(L)$ that represents the average number of clusters per box on a scale $L$ is given by
$$
C(L) = \frac{\sum_{i=1}^{T(L)} c_{i}}{T(L)}
$$
```
class PercolationC(tf.keras.layers.Layer):
    """Average number of clusters per box, C(L), from labelled patches."""

    def __init__(self):
        super(PercolationC, self).__init__()

    def call(self, inputs):
        # inputs: (batch, patch_number, L, L) integer cluster labels.
        outputs = tf.cast(inputs, dtype=tf.float32)
        # The maximum label per patch is used as that patch's cluster count.
        # NOTE(review): this assumes labels restart at 1 within each patch;
        # tfa.image.connected_components assigns labels unique across the whole
        # batch it is given — confirm the resulting count is what is intended.
        outputs = tf.math.reduce_max(outputs, axis=(2, 3))
        outputs = tf.math.reduce_mean(outputs, axis=1)
        return outputs
```
## Calculating percolation M - the average coverage area of the largest cluster on a scale L
According to the article:
>Another feature that can be obtained is the average coverage area of the largest cluster in a box and is given by $M(L)$. Let $m_{i}$ be the size in pixels of the largest cluster of the box $\beta_{i}$. The feature $M(L)$ is given according to
$$
M(L) = \frac{\sum_{i=1}^{T(L)} \frac{m_{i}}{L^2}}{T(L)}
$$
```
class PercolationM(tf.keras.layers.Layer):
    """Size of the largest cluster, intended to implement M(L)."""

    def __init__(self):
        super(PercolationM, self).__init__()

    def call(self, inputs):
        # inputs: (batch, patch_number, L, L) integer cluster labels.
        batch_size, patch_number, patch_size, patch_size = tf.unstack(tf.shape(inputs))
        outputs = tf.reshape(inputs, shape=(-1, patch_number, patch_size ** 2))
        # Largest label count over all patches of an image.
        # NOTE(review): the article's M(L) averages (largest cluster)/L^2 per
        # box; this computes a single maximum count per image with no L^2
        # normalisation, and bincount also counts the 0 background label —
        # confirm this deviation is intended.
        outputs = tf.map_fn(lambda image: tf.math.reduce_max(tf.math.bincount(image)), outputs)
        outputs = tf.cast(outputs, dtype=tf.float32)
        return outputs
```
## Assembling fractal features into an image channel
According to the article:
> To serve as input for the incoming CNN classification, the feature vectors generated on the previous layers of the network must be converted into feature matrices. To do so, the 100 features obtained by each distance $\Delta$ are rearranged as a $10 \times 10 \times 10$ matrix. The matrices generated by $\Delta_{h}$, $\Delta_{e}$ and $\Delta_{m}$ correspond to the R, G and B colour channels, respectively. ... Since each of the functions $C(L), Q(L), M(L), \Lambda(L)$ and $D(L)$, obtained from a specific $\Delta$, generate 20 features, each function is fit exactly into 2 columns of the matrix.
>Since each of the functions $C(L), Q(L), M(L), \Lambda(L)$ and $D(L)$, obtained from a specific $\Delta$, generate 20 features, each function is fit exactly into 2 columns of the matrix.
```
class AssembleChannel(tf.keras.layers.Layer):
    """Arranges the five 20-value feature families into one 10x10 channel.

    Each family (C, Q, M, lacunarity, fractal dimension) contributes one value
    per scale L (20 scales), i.e. exactly two columns of the 10x10 matrix.
    """

    def __init__(self):
        super(AssembleChannel, self).__init__()

    def call(self, inputs):
        # inputs: [fractal_dimension, lacunarity, percolation_q, percolation_c,
        # percolation_m], each a list of 20 per-scale tensors of shape (batch,).
        # Each convert+transpose turns (scales, batch) into (batch, scales).
        fractal_dimension = tf.convert_to_tensor(inputs[0])
        fractal_dimension = tf.transpose(fractal_dimension, perm=(1, 0))
        lacunarity = tf.convert_to_tensor(inputs[1])
        lacunarity = tf.transpose(lacunarity, perm=(1, 0))
        percolation_q = tf.convert_to_tensor(inputs[2])
        percolation_q = tf.transpose(percolation_q, perm=(1, 0))
        percolation_c = tf.convert_to_tensor(inputs[3])
        percolation_c = tf.transpose(percolation_c, perm=(1, 0))
        percolation_m = tf.convert_to_tensor(inputs[4])
        percolation_m = tf.transpose(percolation_m, perm=(1, 0))
        # Concatenate in C, Q, M, lacunarity, D order: 100 values per sample.
        # NOTE(review): tf.concat requires all five families to share one dtype —
        # confirm upstream layers emit a consistent float type.
        outputs = tf.concat([
            percolation_c,
            percolation_q,
            percolation_m,
            lacunarity,
            fractal_dimension
        ], axis=1)
        outputs = tf.reshape(outputs, shape=(-1, 10, 10))
        return outputs
```
## Organising fractal feature extraction into layers
We move feature extraction to layers to simplify and clarify the code.
### based on Chebyshev distance
```
class ChebyshevFeatures(tf.keras.layers.Layer):
    """Extracts the 100 Chebyshev-distance fractal features as a 10x10 channel."""

    def __init__(self):
        super(ChebyshevFeatures, self).__init__()
        self.chebyshev = Chebyshev()
        self.probability = Probability()
        self.clusterize = Clusterize()
        self.fractal_dimension = FractalDimension()
        self.lacunarity = Lacunarity()
        self.percolation_q = PercolationQ()
        self.percolation_c = PercolationC()
        self.percolation_m = PercolationM()
        self.assemble_channel = AssembleChannel()

    def call(self, inputs):
        # Binarise every patch scale with the Chebyshev colour distance.
        binarised = [self.chebyshev(patches) for patches in inputs]
        # Per-scale derived representations.
        probabilities = [self.probability(b) for b in binarised]
        clusters = [self.clusterize(b) for b in binarised]
        # Five feature families, one value per scale, assembled into a channel.
        return self.assemble_channel([
            [self.fractal_dimension(p) for p in probabilities],
            [self.lacunarity(p) for p in probabilities],
            [self.percolation_q(b) for b in binarised],
            [self.percolation_c(c) for c in clusters],
            [self.percolation_m(c) for c in clusters]
        ])
```
### based on Euclidean distance
```
class EuclideanFeatures(tf.keras.layers.Layer):
    """Extracts the 100 Euclidean-distance fractal features as a 10x10 channel."""

    def __init__(self):
        super(EuclideanFeatures, self).__init__()
        self.euclidean = Euclidean()
        self.probability = Probability()
        self.clusterize = Clusterize()
        self.fractal_dimension = FractalDimension()
        self.lacunarity = Lacunarity()
        self.percolation_q = PercolationQ()
        self.percolation_c = PercolationC()
        self.percolation_m = PercolationM()
        self.assemble_channel = AssembleChannel()

    def call(self, inputs):
        # Binarise every patch scale with the Euclidean colour distance.
        binarised = [self.euclidean(patches) for patches in inputs]
        # Per-scale derived representations.
        probabilities = [self.probability(b) for b in binarised]
        clusters = [self.clusterize(b) for b in binarised]
        # Five feature families, one value per scale, assembled into a channel.
        return self.assemble_channel([
            [self.fractal_dimension(p) for p in probabilities],
            [self.lacunarity(p) for p in probabilities],
            [self.percolation_q(b) for b in binarised],
            [self.percolation_c(c) for c in clusters],
            [self.percolation_m(c) for c in clusters]
        ])
```
### based on Manhattan distance
```
class ManhattanFeatures(tf.keras.layers.Layer):
    """Extracts the 100 Manhattan-distance fractal features as a 10x10 channel."""

    def __init__(self):
        super(ManhattanFeatures, self).__init__()
        self.manhattan = Manhattan()
        self.probability = Probability()
        self.clusterize = Clusterize()
        self.fractal_dimension = FractalDimension()
        self.lacunarity = Lacunarity()
        self.percolation_q = PercolationQ()
        self.percolation_c = PercolationC()
        self.percolation_m = PercolationM()
        self.assemble_channel = AssembleChannel()

    def call(self, inputs):
        # Binarise every patch scale with the Manhattan colour distance.
        binarised = [self.manhattan(patches) for patches in inputs]
        # Per-scale derived representations.
        probabilities = [self.probability(b) for b in binarised]
        clusters = [self.clusterize(b) for b in binarised]
        # Five feature families, one value per scale, assembled into a channel.
        return self.assemble_channel([
            [self.fractal_dimension(p) for p in probabilities],
            [self.lacunarity(p) for p in probabilities],
            [self.percolation_q(b) for b in binarised],
            [self.percolation_c(c) for c in clusters],
            [self.percolation_m(c) for c in clusters]
        ])
```
## Assembling fractal features into images
We assemble fractal features into images, such that each set of fractal features corresponds to a colour channel (R, G, B).
```
class AssembleImage(tf.keras.layers.Layer):
    """Stack per-metric feature maps into one channels-last image.

    Each input plays the role of one colour channel (R, G, B); stacking puts
    the channel axis first, so it is transposed to the trailing position.
    """

    def __init__(self):
        super(AssembleImage, self).__init__()

    def call(self, inputs):
        stacked = tf.stack(inputs)
        # Move the stacked channel axis to the end: (c, b, h, w) -> (b, h, w, c).
        return tf.transpose(stacked, perm=(1, 2, 3, 0))
```
## Organising the fractal feature extraction layers into the single, fractal image layer
To further simplify the code, we will gather the fractal feature extraction into the single layer, which generates artificial fractal image.
```
class FractalImage(tf.keras.layers.Layer):
    """Build an artificial "fractal image" from an input batch.

    The input is patchified at every odd window size from 3 to 41; each
    distance metric (Chebyshev, Euclidean, Manhattan) yields one feature
    channel, and the three channels are assembled into one RGB-like image.
    """

    def __init__(self):
        super(FractalImage, self).__init__()
        self.patchifies = [Patchify(patch_size) for patch_size in range(3, 41 + 1, 2)]
        self.chebyshev_features = ChebyshevFeatures()
        self.euclidean_features = EuclideanFeatures()
        self.manhattan_features = ManhattanFeatures()
        self.assemble_image = AssembleImage()

    def call(self, inputs):
        patch_sets = [make_patches(inputs) for make_patches in self.patchifies]
        channels = [
            self.chebyshev_features(patch_sets),
            self.euclidean_features(patch_sets),
            self.manhattan_features(patch_sets),
        ]
        return self.assemble_image(channels)
```
## Assembling the fractal neural network
So, here we are assembling the fractal neural network from the pieces mentioned above.
```
class FractalNeuralNetwork(tf.keras.Model):
    """Two-branch classifier combining fractal and original image embeddings.

    The fractal branch turns the input into an artificial fractal image and
    embeds it with a frozen Inception-ResNet-v2 feature extractor; the
    original branch embeds the rescaled input with the same extractor. Both
    embeddings are combined element-wise and classified with softmax.
    """

    # Input size expected by the Inception-ResNet-v2 backbone.
    TARGET_WIDTH = 299
    TARGET_HEIGHT = 299

    def __init__(self, class_number):
        super(FractalNeuralNetwork, self).__init__()
        self.fractal_image = FractalImage()
        self.resize = tf.keras.layers.Resizing(width=self.TARGET_WIDTH, height=self.TARGET_HEIGHT)
        # Original images are 8-bit; fractal images are normalised by their max.
        self.rescale_original = tf.keras.layers.Rescaling(scale=1./255)
        self.rescale_fractal = tf.keras.layers.Lambda(lambda x: tf.math.divide(x, tf.math.reduce_max(x)))
        # Frozen feature extractor shared by both branches.
        self.model = hub.KerasLayer(
            "https://tfhub.dev/google/imagenet/inception_resnet_v2/feature_vector/5",
            trainable=False
        )
        self.combine = tf.keras.layers.Multiply()
        self.score = tf.keras.layers.Dense(class_number, activation='softmax')

    def call(self, inputs):
        # Fractal branch: synthesise, resize, rescale to [0, 1], embed.
        fractal_branch = self.rescale_fractal(self.resize(self.fractal_image(inputs)))
        fractal_branch = self.model(fractal_branch)
        # Original branch: inputs are presumably already TARGET-sized
        # (see the generator's target_size) — only rescaling is applied.
        original_branch = self.model(self.rescale_original(inputs))
        return self.score(self.combine([fractal_branch, original_branch]))
```
# Data loading
## Data source
As a data source, we use the ISIC Archive.
The ISIC Archive is an open source platform with publicly available images of skin lesions under Creative Commons licenses. The images are associated with ground-truth diagnoses and other clinical metadata. Images can be queried using faceted search and downloaded individually or in batches. The initial focus of the archive has been on dermoscopy images of individual skin lesions, as these images are inherently standardized by the use of a specialized acquisition device and devoid of many of the privacy challenges associated with clinical images. To date, the images have been provided by specialized melanoma centers from around the world. The archive is designed to accept contributions from new sources under the Terms of Use and welcomes new contributors. There are ongoing efforts to supplement the dermoscopy images in the archive with close-up clinical images and a broader representation of skin types. The images in the Archive are used to support educational efforts through linkage with Dermoscopedia and are used for Grand Challenges and Live Challenges to engage the computer science community for the development of diagnostic AI.
For more information, go to [ISIC Archive web site](https://www.isic-archive.com/)
```
# Image augmentation + 80/20 train/validation split over the ISIC images.
generator = tf.keras.preprocessing.image.ImageDataGenerator(
    rotation_range=180,
    horizontal_flip=True,
    vertical_flip=True,
    brightness_range=(0.2, 1.5),
    validation_split=0.2,
)
# NOTE(review): the same augmenting generator serves the validation subset, so
# validation images are rotated/flipped/brightness-shifted too — confirm this
# is intended; usually validation uses an un-augmented generator.
training_set = generator.flow_from_directory(
    f"{os.environ['SCRATCH']}/data10000",
    target_size=(299, 299),
    batch_size=32,
    class_mode='categorical',
    subset='training'
)
validation_set = generator.flow_from_directory(
    f"{os.environ['SCRATCH']}/data10000",
    target_size=(299, 299),
    batch_size=32,
    class_mode='categorical',
    subset='validation'
)
# Number of distinct diagnosis classes found in the directory structure.
CLASS_NUMBER = len(training_set.class_indices)
```
# Model training
## Preparing TensorFlow callbacks
For our convenience, we create a few TensorFlow callbacks.
### The TensorBoard callback
We want to see how the training is going. We add the callback, which will log the metrics to TensorBoard.
```
# Tag for this experiment's TensorBoard run directory.
# FIX: the original wrapped this constant in datetime.datetime.now().strftime(...),
# which is a no-op — the tag contains no % format directives, so strftime just
# returns the string unchanged. Use the constant directly to avoid implying
# the run directory is timestamped.
log_dir = '../logs/fit/' + 'both__inception_resnet_v2___combine__multiply'
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)
```
### The EarlyStopping callback
This callback stops training when the metrics (e.g. validation loss) are not improving,
```
# Stop training once validation loss has not improved by at least 0.01 for
# 5 consecutive epochs, and roll back to the best weights seen so far.
early_stop_callback = tf.keras.callbacks.EarlyStopping(
    monitor="val_loss",
    min_delta=0.01,
    patience=5,
    restore_best_weights=True
)
```
### The ModelCheckpoint callback
This callback saves the model with the best metrics during training.
```
# Save the full model once per epoch, but only when validation loss improves
# on the best value seen so far.
checkpoint_path = 'checkpoints/both__inception_resnet_v2___combine__multiply.ckpt'
checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
    checkpoint_path,
    monitor='val_loss',
    verbose=1,
    save_best_only=True,
    save_weights_only=False,
    save_freq='epoch',
    mode='auto'
)
```
## Actual training
```
# Build and train the two-branch fractal network for up to 10 epochs;
# EarlyStopping may end training sooner.
model = FractalNeuralNetwork(class_number=CLASS_NUMBER)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.fit(
    training_set,
    validation_data=validation_set,
    epochs=10,
    callbacks=[
        tensorboard_callback,
        early_stop_callback,
        checkpoint_callback
    ]
)
```
# Model validation
## Loading the model from the checkpoint
```
# Rebuild the architecture and restore the best checkpointed weights.
# NOTE(review): a subclassed Keras model has no variables until it is built;
# load_weights may defer restoration until the first call on data — confirm
# the model is exercised before relying on the restored weights.
model = FractalNeuralNetwork(class_number=CLASS_NUMBER)
model.load_weights('./checkpoints/both__inception_resnet_v2___combine__multiply.ckpt')
```
## Loading the test data
```
# Test-set loader. FIX: shuffle=False keeps batch order deterministic so the
# labels collected from testing_set[i] stay aligned with model.predict() output
# in the next cell (flow_from_directory shuffles by default, which would
# scramble the label/prediction pairing and corrupt the ROC analysis).
# NOTE(review): this generator also applies training-time augmentation
# (rotation/flips/brightness) to the test images — confirm that is intended.
testing_set = generator.flow_from_directory(
    f"{os.environ['SCRATCH']}/data10000-test",
    target_size=(299, 299),
    batch_size=32,
    class_mode='categorical',
    shuffle=False
)
```
## Making diagnoses
```
# Collect the one-hot ground-truth labels batch by batch, then predict class
# probabilities for the whole test set.
# NOTE(review): this pairing is only correct when the generator does not
# shuffle between the two passes — confirm testing_set uses shuffle=False.
true_labels = np.concatenate([testing_set[i][1] for i in range(len(testing_set))], axis=0)
predicted_labels = model.predict(testing_set)
```
## Plot the ROC Curve
```
# Per-class (one-vs-rest) ROC curves and AUC values.
fpr = dict()
tpr = dict()
auc_metric = dict()
# Invert class_indices so the diagnosis name can be looked up by column index.
diagnosis_index_dict = {v: k for k, v in testing_set.class_indices.items()}
for i in range(CLASS_NUMBER):
    diagnosis = diagnosis_index_dict[i]
    fpr[diagnosis], tpr[diagnosis], _ = roc_curve(true_labels[:, i], predicted_labels[:, i])
    auc_metric[diagnosis] = auc(fpr[diagnosis], tpr[diagnosis])
%matplotlib inline
for diagnosis in testing_set.class_indices:
    plt.plot(fpr[diagnosis], tpr[diagnosis], label=diagnosis)
# Diagonal = performance of a random classifier.
plt.plot([0, 1], [0, 1], 'k--')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic')
plt.legend(loc="lower right")
plt.show()
```
## Show AUC
```
# Display the per-diagnosis area-under-curve values computed above.
auc_metric
```
| github_jupyter |
# Let's apply the GP-based optimizer to our small Hubbard model.
Make sure your jupyter path is the same as your virtual environment that you used to install all your packages.
If not, do something like this in your terminal:
`$ ipython kernel install --user --name TUTORIAL --display-name "Python 3.9"`
```
# Sanity check: confirm which Python interpreter this kernel is running.
from platform import python_version

print(f"{python_version()}")
```
Gaussian Process (GP) models were introduced in the __[Gaussian Process Models](optimization.ipynb)__ notebook. The GP-based optimizer uses these techniques as implemented in the included __[opti_by_gp.py](opti_by_gp.py)__ module, which also provides helpers for plotting results. Note that this module uses the ImFil optimizer underneath, a choice that can not currently be changed.
As a first step, create once more a __[Hubbard Model](hubbard_model_intro.ipynb)__ setup.
```
import hubbard as hb
import logging
import noise_model as noise
import numpy as np
import opti_by_gp as obg
from IPython.display import Image
# Show progress messages from the hubbard module while building/optimizing.
logging.getLogger('hubbard').setLevel(logging.INFO)
# Select a model appropriate for the machine used:
# laptop -> use small model
# server -> use medium model
MODEL = hb.small_model
#MODEL = hb.medium_model
# Hubbard model for fermions (Fermi-Hubbard) required parameters
xdim, ydim, t, U, chem, magf, periodic, spinless = MODEL()
# Number of electrons to add to the system
n_electrons_up = 1
n_electrons_down = 1
n_electrons = n_electrons_up + n_electrons_down
# Total number of "sites", with each qubit representing occupied or not
# (a spinful site needs two qubits: one for spin-up, one for spin-down)
spinfactor = spinless and 1 or 2
n_qubits = n_sites = xdim * ydim * spinfactor
# Create the Hubbard Model for use with Qiskit
hubbard_op = hb.hamiltonian_qiskit(
    x_dimension = xdim,
    y_dimension = ydim,
    tunneling = t,
    coulomb = U,
    chemical_potential = chem,
    magnetic_field = magf,
    periodic = periodic,
    spinless = spinless)
```
The GP modeling needs persistent access to the evaluated points, so tell the objective to save them. Otherwise, the objective is the same as before. Choose the maximum number of objective evaluations, the initial and set the bounds. Then run the optimization using GP (as mentioned before, this uses ImFil underneath).
```
# noise-free objective with enough Trotter steps to get an accurate result
# noise-free objective with enough Trotter steps to get an accurate result;
# save_evals=True keeps every evaluated point for the GP model and the plots
objective = hb.EnergyObjective(hubbard_op, n_electrons_up, n_electrons_down,
                               trotter_steps=3, save_evals=True)
# initial and bounds (set good=True to get tighter bounds)
initial_amplitudes, bounds = MODEL.initial(
    n_electrons_up, n_electrons_down, objective.npar(), good=False)
# max number of allowed function evals
maxevals = 100
# Run the GP-based optimizer (uses ImFil underneath for the local searches).
result = obg.opti_by_gp(objective.npar(), bounds, objective, maxevals)
print('Results with GP:')
print("Estimated energy: %.5f" % result[1])
print("Parameters: ", result[0])
print("Number of iters: ", result[2])
```
Now let's analyze the results by looking at the sample evaluations and the convergence plot.
```
# Sample-evaluation plot written to disk by the optimizer run above.
Image(filename='samples.png')
```
The left plot shows:
1) the points sampled with GP (pink squares): you can see that we have some points everywhere in the space, but a denser pink square cloud where the function has its minimum
2) yellow circles (5) -- these are the points from which the local search with ImFil starts: we choose the best point found by the GP, and another 4 points based on their function value and distance to already selected start points. 5 is a parameter, if you want to do only one local search, you can just start from the best point found by the GP iterations. Also: not all 5 points will necessarily be used for ImFil, the optimization stops when the maximum number of allowed evaluations has been reached.
3) the green squares are the points ImFil decided to sample -- you can see that they cover most of the space. Wouldn't it be nice to force ImFil to search only a smaller radius?!
4) the red dot indicates the best point found during optimization
5) the contours are created by using a GP model and all sample information that we collected - so this is not the true contours, but the best guess of what the true contours may look like
The right plot shows the GP approximation of the energy surface - again, not the true surface, just our best guess based on training a GP on all input-output pairs
```
# Convergence (best-energy-so-far) plot written by the optimizer run above.
Image(filename='progress.png')
```
This plot shows the progress we are making with respect to improving the energy versus the number of function evaluations.
We show the best energy value found so far, thus, the graph is monotonically decreasing and has a step-like shape. Whenever the graph is flat, it means that during these iterations no energy improvements were found. If you were to plot simply the energy at each function evaluation, the graph would go up and down because we use sampling-based algorithms and not gradient-based algorithms. Thus, we do not find an improvement in every iteration.
There is a large down-step in the beginning - this is due to our random space filling sampling initially. We can also see that ImFil does not make much progress here. The GP-based sampling is used until 30 evaluations.
Note that the GP based optimizer has parameters, including the size of the initial experimental design, the number of iterations that we want to apply the GP (here 30), the maximum number of local searches with ImFil after the GP is done, .... see the __[opti_by_gp.py](opti_by_gp.py)__ module (or run the cell below to load).
```
# Jupyter magic: load the opti_by_gp.py source into this cell for inspection.
%load 'opti_by_gp.py'
```
**Exercise:** redo the above analysis using a noisy objective. If time is limited, consider only using sampling noise, e.g. by setting `shots=8192` (see the notebook on __[noise](hubbard_vqe_noise.ipynb)__ for more examples), and using tight bounds.
**Optional Exercise:** for comparison purposes, follow-up with an optimization run that does not use GP and try in particular what happens when using only a few function evaluations (20, say, if using tight bounds). Try different optimizers (but consider that some, such as SPSA, will take more evaluations per iteration; and consider that optimizers that do not respect bounds are at a severe disadvantage).
```
# Pull in a couple of optimizers to play with
from qiskit.algorithms.optimizers import COBYLA, SPSA
try:
    # IMFIL and SNOBFIT come from the scikit-quant integration and may not be
    # installed; degrade gracefully with a hint instead of crashing.
    from qiskit.algorithms.optimizers import IMFIL, SNOBFIT
except ImportError:
    print("install scikit-quant to use IMFIL and SNOBFIT")
```
| github_jupyter |
# Performance analysis at system-level
## Reproduce line chart for CPU utilization
```
import py2neo
import pandas as pd
import matplotlib.pyplot as plt

# Connect to the local Neo4j database that holds the Kieker monitoring records.
graph = py2neo.Graph(bolt=True, host='localhost', user='neo4j', password = 'neo4j')

# query for CPU measurements
cpu_query = """
MATCH (r:Record)-[:CONTAINS]->(c:CpuUtilization)
WHERE r.fileName =~ '.*/1-MemoryLeak-5/kieker-logs/kieker-20150820-064855519-UTC-middletier2-KIEKER'
RETURN c.timestamp AS timestamp, c.cpuID AS cpuID, c.totalUtilization * 100 AS cpuUtilization
ORDER BY timestamp
"""
df = pd.DataFrame(graph.run(cpu_query).data())

# drop first and last measurements to sanitize data
df.drop(df.head(3).index, inplace=True)
df.drop(df.tail(5).index, inplace=True)

# cast to datetime and round up to the nearest second
df['timestamp'] = pd.to_datetime(df['timestamp'])
df['timestamp'] = df['timestamp'].dt.round('1s')
df['CPU ID'] = pd.to_numeric(df['cpuID'])

# get the mean utilization of every CPU core per second
# NOTE(review): 'cpuID' is still a string column here; recent pandas versions
# need .mean(numeric_only=True) for this to succeed — confirm the version used.
df = df.groupby(['timestamp']).mean()
# FIX: the positional axis argument (df.drop('CPU ID', 1)) was deprecated in
# pandas 1.0 and removed in 2.0; the keyword form works on every version.
df = df.drop(columns='CPU ID')

# get the average of 7 measurements to sanitize the data
df_cpu_plot = df.rolling(7).mean()
df_cpu_plot = df_cpu_plot.iloc[::7, :]

# label and style the plot
plt.plot_date(df_cpu_plot.index, df_cpu_plot['cpuUtilization'], fmt='-', color='#00035b')
plt.title('CPU utilization (%)')
plt.ylim(-2.5, 102.5)
plt.grid(linestyle=':')

# fill in the data
date_list = pd.date_range(start=df.index[0], end=df.index[-1], periods=7).tolist()
# As we don't know the time zone, we choose the time that makes the most sense
plt.xticks(date_list + ['2015-08-20 08:05'], ['', '', '', '', '', '', '', '4:05 PM'])
plt.yticks([0.0, 20.0, 40.0, 60.0, 80.0, 100.0], ['0.0', '', '', '', '', '100.0'])
plt.axvline('2015-08-20 08:05', color='black', label='4:05 PM', linestyle='--')
plt.setp(plt.gca().xaxis.get_majorticklabels(), 'rotation', 0)

# uncomment to save the plot as a pdf
# plt.savefig('cpu_plot.pdf')
plt.show()
```
## Reproduce line chart for memory utilization
```
# query for memory measurements
mem_query = """
MATCH (r:Record)-[:CONTAINS]->(m:MemSwapUsage)
WHERE r.fileName =~ '.*/1-MemoryLeak-5/kieker-logs/kieker-20150820-064855519-UTC-middletier2-KIEKER'
RETURN m.timestamp AS timestamp, toFloat(m.memUsed)*100.0 / toFloat(m.memTotal) AS memoryUtilization
ORDER BY timestamp
"""
df_mem = pd.DataFrame(graph.run(mem_query).data())

# drop first and last measurements to sanitize data
df_mem.drop(df_mem.head(3).index, inplace=True)
df_mem.drop(df_mem.tail(5).index, inplace=True)

# cast to datetime and round up to the nearest second
df_mem['timestamp'] = pd.to_datetime(df_mem['timestamp'])
df_mem['timestamp'] = df_mem['timestamp'].dt.round('1s')
df_mem.set_index('timestamp', inplace=True)

# get the average of 7 measurements to sanitize the data
df_mem_plot = df_mem.rolling(7).mean()
df_mem_plot = df_mem_plot.iloc[::7, :]

# label and style the plot
plt.plot_date(df_mem_plot.index, df_mem_plot['memoryUtilization'], fmt='-', color='#00035b')
plt.title('Memory utilization (%)')
plt.ylim(-2.5, 102.5)
plt.grid(linestyle=':')

# fill in the data
# FIX: the tick positions were computed from the CPU dataframe (df) of the
# previous cell; use this cell's own df_mem index so the memory plot does not
# silently depend on the CPU cell having been run first.
date_list = pd.date_range(start=df_mem.index[0], end=df_mem.index[-1], periods=7).tolist()
plt.xticks(date_list + ['2015-08-20 07:55'], ['', '', '', '', '', '', '', '3:55 PM'])
plt.yticks([0.0, 20.0, 40.0, 60.0, 80.0, 100.0], ['0.0', '', '', '', '', '100.0'])
plt.axvline('2015-08-20 07:55', color='black', label='3:55 PM', linestyle='--')
plt.setp(plt.gca().xaxis.get_majorticklabels(), 'rotation', 0)

# uncomment to save the plot as a pdf
# plt.savefig('mem_plot.pdf')
plt.show()
```
| github_jupyter |
# KNN, Decision Tree, SVM, and Logistic Regression Classifiers to Predict Loan Status
Today, we'll look into the question: will a new bank customer default on his or her loan? We'll optimize, train, make predictions with, and evaluate four classification models - K Nearest Neighbor (KNN), Decision Tree, Support Vector Machine (SVM), and logistic regression - for loan status of new customers. We'll work with a bank data set of 346 customers with key variables such as loan status, principal, terms, effective date, due date, age, education, and gender. A bank's department head, for example, could apply a predictive model to better structure loans and tailor terms to various target customers. Let's break it down:
**Part 1**: Cleaning and wrangling, including converting data types, using .to_datetime, and replacing values
<br>**Part 2**: Exploratory analysis, including plotting stratified histograms, working with groupby, creating new relevant variables, and making observations to determine key features
<br>**Part 3**: One hot encoding to convert categorical variables with multiple categories to binary variables using pd.get_dummies and adding new features using pd.concat
<br>**Part 4**: Feature selection of predictors (X) and labeled target (y)
<br>**Part 5**: Normalizing feature set using scikit learn's preprocessing.StandardScaler.fit.transform
<br>**Part 6**: KNN, including determining and plotting optimal k value, training model and making predictions on test set, generating a confusion matrix heatmap and report, evaluating jaccard and F1 scores
<br>**Part 7**: Decision Tree, including determining and plotting optimal max depth, training model and making predictions on test set, visualizing decision tree using pydotplus and graphviz, generating a confusion matrix heatmap and report, evaluating jaccard and F1 scores
<br>**Part 8**: SVM, including determining and plotting optimal kernel function, training model and making predictions on test set, generating a confusion matrix heatmap and report, evaluating jaccard and F1 scores
<br>**Part 9**: Logistic Regression, including determining and plotting optimal regularization and numerical solver, training model and making predictions on test set, calculating probability, generating a confusion matrix heatmap and report, evaluating jaccard, F1, and log loss scores
<br>**Part 10**: Evaluating model performance head-to-head by creating a dataframe of accuracy scores for KNN, Decision Tree, SVM, and Logistic Regression models to make comparisons
We'll cover cleaning, wrangling, and visualizing techniques, apply important scikit learn libraries to develop, optimize, train, and make predictions, and walk through evaluating and comparing models. Let's dig in.
```
# Import relevant libraries
import itertools
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import NullFormatter
import pandas as pd
import numpy as np
import matplotlib.ticker as ticker
import seaborn as sns
# Scikit learn libraries
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from sklearn import metrics
from sklearn.model_selection import cross_val_score
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn import tree
import scipy.optimize as opt
from sklearn.svm import SVC
from sklearn import svm
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.metrics import jaccard_similarity_score
from sklearn.metrics import f1_score
from sklearn.metrics import log_loss
# Visualizing Decision Tree
!pip install graphviz
!pip install pydotplus
import graphviz
import pydotplus
```
## Part 1: Cleaning and Wrangling
```
# Read in data
pd.set_option("display.max_columns", 100)
df = pd.read_csv('https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/ML0101ENv3/labs/loan_train.csv')
# Check first few rows
df.head()
# Check dimensions of dataframe
df.shape
# Check number of null values. We see there are no null values
df.isnull().sum().to_frame()
# Check datatypes. Several key variables are objects, let's convert them to numerical values
df.dtypes
# Convert gender strings 'male' to 0 and 'female' to 1 (values are lowercase in this dataset)
df['Gender'].replace(to_replace=['male','female'], value=[0,1],inplace=True)
df.head()
# Convert effective date and due date columns into datetime objects
df['due_date'] = pd.to_datetime(df['due_date'])
df['effective_date'] = pd.to_datetime(df['effective_date'])
df.head()
# Under column education, "Bachelor" is misspelled, let's replace it with the correct spelling
df['education']=df['education'].str.replace('Bechalor', 'Bachelor')
df.head()
```
## Part 2: Exploratory Analysis
```
# As we'll be plotting with seaborn, let's set the style to darkgrid
sns.set_style("darkgrid")
# Check loan status split. We have 260 loans paid off, and 86 that defaulted.
df['loan_status'].value_counts().to_frame()
```
Let's check how loan status looks by gender. In our sample, from our plot, there are fewer women applying for and receiving loans (and thus fewer defaulting). Grouping our data by gender using groupby we see that 27% of men defaulted while 13% of women defaulted.
```
# Plot histogram of age stratified by gender and loan status
bins = np.linspace(df.age.min(), df.age.max(), 10)
g = sns.FacetGrid(df, col="Gender", hue="loan_status", palette="Set2", col_wrap=2)
g.map(plt.hist, 'age', bins=bins, ec="k")
g.axes[-1].legend()
plt.show()
# Check the percentage of each loan status within each gender
df.groupby(['Gender'])['loan_status'].value_counts(normalize=True).to_frame()
```
Let's see if loan status differs by education status. From our plot and grouped data below, it looks like there's not too much of a difference among different education categories, though people who are college-educated have a slightly lower default rate (24%) than those who have an education level of high school or below (26%). We also see that there are only 2 people who have an education level of master or above in our sample, let's exclude this group later when we build our feature set.
```
# Plot histogram of age stratified by education and loan status
bins = np.linspace(df.age.min(), df.age.max(), 10)
g = sns.FacetGrid(df, col="education", hue="loan_status", palette="Set2", col_wrap=4)
g.map(plt.hist, 'age', bins=bins, ec="k")
g.axes[-1].legend()
plt.show()
# Check the percentage of each loan status within each education level
df.groupby(['education'])['loan_status'].value_counts(normalize=True).to_frame()
# We confirm that 'Master or Above' only has 2 observations, let's delete these observations when we do our feature selection
len(df[df.education=='Master or Above'])
```
Let's see if loan status differs by the day of the week of the loan. Below, we create a variable 'dayofweek' by applying the .dt.dayofweek function to our effective date variable.
In our sample, from our plot, people receiving loans on Fridays, Saturdays, and Sundays are more likely to default than those doing so earlier in the week. If we group the data by day of week using groupby, we see that 45% and 39% of people receiving a loan on Saturday and Sunday, respectively, defaulted, while only 3% of people receiving a loan on Monday defaulted.
```
# Convert effective date to a day of the week using .dt.dayofweek (Monday=0, Sunday=6)
df['dayofweek'] = df['effective_date'].dt.dayofweek
# Plot histogram of day of week stratified by loan status and gender.
bins = np.linspace(df.dayofweek.min(), df.dayofweek.max(), 10)
g = sns.FacetGrid(df, col = "loan_status", hue ="Gender", palette="Set2", col_wrap=2)
g.map(plt.hist, 'dayofweek', bins=bins, ec="k")
g.axes[-1].legend()
plt.show()
# Check percentage of loan status as collection by day of week
df.groupby(['dayofweek'])['loan_status'].value_counts(normalize=True).to_frame()
```
As weekend loans seem to be a significant feature in our data set, let's create a variable 'weekend' for loans given on days 4, 5, and 6 (Friday, Saturday, and Sunday).
```
# Flag loans whose effective date falls on Fri/Sat/Sun (dayofweek > 3) as 1,
# all other weekdays as 0.
df['weekend'] = (df['dayofweek'] > 3).astype(int)
df.head()
```
## Part 3: One Hot Encoding
For our feature set (predictors), let's select 'Principal', 'terms', 'age', 'Gender', 'weekend', and 'education'. Since our education variable is categorical with multiple categories, let's use one hot encoding to convert them to binary variables using pd.get_dummies and append them to our dataframe using pd.concat. As we saw before, let's drop education level 'Master or Above' as there were only two observations.
```
# Create the feature dataframe. Double brackets select a DataFrame, which is
# the 2-D input required by the scikit-learn algorithms later.
features = df[['Principal','terms','age','Gender','weekend']]
# Apply pd.get_dummies for one hot encoding and add new features using pd.concat
features = pd.concat([features, pd.get_dummies(df['education'])], axis=1)
# Drop 'Master or Above' since it only had 2 observations
features.drop(['Master or Above'], axis = 1,inplace=True)
features[0:5]
```
## Part 4: Feature Selection
Let's select our predictors, feature set X.
```
# Predictor feature set
X = features
X[0:5]
```
Let's select our labeled target as loan status, y.
```
# Labeled target: loan status ('PAIDOFF' / 'COLLECTION')
y = df['loan_status'].values
y[0:5]
```
## Part 5: Normalizing Feature Set
It's important to normalize our feature set to a zero mean and variance to prevent individual feature from being over or underweighted and in order to generate interpretable, reliable predictions. We can do this using the scikit learn libraries StandardScaler, fit, and transform.
```
# Normalize features to zero mean and unit variance using StandardScaler
X = preprocessing.StandardScaler().fit(X).transform(X)
X[0:5]
```
## Classification Models - Optimizing Algorithms, Train and Test Sets, and Evaluating Models
Let's build and compare 4 classification models: K Nearest Neighbor (KNN), Decision Tree, Support Vector Machine (SVM), and Logistic Regression.
## Part 6: KNN
```
# Split into train and test sets (80/20, fixed seed for reproducibility)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1)
print ('Training set:', X_train.shape, y_train.shape)
print ('Test set:', X_test.shape, y_test.shape)
```
Let's determine our optimal value of K for the number of nearest neighbors. A K value that is too low will capture too much noise (overfit) while one that is too large will be over-generalized. Below, we determine our optimal K value is 7, which has a cross-validation accuracy of 0.74.
```
# Determine the optimal k value via 10-fold cross-validation over k = 3..14.
best_score = 0.0
acc_list=[]
for k in range(3,15):
    clf_knn = KNeighborsClassifier(n_neighbors = k, algorithm='auto')
    # using 10-fold cross validation for scoring the classifier's accuracy
    scores = cross_val_score(clf_knn, X, y, cv=10)
    score = scores.mean()
    acc_list.append(score)
    if score > best_score:
        best_score=score
        best_clf = clf_knn
        best_k = k
print("Best K is :",best_k,"| Cross validation Accuracy :",best_score)
# NOTE: cross_val_score clones the estimator, so best_clf is still unfitted
# here; it is trained on X_train below.
clf_knn = best_clf
```
Let's plot accuracy across varying K values. We see our optimal K value, the one with the highest accuracy, is 7.
```
# Plot cross-validation accuracy across the candidate K values.
plt.plot(range(3,15),acc_list, c="r")
plt.xlabel('K')
plt.ylabel('Cross-Validation Accuracy')
plt.show()
# Train model using the best K found above
clf_knn.fit(X_train,y_train)
# Make predictions on test set using our model
y_hat_knn = clf_knn.predict(X_test)
y_hat_knn[0:5]
```
#### Evaluating KNN Performance
Let's calculate the confusion matrix to evaluate model performance.
```
# Confusion matrix with rows/columns ordered as [PAIDOFF, COLLECTION]
cm = confusion_matrix(y_test, y_hat_knn,labels=["PAIDOFF","COLLECTION"])
print(cm)
```
We can plot a heatmap to make it easier to visualize and interpret:
Top left is true negative (TN)
<br>Top right is false positive (FP)
<br>Bottom left is false negative (FN)
<br>Bottom right is true positive (TP)
```
# Plot heatmap of the confusion matrix (rows = actual, columns = predicted)
sns.heatmap(cm, annot=True)
plt.xlabel("Predicted")
plt.ylabel("Actual")
plt.show()
```
Let's generate our confusion matrix report to evaluate model performance. Remember that:
Precision is the ratio of true postives to all positive predicted by model, Precision = TP / (TP + FP).
Recall is the ratio of true positives to all positives actually in our data set, Recall = TP / (TP + FN).
F1-score is the harmonic mean of precision and recall.
```
# Classification report: per-class precision, recall and F1
np.set_printoptions(precision=2)
print (classification_report(y_test, y_hat_knn))
```
We can also calculate the jaccard similarity score and F1-score automatically using the jaccard_similarity_score and f1_score functions, respectively. Jaccard score is the intersection divided by the union of the two labeled sets (the test and fitted set). F1-score is the harmonic mean of precision and recall as we saw above.
```
# Jaccard similarity score for KNN model on the test set.
# NOTE(review): jaccard_similarity_score was removed in scikit-learn 0.23;
# newer versions use sklearn.metrics.jaccard_score — confirm the version used.
jaccard_knn = jaccard_similarity_score(y_test, y_hat_knn)
jaccard_knn
# Weighted F1 score for KNN model on the test set
f1_knn = f1_score(y_test, y_hat_knn, average='weighted')
f1_knn
```
## Part 7: Decision Tree
Before we train and test our decision tree, let's determine our max depth that will yield the highest accuracy. When training our decision tree, we'll focus here on reducing entropy as much as possible (maximizing information gain) in each node of the tree. We see below that our max depth that will achieve the highest accuracy is 12. We see in our plot below that accuracy drops off after this point.
```
# Test max depths from 1 to 19
depth_range = range(1, 20)
# Create empty lists for jaccard and f1 scores
jaccard_similarity_score_ = []
f1_score_ = []
# Use for loop to train decision tree using increasing max depth values.
# Make predictions using test set, append jaccard and F1 score to the lists created above.
# NOTE(review): selecting max_depth by test-set accuracy leaks test data into
# model selection; cross-validation on the training set would be cleaner.
for d in depth_range:
    dec_tree = DecisionTreeClassifier(criterion = 'entropy', max_depth = d)
    dec_tree.fit(X_train, y_train)
    y_hat_tree = dec_tree.predict(X_test)
    jaccard_similarity_score_.append(jaccard_similarity_score(y_test, y_hat_tree))
    f1_score_.append(f1_score(y_test, y_hat_tree, average = 'weighted'))
# Create dataframe with jaccard and F1 accuracy values to determine best max depth. We see the best max depth is d = 12.
result = pd.DataFrame([jaccard_similarity_score_, f1_score_], index = ['Jaccard', 'F1'], columns = ['d = 1','d = 2','d = 3','d = 4','d = 5','d = 6','d = 7','d = 8','d = 9','d = 10', 'd = 11', 'd = 12', 'd = 13', 'd = 14', 'd = 15', 'd = 16', 'd = 17', 'd = 18', 'd = 19'])
result.columns.name = 'Evaluation Metrics'
result
# Plot accuracy of various max depths, jaccard score in blue and F1 score in red.
plt.plot(range(1,20),jaccard_similarity_score_)
plt.plot(range(1,20),f1_score_, c='r')
plt.xlabel('Max Depth')
plt.ylabel('Accuracy')
plt.show()
# Set tree algorithm to max depth 12
dec_tree = DecisionTreeClassifier(criterion="entropy", max_depth = 12)
dec_tree
# Train decision tree
dec_tree.fit(X_train,y_train)
# Make prediction on test set using our model
y_hat_tree = dec_tree.predict(X_test)
y_hat_tree[0:5]
```
Let's visualize our decision tree, which has a depth of 12 nodes. Keep in mind that values for our features are normalized. Our initial node is a criterion for weekend, which splits into criteria for a person's age and terms of loan. We can continue to follow the nodes and branches down, with each step maximizing reduction in entropy (maximizing information gain).
```
# Visualize decision tree using tree.export_graphviz
# Visualize decision tree using tree.export_graphviz.
# Feature names must match the column order of the training matrix.
# NOTE(review): class_names is passed as a single string here; sklearn expects a
# list of class labels (e.g. ['COLLECTION', 'PAIDOFF']) — confirm this is intended.
dot_data = tree.export_graphviz(dec_tree, out_file=None,
                                feature_names=['Principal','terms','age','Gender','weekend','Bachelor','High School or Below','college'],
                                class_names='loan_status',
                                filled=True, rounded=True,
                                special_characters=True)
graph = pydotplus.graph_from_dot_data(dot_data)
graph.set_size('"8,8!"') # We have 8 features in our feature set
gvz_graph = graphviz.Source(graph.to_string())
gvz_graph
```
#### Evaluating Decision Tree Performance
Let's calculate the confusion matrix to evaluate model performance.
```
# Calculate confusion matrix (rows = actual, columns = predicted, in the given label order)
cm = confusion_matrix(y_test, y_hat_tree,labels=["PAIDOFF","COLLECTION"])
print(cm)
```
We can plot a heatmap to make it easier to visualize and interpret:
Top left is true negative (TN)
<br>Top right is false positive (FP)
<br>Bottom left is false negative (FN)
<br>Bottom right is true positive (TP)
```
# Plot heatmap of confusion matrix; annot=True writes the raw count in each cell
sns.heatmap(cm, annot=True)
plt.xlabel("Predicted")
plt.ylabel("Actual")
plt.show()
```
Let's generate our confusion matrix report to evaluate model performance. Remember that:
Precision is the ratio of true positives to all positives predicted by the model, Precision = TP / (TP + FP).
Recall is the ratio of true positives to all positives actually in our data set, Recall = TP / (TP + FN).
F1-score is the harmonic mean of precision and recall.
```
# Confusion matrix report: per-class precision, recall, F1, and support
np.set_printoptions(precision=2)  # limit printed float precision for readability
print (classification_report(y_test, y_hat_tree))
```
We can also calculate the Jaccard similarity score and F1-score automatically using the jaccard_similarity_score and f1_score functions, respectively. Jaccard score is the intersection divided by the union of the two labeled sets (the test and fitted set). F1-score is the harmonic mean of precision and recall as we saw above.
```
# Jaccard similarity score for decision tree
# NOTE(review): jaccard_similarity_score was removed in modern scikit-learn
# (replaced by jaccard_score) — confirm the installed version.
jaccard_tree = jaccard_similarity_score(y_test, y_hat_tree)
jaccard_tree
# F1 score for decision tree (weighted average over classes)
f1_tree = f1_score(y_test, y_hat_tree, average='weighted')
f1_tree
```
## Part 8: SVM
For our SVM, let's first determine which kernel function - linear, polynomial, radial basis function (rbf), or sigmoid - generates the highest accuracy. We see that the polynomial kernel function generates the highest F1 score of 0.69.
```
# Determine optimal kernel function
# Array of kernel functions
kernel_func = ['sigmoid', 'poly', 'rbf', 'linear']
# Empty array for accuracy score (F1 score)
# NOTE(review): the name accuracy_score shadows sklearn.metrics.accuracy_score
# if that function was imported — consider renaming.
accuracy_score = []
# For each kernel function, train SVM model, run prediction on test set, calculate F1 score and append it to accuracy_score array
for k in kernel_func:
    svc_model = SVC(kernel = k)
    svc_model.fit(X_train, y_train)
    y_hat_svm = svc_model.predict(X_test)
    accuracy_score.append(f1_score(y_test, y_hat_svm, average = 'weighted'))
accuracy_score
# Bar plot of accuracy score for each kernel function
y_pos = np.arange(len(kernel_func))
plt.bar(y_pos, accuracy_score, align='center', alpha = 0.6)
plt.xticks(y_pos, kernel_func)
plt.xlabel('Kernel Functions')
plt.ylabel('Accuracy')
# Set SVM algorithm with polynomial kernel function (best performer above)
svc_model = SVC(kernel = 'poly')
# Train SVM model
svc_model.fit(X_train, y_train)
svc_model
# Make prediction on test set using our model
y_hat_svm = svc_model.predict(X_test)
y_hat_svm [0:5]
```
#### Evaluating SVM Performance
Let's calculate the confusion matrix to evaluate model performance.
```
# Calculate confusion matrix (rows = actual, columns = predicted, in the given label order)
cm = confusion_matrix(y_test, y_hat_svm,labels=["PAIDOFF","COLLECTION"])
print(cm)
```
We can plot a heatmap to make it easier to visualize and interpret:
Top left is true negative (TN)
<br>Top right is false positive (FP)
<br>Bottom left is false negative (FN)
<br>Bottom right is true positive (TP)
```
# Plot heatmap of confusion matrix; annot=True writes the raw count in each cell
sns.heatmap(cm, annot=True)
plt.xlabel("Predicted")
plt.ylabel("Actual")
plt.show()
```
Let's generate our confusion matrix report to evaluate model performance. Remember that:
Precision is the ratio of true positives to all positives predicted by the model, Precision = TP / (TP + FP).
Recall is the ratio of true positives to all positives actually in our data set, Recall = TP / (TP + FN).
F1-score is the harmonic mean of precision and recall.
```
# Confusion matrix report: per-class precision, recall, F1, and support
np.set_printoptions(precision=2)  # limit printed float precision for readability
print (classification_report(y_test, y_hat_svm))
```
We can also calculate the Jaccard similarity score and F1-score automatically using the jaccard_similarity_score and f1_score functions, respectively. Jaccard score is the intersection divided by the union of the two labeled sets (the test and fitted set). F1-score is the harmonic mean of precision and recall as we saw above.
```
# Jaccard similarity score for SVM
# NOTE(review): jaccard_similarity_score was removed in modern scikit-learn
# (replaced by jaccard_score) — confirm the installed version.
jaccard_svm = jaccard_similarity_score(y_test, y_hat_svm)
jaccard_svm
# F1 score for SVM (weighted average over classes)
f1_svm = f1_score(y_test, y_hat_svm, average='weighted')
f1_svm
```
## Part 9: Logistic Regression
Logistic regression is best suited for a binary categorical target variable like the one we have. Moreover, it offers the benefit of determining not just whether a customer will default or not, but the probability that he or she will default. This is useful if knowing the likelihood that a customer will default is an important question for a bank to answer. For our logistic regression, let's first determine which combination of regularization (to account for overfitting) and numerical solver to find parameters - newton-cg, lbfgs, liblinear, sag, saga - generates the highest accuracy score (log loss score). We find below that our highest accuracy is with regularization C = 0.001 and solver liblinear.
```
# Grid-scan regularization strength C and solver, scoring each fit by log loss.
solvers = ['newton-cg', 'lbfgs', 'liblinear', 'sag', 'saga']
regularization_val = [0.1, 0.01, 0.001]
index = []           # sequential test number of each (C, solver) combination
accuracy_score = []  # log loss of each combination (lower is better)
iterations = 0
for index1, c in enumerate(regularization_val):
    for index2, solver in enumerate(solvers):
        iterations +=1
        # BUG FIX: the x-axis value must increase in iteration order so it lines
        # up with the order scores are appended below. The previous expression
        # (index1 + index2*5) produced a non-monotonic x-axis, so the plot's
        # "test 13" did not correspond to the 13th combination tested.
        index.append(iterations)
        lr_model = LogisticRegression(C = c, solver = solver)
        lr_model.fit(X_train, y_train)
        y_hat_lr = lr_model.predict(X_test)
        y_prob = lr_model.predict_proba(X_test)
        print('Test {}: Accuracy at C = {} when Solver = {} is : {}'.format(iterations, c, solver, log_loss(y_test, y_prob) ))
        accuracy_score.append(log_loss(y_test, y_prob))
print('\n')
# Visualize the above accuracy tests, with the peak at test 13, which corresponds to C = 0.001 and solver = liblinear
lr_prob = lr_model.predict_proba(X_test)
log_loss(y_test, lr_prob)
plt.plot(index, accuracy_score)
plt.xlabel('Parameter Value')
plt.ylabel('Testing Accuracy')
# Set logistic regression with optimal regularization of C = 0.001 and solver = 'liblinear'
lr_model = LogisticRegression(C = 0.001, solver = 'liblinear')
# Train logistic regression model
lr_model.fit(X_train, y_train)
lr_model
# Make prediction on test set using our model
y_hat_lr = lr_model.predict(X_test)
# Determine probabilities of loan classification using our test set. We'll need this for our logloss score.
y_hat_lr_prob = lr_model.predict_proba(X_test)
```
#### Evaluating Logistic Regression Performance
Let's calculate the confusion matrix to evaluate model performance.
```
# Calculate confusion matrix (rows = actual, columns = predicted, in the given label order)
cm = confusion_matrix(y_test, y_hat_lr,labels=["PAIDOFF","COLLECTION"])
print(cm)
```
We can plot a heatmap to make it easier to visualize and interpret:
Top left is true negative (TN)
<br>Top right is false positive (FP)
<br>Bottom left is false negative (FN)
<br>Bottom right is true positive (TP)
```
# Plot heatmap of confusion matrix; annot=True writes the raw count in each cell
sns.heatmap(cm, annot=True)
plt.xlabel("Predicted")
plt.ylabel("Actual")
plt.show()
```
Let's generate our confusion matrix report to evaluate model performance. Remember that:
Precision is the ratio of true positives to all positives predicted by the model, Precision = TP / (TP + FP).
Recall is the ratio of true positives to all positives actually in our data set, Recall = TP / (TP + FN).
F1-score is the harmonic mean of precision and recall.
```
# Confusion matrix report: per-class precision, recall, F1, and support
np.set_printoptions(precision=2)  # limit printed float precision for readability
print (classification_report(y_test, y_hat_lr))
```
We can also calculate the Jaccard similarity score, F1-score, and log loss automatically using the jaccard_similarity_score, f1_score, and log_loss functions, respectively. Jaccard score is the intersection divided by the union of the two labeled sets (the test and fitted set). F1-score is the harmonic mean of precision and recall as we saw above.
```
# Jaccard similarity score for logistic regression
# NOTE(review): jaccard_similarity_score was removed in modern scikit-learn
# (replaced by jaccard_score) — confirm the installed version.
jaccard_lr = jaccard_similarity_score(y_test, y_hat_lr)
jaccard_lr
# F1 score for logistic regression (weighted average over classes)
f1_lr = f1_score(y_test, y_hat_lr, average='weighted')
f1_lr
# Logloss for logistic regression (needs predicted probabilities, not hard labels)
logloss_lr = log_loss(y_test, y_hat_lr_prob)
logloss_lr
```
## Part 10: Evaluating Model Performance Head-to-Head
Let's compare KNN, decision tree, SVM, and logistic regression head-to-head using our specified parameters for classifying loan status for this data set. Creating a data frame with our evaluation metrics, we see that KNN performs best while logistic regression is the weakest of the four.
```
# Create dataframe with evaluation metrics
evaluation = {"Jaccard":[jaccard_knn, jaccard_tree, jaccard_svm, jaccard_lr],
"F1-score":[f1_knn, f1_tree, f1_svm, f1_lr],
"Log Loss":["NA", "NA", "NA", logloss_lr]
}
eval_df = pd.DataFrame(evaluation, columns=["Jaccard", "F1-score", "Log Loss"], index=["KNN", "Decision Tree", "SVM", "Logistic Regression"])
eval_df.columns.name = "Algorithm"
eval_df
```
| github_jupyter |
# Introduction to Data Science
# Lecture 25: Neural Networks II
*COMP 5360 / MATH 4100, University of Utah, http://datasciencecourse.net/*
In this lecture, we'll continue discussing Neural Networks.
Recommended Reading:
* A. Géron, [Hands-On Machine Learning with Scikit-Learn & TensorFlow](http://proquest.safaribooksonline.com/book/programming/9781491962282) (2017)
* I. Goodfellow, Y. Bengio, and A. Courville, [Deep Learning](http://www.deeplearningbook.org/) (2016)
* Y. LeCun, Y. Bengio, and G. Hinton, [Deep learning](https://www.nature.com/articles/nature14539), Nature (2015)
## Recap: Neural Networks
Last time, we introduced *Neural Networks* and discussed how they can be used for classification and regression.
There are many different *network architectures* for Neural Networks, but our focus is on **Multi-layer Perceptrons**. Here, there is an *input layer*, typically drawn on the left hand side and an *output layer*, typically drawn on the right hand side. The middle layers are called *hidden layers*.
<img src="Colored_neural_network.svg" title="https://en.wikipedia.org/wiki/Artificial_neural_network#/media/File:Colored_neural_network.svg"
width="300">
Given a set of features $X = x^0 = \{x_1, x_2, ..., x_n\}$ and a target $y$, a neural network works as follows.
Each layer applies an affine transformation and an [activation function](https://en.wikipedia.org/wiki/Activation_function) (e.g., ReLU, hyperbolic tangent, or logistic) to the output of the previous layer:
$$
x^{j} = f ( A^{j} x^{j-1} + b^j ).
$$
At the $j$-th hidden layer, the input is represented as the composition of $j$ such mappings. An additional function, *e.g.* [softmax](https://en.wikipedia.org/wiki/Softmax_function), is applied to the output layer to give the prediction, $\hat y$, for classification or regression.
<img src="activationFct.png"
title="see Géron, Ch. 10"
width="700">
## Softmax function for classification
The *softmax function*, $\sigma:\mathbb{R}^K \to (0,1)^K$ is defined by
$$
\sigma(\mathbf{z})_j = \frac{e^{z_j}}{\sum_{k=1}^K e^{z_k}}
\qquad \qquad \textrm{for } j=1, \ldots, K.
$$
Note that each component is in the range $(0,1)$ and the values sum to 1. We interpret $\sigma(\mathbf{z})_j$ as the probability that $\mathbf{z}$ is a member of class $j$.
## Training a neural network
Neural networks uses a loss function of the form
$$
Loss(\hat{y},y,W) = \frac{1}{2} \sum_{i=1}^n g(\hat{y}_i(W),y_i) + \frac{\alpha}{2} \|W\|_2^2
$$
Here,
+ $y_i$ is the label for the $i$-th example,
+ $\hat{y}_i(W)$ is the predicted label for the $i$-th example,
+ $g$ is a function that measures the error, typically $L^2$ difference for regression or cross-entropy for classification, and
+ $\alpha$ is a regularization parameter.
Starting from initial random weights, the loss function is minimized by repeatedly updating these weights. Various **optimization methods** can be used, *e.g.*,
+ gradient descent method
+ quasi-Newton method,
+ stochastic gradient descent, or
+ ADAM.
There are various parameters associated with each method that must be tuned.
**Back propagation** is a way of using the chain rule from calculus to compute the gradient of the $Loss$ function for optimization.
## Neural Networks in scikit-learn
In the previous lecture, we used Neural Network implementations in scikit-learn to do both classification and regression:
+ [multi-layer perceptron (MLP) classifier](http://scikit-learn.org/stable/modules/generated/sklearn.neural_network.MLPClassifier.html)
+ [multi-layer perceptron (MLP) regressor](http://scikit-learn.org/stable/modules/generated/sklearn.neural_network.MLPRegressor.html)
However, there are several limitations to the scikit-learn implementation:
- no GPU support
- limited network architectures
## Neural networks with TensorFlow
Today, we'll use [TensorFlow](https://github.com/tensorflow/tensorflow) to train a Neural Network.
TensorFlow is an open-source library designed for large-scale machine learning.
### Installing TensorFlow
Instructions for installing TensorFlow are available at [the tensorflow install page](https://www.tensorflow.org/install).
It is recommended that you use the command:
```
pip install tensorflow
```
```
import tensorflow as tf
print(tf.__version__)
# to make this notebook's output stable across runs
# NOTE(review): these are TensorFlow 1.x APIs, removed in TF 2.x
# (the 2.x equivalents live under tf.compat.v1) — confirm the installed version.
def reset_graph(seed=42):
    """Clear the default TF graph and reseed the TF and NumPy RNGs."""
    tf.reset_default_graph()
    tf.set_random_seed(seed)
    np.random.seed(seed)
```
TensorFlow represents computations by connecting op (operation) nodes into a computation graph.
<img src="graph.png"
title="An example of computational graph"
width="400">
A TensorFlow program usually has two components:
+ In the *construction phase*, a computational graph is built. During this phase, no computations are performed and the variables are not yet initialized.
+ In the *execution phase*, the graph is evaluated, typically many times. In this phase, each operation is given to a CPU or GPU, variables are initialized, and functions can be evaluated.
```
# construction phase: build the computation graph (nothing is evaluated yet)
x = tf.Variable(3)
y = tf.Variable(4)
f = x*x*y + y + 2
# execution phase: evaluate the graph inside a session
with tf.Session() as sess: # initializes a "session"
    x.initializer.run()
    y.initializer.run()
    print(f.eval())
# alternatively all variables can be initialized as follows
init = tf.global_variables_initializer()
with tf.Session() as sess: # initializes a "session"
    init.run() # initializes all the variables
    print(f.eval())
```
### Autodiff
TensorFlow can automatically compute the derivative of functions using [```gradients```](https://www.tensorflow.org/api_docs/python/tf/gradients).
```
# construction phase
x = tf.Variable(3.0)
y = tf.Variable(4.0)
f = x + 2*y*y + 2
# tf.gradients returns df/dv for each variable v in the list
grads = tf.gradients(f,[x,y])
# execution phase: expect [1.0, 16.0] (df/dx = 1; df/dy = 4*y = 16 at y = 4)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer()) # initializes all variables
    print([g.eval() for g in grads])
```
This is enormously helpful since training a NN requires the derivative of the loss function with respect to the parameters (and there are a lot of parameters). This is computed using backpropagation (chain rule) and TensorFlow does this work for you.
**Exercise:** Use TensorFlow to compute the derivative of $f(x) = e^x$ at $x=2$.
```
# your code here
```
### Optimization methods
Tensorflow also has several built-in optimization methods.
Other optimization methods in TensorFlow:
+ [```tf.train.Optimizer```](https://www.tensorflow.org/api_docs/python/tf/compat/v1/train/Optimizer)
+ [```tf.train.GradientDescentOptimizer```](https://www.tensorflow.org/api_docs/python/tf/compat/v1/train/GradientDescentOptimizer)
+ [```tf.train.AdadeltaOptimizer```](https://www.tensorflow.org/api_docs/python/tf/compat/v1/train/AdadeltaOptimizer)
+ [```tf.train.AdagradOptimizer```](https://www.tensorflow.org/api_docs/python/tf/compat/v1/train/AdagradOptimizer)
+ [```tf.train.AdagradDAOptimizer```](https://www.tensorflow.org/api_docs/python/tf/compat/v1/train/AdagradDAOptimizer)
+ [```tf.train.MomentumOptimizer```](https://www.tensorflow.org/api_docs/python/tf/compat/v1/train/MomentumOptimizer)
+ [```tf.train.AdamOptimizer```](https://www.tensorflow.org/api_docs/python/tf/compat/v1/train/AdamOptimizer)
+ [```tf.train.FtrlOptimizer```](https://www.tensorflow.org/api_docs/python/tf/compat/v1/train/FtrlOptimizer)
+ [```tf.train.ProximalGradientDescentOptimizer```](https://www.tensorflow.org/api_docs/python/tf/compat/v1/train/ProximalGradientDescentOptimizer)
+ [```tf.train.ProximalAdagradOptimizer```](https://www.tensorflow.org/api_docs/python/tf/compat/v1/train/ProximalAdagradOptimizer)
+ [```tf.train.RMSPropOptimizer```](https://www.tensorflow.org/api_docs/python/tf/compat/v1/train/RMSPropOptimizer)
For more information, see the [TensorFlow training webpage](https://www.tensorflow.org/api_guides/python/train).
Let's see how to use the [```GradientDescentOptimizer```](https://www.tensorflow.org/api_docs/python/tf/compat/v1/train/GradientDescentOptimizer).
```
x = tf.Variable(3.0, trainable=True)
y = tf.Variable(2.0, trainable=True)
# Ill-conditioned quadratic bowl with its minimum at (0, 0)
f = x*x + 100*y*y
opt = tf.train.GradientDescentOptimizer(learning_rate=5e-3).minimize(f)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(1000):
        if i%100 == 0: print(sess.run([x,y,f]))  # report progress every 100 steps
        sess.run(opt)  # one gradient-descent update
```
Using another optimizer, such as the [```MomentumOptimizer```](https://www.tensorflow.org/api_docs/python/tf/compat/v1/train/MomentumOptimizer),
has similar syntax.
```
x = tf.Variable(3.0, trainable=True)
y = tf.Variable(2.0, trainable=True)
# Same ill-conditioned quadratic as the gradient-descent demo above
f = x*x + 100*y*y
opt = tf.train.MomentumOptimizer(learning_rate=1e-2,momentum=.5).minimize(f)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(1000):
        if i%100 == 0: print(sess.run([x,y,f]))  # report progress every 100 steps
        sess.run(opt)  # one momentum update
```
**Exercise:** Use TensorFlow to find the minimum of the [Rosenbrock function](https://en.wikipedia.org/wiki/Rosenbrock_function):
$$
f(x,y) = (x-1)^2 + 100*(y-x^2)^2.
$$
```
# your code here
```
## Classifying the MNIST handwritten digit dataset
We now use TensorFlow to classify the handwritten digits in the MNIST dataset.
### Using plain TensorFlow
We'll first follow [Géron, Ch. 10](https://github.com/ageron/handson-ml/blob/master/10_introduction_to_artificial_neural_networks.ipynb) to build a NN using plain TensorFlow.
#### Construction phase
+ We specify the number of inputs and outputs and the size of each layer. Here the images are 28x28 and there are 10 classes (each corresponding to a digit). We'll choose 2 hidden layers, with 300 and 100 neurons respectively.
+ Placeholder nodes are used to represent the training data and targets. We use the ```None``` keyword to leave the shape (of the training batch) unspecified.
+ We add layers to the NN using the ```layers.dense()``` function. In each case, we specify the input, and the size of the layer. We also specify the activation function used in each layer. Here, we choose the ReLU function.
+ We specify that the output of the NN will be a softmax function. The loss function is cross entropy.
+ We then specify that we'll use the [GradientDescentOptimizer](https://www.tensorflow.org/api_docs/python/tf/train/GradientDescentOptimizer)
with a learning rate of 0.01.
+ Finally, we specify how the model will be evaluated. The [```in_top_k```](https://www.tensorflow.org/api_docs/python/tf/nn/in_top_k) function checks to see if the targets are in the top k predictions.
We then initialize all of the variables and create an object to save the model using the [```saver()```](https://www.tensorflow.org/programmers_guide/saved_model) function.
#### Execution phase
At each *epoch*, the code breaks the training batch into mini-batches of size 50. Cycling through the mini-batches, it uses gradient descent to train the NN. The accuracy for both the training and test datasets are evaluated.
```
import tensorflow as tf
import numpy as np
from sklearn.metrics import confusion_matrix
# to make this notebook's output stable across runs (TF 1.x APIs)
def reset_graph(seed=42):
    """Clear the default TF graph and reseed the TF and NumPy RNGs."""
    tf.reset_default_graph()
    tf.set_random_seed(seed)
    np.random.seed(seed)
# load the data; flatten each 28x28 image into a 784-vector scaled to [0, 1]
(X_train, y_train), (X_test, y_test) = tf.keras.datasets.mnist.load_data()
X_train = X_train.astype(np.float32).reshape(-1, 28*28) / 255.0
X_test = X_test.astype(np.float32).reshape(-1, 28*28) / 255.0
y_train = y_train.astype(np.int32)
y_test = y_test.astype(np.int32)
# helper code
def shuffle_batch(X, y, batch_size):
    """Yield (X_batch, y_batch) minibatches covering one random permutation of the data."""
    shuffled = np.random.permutation(len(X))
    num_splits = len(X) // batch_size
    for chunk in np.array_split(shuffled, num_splits):
        yield X[chunk], y[chunk]
# construction phase
n_inputs = 28*28 # MNIST
n_hidden1 = 300
n_hidden2 = 100
n_outputs = 10
reset_graph()
# Placeholders for the inputs; None leaves the batch dimension unspecified
X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
y = tf.placeholder(tf.int32, shape=(None), name="y")
with tf.name_scope("dnn"):
    # two ReLU hidden layers; the output layer produces raw logits
    hidden1 = tf.layers.dense(X, n_hidden1, name="hidden1",activation=tf.nn.relu)
    hidden2 = tf.layers.dense(hidden1, n_hidden2, name="hidden2",activation=tf.nn.relu)
    logits = tf.layers.dense(hidden2, n_outputs, name="outputs")
    #y_proba = tf.nn.softmax(logits)
with tf.name_scope("loss"):
    # softmax and cross-entropy are fused here for numerical stability
    xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
    loss = tf.reduce_mean(xentropy, name="loss")
learning_rate = 0.01
with tf.name_scope("train"):
    optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    training_op = optimizer.minimize(loss)
with tf.name_scope("eval"):
    # a prediction is correct if the true class is the single highest-scoring logit
    correct = tf.nn.in_top_k(logits, y, 1)
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
# execution phase
init = tf.global_variables_initializer()
saver = tf.train.Saver()
n_epochs = 10
#n_batches = 50
batch_size = 50
with tf.Session() as sess:
    init.run()
    for epoch in range(n_epochs):
        for X_batch, y_batch in shuffle_batch(X_train, y_train, batch_size):
            sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
        # accuracy on the last minibatch and on the held-out test set
        acc_batch = accuracy.eval(feed_dict={X: X_batch, y: y_batch})
        acc_valid = accuracy.eval(feed_dict={X: X_test, y: y_test})
        print(epoch, "Batch accuracy:", acc_batch, "Validation accuracy:", acc_valid)
    save_path = saver.save(sess, "./my_model_final.ckpt")
```
Since the NN has been saved, we can use it for classification using the [```saver.restore```](https://www.tensorflow.org/programmers_guide/saved_model) function.
We can also print the confusion matrix using [```confusion_matrix```](https://www.tensorflow.org/api_docs/python/tf/confusion_matrix).
```
with tf.Session() as sess:
    saver.restore(sess, save_path)  # reload the trained weights from the checkpoint
    Z = logits.eval(feed_dict={X: X_test})
    y_pred = np.argmax(Z, axis=1)   # predicted class = index of the largest logit
    print(confusion_matrix(y_test,y_pred))
```
### Using TensorFlow's Keras API
Next, we'll use TensorFlow's Keras API to build a NN for the MNIST dataset.
[Keras](https://keras.io/) is a high-level neural networks API, written in Python and capable of running on top of TensorFlow, CNTK, or Theano. We'll use it with TensorFlow.
```
import tensorflow as tf
import numpy as np
from sklearn.metrics import confusion_matrix
# load MNIST and scale pixel values to [0, 1]
(X_train, y_train),(X_test, y_test) = tf.keras.datasets.mnist.load_data()
X_train, X_test = X_train / 255.0, X_test / 255.0
# set the model: flatten -> 512 ReLU units -> 20% dropout -> 10-way softmax
model = tf.keras.models.Sequential([
    tf.keras.layers.Flatten(input_shape=(28, 28)),
    tf.keras.layers.Dense(512, activation=tf.nn.relu),
    tf.keras.layers.Dropout(rate=0.2),
    tf.keras.layers.Dense(10, activation=tf.nn.softmax)
])
# specify optimizer and loss; the sparse_* loss accepts integer labels directly
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
# train the model
model.fit(X_train, y_train, epochs=5)
# evaluate on the held-out test set and print each metric by name
score = model.evaluate(X_test, y_test)
names = model.metrics_names
for ii in np.arange(len(names)):
    print(names[ii],score[ii])
model.summary()
y_pred = np.argmax(model.predict(X_test), axis=1)
print(confusion_matrix(y_test,y_pred))
```
## Using a pre-trained network
There are many examples of pre-trained NN that can be accessed [here](https://www.tensorflow.org/api_docs/python/tf/keras/applications).
These NN are very large, having been trained on giant computers using massive datasets.
It can be very useful to initialize a NN using one of these. This is called [transfer learning](https://en.wikipedia.org/wiki/Transfer_learning).
We'll use a NN that was pretrained for image recognition. This NN was trained on the [ImageNet](http://www.image-net.org/) project, which contains > 14 million images belonging to > 20,000 classes (synsets).
```
import tensorflow as tf
import numpy as np
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications import vgg16
# Load VGG16 with ImageNet-pretrained weights, including the fully
# connected classification head (include_top=True); downloads on first run.
vgg_model = tf.keras.applications.VGG16(weights='imagenet',include_top=True)
vgg_model.summary()
img_path = 'images/scout1.jpeg'
# VGG16 expects a 224x224 RGB input
img = image.load_img(img_path, target_size=(224, 224))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)    # add the batch dimension
x = vgg16.preprocess_input(x)    # convert to the input distribution VGG16 was trained on
preds = vgg_model.predict(x)
# decode_predictions maps class indices to human-readable ImageNet labels
print('Predicted:', vgg16.decode_predictions(preds, top=5)[0])
```
**Exercise:** Repeat the above steps for an image of your own.
**Exercise:** There are several [other pre-trained networks in Keras](https://github.com/keras-team/keras-applications). Try these!
```
# your code here
```
## Some NN topics that we didn't discuss
+ Recurrent neural networks (RNN) for time series
+ How NN can be used for unsupervised learning problems and [Reinforcement learning problems](https://en.wikipedia.org/wiki/Reinforcement_learning)
+ Special layers in NN for image processing
+ Using Tensorflow on a GPU
+ ...
## CPU vs. GPU
[CPUs (Central processing units)](https://en.wikipedia.org/wiki/Central_processing_unit) have just a few cores. The number of processes that a CPU can do in parallel is limited. However, each cores is very fast and is good for sequential tasks.
[GPUs (Graphics processing units)](https://en.wikipedia.org/wiki/Graphics_processing_unit) have thousands of cores, so can do many processes in parallel. GPU cores are typically slower and are more limited than CPU cores. However, for the right kind of computations (think matrix multiplication), GPUs are very fast. GPUs also have their own memory and caching systems, which further improves the speed of some computations, but also makes GPUs more difficult to program. (You have to use something like [CUDA](https://en.wikipedia.org/wiki/CUDA)).
TensorFlow can use GPUs to significantly speed up the training NN. See the programmer's guide [here](https://www.tensorflow.org/programmers_guide/using_gpu).
| github_jupyter |
# Red Giant Mode fitting
Fitting $nstars$ RG stars chosen at random using Vrard model.
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import pystan
import random
#import output data
nstars = 5
IDs = []
stardat = pd.read_csv('RGdata/output_1000stars.csv', delim_whitespace=False, header=0, usecols=range(1,4))
# NOTE(review): random.choice samples with replacement, so the same ID can be
# picked twice; use random.sample if the nstars stars must be distinct.
for i in range(nstars):
    IDs.append(random.choice(stardat['ID']))
modes = {} # dictionary with frequencies and errors
for i in IDs:
    modes[str(i)] = pd.read_csv('RGdata/modes_'+str(i)+'.csv', delim_whitespace=False, header=0, usecols=[0,7])
    modes[str(i)] = modes[str(i)].sort_values(by=['f0'])
    modes[str(i)] = modes[str(i)].set_index(np.arange(0,len(modes[str(i)]),1))
    # central-difference estimate of the large separation between consecutive modes
    modes[str(i)]['dnu'] = modes[str(i)].f0.diff(2).shift(-1)/2
    dnu_avg = np.mean(modes[str(i)].dnu)
    # approximate radial order n of each mode from f0 / <dnu>
    n_min = int(modes[str(i)].f0.min() / dnu_avg)
    n_obs = np.arange(n_min, n_min+len(modes[str(i)].f0), 1)
    modes[str(i)]['n'] = n_obs
# keep only the rows for the randomly selected stars
stardat= stardat.loc[stardat['ID'].isin(IDs)]
```
To find a ballpark figure before defining priors, will use model:
$$\nu(n)=(n+\epsilon)\langle\Delta\nu\rangle+k(n_{max}-n)^2+\mathcal{A}e^{-n/\tau}sin(nw+\phi)$$
where $n_{max}= \nu_{max}/\Delta\nu - \epsilon$
```
def echelle(stardat, modes, ID, model=False, stanfit=None, stan_dnu=0):
    """Plot an echelle diagram (frequency mod dnu vs. frequency) for one star.

    Parameters
    ----------
    stardat : pandas.DataFrame of star-level data (unused here, kept for interface
        compatibility with callers that pass it positionally).
    modes : dict mapping str(ID) -> DataFrame with 'f0' and 'f0_err' columns.
    ID : identifier of the star to plot.
    model : if True, overplot the model frequencies given in `stanfit`.
    stanfit : array of model frequencies (required when model=True).
    stan_dnu : large separation used to fold the model frequencies.
    """
    # BUG FIX: the original signature used stanfit=[] — a mutable default
    # argument shared between calls. None is the safe sentinel.
    if stanfit is None:
        stanfit = []
    # (Removed unused locals numax_obs / numax_obs_err, which were computed
    # from stardat but never referenced.)
    # Mean large separation from the central difference of consecutive modes.
    dnu_obs = np.mean(modes[str(ID)].f0.diff(2).shift(-1)/2)
    # Create dataframes for frequencies and calculate orders n
    l0modes = pd.DataFrame([modes[str(ID)].f0, modes[str(ID)].f0_err])
    l0modes = l0modes.T
    l0modes = l0modes.sort_values('f0', ascending=True)
    n_min = int(l0modes.f0.min() / dnu_obs)
    n_obs = np.arange(n_min, n_min+len(l0modes.f0), 1)
    l0modes['n'] = n_obs
    l0modes = l0modes.set_index(np.arange(0, len(l0modes.f0), 1))
    plt.scatter(l0modes.f0 % dnu_obs, l0modes.f0, label = str(ID))
    if model:
        label = 'Stan Fit '+str(ID)
        plt.plot(stanfit % stan_dnu, stanfit, label = label)
    plt.xlabel(r'Frequency modulo ($\mu Hz$)')
    plt.ylabel(r'Frequency ($\mu Hz$)')
    plt.legend()
def model(n, dnu, nmax, epsilon, alpha, A, G, phi):
    """Asymptotic radial-mode frequencies with a quadratic curvature term and a
    periodic (glitch-like) modulation, following the Vrard parameterisation."""
    curvature = alpha / 2 * (nmax - n) ** 2
    modulation = A * G / (2 * np.pi) * np.sin((2 * np.pi * (n - nmax)) / G + phi)
    return (n + epsilon + curvature + modulation) * dnu
# Build an initial model for each star from the Vrard scaling relations and
# overlay it on the data echelle diagram.
for i in IDs:
    #values from Vrard
    dnu_avg = np.mean(modes[str(i)].dnu)
    n = modes[str(i)].n
    #epsilon = 0.601 + 0.632*np.log(dnu_avg)
    # phase offset estimated directly from the data rather than the scaling relation above
    epsilon = np.median((modes[str(i)].f0 % dnu_avg) / dnu_avg)
    numax_obs = float(stardat.loc[stardat['ID'] == i].Numax)
    nmax = numax_obs/dnu_avg - epsilon
    # curvature and modulation amplitudes from the dnu scaling relations
    alpha = 0.015*dnu_avg**(-0.32)
    A = 0.06*dnu_avg**(-0.88)
    G = 3.08
    #tau = 8
    phi = 1.71
    f = model(n, dnu_avg, nmax, epsilon, alpha, A, G, phi)
    echelle(stardat, modes, i, True, f, dnu_avg)
'''plt.scatter(l0modes.f0 % dnu_obs, l0modes.f0, label = 'Data')
plt.plot(f % dnu_obs, f, label = 'Model')
plt.xlabel(r'Frequency modulo ($\mu Hz$)')
plt.ylabel(r'Frequency ($\mu Hz$)')
#mod_err = (l0modes.f0 % dnu_obs)*np.sqrt((dnu_obs_err/dnu_obs)**2 + (l0modes.f0_err/l0modes.f0)**2)
plt.errorbar(l0modes.f0 % dnu_obs, l0modes.f0, yerr = l0modes.f0_err, xerr = mod_err, ecolor = 'r', ls='none', label = 'Error')
plt.legend()'''
code = '''
data {
int N;
real n[N];
real freq[N];
real freq_err[N];
real dnu_guess;
}
parameters {
real dnu;
real nmax;
real epsilon;
real alpha;
real<lower = 0> A;
real<lower = 0> G;
real<lower = -2.0*pi(), upper = 2.0*pi()> phi;
//real<lower = 0> tau;
}
model {
real mod[N];
for (i in 1:N){
mod[i] = (n[i] + epsilon + (alpha/2) * (nmax - n[i])^2 +
A*G/(2*pi()) * sin((2*pi()*(n[i]-nmax))/G + phi))*dnu;
}
mod ~ normal(freq, freq_err);
dnu ~ normal(dnu_guess, dnu_guess*0.001);
epsilon ~ normal(0.601 + 0.632*log(dnu), 0.5);
epsilon ~ uniform(-1.0, 2.0);
nmax ~ normal(10, 4);
alpha ~ lognormal(log(0.015*dnu^(-0.32)), 0.3);
A ~ lognormal(log(0.06*dnu^(-0.88)), 0.4);
G ~ normal(3.08, 0.8);
// tau ~ normal(50, 10);
}
generated quantities{
real fm[N];
for (j in 1:N){
fm[j] = (n[j] + epsilon + (alpha/2) * (nmax - n[j])^2 +
A*G/(2*pi()) * sin((2*pi()*(n[j]-nmax))/G + phi))*dnu;
}
}
'''
# Compile the Stan program once, then sample it independently for each star.
sm = pystan.StanModel(model_code=code)
fits = {}  # star ID (as str) -> pystan fit object
for i in IDs:
    dat_star = stardat.loc[stardat['ID'] == i]
    df_star = modes[str(i)]
    df_star = df_star.sort_values(by=['f0'])
    # Observed large separation and nu_max used to seed the chains.
    dnu_obs = np.mean(df_star.dnu)
    numax_obs = float(dat_star.loc[dat_star['ID'] == i].Numax)
    df_star = df_star.set_index(np.arange(0, len(df_star.f0), 1))
    nmax_guess = np.mean(df_star.n)
    # Phase offset estimated from the median position in the echelle diagram.
    epsilon_obs = np.median((df_star.f0 % dnu_obs) / dnu_obs)
    #epsilon_obs = 0.601 + 0.632*np.log(dnu_obs)
    alpha_obs = 0.015*dnu_obs**(-0.32)
    A_obs = 0.06*dnu_obs**(-0.88)
    data = {'N': len(df_star.f0), 'n': df_star.n.values, 'freq': df_star.f0.values,
            'freq_err': df_star.f0_err.values, 'dnu_guess': dnu_obs}
    # All chains start from the same data-driven initial point.
    start = {'dnu': dnu_obs, 'epsilon': epsilon_obs,
             'nmax': numax_obs/dnu_obs - epsilon_obs, 'alpha': alpha_obs, 'A': A_obs,
             'G': 3, 'phi': -1.6}
    nchains=4
    fit = sm.sampling(data=data, iter=5000, chains=nchains, init=[start for n in range(nchains)])
    fits[str(i)] = fit
    output = pd.DataFrame({'dnu': fit['dnu'], 'nmax': fit['nmax'], 'epsilon': fit['epsilon'],
                           'alpha': fit['alpha'], 'A': fit['A'], 'G': fit['G'], 'phi': fit['phi']})
    #output.to_csv('samples_' + str(i) + '.csv')
# Print the full pystan summary for every fitted star.
for i in IDs:
    print('ID = ' + str(i))
    print(fits[str(i)])
# Re-evaluate the model at the posterior-mean parameters and overplot it on
# each star's echelle diagram.
for i in IDs:
    stanfit = model(modes[str(i)].n, fits[str(i)]['dnu'].mean(), fits[str(i)]['nmax'].mean(), fits[str(i)]['epsilon'].mean(), fits[str(i)]['alpha'].mean(), fits[str(i)]['A'].mean(), fits[str(i)]['G'].mean(), fits[str(i)]['phi'].mean())
    stan_dnu = fits[str(i)]['dnu'].mean()
    echelle(stardat, modes, i, True, stanfit, stan_dnu)
# Corner plot of the joint posterior.
# NOTE(review): `fit` here is the fit of the LAST star processed by the
# sampling loop above — confirm that is the star you intend to inspect.
import corner
data = np.vstack([fit['dnu'], fit['nmax'], fit['epsilon'], fit['alpha'], fit['A'], fit['G'], fit['phi']]).T
corner.corner(data, labels=['dnu', 'nmax', 'epsilon', 'alpha', 'A', 'G', 'phi'])
plt.show()
```
| github_jupyter |
<table class="ee-notebook-buttons" align="left">
<td><a target="_blank" href="https://github.com/giswqs/earthengine-py-notebooks/tree/master/Datasets/Terrain/us_ned_physio_diversity.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a></td>
<td><a target="_blank" href="https://nbviewer.jupyter.org/github/giswqs/earthengine-py-notebooks/blob/master/Datasets/Terrain/us_ned_physio_diversity.ipynb"><img width=26px src="https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png" />Notebook Viewer</a></td>
<td><a target="_blank" href="https://mybinder.org/v2/gh/giswqs/earthengine-py-notebooks/master?filepath=Datasets/Terrain/us_ned_physio_diversity.ipynb"><img width=58px src="https://mybinder.org/static/images/logo_social.png" />Run in binder</a></td>
<td><a target="_blank" href="https://colab.research.google.com/github/giswqs/earthengine-py-notebooks/blob/master/Datasets/Terrain/us_ned_physio_diversity.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a></td>
</table>
## Install Earth Engine API
Install the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geehydro](https://github.com/giswqs/geehydro). The **geehydro** Python package builds on the [folium](https://github.com/python-visualization/folium) package and implements several methods for displaying Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, `Map.centerObject()`, and `Map.setOptions()`.
The magic command `%%capture` can be used to hide output from a specific cell.
```
# %%capture
# !pip install earthengine-api
# !pip install geehydro
```
Import libraries
```
import ee
import folium
import geehydro
```
Authenticate and initialize Earth Engine API. You only need to authenticate the Earth Engine API once. Uncomment the line `ee.Authenticate()`
if you are running this notebook for the first time or if you are getting an authentication error.
```
# Authentication is only needed once per environment; Initialize must run
# in every session before any Earth Engine call.
# ee.Authenticate()
ee.Initialize()
```
## Create an interactive map
This step creates an interactive map using [folium](https://github.com/python-visualization/folium). The default basemap is the OpenStreetMap. Additional basemaps can be added using the `Map.setOptions()` function.
The optional basemaps can be `ROADMAP`, `SATELLITE`, `HYBRID`, `TERRAIN`, or `ESRI`.
```
# Interactive folium map centred on the continental US; geehydro patches
# folium.Map with the Earth-Engine-style setOptions/addLayer API.
Map = folium.Map(location=[40, -100], zoom_start=4)
Map.setOptions('HYBRID')
```
## Add Earth Engine Python script
```
# Load the CSP/ERGo physiographic-diversity image and add its single band
# ('b1', values normalised to [0, 1]) as a map layer.
dataset = ee.Image('CSP/ERGo/1_0/US/physioDiversity')
physiographicDiversity = dataset.select('b1')
physiographicDiversityVis = {
    'min': 0.0,
    'max': 1.0,
}
Map.setCenter(-94.625, 39.825, 7)
Map.addLayer(
    physiographicDiversity, physiographicDiversityVis,
    'Physiographic Diversity')
```
## Display Earth Engine data layers
```
# Enable map controls and render the map as the cell output.
Map.setControlVisibility(layerControl=True, fullscreenControl=True, latLngPopup=True)
Map
```
| github_jupyter |
```
import sys
import pickle
import numpy as np
import matplotlib.pyplot as plt
sys.path.append("../..")
import gradient_analyze as ga
import hp_file
# Load the precomputed gradient/Hessian experiment results.
filename = './results.pickle'
with open(filename, "rb") as file:
    results = pickle.load(file)
# Reference (exact) Hessian used as ground truth for the error metrics.
hess_exact = np.array([[ 0.794,  0.055,  0.109, -0.145,  0.   ],
                       [ 0.055,  0.794, -0.042,  0.056, -0.   ],
                       [ 0.109, -0.042,  0.794,  0.11 ,  0.   ],
                       [-0.145,  0.056,  0.11 ,  0.794,  0.   ],
                       [ 0.   , -0.   ,  0.   ,  0.   , -0.   ]])
# Symmetrisation correction: double the off-diagonal entries while keeping
# the diagonal (2H - diag(H)).
corr = lambda x: np.array(2 * x, dtype="float64") - np.diag(np.diag(np.array(x, dtype="float64")))
ga.calculate_new_quantity(['hess_ps'], 'hess_ps_corr', corr, results, hp_file)
ga.calculate_new_quantity(['hess_fd'], 'hess_fd_corr', corr, results, hp_file)
# Sum of squared element-wise errors against the exact Hessian.
f = lambda x: np.sum((x - hess_exact) ** 2)
ga.calculate_new_quantity(['hess_ps_corr'], 'hess_ps_err', f, results, hp_file)
ga.calculate_new_quantity(['hess_fd_corr'], 'hess_fd_err', f, results, hp_file)
results_processed = ga.avg_quantities(['hess_ps_err', 'hess_fd_err'], results, hp_file)
results_processed_accessed = ga.access_quantities(['hess_ps_err', 'hess_fd_err'], results, hp_file)
# Plot MSE vs finite-difference step size for one fixed shot budget,
# with +/- 1 std-dev bands for both estimators.
n_shots = [10, 20, 41, 84, 119, 242, 492, 1000, 2031,
           4125, 8192, 11938, 24245, 49239, 100000]
n_shots = n_shots[7]  # fix the budget at 1000 shots for this figure
n_shots
cols = plt.rcParams['axes.prop_cycle'].by_key()['color']
results_slice = ga.calculate_slice({"n_shots": n_shots}, results_processed)
results_slice_acc = ga.calculate_slice({"n_shots": n_shots}, results_processed_accessed)
x, y_fd = ga.make_numpy(results_slice, "h", "hess_fd_err")
x, y_ps = ga.make_numpy(results_slice, "h", "hess_ps_err")
# Per-step-size standard deviations over the repeated runs.
stds_fd = []
stds_ps = []
for h in x:
    errors = list(ga.calculate_slice({"h": h}, results_slice_acc).values())[0]
    errors_fd = errors["hess_fd_err"]
    errors_ps = errors["hess_ps_err"]
    stds_fd.append(np.std(errors_fd))
    stds_ps.append(np.std(errors_ps))
stds_fd = np.array(stds_fd)
stds_ps = np.array(stds_ps)
plt.fill_between(x, y_fd - stds_fd, y_fd + stds_fd, color=cols[0], alpha=0.2)
plt.fill_between(x, y_ps - stds_ps, y_ps + stds_ps, color=cols[1], alpha=0.2)
plt.plot(x, y_fd, label="finite-difference", c=cols[0])
plt.plot(x, y_ps, label="parameter-shift", c=cols[1])
# plt.axvline(np.pi / 2, c="black", alpha=0.4, linestyle=":")
plt.xlabel('step size', fontsize=20)
plt.ylabel('MSE', fontsize=20)
plt.xscale("log")
plt.tick_params(labelsize=15)
plt.legend()
# plt.savefig("tradeoff_1.pdf")
plt.yscale("log")
# plt.ylim(10**-5.25, 10**(-0.95))
plt.tick_params(labelsize=15)
plt.legend(fontsize=12)
# plt.title("(A)", loc="left", fontsize=15)
plt.tight_layout()
plt.savefig("ps_vs_fd_hess.pdf")
# Fit power laws (straight lines in log-log space) to the low-h and high-h
# regimes of the finite-difference MSE curve and overlay them on the data.
max_point = 8
# Low-step-size regime (shot noise dominates).
y_fit_low = np.log(y_fd[:max_point])
x_fit_low = np.log(x[:max_point])
p = np.polyfit(x_fit_low, y_fit_low, 1)
print(p[0])  # fitted power-law exponent
y_fit_low = p[0] * np.log(x) + p[1]
y_fit_low = np.exp(y_fit_low)
# Intermediate regime.
min_point = 40
max_point = 50
y_fit_high_ = np.log(y_fd[min_point:max_point])
x_fit_high_ = np.log(x[min_point:max_point])
ppp = np.polyfit(x_fit_high_, y_fit_high_, 1)
print(ppp[0])
y_fit_high_ = ppp[0] * np.log(x) + ppp[1]
y_fit_high_ = np.exp(y_fit_high_)
# Large-step-size regime (truncation error dominates).
min_point = 80
max_point = 99
y_fit_high = np.log(y_fd[min_point:max_point])
x_fit_high = np.log(x[min_point:max_point])
pp = np.polyfit(x_fit_high, y_fit_high, 1)
print(pp[0])
y_fit_high = pp[0] * np.log(x) + pp[1]
y_fit_high = np.exp(y_fit_high)
plt.plot(x, y_fd, '--bo', label="Finite difference")
plt.plot(x, y_fit_low, label="Power law fit with p={:.4f}".format(p[0]))
# plt.plot(x, y_fit_high, label="Power law fit with p={:.4f}".format(pp[0]))
plt.plot(x, y_fit_high_, label="Power law fit with p={:.4f}".format(ppp[0]))
plt.xlabel('Finite difference step size', fontsize=20)
plt.ylabel('Mean squared error', fontsize=20)
plt.xscale("log")
plt.tick_params(labelsize=15)
plt.legend()
plt.tight_layout()
plt.savefig("tradeoff_1.pdf")
plt.yscale("log")
# Fix: the upper limit was 0, which is invalid on a log-scaled axis
# (matplotlib ignores nonpositive limits); 10**0 == 1 keeps the intended
# decade-style bounds.
plt.ylim(10**-4, 10**0)
# For every shot budget, record the best (minimum-over-h) MSE of each
# estimator, plus the spread of the parameter-shift errors at its optimum.
n_shots_list = [10, 20, 41, 84, 119, 242, 492, 1000, 2031,
                4125, 8192, 11938, 24245, 49239, 100000]
errs = []       # best parameter-shift MSE per budget
err_fds = []    # best finite-difference MSE per budget
errs_vars = []  # std-dev of parameter-shift errors at the optimal h
for n_shots in n_shots_list:
    results_slice = ga.calculate_slice({"n_shots": n_shots}, results_processed)
    results_slice_acc = ga.calculate_slice({"n_shots": n_shots}, results_processed_accessed)
    x, y_fd = ga.make_numpy(results_slice, "h", "hess_fd_err")
    x, y_ps = ga.make_numpy(results_slice, "h", "hess_ps_err")
    # opt_arg/opt_x from pi/2 are immediately overwritten below by the
    # empirical argmin — kept for reference.
    opt_arg = np.argmin(np.abs(x - np.pi / 2))
    opt_x = x[opt_arg]
    err = np.min(y_ps)
    opt_x = x[np.argmin(y_ps)]
    results_slice_acc_h = ga.calculate_slice({"h": opt_x}, results_slice_acc)
    results_slice_acc_h = list(results_slice_acc_h.values())[0]["hess_ps_err"]
    errs.append(err)
    err_fd = np.min(y_fd)
    err_fds.append(err_fd)
    errs_vars.append(np.std(results_slice_acc_h))
errs = np.array(errs)
errs_vars = np.array(errs_vars)
# Fit a power law to the best-MSE-vs-shots curve and plot both estimators.
min_point = 0
max_point = -1
y_fit_high = np.log(errs[min_point:max_point])
x_fit_high = np.log(n_shots_list[min_point:max_point])
pp = np.polyfit(x_fit_high, y_fit_high, 1)
print(pp[0])  # fitted exponent of MSE vs N
# NOTE(review): this evaluates the fit against `x` (the step sizes from the
# previous cell), not n_shots_list — the fitted curve is never plotted below,
# but confirm before using y_fit_high.
y_fit_high = pp[0] * np.log(x) + pp[1]
y_fit_high = np.exp(y_fit_high)
# plt.fill_between(n_shots_list, errs - errs_vars, errs + errs_vars, color=cols[0], alpha=0.2)
plt.plot(n_shots_list, err_fds, label="finite-difference")
# Fix: legend label typo "paramter-shift" -> "parameter-shift".
plt.plot(n_shots_list, errs, label="parameter-shift")
plt.xlabel('N', fontsize=20)
plt.ylabel('MSE', fontsize=20)
plt.tick_params(labelsize=15)
plt.tight_layout()
plt.yscale("log")
plt.xscale("log")
plt.tick_params(labelsize=15)
plt.legend(fontsize=12)
# plt.title("(B)", loc="left", fontsize=15)
plt.tight_layout()
plt.savefig("ps_vs_fd_N.pdf")
# # plt.ylim(10**-5.25, 10**(-0.95))
# # plt.ylim(10**-5.25, 10**(-0.95))
min_point = 0
max_point = -1
y_fit_high = np.log(errs[min_point:max_point])
x_fit_high = np.log(n_shots_list[min_point:max_point])
pp = np.polyfit(x_fit_high, y_fit_high, 1)
print(pp[0])
y_fit_high = pp[0] * np.log(x) + pp[1]
y_fit_high = np.exp(y_fit_high)
min_point = 10
max_point = -1
y_fit_high = np.log(err_fds[min_point:max_point])
x_fit_high = np.log(n_shots_list[min_point:max_point])
pp = np.polyfit(x_fit_high, y_fit_high, 1)
print(pp[0])
y_fit_high = pp[0] * np.log(x) + pp[1]
y_fit_high = np.exp(y_fit_high)
```
| github_jupyter |
# ML Pipeline Preparation
Follow the instructions below to help you create your ML pipeline.
### 1. Import libraries and load data from database.
- Import Python libraries
- Load dataset from database with [`read_sql_table`](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.read_sql_table.html)
- Define feature and target variables X and Y
```
# import libraries
import matplotlib.pyplot as plt
import nltk
import numpy as np
import pandas as pd
import seaborn as sns
from joblib import load, dump
from nltk.tokenize import word_tokenize
from nltk.stem.wordnet import WordNetLemmatizer
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from sklearn.multioutput import MultiOutputClassifier
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.svm import LinearSVC
from sqlalchemy import create_engine
# NLTK resources required by tokenize() and PosCounter below.
nltk.download("punkt")
nltk.download("wordnet")
nltk.download('averaged_perceptron_tagger')
# load data from database
database_filepath = "../disaster_response/data/DisasterResponse.db"
engine = create_engine(f"sqlite:///{database_filepath}")
# Columns 0..3 are id/message/original/genre; everything after is a target.
num_of_feature_cols = 4
df = pd.read_sql_table("DisasterResponse", engine)
X = df["message"]
Y = df[df.columns[num_of_feature_cols:]]
classes_names = Y.columns.tolist()
```
### 2. Write a tokenization function to process your text data
```
def tokenize(text):
    """Split *text* into word tokens, lower-cased and lemmatized."""
    lemmatizer = WordNetLemmatizer()
    return [lemmatizer.lemmatize(word.lower()) for word in word_tokenize(text)]
```
### 3. Build a machine learning pipeline
This machine learning pipeline should take in the `message` column as input and output classification results on the other 36 categories in the dataset. You may find the [MultiOutputClassifier](http://scikit-learn.org/stable/modules/generated/sklearn.multioutput.MultiOutputClassifier.html) helpful for predicting multiple target variables.
```
# Bag-of-words -> TF-IDF -> one random forest per target category.
pipeline = Pipeline([
    ("c_vect", CountVectorizer(tokenizer=tokenize, ngram_range=(1, 2), max_df=0.95)),
    ("tfidf", TfidfTransformer(use_idf=True, smooth_idf=True)),
    ("clf", MultiOutputClassifier(RandomForestClassifier(verbose=1, n_jobs=6))),
])
```
### 4. Train pipeline
- Split data into train and test sets
- Train pipeline
```
# 80/20 split with a fixed seed so later cells reproduce the same split.
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=42)
pipeline.fit(X_train, Y_train)
```
### 5. Test your model
Report the f1 score, precision and recall for each output category of the dataset. You can do this by iterating through the columns and calling sklearn's `classification_report` on each.
```
def extract_macro_avg(report):
    """Parse the macro-average f1-score out of a classification_report string.

    Returns the f1 value from the first "macro avg" row, or None when the
    report contains no such row.
    """
    macro_row = next((line for line in report.split("\n") if "macro avg" in line), None)
    if macro_row is None:
        return None
    # Row layout: ['macro', 'avg', precision, recall, f1-score, support]
    return float(macro_row.strip().split()[4])
def show_scores(predicted_values):
    """Print a per-category classification report and the overall macro-f1.

    Uses the module-level ``Y_test`` and ``classes_names``.

    Fix: the original ignored its ``predicted_values`` argument and read the
    global ``Y_pred`` instead, so passing different predictions had no
    effect; the parameter is now actually used (existing callers already
    pass ``Y_pred``, so behaviour for them is unchanged).
    NOTE(review): iteration starts at index 1, which skips the first
    category — that looks unintentional; confirm before changing.
    """
    macro_avg_list = []
    for i in range(1, len(classes_names)):
        report = classification_report(Y_test.iloc[:, i].values, predicted_values[:, i], zero_division=1)
        macro_avg_list.append(extract_macro_avg(report))
        print("Category:", classes_names[i], "\n", report)
    overall_avg_score = sum(macro_avg_list) / len(macro_avg_list)
    # Fix: message typo "Overral" -> "Overall".
    print(f"Overall average score: {overall_avg_score:.3}")
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=42)
# NOTE(review): `model` is only defined in a later cell (section 7) — this
# cell fails with NameError if the notebook is run strictly top-to-bottom.
Y_pred = model.predict(X_test)
show_scores(Y_pred)
```
### 6. Improve your model
Use grid search to find better parameters.
```
# Hyper-parameter grid over the vectorizer, TF-IDF and forest size.
parameters = {
    "c_vect__ngram_range": ((1, 1), (1, 2)),
    "c_vect__max_df": (0.75, 0.90, 1.0),
    "c_vect__max_features": (5000, 10000),
    "tfidf__use_idf": (True, False),
    "tfidf__smooth_idf": (True, False),
    "tfidf__sublinear_tf": (True, False),
    "clf__estimator__n_estimators": [300],
}
cv = GridSearchCV(pipeline, parameters)
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=42)
### BE CAREFUL! This may take a very very long time to fit ###
#cv.fit(X_train, Y_train)
```
### 7. Test your model
Show the accuracy, precision, and recall of the tuned model.
Since this project focuses on code quality, process, and pipelines, there is no minimum performance metric needed to pass. However, make sure to fine tune your models for accuracy, precision and recall to make your project stand out - especially for your portfolio!
```
# Evaluate a previously trained/pickled model on the same fixed split.
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=42)
model = load("../disaster_response/models/model.pkl")
Y_pred = model.predict(X_test)
show_scores(Y_pred)
```
### 8. Try improving your model further. Here are a few ideas:
* try other machine learning algorithms
* add other features besides the TF-IDF
```
from sklearn.base import BaseEstimator, TransformerMixin
class PosCounter(BaseEstimator, TransformerMixin):
    """Transformer that adds verb/noun/adjective counts for each message."""

    # Penn-Treebank tag prefix -> coarse part-of-speech label.
    _TAG_LABELS = {"V": "verb", "N": "noun", "J": "adjective"}

    def pos_tagger(self, text):
        """Return the coarse POS label of every verb/noun/adjective in *text*."""
        labels = []
        for _, tag in nltk.pos_tag(tokenize(text)):
            label = self._TAG_LABELS.get(tag[0])
            if label is not None:
                labels.append(label)
        return labels

    def count_pos(self, l, pos):
        """Count how many times *pos* occurs in the label list *l*."""
        return l.count(pos)

    def fit(self, X, y=None):
        """No-op fit; the transformer is stateless."""
        return self

    def transform(self, X):
        """Map a message Series to a frame of verb/noun/adjective counts.

        NOTE(review): assumes the input Series is named "message" — the
        intermediate frame's single column inherits that name.
        """
        counts = pd.DataFrame(X.apply(self.pos_tagger))
        for pos in ("verb", "noun", "adjective"):
            counts[pos] = counts["message"].apply(lambda labels: self.count_pos(labels, pos))
        return counts.drop(columns="message")
# Smoke-test the transformer on the raw messages.
pc = PosCounter()
pc.transform(X)
# Combine the TF-IDF features with the POS counts and switch to LinearSVC.
pipeline = Pipeline([
    ("features", FeatureUnion([
        ("nlp_pipeline", Pipeline([
            ("c_vect", CountVectorizer(tokenizer=tokenize, ngram_range=(1, 2), max_df=0.95)),
            ("tfidf", TfidfTransformer(use_idf=True, smooth_idf=True)),
        ])),
        ("pos_counter", PosCounter()),
    ])),
    ("clf", MultiOutputClassifier(LinearSVC(verbose=2, max_iter=100000))),
])
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=42)
pipeline.fit(X_train, Y_train)
# Same seed, so this reproduces the split used for training above.
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=42)
Y_pred = pipeline.predict(X_test)
show_scores(Y_pred)
```
### 9. Export your model as a pickle file
### 10. Use this notebook to complete `train.py`
Use the template file attached in the Resources folder to write a script that runs the steps above to create a database and export a model based on a new dataset specified by the user.
| github_jupyter |
# Dowloading data
We'll use a shell command to download the zipped data and unzip it into our working directory (folder).
```
!wget "https://docs.google.com/uc?export=download&id=1h3YjfecYS8vJ4yXKE3oBwg3Am64kN4-x" -O temp.zip && unzip -o temp.zip && rm temp.zip
```
# Importing and Cleaning the Data
```
import pandas as pd # aliasing for convenience
```
## Importing data one file at a time
### Importing 2015 data
```
# Load the 2015 happiness report and peek at both ends of the table.
df = pd.read_csv('happiness_report/2015.csv') # loading the data to a variable called "df"
df.head(3) # looking at the first 3 rows
df.tail(2) # looking at the last 2 rows
```
#### adding a year column
To add a column we can use the syntax:
`df['new_col_name'] = values`
**note**: if there was a column with the same name, it would be overwritten
```
# Tag every 2015 row with its year (broadcast scalar assignment).
df['year'] = 2015 # adding a column
df
```
### Importing 2016 data
```
# Same treatment for the 2016 report.
df_2016 = pd.read_csv('happiness_report/2016.csv')
df_2016['year'] = 2016
```
### merging (stacking vertically) the two dataframes
**note** if a column exists in one dataframe but not in the other, the values for the latter will be set to NaN (empty value)
```
# Stack the two yearly frames vertically; mismatched columns become NaN.
list_of_df_to_merge = [df, df_2016]
df_merged = pd.concat(list_of_df_to_merge)
df_merged
```
## Interaction with the filesystem
```
# python library for OperatingSystem interaction
# python library for OperatingSystem interaction
import os
# list of files under the specified folder
os.listdir('happiness_report')
# getting the full path given the folder and file
os.path.join('happiness_report','2019.csv')
```
## Loading and combining data from all files
We will:
- initialise an empty list of dataframes
- loop over the content of the `happiness_report` folder
- get the filepath from the filename and folder name
- load the data from the filepath
- add a column to the dataframe so we can keep track of which file the data belongs to
- add the dataframe to the list
- merge all the dataframes (vertically)
```
# Load every yearly CSV, tag each frame with its source filename, then
# stack them into one frame.
fld_name = 'happiness_report'
df_list = []
for filename in os.listdir(fld_name):
    filepath = os.path.join(fld_name, filename)
    df = pd.read_csv(filepath)
    print(filename, ':', df.columns) # printing the column name for the file
    df['filename'] = filename
    df_list.append(df)
df_merged = pd.concat(df_list)
```
## Data cleaning
Because of inconsistency over the years of reporting, we need to do some data cleaning:
- we want a `year` column which we can get from the filename
- there are different naming for the Happiness score over the years: `Happiness Score`, `Happiness.Score`, `Score`. We want to unify them into one column.
- the country column has the same issue: `Country`, `Country or region`
```
# `filename` column is a text (string) column, so we can use string methods to edit it
column_of_string_pairs = df_merged['filename'].str.split('.') # '2015.csv' is now ['2015', 'csv']
# selecting only the fist element for each list
column_year_string = column_of_string_pairs.str[0] # ['2015', 'csv'] is now '2015'
# converting the string to an integer (number)
column_of_years = (column_year_string).astype(int) # '2015' (string) is now 2015 (number)
df_merged['year'] = column_of_years
```
To fix the issue of change in naming, we can use:
`colA.fillna(colB)`
which checks if there are any empty values in `colA` and fills them with the values in `colB` for the same row.
```
# checks if there are any empty values in colA and fills them with the values in colB for the same row
df_merged['Happiness Score'] = df_merged['Happiness Score'].fillna(df_merged['Happiness.Score']).fillna(df_merged['Score'])
df_merged['Country'] = df_merged['Country or region'].fillna(df_merged['Country'])
```
## Data Reshaping and Plotting
### Trends of Happiness and Generosity over the years
We'll:
- select only the columns we care about
- group the data by `year` and take the mean
- plot the Happiness and Generosity (in separate plots)
```
# Mean Happiness and Generosity per year, shown as two stacked line charts.
df_subset = df_merged[['year', 'Happiness Score', 'Generosity']]
mean_by_year = df_subset.groupby('year').mean()
mean_by_year
mean_by_year.plot(subplots=True, grid=True)
# `subplots=True` will plot the two columns in two separate charts
# `grid=True` will add the axis grid in the background
```
### Average Generosity and Happiness by year AND Country
We'll:
- select only the columns we care about
- group the data by `Country` and `year`
- take the mean
```
# Average metrics grouped by (Country, year) -> a 2-level index.
df = df_merged[['year', 'Happiness Score', 'Generosity', 'Country']]
mean_by_country_and_year = df.groupby(['Country', 'year']).mean()
mean_by_country_and_year
```
#### Finding the countries and years with highest and lowest Happiness
```
# idxmax/idxmin return the (Country, year) index label of the extreme row.
mean_by_country_and_year['Happiness Score'].idxmax() # highest
mean_by_country_and_year['Happiness Score'].idxmin() # lowest
```
#### Happiness by Country and Year
```
happiness_column = mean_by_country_and_year['Happiness Score']
# turning the single column with 2d-index into a table by moving the inner index to columns
happiness_table = happiness_column.unstack()
happiness_table
# for each year, plotting the values in each country
happiness_table.plot(figsize=(20,5),grid=True)
```
# (FYI) Interactive Chart
You can also create interactive charts by using a different library (bokeh).
for more examples: https://colab.research.google.com/notebooks/charts.ipynb
```
# Interactive bokeh line chart of UK happiness over the years.
uk_happiness = happiness_column['United Kingdom']
from bokeh.plotting import figure, output_notebook, show
output_notebook()
x = uk_happiness.index   # years
y = uk_happiness.values  # happiness scores
fig = figure(title="UK Happiness", x_axis_label='x', y_axis_label='y')
fig.line(x, y, legend_label="UK", line_width=2)
show(fig)
```
| github_jupyter |
```
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder, MinMaxScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from imblearn.over_sampling import SMOTE
# Load the four regional playoff team-stat tables and stack them.
lcs_teams = pd.read_csv('../datasets/summer-playoffs/lcs/LCS 2020 Summer Playoffs - Team Stats - OraclesElixir.csv')
lck_teams = pd.read_csv('../datasets/summer-playoffs/lck/LCK 2020 Summer Playoffs - Team Stats - OraclesElixir.csv')
lec_teams = pd.read_csv('../datasets/summer-playoffs/lec/LEC 2020 Summer Playoffs - Team Stats - OraclesElixir.csv')
lpl_teams = pd.read_csv('../datasets/summer-playoffs/lpl/LPL 2020 Summer Playoffs - Team Stats - OraclesElixir.csv')
summer_teams = pd.concat([lcs_teams,lck_teams,lec_teams,lpl_teams], axis=0, ignore_index=True)
summer_teams
#checking missing values
summer_teams.isnull().sum()
#Replacing missing values with '0%'
summer_teams.fillna('0%', inplace=True)
#split the dataframe by categorical and numerical variables
st_cat = summer_teams.select_dtypes(object)
st_num = summer_teams.select_dtypes(np.number)
print(st_num.head())
print(st_cat.head())
winners_teams = ['Team SoloMid','G2 Esports','Gen.G','LGD Gaming'] #winners from respective region
# Binary target: 1 if the team won its regional playoffs.
winners = lambda x: 1 if x in winners_teams else 0
st_cat['WINNER'] = st_cat['Team'].apply(winners) #create label
st_cat
st_cat = st_cat.drop('Team', axis=1)
st_cat
y = st_cat['WINNER'] #the label we want to predict
st_cat = st_cat.drop('WINNER', axis=1)
#remove '%' symbol
def transform_percent(df):
for c in range(len(df.columns)):
for l in range(df.shape[0]):
df.iloc[:,c][l] = df.iloc[:,c][l][:-1]
return df
# Convert percentage strings to floats and rejoin with the numeric columns.
transform_percent(st_cat)
st_cat
#transform str features into float
st_cat = st_cat.astype('float64')
st_f = pd.concat([st_cat,st_num],axis=1)
st_f
# Drop raw win/loss/kill tallies that leak the outcome.
st_f = st_f.drop(['GP','W','L','K','D'], axis=1)
st_f
#plotting correlation
plt.subplots(figsize=(15,10))
sns.heatmap(st_f.corr(),annot = True,cmap='coolwarm');
#droping high correlated features
st_f = st_f.drop(['EGR','GD15'], axis=1)
X = st_f
# Oversample the minority (winner) class before the split.
smt = SMOTE(k_neighbors=3)
X, y = smt.fit_sample(X, y)
X_train, X_val, y_train ,y_val = train_test_split(X, y, test_size = 0.2, random_state = 42)
X_train.head()
print(X_train.shape)
print(X_val.shape)
print(y_train.shape)
print(y_val.shape)
# Scale features to [0, 1] and fit three baseline classifiers.
scaler = MinMaxScaler()
X_train = scaler.fit_transform(X_train)
# Fix: the original called fit_transform on the validation set, re-fitting
# the scaler on X_val (data leakage — train and validation end up on
# inconsistent scales).  Validation data must be transformed with the
# statistics learned from the training set.
X_val = scaler.transform(X_val)
LR = LogisticRegression()
DT = DecisionTreeClassifier(random_state=42)
RF = RandomForestClassifier(n_estimators=100, random_state=42)
LR_fit = LR.fit(X_train, y_train)
DT_fit = DT.fit(X_train, y_train)
RF_fit = RF.fit(X_train, y_train)
LR_pred = LR_fit.predict(X_val)
DT_pred = DT_fit.predict(X_val)
RF_pred = RF_fit.predict(X_val)
print("Logistic Regression accuracy: %f " % (accuracy_score(LR_pred, y_val)*100))
print("Decision Tree accuracy: %f " % (accuracy_score(DT_pred, y_val)*100))
print("Random Forest accuracy: %f " % (accuracy_score(RF_pred, y_val)*100))
print(classification_report(RF_pred, y_val))
# Apply the trained random forest to the Worlds play-in teams.
# NOTE(review): earlier cells read from '../datasets/...' — this path has no
# leading '../'; confirm which layout is correct.
worlds_teams = pd.read_csv('datasets/worlds/Worlds 2020 Play-In - Team Stats - OraclesElixir.csv')
worlds_teams
# Drop the same leakage/correlated columns as in training.
wt = worlds_teams.drop(['Team','GP','W','L','K','D','EGR','GD15'], axis=1)
wt.isnull().sum()
wt.fillna('0%', inplace=True)
wt_cat = wt.select_dtypes(object)
wt_num = wt.select_dtypes(np.number)
wt_cat
transform_percent(wt_cat)
wt_cat = wt_cat.astype('float64')
wt_f = pd.concat([wt_cat,wt_num],axis=1)
wt_f
# Fix: the original called fit_transform here, rescaling with the test
# set's own min/max and making these features incomparable with the
# training scale; use the scaler fitted on the training data.
X_test = scaler.transform(wt_f)
RF_predw = RF_fit.predict(X_test)
pred = pd.concat([worlds_teams['Team'],pd.DataFrame(RF_predw)],axis=1)
pred
```
| github_jupyter |
```
%load_ext autoreload
%autoreload 2
%matplotlib inline
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.cluster import KMeans
from sklearn.svm import SVC
from sklearn.metrics import roc_auc_score, roc_curve
from mlxtend.plotting import plot_decision_regions
from sklearn import preprocessing
from sklearn.linear_model import LogisticRegression
import warnings
import numpy as np
from collections import OrderedDict
from lob_data_utils import lob, db_result, overview
from lob_data_utils.svm_calculation import lob_svm
sns.set_style('whitegrid')
warnings.filterwarnings('ignore')
# Load train/cross-validation/test splits of the prepared order-book data
# for one stock.
data_length = 10000
stock = '11869'
df, df_cv, df_test = lob.load_prepared_data(
    stock, data_dir='../queue_imbalance/data/prepared', cv=True, length=data_length)
```
## Logistic
```
# Fit logistic regression on queue imbalance and inspect ROC + learning curve.
log_clf = lob.logistic_regression(df, 0, len(df))
pred_train = log_clf.predict(df['queue_imbalance'].values.reshape(-1, 1))
pred_test = log_clf.predict(df_test['queue_imbalance'].values.reshape(-1, 1))
df['pred_log'] = pred_train
df_test['pred_log'] = pred_test
lob.plot_roc(df, log_clf, stock=int(stock), label='train')
lob.plot_roc(df_test, log_clf, stock=int(stock), label='test')
plt.figure(figsize=(16,2))
# 1-D scatter of the feature, coloured by the true label.
plt.scatter(df['queue_imbalance'], np.zeros(len(df)), c=df['mid_price_indicator'])
print(len(df[df['mid_price_indicator'] ==1]), len(df))
lob.plot_learning_curve(log_clf, df['queue_imbalance'].values.reshape(-1, 1), df['mid_price_indicator'])
```
### Let's look inside
```
# Inspect the misclassified test rows and locate the decision threshold
# (pivot) the logistic model effectively learned on queue imbalance.
df_test[df_test['pred_log'] != df_test['mid_price_indicator']][['pred_log', 'mid_price_indicator']].plot(kind='kde')
print(len(df_test[df_test['pred_log'] != df_test['mid_price_indicator']]), len(df_test))
df_test[df_test['pred_log'] != df_test['mid_price_indicator']][['pred_log', 'mid_price_indicator', 'queue_imbalance']].head()
# Smallest feature value predicted positive == the learned cut point.
pivot = min(df[df['pred_log'] == 1]['queue_imbalance'])
pivot
print('Amount of positive samples below the pivot and negative above the pivot for training data:')
print(len(df[df['queue_imbalance'] < pivot][df['pred_log'] == 1]),
      len(df[df['queue_imbalance'] >= pivot][df['pred_log'] == 0]))
print('Amount of positive samples below the pivot and negative above the pivot for testing data:')
print(len(df_test[df_test['queue_imbalance'] < pivot][df_test['pred_log'] == 1]),
      len(df_test[df_test['queue_imbalance'] >= pivot][df_test['pred_log'] == 0]))
```
So this classifier just finds a pivot. But why is this particular one chosen? Let's check what amount of data is below and above the pivot.
```
# Sample counts and feature densities on each side of the pivot.
len(df[df['queue_imbalance'] < pivot]), len(df[df['queue_imbalance'] >= pivot])
df[df['queue_imbalance'] < pivot]['queue_imbalance'].plot(kind='kde')
df[df['queue_imbalance'] >= pivot]['queue_imbalance'].plot(kind='kde')
df['queue_imbalance'].plot(kind='kde')
```
## SVM
```
# Sweep SVM hyper-parameters (gamma, C, coef0) and rank by CV ROC-AUC.
overview_data = overview.Overview(stock, data_length)
gammas = [0.0005, 0.005, 1, 5, 50, 500, 5000]
cs = [0.0005, 0.005, 1, 5.0, 50, 500, 1000]
coef0s = [0, 0.0005, 0.005, 1, 5, 50, 500, 5000]
df_svm_res = overview_data.write_svm_results(df, df_cv, gammas=gammas, cs=cs, coef0s=coef0s)
# Drop index-artifact columns produced by the CSV round-trip.
unnamed_columns = [c for c in df_svm_res.columns if 'Unnamed' in c]
df_svm_res.drop(columns=unnamed_columns, inplace=True)
df_svm_res.sort_values(by='roc_cv_score').head()
df_svm_res[df_svm_res['svm'] == 'linear'].sort_values(by='roc_cv_score', ascending=False).head()
df_svm_res.sort_values(by='roc_cv_score', ascending=False).head()
```
## Different kernels visualization
```
# Visualise the sigmoid-kernel SVM's decision region over the 1-D feature.
X = df[['queue_imbalance']].values
y = df['mid_price_indicator'].values.astype(np.integer)
clf = SVC(kernel='sigmoid', C=0.005, gamma=500, coef0=5.0)
clf.fit(df[['queue_imbalance']], df['mid_price_indicator'])
df['pred'] = clf.predict(df[['queue_imbalance']])
plt.figure(figsize=(16,2))
plot_decision_regions(X, y, clf=clf, legend='data')
plt.xlabel('')
plt.xlim(-1, 1)
plt.title('Sigmoid Kernel')
plt.legend()
# Effective decision boundary (min positive / max negative) plus accuracy.
min(df[df['pred'] == 1]['queue_imbalance']), max(df[df['pred'] == 0]['queue_imbalance']), clf.score(X, y)
# Same visualisation for the RBF kernel.
X = df[['queue_imbalance']].values
y = df['mid_price_indicator'].values.astype(np.integer)
clf = SVC(kernel='rbf', C=0.005, gamma=50)
clf.fit(df[['queue_imbalance']], df['mid_price_indicator'])
df['pred'] = clf.predict(df[['queue_imbalance']])
plt.figure(figsize=(16,2))
plot_decision_regions(X, y, clf=clf, legend='data')
plt.xlim(-1, 1)
plt.xlabel('')
plt.title('Rbf')
plt.legend()
min(df[df['pred'] == 1]['queue_imbalance']), max(df[df['pred'] == 0]['queue_imbalance']), clf.score(X, y)
# Same visualisation for the linear kernel.
X = df[['queue_imbalance']].values
y = df['mid_price_indicator'].values.astype(np.integer)
clf = SVC(kernel='linear', C=0.005)
clf.fit(df[['queue_imbalance']], df['mid_price_indicator'])
df['pred'] = clf.predict(df[['queue_imbalance']])
plt.figure(figsize=(16,2))
plot_decision_regions(X, y, clf=clf, legend='data')
plt.xlabel('')
plt.xlim(-1, 1)
plt.title('Linear')
plt.legend()
min(df[df['pred'] == 1]['queue_imbalance']), max(df[df['pred'] == 0]['queue_imbalance']), clf.score(X, y)
## Some plotly visualizations
# Contour of an RBF SVM's decision regions with the samples overlaid.
# NOTE(review): X as defined in the cells above has a single feature
# (shape (n, 1)), so X[:, 1] below would raise IndexError — this cell
# presumably expects a 2-feature X; confirm the intended input.
import plotly.offline as py
import plotly.figure_factory as ff
import plotly.graph_objs as go
from plotly import tools
from itertools import product
py.init_notebook_mode(connected=True)
titles = ['s']
clf1 = SVC(kernel='rbf')
clf1.fit(X, y)
fig = tools.make_subplots(rows=1, cols=1,
                          print_grid=False,
                          subplot_titles=titles)
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.1),
                     np.arange(y_min, y_max, 0.1))
y_ = np.arange(y_min, y_max, 0.1)
# Fix: the original predicted with the stale `clf` from an earlier cell;
# `clf1` is the model actually fitted here.
Z = clf1.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
trace1 = go.Contour(x=xx[0], y=y_,
                    z=Z,
                    colorscale=[[0, 'purple'],
                                [0.5, 'cyan'],
                                [1, 'pink']],
                    opacity=0.5,
                    showscale=False)
trace2 = go.Scatter(x=X[:, 0], y=X[:, 1],
                    showlegend=False,
                    mode='markers',
                    marker=dict(
                        color=y,
                        line=dict(color='black', width=1)))
fig.append_trace(trace1, 1, 1)
fig.append_trace(trace2, 1, 1)
fig['layout'].update(hovermode='closest')
# Fix: `fig['layout'][x]` raised NameError (`x` here is the data array, not
# an axis key); the subplot's x-axis key is 'xaxis1'.
fig['layout']['xaxis1'].update(showgrid=False, zeroline=False)
# fig['layout']['yaxis1'].update(showgrid=False, zeroline=False)
py.iplot(fig)
py.init_notebook_mode(connected=True)
from ipywidgets import interact, interactive, fixed, interact_manual, widgets
@interact(C=[1,2,3], gamma=[1,2,3], coef0=[1,2,3])
def _plot_lob(C, gamma, coef0):
    """Interactively refit an RBF SVM for the chosen C/gamma and redraw its
    decision regions with plotly.

    Note: coef0 is exposed as a widget but is ignored by the RBF kernel.
    """
    py_config = {'displayModeBar': False, 'showLink': False, 'editable': False}
    titles = ['s']
    clf1 = SVC(kernel='rbf', C=C, gamma=gamma)
    clf1.fit(X, y)
    fig = tools.make_subplots(rows=1, cols=1,
                              print_grid=False,
                              subplot_titles=titles)
    # Dense evaluation grid padded by 1 unit on each side of the data.
    # NOTE(review): X[:, 1] assumes X has two feature columns; earlier cells
    # build X from the single 'queue_imbalance' column — confirm before use.
    x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.1),
                         np.arange(y_min, y_max, 0.1))
    y_ = np.arange(y_min, y_max, 0.1)
    # BUG FIX: predict with the freshly fitted clf1 — the original called the
    # stale global `clf`, so moving the C/gamma sliders had no visible effect.
    Z = clf1.predict(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    trace1 = go.Contour(x=xx[0], y=y_,
                        z=Z,
                        colorscale=[[0, 'purple'],
                                    [0.5, 'cyan'],
                                    [1, 'pink']],
                        opacity=0.5,
                        showscale=False)
    trace2 = go.Scatter(x=X[:, 0], y=X[:, 1],
                        showlegend=False,
                        mode='markers',
                        marker=dict(
                            color=y,
                            line=dict(color='black', width=1)))
    fig.append_trace(trace1, 1, 1)
    fig.append_trace(trace2, 1, 1)
    fig['layout'].update(hovermode='closest')
    # BUG FIX: the original indexed fig['layout'] with the undefined name `x`
    # (a NameError); subplot axes are keyed by string names.
    fig['layout']['xaxis'].update(showgrid=False, zeroline=False)
    # fig['layout']['yaxis'].update(showgrid=False, zeroline=False)
    py.iplot(fig)
```
| github_jupyter |
# TensorFlow Tutorial
Welcome to this week's programming assignment. Until now, you've always used numpy to build neural networks. Now we will step you through a deep learning framework that will allow you to build neural networks more easily. Machine learning frameworks like TensorFlow, PaddlePaddle, Torch, Caffe, Keras, and many others can speed up your machine learning development significantly. All of these frameworks also have a lot of documentation, which you should feel free to read. In this assignment, you will learn to do the following in TensorFlow:
- Initialize variables
- Start your own session
- Train algorithms
- Implement a Neural Network
Programming frameworks can not only shorten your coding time, but sometimes also perform optimizations that speed up your code.
## <font color='darkblue'>Updates</font>
#### If you were working on the notebook before this update...
* The current notebook is version "v3b".
* You can find your original work saved in the notebook with the previous version name (it may be either "TensorFlow Tutorial version 3" or "TensorFlow Tutorial version 3a").
* To view the file directory, click on the "Coursera" icon in the top left of this notebook.
#### List of updates
* forward_propagation instruction now says 'A1' instead of 'a1' in the formula for Z2;
and are updated to say 'A2' instead of 'Z2' in the formula for Z3.
* create_placeholders instruction refer to the data type "tf.float32" instead of float.
* in the model function, the x axis of the plot now says "iterations (per fives)" instead of iterations(per tens)
* In the linear_function, comments remind students to create the variables in the order suggested by the starter code. The comments are updated to reflect this order.
* The test of the cost function now creates the logits without passing them through a sigmoid function (since the cost function will include the sigmoid in the built-in tensorflow function).
* In the 'model' function, the minibatch_cost is now divided by minibatch_size (instead of num_minibatches).
* Updated print statements and 'expected output that are used to check functions, for easier visual comparison.
## 1 - Exploring the Tensorflow Library
To start, you will import the library:
```
import math
import numpy as np
import h5py
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.python.framework import ops
from tf_utils import load_dataset, random_mini_batches, convert_to_one_hot, predict
%matplotlib inline
np.random.seed(1)
```
Now that you have imported the library, we will walk you through its different applications. You will start with an example, where we compute for you the loss of one training example.
$$loss = \mathcal{L}(\hat{y}, y) = (\hat y^{(i)} - y^{(i)})^2 \tag{1}$$
```
# Minimal TF1 example: define a computation graph for a squared-error loss,
# then evaluate it inside a session.
y_hat = tf.constant(36, name='y_hat')            # Define y_hat constant. Set to 36.
y = tf.constant(39, name='y')                    # Define y. Set to 39

loss = tf.Variable((y - y_hat)**2, name='loss')  # Create a variable for the loss

init = tf.global_variables_initializer()         # When init is run later (session.run(init)),
                                                 # the loss variable will be initialized and ready to be computed
with tf.Session() as session:                    # Create a session and print the output
    session.run(init)                            # Initializes the variables
    print(session.run(loss))                     # Prints the loss: (39 - 36)**2 = 9
```
Writing and running programs in TensorFlow has the following steps:
1. Create Tensors (variables) that are not yet executed/evaluated.
2. Write operations between those Tensors.
3. Initialize your Tensors.
4. Create a Session.
5. Run the Session. This will run the operations you'd written above.
Therefore, when we created a variable for the loss, we simply defined the loss as a function of other quantities, but did not evaluate its value. To evaluate it, we had to run `init=tf.global_variables_initializer()`. That initialized the loss variable, and in the last line we were finally able to evaluate the value of `loss` and print its value.
Now let us look at an easy example. Run the cell below:
```
a = tf.constant(2)
b = tf.constant(10)
c = tf.multiply(a,b)
print(c)
```
As expected, you will not see 20! You got a tensor saying that the result is a tensor that does not have the shape attribute, and is of type "int32". All you did was put in the 'computation graph', but you have not run this computation yet. In order to actually multiply the two numbers, you will have to create a session and run it.
```
sess = tf.Session()
print(sess.run(c))
```
Great! To summarize, **remember to initialize your variables, create a session and run the operations inside the session**.
Next, you'll also have to know about placeholders. A placeholder is an object whose value you can specify only later.
To specify values for a placeholder, you can pass in values by using a "feed dictionary" (`feed_dict` variable). Below, we created a placeholder for x. This allows us to pass in a number later when we run the session.
```
# Change the value of x in the feed_dict
x = tf.placeholder(tf.int64, name = 'x')
print(sess.run(2 * x, feed_dict = {x: 3}))
sess.close()
```
When you first defined `x` you did not have to specify a value for it. A placeholder is simply a variable that you will assign data to only later, when running the session. We say that you **feed data** to these placeholders when running the session.
Here's what's happening: When you specify the operations needed for a computation, you are telling TensorFlow how to construct a computation graph. The computation graph can have some placeholders whose values you will specify only later. Finally, when you run the session, you are telling TensorFlow to execute the computation graph.
### 1.1 - Linear function
Lets start this programming exercise by computing the following equation: $Y = WX + b$, where $W$ and $X$ are random matrices and b is a random vector.
**Exercise**: Compute $WX + b$ where $W, X$, and $b$ are drawn from a random normal distribution. W is of shape (4, 3), X is (3,1) and b is (4,1). As an example, here is how you would define a constant X that has shape (3,1):
```python
X = tf.constant(np.random.randn(3,1), name = "X")
```
You might find the following functions helpful:
- tf.matmul(..., ...) to do a matrix multiplication
- tf.add(..., ...) to do an addition
- np.random.randn(...) to initialize randomly
```
# GRADED FUNCTION: linear_function

def linear_function():
    """
    Implements a linear function:
            Initializes X to be a random tensor of shape (3,1)
            Initializes W to be a random tensor of shape (4,3)
            Initializes b to be a random tensor of shape (4,1)
    Returns:
    result -- runs the session for Y = WX + b
    """

    np.random.seed(1)

    """
    Note, to ensure that the "random" numbers generated match the expected results,
    please create the variables in the order given in the starting code below.
    (Do not re-arrange the order).
    """
    ### START CODE HERE ### (4 lines of code)
    # X, W and b are plain numpy arrays; tf.matmul/tf.add convert them into
    # constant tensors when the graph is built. Creation order matters because
    # each call advances the seeded RNG stream.
    X = np.random.randn(3, 1)
    W = np.random.randn(4, 3)
    b = np.random.randn(4, 1)
    # Y is a graph node, not a value — it is evaluated by sess.run below.
    Y = tf.add(tf.matmul(W, X), b)
    ### END CODE HERE ###

    # Create the session using tf.Session() and run it with sess.run(...) on the variable you want to calculate
    ### START CODE HERE ###
    sess = tf.Session()
    result = sess.run(Y)
    ### END CODE HERE ###

    # close the session
    sess.close()

    return result
print( "result = \n" + str(linear_function()))
```
*** Expected Output ***:
```
result =
[[-2.15657382]
[ 2.95891446]
[-1.08926781]
[-0.84538042]]
```
### 1.2 - Computing the sigmoid
Great! You just implemented a linear function. Tensorflow offers a variety of commonly used neural network functions like `tf.sigmoid` and `tf.softmax`. For this exercise lets compute the sigmoid function of an input.
You will do this exercise using a placeholder variable `x`. When running the session, you should use the feed dictionary to pass in the input `z`. In this exercise, you will have to (i) create a placeholder `x`, (ii) define the operations needed to compute the sigmoid using `tf.sigmoid`, and then (iii) run the session.
** Exercise **: Implement the sigmoid function below. You should use the following:
- `tf.placeholder(tf.float32, name = "...")`
- `tf.sigmoid(...)`
- `sess.run(..., feed_dict = {x: z})`
Note that there are two typical ways to create and use sessions in tensorflow:
**Method 1:**
```python
sess = tf.Session()
# Run the variables initialization (if needed), run the operations
result = sess.run(..., feed_dict = {...})
sess.close() # Close the session
```
**Method 2:**
```python
with tf.Session() as sess:
# run the variables initialization (if needed), run the operations
result = sess.run(..., feed_dict = {...})
# This takes care of closing the session for you :)
```
```
# GRADED FUNCTION: sigmoid

def sigmoid(z):
    """
    Computes the sigmoid of z

    Arguments:
    z -- input value, scalar or vector

    Returns:
    results -- the sigmoid of z
    """

    ### START CODE HERE ### ( approx. 4 lines of code)
    # Create a placeholder for x. Name it 'x'.
    x = tf.placeholder(tf.float32,name = 'x')

    # compute sigmoid(x) — this only adds a node to the graph; nothing is
    # evaluated until sess.run below.
    sigmoid = tf.sigmoid(x)

    # Create a session, and run it. Please use the method 2 explained above.
    # You should use a feed_dict to pass z's value to x.
    with tf.Session() as sess :
        # Run session and call the output "result"; z is fed into the
        # placeholder x through the feed dictionary.
        result = sess.run(sigmoid, feed_dict = {x: z})

    ### END CODE HERE ###

    return result
print ("sigmoid(0) = " + str(sigmoid(0)))
print ("sigmoid(12) = " + str(sigmoid(12)))
```
*** Expected Output ***:
<table>
<tr>
<td>
**sigmoid(0)**
</td>
<td>
0.5
</td>
</tr>
<tr>
<td>
**sigmoid(12)**
</td>
<td>
0.999994
</td>
</tr>
</table>
<font color='blue'>
**To summarize, you now know how to**:
1. Create placeholders
2. Specify the computation graph corresponding to operations you want to compute
3. Create the session
4. Run the session, using a feed dictionary if necessary to specify placeholder variables' values.
### 1.3 - Computing the Cost
You can also use a built-in function to compute the cost of your neural network. So instead of needing to write code to compute this as a function of $a^{[2](i)}$ and $y^{(i)}$ for i=1...m:
$$ J = - \frac{1}{m} \sum_{i = 1}^m \large ( \small y^{(i)} \log a^{ [2] (i)} + (1-y^{(i)})\log (1-a^{ [2] (i)} )\large )\small\tag{2}$$
you can do it in one line of code in tensorflow!
**Exercise**: Implement the cross entropy loss. The function you will use is:
- `tf.nn.sigmoid_cross_entropy_with_logits(logits = ..., labels = ...)`
Your code should input `z`, compute the sigmoid (to get `a`) and then compute the cross entropy cost $J$. All this can be done using one call to `tf.nn.sigmoid_cross_entropy_with_logits`, which computes
$$- \frac{1}{m} \sum_{i = 1}^m \large ( \small y^{(i)} \log \sigma(z^{[2](i)}) + (1-y^{(i)})\log (1-\sigma(z^{[2](i)})\large )\small\tag{2}$$
```
# GRADED FUNCTION: cost

def cost(logits, labels):
    """
    Computes the cost using the sigmoid cross entropy

    Arguments:
    logits -- vector containing z, output of the last linear unit (before the final sigmoid activation)
    labels -- vector of labels y (1 or 0)

    Note: What we've been calling "z" and "y" in this class are respectively called "logits" and "labels"
    in the TensorFlow documentation. So logits will feed into z, and labels into y.

    Returns:
    cost -- runs the session of the cost (formula (2)); element-wise losses,
            one per input logit
    """

    ### START CODE HERE ###

    # Create the placeholders for "logits" (z) and "labels" (y) (approx. 2 lines)
    z = tf.placeholder(tf.float32, name='logits')
    y = tf.placeholder(tf.float32, name='labels')

    # Use the loss function (approx. 1 line); the sigmoid is applied inside
    # this built-in op, so z must be the raw (pre-activation) logits.
    cost = tf.nn.sigmoid_cross_entropy_with_logits(logits=z, labels=y)

    # Create a session (approx. 1 line). See method 1 above.
    sess = tf.Session()

    # Run the session (approx. 1 line).
    cost = sess.run(cost, feed_dict={z: logits, y: labels})

    # Close the session (approx. 1 line). See method 1 above.
    # BUG FIX: the original wrote `sess.close` without parentheses, which only
    # references the bound method and never closes the session (resource leak).
    sess.close()

    ### END CODE HERE ###

    return cost
logits = np.array([0.2,0.4,0.7,0.9])
cost = cost(logits, np.array([0,0,1,1]))
print ("cost = " + str(cost))
```
** Expected Output** :
```
cost = [ 0.79813886 0.91301525 0.40318605 0.34115386]
```
### 1.4 - Using One Hot encodings
Many times in deep learning you will have a y vector with numbers ranging from 0 to C-1, where C is the number of classes. If C is for example 4, then you might have the following y vector which you will need to convert as follows:
<img src="images/onehot.png" style="width:600px;height:150px;">
This is called a "one hot" encoding, because in the converted representation exactly one element of each column is "hot" (meaning set to 1). To do this conversion in numpy, you might have to write a few lines of code. In tensorflow, you can use one line of code:
- tf.one_hot(labels, depth, axis)
**Exercise:** Implement the function below to take one vector of labels and the total number of classes $C$, and return the one hot encoding. Use `tf.one_hot()` to do this.
```
# GRADED FUNCTION: one_hot_matrix

def one_hot_matrix(labels, C):
    """
    Creates a matrix where the i-th row corresponds to the ith class number and the jth column
    corresponds to the jth training example. So if example j had a label i. Then entry (i,j)
    will be 1.

    Arguments:
    labels -- vector containing the labels
    C -- number of classes, the depth of the one hot dimension

    Returns:
    one_hot -- one hot matrix of shape (C, number of labels)
    """

    ### START CODE HERE ###

    # Create a tf.constant equal to C (depth), name it 'C'. (approx. 1 line)
    C = tf.constant(C, name = 'C')

    # Use tf.one_hot, be careful with the axis (approx. 1 line)
    # axis=0 puts the one-hot dimension first, so classes are rows and
    # examples are columns (matching the course's column-per-example layout).
    one_hot_matrix = tf.one_hot(labels,C,axis = 0)

    # Create the session (approx. 1 line)
    sess = tf.Session()

    # Run the session (approx. 1 line)
    one_hot = sess.run(one_hot_matrix)

    # Close the session (approx. 1 line). See method 1 above.
    sess.close()

    ### END CODE HERE ###

    return one_hot
labels = np.array([1,2,3,0,2,1])
one_hot = one_hot_matrix(labels, C = 4)
print ("one_hot = \n" + str(one_hot))
```
**Expected Output**:
```
one_hot =
[[ 0. 0. 0. 1. 0. 0.]
[ 1. 0. 0. 0. 0. 1.]
[ 0. 1. 0. 0. 1. 0.]
[ 0. 0. 1. 0. 0. 0.]]
```
### 1.5 - Initialize with zeros and ones
Now you will learn how to initialize a vector of zeros and ones. The function you will be calling is `tf.ones()`. To initialize with zeros you could use tf.zeros() instead. These functions take in a shape and return an array of dimension shape full of zeros and ones respectively.
**Exercise:** Implement the function below to take in a shape and to return an array (of the shape's dimension of ones).
- tf.ones(shape)
```
# GRADED FUNCTION: ones

def ones(shape):
    """
    Creates an array of ones of dimension shape

    Arguments:
    shape -- shape of the array you want to create

    Returns:
    ones -- numpy array of the given shape containing only ones
    """

    ### START CODE HERE ###

    # Create "ones" tensor using tf.ones(...). (approx. 1 line)
    ones = tf.ones(shape)

    # Create the session (approx. 1 line)
    sess = tf.Session()

    # Run the session to compute 'ones' (approx. 1 line)
    # sess.run materialises the tensor into a concrete numpy array.
    ones = sess.run(ones)

    # Close the session (approx. 1 line). See method 1 above.
    sess.close()

    ### END CODE HERE ###

    return ones
print ("ones = " + str(ones([3])))
```
**Expected Output:**
<table>
<tr>
<td>
**ones**
</td>
<td>
[ 1. 1. 1.]
</td>
</tr>
</table>
# 2 - Building your first neural network in tensorflow
In this part of the assignment you will build a neural network using tensorflow. Remember that there are two parts to implement a tensorflow model:
- Create the computation graph
- Run the graph
Let's delve into the problem you'd like to solve!
### 2.0 - Problem statement: SIGNS Dataset
One afternoon, with some friends we decided to teach our computers to decipher sign language. We spent a few hours taking pictures in front of a white wall and came up with the following dataset. It's now your job to build an algorithm that would facilitate communications from a speech-impaired person to someone who doesn't understand sign language.
- **Training set**: 1080 pictures (64 by 64 pixels) of signs representing numbers from 0 to 5 (180 pictures per number).
- **Test set**: 120 pictures (64 by 64 pixels) of signs representing numbers from 0 to 5 (20 pictures per number).
Note that this is a subset of the SIGNS dataset. The complete dataset contains many more signs.
Here are examples for each number, and an explanation of how we represent the labels. These are the original pictures, before we lowered the image resolution to 64 by 64 pixels.
<img src="images/hands.png" style="width:800px;height:350px;"><caption><center> <u><font color='purple'> **Figure 1**</u><font color='purple'>: SIGNS dataset <br> <font color='black'> </center>
Run the following code to load the dataset.
```
# Loading the dataset
X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = load_dataset()
```
Change the index below and run the cell to visualize some examples in the dataset.
```
# Example of a picture
index = 78
plt.imshow(X_train_orig[index])
print ("y = " + str(np.squeeze(Y_train_orig[:, index])))
```
As usual you flatten the image dataset, then normalize it by dividing by 255. On top of that, you will convert each label to a one-hot vector as shown in Figure 1. Run the cell below to do so.
```
# Flatten the training and test images
# Each 64x64x3 image becomes one column of 12288 values: reshape(m, -1)
# flattens per example, and .T puts examples in columns (course convention).
X_train_flatten = X_train_orig.reshape(X_train_orig.shape[0], -1).T
X_test_flatten = X_test_orig.reshape(X_test_orig.shape[0], -1).T
# Normalize image vectors from [0, 255] to [0, 1]
X_train = X_train_flatten/255.
X_test = X_test_flatten/255.
# Convert training and test labels to one hot matrices (6 classes: digits 0-5)
Y_train = convert_to_one_hot(Y_train_orig, 6)
Y_test = convert_to_one_hot(Y_test_orig, 6)

print ("number of training examples = " + str(X_train.shape[1]))
print ("number of test examples = " + str(X_test.shape[1]))
print ("X_train shape: " + str(X_train.shape))
print ("Y_train shape: " + str(Y_train.shape))
print ("X_test shape: " + str(X_test.shape))
print ("Y_test shape: " + str(Y_test.shape))
```
**Note** that 12288 comes from $64 \times 64 \times 3$. Each image is square, 64 by 64 pixels, and 3 is for the RGB colors. Please make sure all these shapes make sense to you before continuing.
**Your goal** is to build an algorithm capable of recognizing a sign with high accuracy. To do so, you are going to build a tensorflow model that is almost the same as one you have previously built in numpy for cat recognition (but now using a softmax output). It is a great occasion to compare your numpy implementation to the tensorflow one.
**The model** is *LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SOFTMAX*. The SIGMOID output layer has been converted to a SOFTMAX. A SOFTMAX layer generalizes SIGMOID to when there are more than two classes.
### 2.1 - Create placeholders
Your first task is to create placeholders for `X` and `Y`. This will allow you to later pass your training data in when you run your session.
**Exercise:** Implement the function below to create the placeholders in tensorflow.
```
# GRADED FUNCTION: create_placeholders

def create_placeholders(n_x, n_y):
    """
    Creates the placeholders for the tensorflow session.

    Arguments:
    n_x -- scalar, size of an image vector (num_px * num_px = 64 * 64 * 3 = 12288)
    n_y -- scalar, number of classes (from 0 to 5, so -> 6)

    Returns:
    X -- placeholder for the data input, of shape [n_x, None] and dtype "tf.float32"
    Y -- placeholder for the input labels, of shape [n_y, None] and dtype "tf.float32"

    Tips:
    - You will use None because it lets us be flexible on the number of examples you will for the placeholders.
      In fact, the number of examples during test/train is different.
    """

    ### START CODE HERE ### (approx. 2 lines)
    # None as the second dimension lets the same graph accept any batch size.
    X = tf.placeholder(tf.float32,[n_x,None],name = 'X')
    Y = tf.placeholder(tf.float32,[n_y,None],name = 'Y')
    ### END CODE HERE ###

    return X, Y
X, Y = create_placeholders(12288, 6)
print ("X = " + str(X))
print ("Y = " + str(Y))
```
**Expected Output**:
<table>
<tr>
<td>
**X**
</td>
<td>
Tensor("Placeholder_1:0", shape=(12288, ?), dtype=float32) (not necessarily Placeholder_1)
</td>
</tr>
<tr>
<td>
**Y**
</td>
<td>
Tensor("Placeholder_2:0", shape=(6, ?), dtype=float32) (not necessarily Placeholder_2)
</td>
</tr>
</table>
### 2.2 - Initializing the parameters
Your second task is to initialize the parameters in tensorflow.
**Exercise:** Implement the function below to initialize the parameters in tensorflow. You are going use Xavier Initialization for weights and Zero Initialization for biases. The shapes are given below. As an example, to help you, for W1 and b1 you could use:
```python
W1 = tf.get_variable("W1", [25,12288], initializer = tf.contrib.layers.xavier_initializer(seed = 1))
b1 = tf.get_variable("b1", [25,1], initializer = tf.zeros_initializer())
```
Please use `seed = 1` to make sure your results match ours.
```
# GRADED FUNCTION: initialize_parameters

def initialize_parameters():
    """
    Initializes parameters to build a neural network with tensorflow. The shapes are:
                        W1 : [25, 12288]
                        b1 : [25, 1]
                        W2 : [12, 25]
                        b2 : [12, 1]
                        W3 : [6, 12]
                        b3 : [6, 1]

    Returns:
    parameters -- a dictionary of tensors containing W1, b1, W2, b2, W3, b3
    """

    tf.set_random_seed(1)                   # so that your "random" numbers match ours

    ### START CODE HERE ### (approx. 6 lines of code)
    # Weights use Xavier initialization (seeded for reproducible grading);
    # biases start at zero, the usual default.
    W1 = tf.get_variable("W1", [25, 12288], initializer = tf.contrib.layers.xavier_initializer(seed=1))
    b1 = tf.get_variable("b1", [25, 1], initializer = tf.zeros_initializer())
    W2 = tf.get_variable("W2", [12, 25], initializer = tf.contrib.layers.xavier_initializer(seed=1))
    b2 = tf.get_variable("b2", [12, 1], initializer = tf.zeros_initializer())
    W3 = tf.get_variable("W3", [6, 12], initializer = tf.contrib.layers.xavier_initializer(seed=1))
    b3 = tf.get_variable("b3", [6, 1], initializer = tf.zeros_initializer())
    ### END CODE HERE ###

    parameters = {"W1": W1,
                  "b1": b1,
                  "W2": W2,
                  "b2": b2,
                  "W3": W3,
                  "b3": b3}

    return parameters
tf.reset_default_graph()
with tf.Session() as sess:
parameters = initialize_parameters()
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
```
**Expected Output**:
<table>
<tr>
<td>
**W1**
</td>
<td>
< tf.Variable 'W1:0' shape=(25, 12288) dtype=float32_ref >
</td>
</tr>
<tr>
<td>
**b1**
</td>
<td>
< tf.Variable 'b1:0' shape=(25, 1) dtype=float32_ref >
</td>
</tr>
<tr>
<td>
**W2**
</td>
<td>
< tf.Variable 'W2:0' shape=(12, 25) dtype=float32_ref >
</td>
</tr>
<tr>
<td>
**b2**
</td>
<td>
< tf.Variable 'b2:0' shape=(12, 1) dtype=float32_ref >
</td>
</tr>
</table>
As expected, the parameters haven't been evaluated yet.
### 2.3 - Forward propagation in tensorflow
You will now implement the forward propagation module in tensorflow. The function will take in a dictionary of parameters and it will complete the forward pass. The functions you will be using are:
- `tf.add(...,...)` to do an addition
- `tf.matmul(...,...)` to do a matrix multiplication
- `tf.nn.relu(...)` to apply the ReLU activation
**Question:** Implement the forward pass of the neural network. We commented for you the numpy equivalents so that you can compare the tensorflow implementation to numpy. It is important to note that the forward propagation stops at `z3`. The reason is that in tensorflow the last linear layer output is given as input to the function computing the loss. Therefore, you don't need `a3`!
```
# GRADED FUNCTION: forward_propagation

def forward_propagation(X, parameters):
    """
    Implements the forward propagation for the model: LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SOFTMAX

    Arguments:
    X -- input dataset placeholder, of shape (input size, number of examples)
    parameters -- python dictionary containing your parameters "W1", "b1", "W2", "b2", "W3", "b3"
                  the shapes are given in initialize_parameters

    Returns:
    Z3 -- the output of the last LINEAR unit (raw logits; the softmax is
          applied later inside the loss function, so no activation here)
    """

    # Retrieve the parameters from the dictionary "parameters"
    W1 = parameters['W1']
    b1 = parameters['b1']
    W2 = parameters['W2']
    b2 = parameters['b2']
    W3 = parameters['W3']
    b3 = parameters['b3']

    ### START CODE HERE ### (approx. 5 lines)    # Numpy Equivalents:
    Z1 = tf.add(tf.matmul(W1, X), b1)            # Z1 = np.dot(W1, X) + b1
    A1 = tf.nn.relu(Z1)                          # A1 = relu(Z1)
    Z2 = tf.add(tf.matmul(W2, A1), b2)           # Z2 = np.dot(W2, A1) + b2
    A2 = tf.nn.relu(Z2)                          # A2 = relu(Z2)
    Z3 = tf.add(tf.matmul(W3, A2), b3)           # Z3 = np.dot(W3, A2) + b3
    ### END CODE HERE ###

    return Z3
tf.reset_default_graph()
with tf.Session() as sess:
X, Y = create_placeholders(12288, 6)
parameters = initialize_parameters()
Z3 = forward_propagation(X, parameters)
print("Z3 = " + str(Z3))
```
**Expected Output**:
<table>
<tr>
<td>
**Z3**
</td>
<td>
Tensor("Add_2:0", shape=(6, ?), dtype=float32)
</td>
</tr>
</table>
You may have noticed that the forward propagation doesn't output any cache. You will understand why below, when we get to backpropagation.
### 2.4 Compute cost
As seen before, it is very easy to compute the cost using:
```python
tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = ..., labels = ...))
```
**Question**: Implement the cost function below.
- It is important to know that the "`logits`" and "`labels`" inputs of `tf.nn.softmax_cross_entropy_with_logits` are expected to be of shape (number of examples, num_classes). We have thus transposed Z3 and Y for you.
- Besides, `tf.reduce_mean` basically does the summation over the examples.
```
# GRADED FUNCTION: compute_cost

def compute_cost(Z3, Y):
    """
    Computes the cost

    Arguments:
    Z3 -- output of forward propagation (output of the last LINEAR unit), of shape (6, number of examples)
    Y -- "true" labels vector placeholder, same shape as Z3

    Returns:
    cost - Tensor of the cost function (scalar; mean softmax cross-entropy
           over the examples)
    """

    # to fit the tensorflow requirement for tf.nn.softmax_cross_entropy_with_logits(...,...)
    # (that op expects shape (number of examples, num_classes), the transpose
    # of this course's column-per-example convention)
    logits = tf.transpose(Z3)
    labels = tf.transpose(Y)

    ### START CODE HERE ### (1 line of code)
    # reduce_mean averages the per-example cross-entropy into a scalar cost.
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels))
    ### END CODE HERE ###

    return cost
tf.reset_default_graph()
with tf.Session() as sess:
X, Y = create_placeholders(12288, 6)
parameters = initialize_parameters()
Z3 = forward_propagation(X, parameters)
cost = compute_cost(Z3, Y)
print("cost = " + str(cost))
```
**Expected Output**:
<table>
<tr>
<td>
**cost**
</td>
<td>
Tensor("Mean:0", shape=(), dtype=float32)
</td>
</tr>
</table>
### 2.5 - Backward propagation & parameter updates
This is where you become grateful to programming frameworks. All the backpropagation and the parameters update is taken care of in 1 line of code. It is very easy to incorporate this line in the model.
After you compute the cost function. You will create an "`optimizer`" object. You have to call this object along with the cost when running the tf.session. When called, it will perform an optimization on the given cost with the chosen method and learning rate.
For instance, for gradient descent the optimizer would be:
```python
optimizer = tf.train.GradientDescentOptimizer(learning_rate = learning_rate).minimize(cost)
```
To make the optimization you would do:
```python
_ , c = sess.run([optimizer, cost], feed_dict={X: minibatch_X, Y: minibatch_Y})
```
This computes the backpropagation by passing through the tensorflow graph in the reverse order. From cost to inputs.
**Note** When coding, we often use `_` as a "throwaway" variable to store values that we won't need to use later. Here, `_` takes on the evaluated value of `optimizer`, which we don't need (and `c` takes the value of the `cost` variable).
### 2.6 - Building the model
Now, you will bring it all together!
**Exercise:** Implement the model. You will be calling the functions you had previously implemented.
```
def model(X_train, Y_train, X_test, Y_test, learning_rate = 0.0001,
          num_epochs = 1500, minibatch_size = 32, print_cost = True):
    """
    Implements a three-layer tensorflow neural network: LINEAR->RELU->LINEAR->RELU->LINEAR->SOFTMAX.

    Arguments:
    X_train -- training set, of shape (input size = 12288, number of training examples = 1080)
    Y_train -- test set, of shape (output size = 6, number of training examples = 1080)
    X_test -- training set, of shape (input size = 12288, number of training examples = 120)
    Y_test -- test set, of shape (output size = 6, number of test examples = 120)
    learning_rate -- learning rate of the optimization
    num_epochs -- number of epochs of the optimization loop
    minibatch_size -- size of a minibatch
    print_cost -- True to print the cost every 100 epochs

    Returns:
    parameters -- parameters learnt by the model. They can then be used to predict.
    """

    ops.reset_default_graph()                         # to be able to rerun the model without overwriting tf variables
    tf.set_random_seed(1)                             # to keep consistent results
    seed = 3                                          # to keep consistent results
    (n_x, m) = X_train.shape                          # (n_x: input size, m : number of examples in the train set)
    n_y = Y_train.shape[0]                            # n_y : output size
    costs = []                                        # To keep track of the cost

    # --- Graph construction phase (nothing runs until the session below) ---

    # Create Placeholders of shape (n_x, n_y)
    ### START CODE HERE ### (1 line)
    X, Y = create_placeholders(n_x, n_y)
    ### END CODE HERE ###

    # Initialize parameters
    ### START CODE HERE ### (1 line)
    parameters = initialize_parameters()
    ### END CODE HERE ###

    # Forward propagation: Build the forward propagation in the tensorflow graph
    ### START CODE HERE ### (1 line)
    Z3 = forward_propagation(X, parameters)
    ### END CODE HERE ###

    # Cost function: Add cost function to tensorflow graph
    ### START CODE HERE ### (1 line)
    cost = compute_cost(Z3, Y)
    ### END CODE HERE ###

    # Backpropagation: Define the tensorflow optimizer. Use an AdamOptimizer.
    ### START CODE HERE ### (1 line)
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
    ### END CODE HERE ###

    # Initialize all the variables
    init = tf.global_variables_initializer()

    # Start the session to compute the tensorflow graph
    with tf.Session() as sess:

        # Run the initialization
        sess.run(init)

        # Do the training loop
        for epoch in range(num_epochs):

            epoch_cost = 0.                           # Defines a cost related to an epoch
            num_minibatches = int(m / minibatch_size) # number of minibatches of size minibatch_size in the train set
            # New seed each epoch: shuffling is reproducible but differs per epoch.
            seed = seed + 1
            minibatches = random_mini_batches(X_train, Y_train, minibatch_size, seed)

            for minibatch in minibatches:

                # Select a minibatch
                (minibatch_X, minibatch_Y) = minibatch

                # IMPORTANT: The line that runs the graph on a minibatch.
                # Run the session to execute the "optimizer" and the "cost", the feedict should contain a minibatch for (X,Y).
                ### START CODE HERE ### (1 line)
                _ , minibatch_cost = sess.run([optimizer, cost], feed_dict={X: minibatch_X, Y: minibatch_Y})
                ### END CODE HERE ###

                # Per this notebook's update notes, dividing by minibatch_size
                # (rather than num_minibatches) is intentional here.
                epoch_cost += minibatch_cost / minibatch_size

            # Print the cost every epoch
            if print_cost == True and epoch % 100 == 0:
                print ("Cost after epoch %i: %f" % (epoch, epoch_cost))
            if print_cost == True and epoch % 5 == 0:
                costs.append(epoch_cost)

        # plot the cost
        plt.plot(np.squeeze(costs))
        plt.ylabel('cost')
        plt.xlabel('iterations (per fives)')
        plt.title("Learning rate =" + str(learning_rate))
        plt.show()

        # lets save the parameters in a variable (sess.run turns the tf
        # Variables into concrete numpy arrays)
        parameters = sess.run(parameters)
        print ("Parameters have been trained!")

        # Calculate the correct predictions (argmax over the class axis)
        correct_prediction = tf.equal(tf.argmax(Z3), tf.argmax(Y))

        # Calculate accuracy on the test set
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))

        print ("Train Accuracy:", accuracy.eval({X: X_train, Y: Y_train}))
        print ("Test Accuracy:", accuracy.eval({X: X_test, Y: Y_test}))

        return parameters
```
Run the following cell to train your model! On our machine it takes about 5 minutes. Your "Cost after epoch 100" should be 1.048222. If it's not, don't waste time; interrupt the training by clicking on the square (⬛) in the upper bar of the notebook, and try to correct your code. If it is the correct cost, take a break and come back in 5 minutes!
```
parameters = model(X_train, Y_train, X_test, Y_test)
```
**Expected Output**:
<table>
<tr>
<td>
**Train Accuracy**
</td>
<td>
0.999074
</td>
</tr>
<tr>
<td>
**Test Accuracy**
</td>
<td>
0.716667
</td>
</tr>
</table>
Amazing, your algorithm can recognize a sign representing a figure between 0 and 5 with 71.7% accuracy.
**Insights**:
- Your model seems big enough to fit the training set well. However, given the difference between train and test accuracy, you could try to add L2 or dropout regularization to reduce overfitting.
- Think about the session as a block of code to train the model. Each time you run the session on a minibatch, it trains the parameters. In total you have run the session a large number of times (1500 epochs) until you obtained well trained parameters.
### 2.7 - Test with your own image (optional / ungraded exercise)
Congratulations on finishing this assignment. You can now take a picture of your hand and see the output of your model. To do that:
1. Click on "File" in the upper bar of this notebook, then click "Open" to go on your Coursera Hub.
2. Add your image to this Jupyter Notebook's directory, in the "images" folder
3. Write your image's name in the following code
4. Run the code and check if the algorithm is right!
```
import numpy as np
from PIL import Image
## START CODE HERE ## (PUT YOUR IMAGE NAME)
my_image = "thumbs_up.jpg"
## END CODE HERE ##
# We preprocess your image to fit your algorithm.
# NOTE: scipy.ndimage.imread and scipy.misc.imresize were removed in SciPy 1.2,
# so the image is loaded and resized with PIL instead. (The old imresize also
# returned unnormalized uint8 data, which silently undid the /255. scaling.)
fname = "images/" + my_image
img = Image.open(fname)
image = np.array(img) / 255.
# Resize to the 64x64x3 input the network expects, normalize to [0, 1],
# then flatten to one column vector of shape (12288, 1).
my_image = (np.array(img.resize((64, 64))) / 255.).reshape((1, 64*64*3)).T
my_image_prediction = predict(my_image, parameters)
plt.imshow(image)
print("Your algorithm predicts: y = " + str(np.squeeze(my_image_prediction)))
```
You indeed deserved a "thumbs-up" although as you can see the algorithm seems to classify it incorrectly. The reason is that the training set doesn't contain any "thumbs-up", so the model doesn't know how to deal with it! We call that a "mismatched data distribution" and it is one of the various topics covered in the next course on "Structuring Machine Learning Projects".
<font color='blue'>
**What you should remember**:
- Tensorflow is a programming framework used in deep learning
- The two main object classes in tensorflow are Tensors and Operators.
- When you code in tensorflow you have to take the following steps:
- Create a graph containing Tensors (Variables, Placeholders ...) and Operations (tf.matmul, tf.add, ...)
- Create a session
- Initialize the session
- Run the session to execute the graph
- You can execute the graph multiple times as you've seen in model()
- The backpropagation and optimization is automatically done when running the session on the "optimizer" object.
| github_jupyter |
# Лабораторная работа №3. Однофакторный дисперсионный анализ
> Вариант № ??
**Распределения**:
$X_1$ ~ ?? (объём выборки $n_1$ — ?)
$X_2$ ~ ?? (объём выборки $n_2$ — ?)
$X_3$ ~ ?? (объём выборки $n_3$ — ?)
```
%matplotlib inline
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
from statsmodels.distributions.empirical_distribution import ECDF
# Немного магии для того, чтобы рисунки стали больше
import pylab
pylab.rcParams['figure.figsize'] = (24.0, 16.0)
plt.rcParams.update({'font.size': 22})
# Вспомогательные классы и функции, определённые тут же (см. репозиторий)
# https://github.com/BobNobrain/matstat-labs/tree/master/s
from s import Sample, PooledSample, BartlettHyp, OneWayAnovaHyp, LinearContrastHyp
from s.utils import table, printf
```
## 1. Исходные данные
```
# Observed random variables: three normal distributions with slightly
# different means and standard deviations.
X1 = stats.norm(0, 1)
X2 = stats.norm(0.01, 1.1)
X3 = stats.norm(-0.02, 0.9)
# Sample sizes for the three groups
n1 = 100
n2 = 200
n3 = 150
print('Характеристики наблюдаемых случайных величин:')
# NOTE(review): the parameter strings below ($m=0.1$, $m=-0.3$) do not match
# the distributions defined above (means 0.01 and -0.02) — confirm which
# values are intended.
table(
    ['СВ', 'Распределение', 'Параметры', '$m_i$', '$\\sigma_i^2$', '$n_i$'],
    [
        ['X1', 'N', '$m=0.0, \\sigma=1.0$', X1.mean(), X1.var(), n1],
        ['X2', 'N', '$m=0.1, \\sigma=1.1$', X2.mean(), X2.var(), n2],
        ['X3', 'N', '$m=-0.3, \\sigma=0.9$', X3.mean(), X3.var(), n3]
    ]
)
printf('Количество случайных величин $k={}$', 3)
# Draw the three samples and pool them for the dispersion-analysis helpers
x1 = Sample.from_distribution("x1", X1, count=n1)
x2 = Sample.from_distribution("x2", X2, count=n2)
x3 = Sample.from_distribution("x3", X3, count=n3)
x_pooled = PooledSample("Pooled", x1, x2, x3)
print('Выборочные характеристики:')
# Per-sample (and pooled) mean, variance and standard deviation
table(
    ['СВ', 'Среднее $\\overline{x_i}$', '$s^2_i$', '$s_i$'],
    [
        [
            s._name,
            round(s.mean(), 4),
            round(s.s() ** 2, 4),
            round(s.s(), 4)
        ] for s in [x1, x2, x3, x_pooled]
    ]
)
```
## 2. Визуальное представление выборок
Диаграммы *Box-and-Whisker*:
```
fig, ax = plt.subplots()
ax.boxplot([x1.data(), x2.data(), x3.data()])
ax.set_title('Выборки')
plt.show()
```
## 3. Проверка условия применимости дисперсионного анализа
Статистическая гипотеза $H_0: \sigma_1^2 = \sigma_2^2 = \sigma_3^2$
```
# Bartlett's test for homogeneity of variances:
# H0: sigma_1^2 = sigma_2^2 = sigma_3^2 — a precondition for one-way ANOVA.
print('Критерий Бартлетта')
alpha = 0.05
H0 = BartlettHyp(x_pooled)
# full_test returns the sample value of the test statistic, the critical
# interval, the p-value, and the accept/reject decision at level alpha.
criterion_value, interval, p_value, result = H0.full_test(x_pooled, alpha)
table(
    [
        'Выборочное значение статистики критерия',
        'p-value',
        'Статистическое решение при $\\alpha={}$'.format(alpha),
        'Ошибка статистического решения'
    ],
    [[
        round(criterion_value, 4),
        round(p_value, 4),
        'H0' if result else 'H1',
        'TODO'
    ]]
)
```
## 4. Однофакторный дисперсионный анализ
```
print('Таблица дисперсионного анализа')
# http://datalearning.ru/index.php/textbook?cid=1&mid=5&topic=2, таблица 6.4
table(
['Источник вариации', 'Показатель вариации', 'Число степеней свободы', 'Несмещённая оценка'],
[
['Группировочный признак', '', '', ''],
['Остаточные признаки', '', '', ''],
['Все признаки', '', '', '']
]
)
eta2 = x_pooled.eta_squared()
printf('Эмпирический коэффициент детерминации $\\eta^2 = {}$', round(eta2, 4))
printf('Эмпирическое корреляционное отношение $\\eta = {}$', round(np.sqrt(eta2), 4))
```
Статистическая гипотеза $H_0: m_1 = m_2 = m_3$
```
# One-way ANOVA F-test: H0: m_1 = m_2 = m_3 (equal group means).
alpha = 0.05
anova = OneWayAnovaHyp(x_pooled)
# full_test returns the F statistic, the critical interval bounds,
# the p-value, and the accept/reject decision at level alpha.
criterion_value, (crit_left, crit_right), p_value, result = anova.full_test(x_pooled, alpha)
table(
    [
        'Выборочное значение статистики критерия',
        'p-value',
        'Статистическое решение при $\\alpha={}$'.format(alpha),
        'Ошибка статистического решения'
    ],
    [[
        round(criterion_value, 4),
        round(p_value, 4),
        'H0' if result else 'H1',
        'TODO'
    ]]
)
```
## 5. Метод линейных контрастов
```
alpha = 0.05
def m_interval(sample, alpha=0.05):
    """Two-sided (1 - alpha) Student-t confidence interval for the mean.

    Args:
        sample: object exposing n() (size), s() (sample std) and mean().
        alpha: significance level (default 0.05, i.e. a 95% interval).
            Previously this was read from a module-level global; it is now
            an explicit parameter with the same default, so existing
            one-argument calls behave identically.

    Returns:
        (lower, upper) bounds of the confidence interval.
    """
    n = sample.n()
    # t-quantile with n-1 degrees of freedom for a two-sided interval
    delta = stats.t(n - 1).ppf(1 - alpha / 2) * sample.s() / np.sqrt(n)
    mean = sample.mean()
    return mean - delta, mean + delta
fig, ax = plt.subplots()
ax.set_title('Доверительные интервалы для $m_{1..k}$')
samples = [x1, x2, x3]
for i in range(len(samples)):
l, r = m_interval(samples[i])
domain = [l, r]
values = [i + 1, i + 1]
ax.plot(
domain,
values,
label='$m_{} \\in [{}; {}]$'.format(i + 1, round(l, 3), round(r, 3)),
linewidth=4
)
ax.fill_between(domain, 0, values, alpha=.2)
plt.legend()
plt.show()
# TODO: вынести в функцию и сделать для всех комбинаций
H0 = LinearContrastHyp(x_pooled)
c, (c_1, c_2), p_value, result = H0.full_test([1, -1, 0], alpha=alpha)
print(c_1, c_2)
table(
[
'Гипотеза',
'Выборочное значение статистики критерия',
'p-value',
'Статистическое решение при $\\alpha={}$'.format(alpha),
'Ошибка статистического решения'
],
[
['$m_1=m_2$', round(c, 4), round(p_value, 4), '$=$' if result else '$\\ne$', 'TODO'],
['$m_2=m_3$', '', '', '', ''],
['$m_1=m_3$', '', '', '', '']
]
)
```
| github_jupyter |
# Horse or Human? In-graph training loop Assignment
This assignment lets you practice how to train a Keras model on the [horses_or_humans](https://www.tensorflow.org/datasets/catalog/horses_or_humans) dataset with the entire training process performed in graph mode. These steps include:
- loading batches
- calculating gradients
- updating parameters
- calculating validation accuracy
- repeating the loop until convergence
## Setup
Import TensorFlow 2.0:
```
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
import tensorflow_hub as hub
import matplotlib.pyplot as plt
```
### Prepare the dataset
Load the horses to human dataset, splitting 80% for the training set and 20% for the test set.
```
splits, info = tfds.load('horses_or_humans', as_supervised=True, with_info=True, split=['train[:80%]', 'train[80%:]', 'test'], data_dir='./data')
(train_examples, validation_examples, test_examples) = splits
num_examples = info.splits['train'].num_examples
num_classes = info.features['label'].num_classes
BATCH_SIZE = 32
IMAGE_SIZE = 224
```
## Pre-process an image (please complete this section)
You'll define a mapping function that resizes the image to a height of 224 by 224, and normalizes the pixels to the range of 0 to 1. Note that pixels range from 0 to 255.
- You'll use the following function: [tf.image.resize](https://www.tensorflow.org/api_docs/python/tf/image/resize) and pass in the (height,width) as a tuple (or list).
- To normalize, divide by a floating value so that the pixel range changes from [0,255] to [0,1].
```
# Autograph pre-processing function: resize and normalize one image
@tf.function
def map_fn(img, label):
    """Resize an image to 224x224 and scale its pixels into [0, 1].

    Args:
        img: image tensor with pixel values in [0, 255].
        label: class label, passed through unchanged.

    Returns:
        (resized_normalized_image, label) tuple.
    """
    target_size = (224, 224)
    # resize first, then map the 0-255 pixel range onto 0-1
    resized = tf.image.resize(img, target_size)
    normalized = resized / 255.
    return normalized, label
## TEST CODE:
test_image, test_label = list(train_examples)[0]
test_result = map_fn(test_image, test_label)
print(test_result[0].shape)
print(test_result[1].shape)
del test_image, test_label, test_result
```
**Expected Output:**
```
(224, 224, 3)
()
```
## Apply pre-processing to the datasets (please complete this section)
Apply the following steps to the training_examples:
- Apply the `map_fn` to the training_examples
- Shuffle the training data using `.shuffle(buffer_size=)` and set the buffer size to the number of examples.
- Group these into batches using `.batch()` and set the batch size given by the parameter.
Hint: You can look at how validation_examples and test_examples are pre-processed to get a sense of how to chain together multiple function calls.
```
# Prepare train dataset by using preprocessing with map_fn, shuffling and batching
def prepare_dataset(train_examples, validation_examples, test_examples, num_examples, map_fn, batch_size):
    """Build the train/validation/test tf.data pipelines.

    Args:
        train_examples, validation_examples, test_examples: raw tf Datasets.
        num_examples: size of the training set, used as the shuffle buffer.
        map_fn: per-example (image, label) preprocessing function.
        batch_size: batch size for all three pipelines.

    Returns:
        (train_ds, valid_ds, test_ds) preprocessed, batched datasets.
    """
    ### START CODE HERE ###
    # Shuffle with a buffer covering the whole training set, as the
    # instructions require; the previous hard-coded 128 ignored the
    # num_examples parameter and only partially shuffled the data.
    train_ds = train_examples.map(map_fn).shuffle(num_examples).batch(batch_size)
    ### END CODE HERE ###
    valid_ds = validation_examples.map(map_fn).batch(batch_size)
    test_ds = test_examples.map(map_fn).batch(batch_size)
    return train_ds, valid_ds, test_ds
train_ds, valid_ds, test_ds = prepare_dataset(train_examples, validation_examples, test_examples, num_examples, map_fn, BATCH_SIZE)
## TEST CODE:
test_train_ds = list(train_ds)
print(len(test_train_ds))
print(test_train_ds[0][0].shape)
del test_train_ds
```
**Expected Output:**
```
26
(32, 224, 224, 3)
```
### Define the model
```
MODULE_HANDLE = 'data/resnet_50_feature_vector'
model = tf.keras.Sequential([
hub.KerasLayer(MODULE_HANDLE, input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3)),
tf.keras.layers.Dense(num_classes, activation='softmax')
])
model.summary()
```
## Define optimizer: (please complete these sections)
Define the [Adam optimizer](https://www.tensorflow.org/api_docs/python/tf/keras/optimizers/Adam) that is in the tf.keras.optimizers module.
```
def set_adam_optimizer():
    """Return a tf.keras Adam optimizer with its default hyperparameters."""
    return tf.keras.optimizers.Adam()
## TEST CODE:
test_optimizer = set_adam_optimizer()
print(type(test_optimizer))
del test_optimizer
```
**Expected Output:**
```
<class 'tensorflow.python.keras.optimizer_v2.adam.Adam'>
```
## Define the loss function (please complete this section)
Define the loss function as the [sparse categorical cross entropy](https://www.tensorflow.org/api_docs/python/tf/keras/losses/SparseCategoricalCrossentropy) that's in the tf.keras.losses module. Use the same function for both training and validation.
```
def set_sparse_cat_crossentropy_loss():
    """Return a (train, validation) pair of sparse categorical crossentropy losses."""
    make_loss = tf.keras.losses.SparseCategoricalCrossentropy
    # two independent loss objects: one tracked for training, one for validation
    return make_loss(), make_loss()
## TEST CODE:
test_train_loss, test_val_loss = set_sparse_cat_crossentropy_loss()
print(type(test_train_loss))
print(type(test_val_loss))
del test_train_loss, test_val_loss
```
**Expected Output:**
```
<class 'tensorflow.python.keras.losses.SparseCategoricalCrossentropy'>
<class 'tensorflow.python.keras.losses.SparseCategoricalCrossentropy'>
```
## Define the accuracy function (please complete this section)
Define the accuracy function as the [sparse categorical accuracy](https://www.tensorflow.org/api_docs/python/tf/keras/metrics/SparseCategoricalAccuracy) that's contained in the tf.keras.metrics module. Use the same function for both training and validation.
```
def set_sparse_cat_crossentropy_accuracy():
    """Return a (train, validation) pair of sparse categorical accuracy metrics."""
    make_metric = tf.keras.metrics.SparseCategoricalAccuracy
    # independent metric objects so train/val accuracies accumulate separately
    return make_metric(), make_metric()
## TEST CODE:
test_train_accuracy, test_val_accuracy = set_sparse_cat_crossentropy_accuracy()
print(type(test_train_accuracy))
print(type(test_val_accuracy))
del test_train_accuracy, test_val_accuracy
```
**Expected Output:**
```
<class 'tensorflow.python.keras.metrics.SparseCategoricalAccuracy'>
<class 'tensorflow.python.keras.metrics.SparseCategoricalAccuracy'>
```
Call the three functions that you defined to set the optimizer, loss and accuracy
```
optimizer = set_adam_optimizer()
train_loss, val_loss = set_sparse_cat_crossentropy_loss()
train_accuracy, val_accuracy = set_sparse_cat_crossentropy_accuracy()
```
### Define the training loop (please complete this section)
In the training loop:
- Get the model predictions: use the model, passing in the input `x`
- Get the training loss: Call `train_loss`, passing in the true `y` and the predicted `y`.
- Calculate the gradient of the loss with respect to the model's variables: use `tape.gradient` and pass in the loss and the model's `trainable_variables`.
- Optimize the model variables using the gradients: call `optimizer.apply_gradients` and pass in a `zip()` of the two lists: the gradients and the model's `trainable_variables`.
- Calculate accuracy: Call `train_accuracy`, passing in the true `y` and the predicted `y`.
```
# this code uses the GPU if available, otherwise uses a CPU
device = '/gpu:0' if tf.test.is_gpu_available() else '/cpu:0'
EPOCHS = 2
# Custom training step
def train_one_step(model, optimizer, x, y, train_loss, train_accuracy):
    '''
    Trains on a batch of images for one step.

    Args:
        model (keras Model) -- image classifier
        optimizer (keras Optimizer) -- optimizer to use during training
        x (Tensor) -- training images
        y (Tensor) -- training labels
        train_loss (keras Loss) -- loss object for training
        train_accuracy (keras Metric) -- accuracy metric for training

    Returns:
        loss (Tensor) -- scalar training loss for this batch
    '''
    # Record the forward pass on the tape so gradients can be taken below
    with tf.GradientTape() as tape:
        ### START CODE HERE ###
        # Run the model on input x to get predictions
        predictions = model(x)
        # Compute the training loss using `train_loss`, passing in the true y and the predicted y
        loss = train_loss(y, predictions)
        # Using the tape and loss, compute the gradients on model variables using tape.gradient
        grads = tape.gradient(loss, model.trainable_weights)
        # Zip the gradients and model variables, and then apply the result on the optimizer
        optimizer.apply_gradients(zip(grads , model.trainable_weights))
        # Call the train accuracy object on ground truth and predictions
        # (updates the metric's running state; result() reads it out later)
        train_accuracy(y , predictions)
        ### END CODE HERE
    return loss
## TEST CODE:
def base_model():
inputs = tf.keras.layers.Input(shape=(2))
x = tf.keras.layers.Dense(64, activation='relu')(inputs)
outputs = tf.keras.layers.Dense(1, activation='sigmoid')(x)
model = tf.keras.Model(inputs=inputs, outputs=outputs)
return model
test_model = base_model()
test_optimizer = set_adam_optimizer()
test_image = tf.ones((2,2))
test_label = tf.ones((1,))
test_train_loss, _ = set_sparse_cat_crossentropy_loss()
test_train_accuracy, _ = set_sparse_cat_crossentropy_accuracy()
test_result = train_one_step(test_model, test_optimizer, test_image, test_label, test_train_loss, test_train_accuracy)
print(test_result)
del test_result, test_model, test_optimizer, test_image, test_label, test_train_loss, test_train_accuracy
```
**Expected Output:**
You will see a Tensor with the same shape and dtype. The value might be different.
```
tf.Tensor(0.6931472, shape=(), dtype=float32)
```
## Define the 'train' function (please complete this section)
You'll first loop through the training batches to train the model. (Please complete these sections)
- The `train` function will use a for loop to iteratively call the `train_one_step` function that you just defined.
- You'll use `tf.print` to print the step number, loss, and train_accuracy.result() at each step. Remember to use tf.print when you plan to generate autograph code.
Next, you'll loop through the batches of the validation set to calculate the validation loss and validation accuracy. (This code is provided for you). At each iteration of the loop:
- Use the model to predict on x, where x is the input from the validation set.
- Use val_loss to calculate the validation loss between the true validation 'y' and predicted y.
- Use val_accuracy to calculate the accuracy of the predicted y compared to the true y.
Finally, you'll print the validation loss and accuracy using tf.print. (Please complete this section)
- print the final `loss`, which is the validation loss calculated by the last loop through the validation dataset.
- Also print the val_accuracy.result().
**HINT**
If you submit your assignment and see this error for your stderr output:
```
Cannot convert 1e-07 to EagerTensor of dtype int64
```
Please check your calls to train_accuracy and val_accuracy to make sure that you pass in the true and predicted values in the correct order (check the documentation to verify the order of parameters).
```
# Decorate this function with tf.function to enable autograph on the training loop
@tf.function
def train(model, optimizer, epochs, device, train_ds, train_loss, train_accuracy, valid_ds, val_loss, val_accuracy):
    '''
    Performs the entire training loop. Prints the loss and accuracy per step and epoch.

    Args:
        model (keras Model) -- image classifier
        optimizer (keras Optimizer) -- optimizer to use during training
        epochs (int) -- number of epochs
        device (str) -- device string, e.g. '/gpu:0' or '/cpu:0'
        train_ds (tf Dataset) -- the train set containing image-label pairs
        train_loss (keras Loss) -- loss function for training
        train_accuracy (keras Metric) -- accuracy metric for training
        valid_ds (Tensor) -- the val set containing image-label pairs
        val_loss (keras Loss) -- loss object for validation
        val_accuracy (keras Metric) -- accuracy metric for validation
    '''
    step = 0
    loss = 0.0
    for epoch in range(epochs):
        for x, y in train_ds:
            # training step number increments at each iteration
            step += 1
            with tf.device(device_name=device):
                ### START CODE HERE ###
                # Run one training step by passing appropriate model parameters
                # required by the function and finally get the loss to report the results
                loss = train_one_step(model, optimizer, x, y, train_loss, train_accuracy)
                ### END CODE HERE ###
            # Use tf.print to report your results.
            # Print the training step number, loss and accuracy
            tf.print('Step', step,
                     ': train loss', loss,
                     '; train accuracy', train_accuracy.result())

        # After each epoch, evaluate on the whole validation set
        with tf.device(device_name=device):
            for x, y in valid_ds:
                # Call the model on the batches of inputs x and get the predictions
                y_pred = model(x)
                # NOTE: loss printed below is the *last* validation batch's loss
                loss = val_loss(y, y_pred)
                val_accuracy(y, y_pred)

        # Print the validation loss and accuracy
        ### START CODE HERE ###
        tf.print('val loss', loss, '; val accuracy', val_accuracy.result())
        ### END CODE HERE ###
```
Run the `train` function to train your model! You should see the loss generally decreasing and the accuracy increasing.
**Note**: **Please let the training finish before submitting** and **do not** modify the next cell. It is required for grading. This will take around 5 minutes to run.
```
train(model, optimizer, EPOCHS, device, train_ds, train_loss, train_accuracy, valid_ds, val_loss, val_accuracy)
```
# Evaluation
You can now see how your model performs on test images. First, let's load the test dataset and generate predictions:
```
test_imgs = []
test_labels = []
predictions = []
with tf.device(device_name=device):
for images, labels in test_ds:
preds = model(images)
preds = preds.numpy()
predictions.extend(preds)
test_imgs.extend(images.numpy())
test_labels.extend(labels.numpy())
```
Let's define a utility function for plotting an image and its prediction.
```
# Utilities for plotting
class_names = ['horse', 'human']

def plot_image(i, predictions_array, true_label, img):
    """Display test image i with its predicted class, confidence and true class.

    Args:
        i: index into the three parallel sequences below.
        predictions_array: per-image model outputs (softmax scores).
        true_label: per-image integer ground-truth labels.
        img: per-image pixel arrays.
    """
    # select the i-th entry of each parallel sequence
    predictions_array, true_label, img = predictions_array[i], true_label[i], img[i]
    plt.grid(False)
    plt.xticks([])
    plt.yticks([])

    img = np.squeeze(img)

    plt.imshow(img, cmap=plt.cm.binary)

    # predicted class = argmax over the score vector
    predicted_label = np.argmax(predictions_array)

    # green-colored annotations will mark correct predictions. red otherwise.
    if predicted_label == true_label:
        color = 'green'
    else:
        color = 'red'

    # print the true label first (console cross-check alongside the plot)
    print(true_label)

    # show the image and overlay the prediction with its confidence percentage
    plt.xlabel("{} {:2.0f}% ({})".format(class_names[predicted_label],
                                         100*np.max(predictions_array),
                                         class_names[true_label]),
               color=color)
```
### Plot the result of a single image
Choose an index and display the model's prediction for that image.
```
# Visualize the outputs
# you can modify the index value here from 0 to 255 to test different images
index = 8
plt.figure(figsize=(6,3))
plt.subplot(1,2,1)
plot_image(index, predictions, test_labels, test_imgs)
plt.show()
```
| github_jupyter |
# Rejection Sampling
Rejection sampling, or "accept-reject Monte Carlo", is a Monte Carlo method used to generate observations from distributions. As it is a Monte Carlo method it can also be used for numerical integration.
## Monte Carlo Integration
### Example: Approximation of $\pi$
Enclose a quadrant of a circle of radius $1$ in a square of side length $1$. Then uniformly sample points inside the bounds of the square in Cartesian coordinates. If the point lies inside the circle quadrant, record this information. At the end of many throws, the ratio of points inside the circle to all points thrown will approximate the ratio of the area of the circle quadrant to the area of the square
$$
\frac{\text{points inside circle}}{\text{all points thrown}} \approx \frac{\text{area of circle quadrant}}{\text{area of square}} = \frac{\pi r^2}{4\, l^2} = \frac{\pi}{4},
$$
thus, an approximation of $\pi$ can be found to be
$$
\pi \approx 4 \cdot \frac{\text{points inside circle}}{\text{all points thrown}}.
$$
```
import numpy as np
import matplotlib.pyplot as plt
def approximate_pi(n_throws=10000, draw=True):
    """Approximate pi by accept-reject Monte Carlo over the unit square.

    Uniform points are thrown into the unit square; the fraction landing
    inside the unit-circle quadrant approximates pi/4.

    Args:
        n_throws: number of random points to sample.
        draw: if True, scatter-plot accepted (red) and rejected (blue) points.

    Returns:
        The Monte Carlo estimate of pi (previously only printed, never
        returned; now returned so callers can use the value).
    """
    x_coord = np.random.uniform(0, 1, n_throws)
    y_coord = np.random.uniform(0, 1, n_throws)
    # vectorized acceptance test: radius < 1  <=>  x^2 + y^2 < 1 (no sqrt or
    # Python-level loop needed)
    inside = x_coord ** 2 + y_coord ** 2 < 1
    n_circle_points = int(np.count_nonzero(inside))

    approx_pi = 4 * (n_circle_points / n_throws)
    print(f"The approximation of pi after {n_throws} throws is: {approx_pi}")
    if draw:
        plt.plot(x_coord[inside], y_coord[inside], "ro")
        plt.plot(x_coord[~inside], y_coord[~inside], "bo")
        plt.xlabel(r"$x$")
        plt.ylabel(r"$y$")
        plt.show()
    return approx_pi
approximate_pi()
```
## Sampling Distributions
To approximate a statistical distribution one can also use accept-reject Monte Carlo to approximate the distribution.
### Example: Approximation of Gaussian Distribution
```
import scipy.stats as stats
```
The Gaussian has a known analytic form
$$
f\left(\vec{x}\,\middle|\,\mu, \sigma\right) = \frac{1}{\sqrt{2\pi}\, \sigma} e^{-\left(x-\mu\right)^2/2\sigma^2}
$$
```
x = np.linspace(-5.0, 5.0, num=10000)
plt.plot(x, stats.norm.pdf(x, 0, 1), linewidth=2, color="black")
# Axes
# plt.title('Plot of $f(x;\mu,\sigma)$')
plt.xlabel(r"$x$")
plt.ylabel(r"$f(\vec{x}|\mu,\sigma)$")
# dist_window_w = sigma * 2
plt.xlim([-5, 5])
plt.show()
```
Given this it is seen that the Gaussian's maximum is at its mean. For the standard Gaussian this is at $\mu = 0$, and so it has a maximum at $1/\sqrt{2\pi}\,\sigma \approx 0.39$. Thus, this can be the maximum height of a rectangle that we need to throw our points in.
```
def approximate_Guassian(n_throws=10000, x_range=(-5, 5), draw=True):
    """Sample a standard Gaussian via accept-reject Monte Carlo.

    Points are thrown uniformly in the rectangle x_range x [0, peak pdf];
    a point is accepted if it falls under the N(0, 1) density curve, so
    the accepted x-coordinates are distributed as N(0, 1).

    Args:
        n_throws: number of candidate points to throw.
        x_range: (low, high) sampling interval for x. A tuple default
            replaces the original mutable-list default argument.
        draw: if True, plot the sampled space and the binned accepted x's.

    Returns:
        Array of accepted x samples (previously nothing was returned).
    """
    max_pdf = stats.norm.pdf(0, 0, 1)  # density peak, at the mean
    x_coord = np.random.uniform(x_range[0], x_range[1], n_throws)
    y_coord = np.random.uniform(0, max_pdf, n_throws)

    # Use Freedman–Diaconis rule for the histogram bin width
    # https://en.wikipedia.org/wiki/Freedman%E2%80%93Diaconis_rule
    # (np.cbrt(n_throws) is a scalar; the original np.cbrt([n_throws])
    # produced a 1-element array that leaked into the arithmetic)
    h = 2 * stats.iqr(x_coord) / np.cbrt(n_throws)
    n_bins = int((x_range[1] - x_range[0]) / h)

    # vectorized acceptance test: keep points under the density curve
    under_curve = stats.norm.pdf(x_coord, 0, 1) > y_coord
    accept_x = x_coord[under_curve]
    accept_y = y_coord[under_curve]
    reject_x = x_coord[~under_curve]
    reject_y = y_coord[~under_curve]

    if draw:
        fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(1.2 * 14, 1.2 * 4.5))
        x_space = np.linspace(x_range[0], x_range[1], num=10000)

        axes[0].plot(accept_x, accept_y, "ro")
        axes[0].plot(reject_x, reject_y, "bo")
        axes[0].plot(x_space, stats.norm.pdf(x_space, 0, 1), linewidth=2, color="black")
        axes[0].set_xlabel(r"$x$")
        axes[0].set_ylabel(r"$y$")
        axes[0].set_title(r"Sampled space of $f(\vec{x}|\mu,\sigma)$")

        axes[1].hist(accept_x, n_bins, density=True)
        axes[1].set_xlabel(r"$x$")
        axes[1].set_ylabel("Arbitrary normalized units")
        axes[1].set_title(r"Normalized binned distribution of accepted toys")

        plt.xlim(x_range)
        plt.show()
    return accept_x
approximate_Guassian()
```
This exercise is trivial but for more complex functional forms with more difficult integrals it can be a powerful numerical technique.
| github_jupyter |
# Ways to visualize top count with atoti
Given different categories of items, we will explore how to achieve the following with atoti:
- Visualize top 10 apps with the highest rating in table
- Visualize top 10 categories with most number of apps rated 5 in Pie chart
- Visualize top 10 apps for each category in subplots
See [pandas.ipynb](pandas.ipynb) to see how we can achieve the similar top count with Pandas.
__Note on data:__
We are using the [Google Play Store Apps data](https://www.kaggle.com/lava18/google-play-store-apps) from Kaggle. Data has been processed to convert strings with millions and thousands abbreviations into numeric data.
## Top count with atoti
```
import atoti as tt
from atoti.config import create_config
config = create_config(metadata_db="./metadata.db")
session = tt.create_session(config=config)
playstore = session.read_csv(
"s3://data.atoti.io/notebooks/topcount/googleplaystore_cleaned.csv",
store_name="playstore",
keys=["App", "Category", "Genres", "Current Ver"],
sampling_mode=tt.sampling.FULL,
types={"Reviews": tt.types.FLOAT, "Installs": tt.types.FLOAT},
)
playstore.head()
cube = session.create_cube(playstore, "Google Playstore")
cube.schema
```
### Top 10 apps with highest rating across categories
Use the content editor to apply a top count filter on the pivot table.
```
cube.visualize("Top 10 apps with highest rating across categories")
```
### Top 10 categories with the most number of apps rated 5
```
h = cube.hierarchies
l = cube.levels
m = cube.measures
m
```
#### Number of apps rated 5
Create a measure that counts the number of apps rated 5 within categories and at levels below the category.
```
m["Count with rating 5"] = tt.agg.sum(
tt.where(m["Rating.MEAN"] == 5, m["contributors.COUNT"], 0),
scope=tt.scope.origin(l["Category"], l["App"]),
)
```
We can drill down to different levels from category and the count is computed on the fly.
```
cube.visualize("Categories with apps rated 5")
```
Apply top count filter from **atoti editor** on the category by the `Count with rating 5` measure. The atoti editor is the atoti's Jupyterlab extension on the right with the <img src="https://data.atoti.io/notebooks/topcount/atoti_editor.png" alt="a." width="50"> icon.
```
cube.visualize("Top 10 categories with most number of apps rated 5")
```
### Top 10 apps for each category
Since we are performing top 10 apps filtering for each category, it's only right that we classify `App` under `Category`.
In this case, we create a multi-level hierarchy such as the following:
```
h["App Categories"] = [l["Category"], l["App"]]
h
```
This structure allows us to select at which level we want to apply the top count on from the atoti editor.
<img src="https://data.atoti.io/notebooks/topcount/filter_by_level.png" alt="Filter by level" width="30%">
```
cube.visualize("Top 10 apps with highest rating for each category")
```
#### Creating subplot to visualize top count per category
Again, go to the atoti's Jupyterlab extension and click on the ellipsis to show the subplot controls.

You should be able to add `Category` level to the subplot section sliced by `Apps`. Apply filter on `App` level of the `App Categories`
```
cube.visualize("Top 10 apps within each categories")
```
You can use the filter to select the categories that you want to view.
Alternatively, use `session.url` to access the web application and build an interactive dashboard with quick filters. Check out the link below.
```
session.url + "/#/dashboard/767"
```
| github_jupyter |
```
from HSICLassoVI.models import api
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
from sklearn.metrics import *
```
## Data1: Additive model
```
N, P = 1000, 256
mean = np.zeros(P)
cov = np.eye(P)
np.random.seed(1)
```
$$
y\in\mathbb{R}^{1000}, X\in\mathbb{R}^{1000\times256}
$$
```
X = np.random.multivariate_normal(mean = mean, cov = cov, size = N)
y = -2*np.sin(2*X[:,0]) + np.power(X[:,1],2) + X[:,2] + np.exp(-X[:,3]) + np.random.normal(loc=0, scale=1, size=N)
```
$$
y=-2\sin(2X_1)+X_2^2+X_3+\exp(-X_4)+\epsilon
$$
```
data1 = pd.DataFrame(np.c_[y.reshape(-1,1),X], columns = ['y'] + [f'X{p+1}' for p in range(P)])
data1.describe()
```
### Proposed (HSIC)
```
model_PH1 = api.Proposed_HSIC_Lasso(lam = [np.inf, 1e-5])
model_PH1.input(X,y,featname = data1.columns[1:])
model_PH1.regression_multi(kernels=['Gaussian'])
plt.figure(figsize=[8,6], dpi=200)
plt.bar(np.arange(20),model_PH1.get_index_score()[:20])
plt.xticks(np.arange(20),model_PH1.get_features()[:20], rotation=40)
plt.grid()
plt.show()
```
### Proposed (NOCCO)
```
model_PN1 = api.Proposed_NOCCO_Lasso(lam = [np.inf, 5e-5], eps = 1e-3)
model_PN1.input(X,y,featname = data1.columns[1:])
model_PN1.regression_multi(kernels=['Gaussian'])
plt.figure(figsize=[8,6], dpi=200)
plt.bar(np.arange(20),model_PN1.get_index_score()[:20])
plt.xticks(np.arange(20),model_PN1.get_features()[:20], rotation=40)
plt.grid()
plt.show()
```
## Data2: Non-additive model
```
N, P = 1000, 1000
mean = np.zeros(P)
cov = np.eye(P)
np.random.seed(2)
```
$$
y\in\mathbb{R}^{1000}, X\in\mathbb{R}^{1000\times1000}
$$
```
X = np.random.multivariate_normal(mean = mean, cov = cov, size = N)
y = X[:,0] * np.exp(2 * X[:,1]) + X[:,2]**2 + np.random.normal(loc=0, scale=1, size=N)
```
$$
y=X_1\exp(2X_2)+X_3^2+\epsilon
$$
```
data2 = pd.DataFrame(np.c_[y.reshape(-1,1),X], columns = ['y'] + [f'X{p+1}' for p in range(P)])
data2.describe()
```
### Proposed (HSIC)
```
model_PH2 = api.Proposed_HSIC_Lasso(lam = [np.inf,4e-6])
model_PH2.input(X,y,featname = data2.columns[1:])
model_PH2.regression_multi(kernels=['Gaussian'])
plt.figure(figsize=[8,6], dpi=200)
plt.bar(np.arange(20),model_PH2.get_index_score()[:20])
plt.xticks(np.arange(20),model_PH2.get_features()[:20], rotation=40)
plt.grid()
plt.show()
```
### Proposed (NOCCO)
```
model_PN2 = api.Proposed_NOCCO_Lasso(lam = [np.inf,2e-5], eps = 1e-3)
model_PN2.input(X,y,featname = data2.columns[1:])
model_PN2.regression_multi(kernels=['Gaussian'])
plt.figure(figsize=[8,6], dpi=200)
plt.bar(np.arange(20),model_PN2.get_index_score()[:20])
plt.xticks(np.arange(20),model_PN2.get_features()[:20], rotation=40)
plt.grid()
plt.show()
```
| github_jupyter |
<a href="https://colab.research.google.com/github/NeuromatchAcademy/course-content/blob/master/tutorials/W3D5_DeepLearning2/W3D5_Tutorial2.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Neuromatch Academy: Week 3, Day 5, Tutorial 2
# Deep Learning 2: Autoencoder extensions
__Content creators:__ Marco Brigham and the [CCNSS](https://www.ccnss.org/) team (2014-2018)
__Content reviewers:__ Itzel Olivos, Karen Schroeder, Karolina Stosio, Kshitij Dwivedi, Spiros Chavlis, Michael Waskom
---
# Tutorial Objectives
## Architecture
How can we improve the internal representation of shallow autoencoder with 2D bottleneck layer?
We may try the following architecture changes:
* Introducing additional hidden layers
* Wrapping latent space as a sphere

Adding hidden layers increases the number of learnable parameters to better use non-linear operations in encoding/decoding. Spherical geometry of latent space forces the network to use these additional degrees of freedom more efficiently.
Let's dive deeper into the technical aspects of autoencoders and improve their internal representations to reach the levels required for the *MNIST cognitive task*.
In this tutorial, you will:
- Increase the capacity of the network by introducing additional hidden layers
- Understand the effect of constraints in the geometry of latent space
```
# @title Video 1: Extensions
# Embed the lecture video in the notebook output and print a fallback link.
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="pgkrU9UqXiU", width=854, height=480, fs=1)
print("Video available at https://youtube.com/watch?v=" + video.id)
video
```
---
# Setup
Please execute the cell(s) below to initialize the notebook environment.
```
# Imports
import numpy as np
import matplotlib.pyplot as plt
import torch
from torch import nn, optim
from sklearn.datasets import fetch_openml
# @title Figure settings
!pip install plotly --quiet
import plotly.graph_objects as go
from plotly.colors import qualitative
%config InlineBackend.figure_format = 'retina'
plt.style.use("https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/nma.mplstyle")
# @title Helper functions
def downloadMNIST():
    """
    Download MNIST dataset and transform it to torch.Tensor

    Args:
      None

    Returns:
      x_train : training images (torch.Tensor) (60000, 28, 28)
      x_test  : test images (torch.Tensor) (10000, 28, 28)
      y_train : training labels (torch.Tensor) (60000, )
      y_test  : test labels (torch.Tensor) (10000, )
    """
    # as_frame=False forces plain numpy arrays; scikit-learn >= 0.24 otherwise
    # returns a DataFrame for this dataset and the positional indexing below
    # (X[train_idx]) would fail with a KeyError.
    X, y = fetch_openml('mnist_784', version=1, return_X_y=True, as_frame=False)

    # Truncate into the canonical 60k train / 10k test split.
    n_train = 60000
    n_test = 10000

    train_idx = np.arange(0, n_train)
    test_idx = np.arange(n_train, n_train + n_test)

    x_train, y_train = X[train_idx], y[train_idx]
    x_test, y_test = X[test_idx], y[test_idx]

    # Transform np.ndarrays to torch.Tensor, reshaping flat 784-vectors to 28x28.
    x_train = torch.from_numpy(np.reshape(x_train,
                                          (len(x_train),
                                           28, 28)).astype(np.float32))
    x_test = torch.from_numpy(np.reshape(x_test,
                                         (len(x_test),
                                          28, 28)).astype(np.float32))
    # Labels arrive as strings from OpenML; cast to int before wrapping.
    y_train = torch.from_numpy(y_train.astype(int))
    y_test = torch.from_numpy(y_test.astype(int))

    return (x_train, y_train, x_test, y_test)
def init_weights_kaiming_uniform(layer):
    """
    Apply Kaiming-uniform initialization to a linear PyTorch layer.

    Layers that are not nn.Linear are left untouched, so this function can
    be used with Sequential.apply().

    Args:
      layer (torch.Module)
          Pytorch layer

    Returns:
      Nothing.
    """
    # Guard clause: only nn.Linear layers carry weights we re-initialize.
    if not isinstance(layer, nn.Linear):
        return
    nn.init.kaiming_uniform_(layer.weight.data)
def init_weights_kaiming_normal(layer):
    """
    Apply Kaiming-normal initialization to a linear PyTorch layer.

    Layers that are not nn.Linear are left untouched, so this function can
    be used with Sequential.apply().

    Args:
      layer (torch.Module)
          Pytorch layer

    Returns:
      Nothing.
    """
    # Guard clause: only nn.Linear layers carry weights we re-initialize.
    if not isinstance(layer, nn.Linear):
        return
    nn.init.kaiming_normal_(layer.weight.data)
def get_layer_weights(layer):
    """
    Retrieves learnable parameters from PyTorch layer.

    Args:
      layer (torch.Module)
          Pytorch layer

    Returns:
      list of detached numpy arrays, one per learnable tensor; empty list
      if the layer has no learnable parameters.
    """
    # NOTE: the original guarded on `if layer.parameters():`, but .parameters()
    # returns a generator, which is always truthy — the guard was a no-op.
    # Iterating directly yields the same result ([] for parameter-free layers).
    return [param.detach().numpy() for param in layer.parameters()]
def print_parameter_count(net):
    """
    Prints count of learnable parameters per layer from PyTorch network.

    One line per layer (index, parameter count, layer repr) followed by the
    grand total.

    Args:
      net (torch.Sequential)
          Pytorch network

    Returns:
      Nothing.
    """
    total = 0
    for idx, layer in enumerate(net):
        # Element count summed over every learnable tensor of this layer.
        layer_total = sum(params.size for params in get_layer_weights(layer))
        total += layer_total
        print(f'{idx}\t {layer_total}\t {layer}')
    print(f'\nTotal:\t {total}')
def eval_mse(y_pred, y_true):
    """
    Evaluates mean square error (MSE) between y_pred and y_true.

    Args:
      y_pred (torch.Tensor)
          prediction samples
      y_true (torch.Tensor)
          ground truth samples

    Returns:
      float: MSE(y_pred, y_true)
    """
    # Gradients are irrelevant for evaluation — skip autograd bookkeeping.
    with torch.no_grad():
        mse = nn.MSELoss()(y_pred, y_true)
    return float(mse)
def eval_bce(y_pred, y_true):
    """
    Evaluates binary cross-entropy (BCE) between y_pred and y_true.

    Args:
      y_pred (torch.Tensor)
          prediction samples (values in (0, 1))
      y_true (torch.Tensor)
          ground truth samples

    Returns:
      float: BCE(y_pred, y_true)
    """
    # Gradients are irrelevant for evaluation — skip autograd bookkeeping.
    with torch.no_grad():
        bce = nn.BCELoss()(y_pred, y_true)
    return float(bce)
def plot_row(images, show_n=10, image_shape=None):
    """
    Plots rows of images from list of iterables (iterables: list, numpy array
    or torch.Tensor). Also accepts single iterable.
    Randomly selects images in each list element if item count > show_n.

    Args:
      images (iterable or list of iterables)
          single iterable with images, or list of iterables
      show_n (integer)
          maximum number of images per row
      image_shape (tuple or list)
          original shape of image if vectorized form

    Returns:
      Nothing.
    """
    # Wrap a single iterable so the loop below can treat everything as rows.
    if not isinstance(images, (list, tuple)):
        images = [images]

    for items_idx, items in enumerate(images):
        items = np.array(items)
        # Promote a single vectorized image to a 1-image batch.
        if items.ndim == 1:
            items = np.expand_dims(items, axis=0)

        # Randomly subsample to at most show_n images per row.
        if len(items) > show_n:
            selected = np.random.choice(len(items), show_n, replace=False)
            items = items[selected]

        # Restore 2D image shape when images arrive as flat vectors.
        if image_shape is not None:
            items = items.reshape([-1]+list(image_shape))

        # One figure per row; each image in its own subplot, axes hidden.
        plt.figure(figsize=(len(items) * 1.5, 2))
        for image_idx, image in enumerate(items):
            plt.subplot(1, len(items), image_idx + 1)
            plt.imshow(image, cmap='gray', vmin=image.min(), vmax=image.max())
            plt.axis('off')
        plt.tight_layout()
def to_s2(u):
    """
    Projects 3D coordinates to spherical coordinates (theta, phi) on the
    surface of the unit sphere S2.

    theta: [0, pi]
    phi: [-pi, pi]

    Args:
      u (list, numpy array or torch.Tensor of floats)
          3D coordinates

    Returns:
      Spherical coordinates (theta, phi) on surface of unit sphere S2.
    """
    x, y, z = u[:, 0], u[:, 1], u[:, 2]
    radius = np.sqrt(x ** 2 + y ** 2 + z ** 2)
    # Polar angle from z / r; azimuth via arctan2(x, y).
    # NOTE: arctan2(x, y) (not the conventional (y, x)) intentionally matches
    # the inverse mapping in to_u3, so the round trip stays consistent.
    polar = np.arccos(z / radius)
    azimuth = np.arctan2(x, y)
    return np.stack([polar, azimuth], axis=0).T
def to_u3(s):
    """
    Converts 2D coordinates (theta, phi) on the surface of the unit sphere S2
    back to 3D Cartesian coordinates on that surface.

    Args:
      s (list, numpy array or torch.Tensor of floats)
          2D coordinates on unit sphere S_2

    Returns:
      3D coordinates on surface of unit sphere S_2
    """
    polar, azimuth = s[:, 0], s[:, 1]
    # Inverse of to_s2: sin(phi) feeds x and cos(phi) feeds y, matching the
    # arctan2(x, y) convention used there.
    coords = [np.sin(polar) * np.sin(azimuth),
              np.sin(polar) * np.cos(azimuth),
              np.cos(polar)]
    return np.array(coords).T
def xy_lim(x):
    """
    Compute padded axis limits for plt.xlim and plt.ylim from the columnwise
    minimum and maximum of x.

    Args:
      x (list, numpy array or torch.Tensor of floats)
          data to be plotted

    Returns:
      ([xmin, xmax], [ymin, ymax]) — limits padded by 5% of the data range
      (plus machine epsilon so degenerate ranges stay non-empty).
    """
    lo = np.min(x, axis=0)
    hi = np.max(x, axis=0)
    # NOTE: the upper pad is computed against the already-padded lower bound,
    # mirroring the original implementation's statement order exactly.
    lo = lo - np.abs(hi - lo) * 0.05 - np.finfo(float).eps
    hi = hi + np.abs(hi - lo) * 0.05 + np.finfo(float).eps
    return [lo[0], hi[0]], [lo[1], hi[1]]
def plot_generative(x, decoder_fn, image_shape, n_row=16, s2=False):
    """
    Plots images reconstructed by decoder_fn from a 2D grid in
    latent space that is determined by minimum and maximum values in x.

    Args:
      x (list, numpy array or torch.Tensor of floats)
          2D or 3D coordinates in latent space
      decoder_fn (function)
          function returning vectorized images from 2D latent space coordinates
      image_shape (tuple or list)
          original shape of image
      n_row (integer)
          number of rows in grid
      s2 (boolean)
          convert 3D coordinates (x, y, z) to spherical coordinates (theta, phi)

    Returns:
      Nothing.
    """
    # Collapse 3D sphere coordinates to (theta, phi) before gridding.
    if s2:
        x = to_s2(np.array(x))

    xlim, ylim = xy_lim(np.array(x))

    # Grid spacing; cells are centered (offset by dx / 2) inside the limits.
    dx = (xlim[1] - xlim[0]) / n_row
    grid = [np.linspace(ylim[0] + dx / 2, ylim[1] - dx / 2, n_row),
            np.linspace(xlim[0] + dx / 2, xlim[1] - dx / 2, n_row)]

    # Single canvas holding the n_row x n_row tiled reconstructions.
    canvas = np.zeros((image_shape[0] * n_row, image_shape[1] * n_row))

    cmap = plt.get_cmap('gray')

    # Rows run top-to-bottom (reversed y axis), columns left-to-right.
    for j, latent_y in enumerate(grid[0][::-1]):
        for i, latent_x in enumerate(grid[1]):
            latent = np.array([[latent_x, latent_y]], dtype=np.float32)
            # Map the 2D grid point back onto the sphere for the decoder.
            if s2:
                latent = to_u3(latent)
            # Decoding is inference only — no gradients needed.
            with torch.no_grad():
                x_decoded = decoder_fn(torch.from_numpy(latent))
            x_decoded = x_decoded.reshape(image_shape)
            canvas[j * image_shape[0]: (j + 1) * image_shape[0],
                   i * image_shape[1]: (i + 1) * image_shape[1]] = x_decoded
    plt.imshow(canvas, cmap=cmap, vmin=canvas.min(), vmax=canvas.max())
    plt.axis('off')
def plot_latent(x, y, show_n=500, s2=False, fontdict=None, xy_labels=None):
    """
    Plots digit class of each sample in 2D latent space coordinates.

    Args:
      x (list, numpy array or torch.Tensor of floats)
          2D coordinates in latent space
      y (list, numpy array or torch.Tensor of floats)
          digit class of each sample
      show_n (integer)
          maximum number of samples to plot
      s2 (boolean)
          convert 3D coordinates (x, y, z) to spherical coordinates (theta, phi)
      fontdict (dictionary)
          style option for plt.text
      xy_labels (list)
          optional list with [xlabel, ylabel]

    Returns:
      Nothing.
    """
    if fontdict is None:
        fontdict = {'weight': 'bold', 'size': 12}

    # Collapse 3D sphere coordinates to (theta, phi) before plotting.
    if s2:
        x = to_s2(np.array(x))

    cmap = plt.get_cmap('tab10')

    # Randomly subsample to keep the scatter readable.
    if len(x) > show_n:
        selected = np.random.choice(len(x), show_n, replace=False)
        x = x[selected]
        y = y[selected]

    # Draw each sample as its digit character, colored by class
    # (tab10 provides exactly 10 colors — one per digit).
    for my_x, my_y in zip(x, y):
        plt.text(my_x[0], my_x[1], str(int(my_y)),
                 color=cmap(int(my_y) / 10.),
                 fontdict=fontdict,
                 horizontalalignment='center',
                 verticalalignment='center',
                 alpha=0.8)

    xlim, ylim = xy_lim(np.array(x))
    plt.xlim(xlim)
    plt.ylim(ylim)

    if s2:
        if xy_labels is None:
            xy_labels = [r'$\varphi$', r'$\theta$']
        # Angle ticks: theta in [0, pi] on x, phi in [-pi, pi] on y.
        plt.xticks(np.arange(0, np.pi + np.pi / 6, np.pi / 6),
                   ['0', '$\pi/6$', '$\pi/3$', '$\pi/2$',
                    '$2\pi/3$', '$5\pi/6$', '$\pi$'])
        plt.yticks(np.arange(-np.pi, np.pi + np.pi / 3, np.pi / 3),
                   ['$-\pi$', '$-2\pi/3$', '$-\pi/3$', '0',
                    '$\pi/3$', '$2\pi/3$', '$\pi$'])

    if xy_labels is None:
        xy_labels = ['$Z_1$', '$Z_2$']

    plt.xlabel(xy_labels[0])
    plt.ylabel(xy_labels[1])
def plot_latent_generative(x, y, decoder_fn, image_shape, s2=False,
                           title=None, xy_labels=None):
    """
    Two horizontal subplots generated with encoder map and decoder grid.

    Args:
      x (list, numpy array or torch.Tensor of floats)
          2D coordinates in latent space
      y (list, numpy array or torch.Tensor of floats)
          digit class of each sample
      decoder_fn (function)
          function returning vectorized images from 2D latent space coordinates
      image_shape (tuple or list)
          original shape of image
      s2 (boolean)
          convert 3D coordinates (x, y, z) to spherical coordinates (theta, phi)
      title (string)
          plot title
      xy_labels (list)
          optional list with [xlabel, ylabel]

    Returns:
      Nothing.
    """
    fig = plt.figure(figsize=(12, 6))

    if title is not None:
        fig.suptitle(title, y=1.05)

    # Left panel: latent-space scatter of digit classes (encoder side).
    ax = fig.add_subplot(121)
    ax.set_title('Encoder map', y=1.05)
    plot_latent(x, y, s2=s2, xy_labels=xy_labels)

    # Right panel: grid of decoded images over latent space (decoder side).
    ax = fig.add_subplot(122)
    ax.set_title('Decoder grid', y=1.05)
    plot_generative(x, decoder_fn, image_shape, s2=s2)

    plt.tight_layout()
    plt.show()
def plot_latent_3d(my_x, my_y, show_text=True, show_n=500):
    """
    Plot digit class or marker in 3D latent space coordinates.

    Args:
      my_x (list, numpy array or torch.Tensor of floats)
          3D coordinates in latent space
      my_y (list, numpy array or torch.Tensor of floats)
          digit class of each sample
      show_text (boolean)
          render each sample as its digit character (True) or as a plain
          marker with hover text (False)
      show_n (integer)
          number of samples to plot

    Returns:
      Nothing.
    """
    # Plotly 3D scene: hide hover spikes and label axes by latent unit.
    layout = {'margin': {'l': 0, 'r': 0, 'b': 0, 't': 0},
              'scene': {'xaxis': {'showspikes': False,
                                  'title': 'z1'},
                        'yaxis': {'showspikes': False,
                                  'title': 'z2'},
                        'zaxis': {'showspikes': False,
                                  'title': 'z3'}}
              }

    # Random subsample of show_n points, colored by digit class (T10 palette).
    selected_idx = np.random.choice(len(my_x), show_n, replace=False)
    colors = [qualitative.T10[idx] for idx in my_y[selected_idx]]

    x = my_x[selected_idx, 0]
    y = my_x[selected_idx, 1]
    z = my_x[selected_idx, 2]
    text = my_y[selected_idx]

    if show_text:
        # Digits rendered directly as colored text; hover adds no info here.
        trace = go.Scatter3d(x=x, y=y, z=z, text=text,
                             mode='text',
                             textfont={'color': colors, 'size': 12}
                             )
        layout['hovermode'] = False
    else:
        # Plain markers with the digit class shown on hover.
        trace = go.Scatter3d(x=x, y=y, z=z, text=text,
                             hoverinfo='text', mode='markers',
                             marker={'size': 5, 'color': colors, 'opacity': 0.8}
                             )

    fig = go.Figure(data=trace, layout=layout)
    fig.show()
def runSGD(net, input_train, input_test, criterion='bce',
           n_epochs=10, batch_size=32, verbose=False):
    """
    Trains autoencoder network with stochastic gradient descent with Adam
    optimizer and loss criterion. Train samples are shuffled, and loss is
    displayed at the end of each epoch for both MSE and BCE. Plots training
    loss at each minibatch (maximum of 500 randomly selected values).

    Args:
      net (torch network)
          ANN object (nn.Module)
      input_train (torch.Tensor)
          vectorized input images from train set
      input_test (torch.Tensor)
          vectorized input images from test set
      criterion (string)
          train loss: 'bce' or 'mse'
      n_epochs (integer)
          number of full iterations of training data
      batch_size (integer)
          number of element in mini-batches
      verbose (boolean)
          print final loss

    Returns:
      Nothing.
    """
    # Initialize loss function
    if criterion == 'mse':
        loss_fn = nn.MSELoss()
    elif criterion == 'bce':
        loss_fn = nn.BCELoss()
    else:
        # NOTE(review): an unknown criterion only prints a warning and leaves
        # loss_fn undefined, which raises NameError below — consider raising.
        print('Please specify either "mse" or "bce" for loss criterion')

    # Initialize SGD optimizer
    optimizer = optim.Adam(net.parameters())

    # Placeholder for loss
    track_loss = []

    print('Epoch', '\t', 'Loss train', '\t', 'Loss test')
    for i in range(n_epochs):
        # Reshuffle the training set and split it into mini-batches.
        shuffle_idx = np.random.permutation(len(input_train))
        batches = torch.split(input_train[shuffle_idx], batch_size)

        for batch in batches:
            # Autoencoder: the target is the input itself.
            output_train = net(batch)
            loss = loss_fn(output_train, batch)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # Keep track of loss at each minibatch
            track_loss += [float(loss)]

        loss_epoch = f'{i+1}/{n_epochs}'
        # Report full train/test loss at the end of the epoch (no gradients).
        with torch.no_grad():
            output_train = net(input_train)
            loss_train = loss_fn(output_train, input_train)
            loss_epoch += f'\t {loss_train:.4f}'

            output_test = net(input_test)
            loss_test = loss_fn(output_test, input_test)
            loss_epoch += f'\t\t {loss_test:.4f}'

        print(loss_epoch)

    if verbose:
        # Print loss (both metrics, computed on the final outputs above)
        loss_mse = f'\nMSE\t {eval_mse(output_train, input_train):0.4f}'
        loss_mse += f'\t\t {eval_mse(output_test, input_test):0.4f}'
        print(loss_mse)

        loss_bce = f'BCE\t {eval_bce(output_train, input_train):0.4f}'
        loss_bce += f'\t\t {eval_bce(output_test, input_test):0.4f}'
        print(loss_bce)

    # Plot loss, subsampled to at most ~500 points
    step = int(np.ceil(len(track_loss) / 500))
    x_range = np.arange(0, len(track_loss), step)
    plt.figure()
    plt.plot(x_range, track_loss[::step], 'C0')
    plt.xlabel('Iterations')
    plt.ylabel('Loss')
    plt.xlim([0, None])
    plt.ylim([0, None])
    plt.show()
class NormalizeLayer(nn.Module):
    """
    PyTorch layer (nn.Module) that rescales each activation vector to unit
    L2 norm, i.e. projects activations onto the surface of the unit sphere.

    Args:
      None.

    Returns:
      Object inherited from nn.Module class.
    """

    def __init__(self):
        super().__init__()

    def forward(self, x):
        # Normalize along the feature dimension (dim=1) using the L2 norm.
        return nn.functional.normalize(x, p=2, dim=1)
```
---
# Section 1: Download and prepare MNIST dataset
We use the helper function `downloadMNIST` to download the dataset and transform it into `torch.Tensor` and assign train and test sets to (`x_train`, `y_train`) and (`x_test`, `y_test`).
The variable `input_size` stores the length of *vectorized* versions of the images `input_train` and `input_test` for training and test images.
**Instructions:**
* Please execute the cell below
```
# Download MNIST
x_train, y_train, x_test, y_test = downloadMNIST()

# Scale pixel intensities to [0, 1].
x_train = x_train / 255
x_test = x_test / 255

image_shape = x_train.shape[1:]

# Length of the flattened (vectorized) image.
input_size = np.prod(image_shape)

# Flatten images into (n_samples, input_size) for the dense autoencoder.
input_train = x_train.reshape([-1, input_size])
input_test = x_test.reshape([-1, input_size])

# Fixed random picks reused by the visualization cells below.
test_selected_idx = np.random.choice(len(x_test), 10, replace=False)
train_selected_idx = np.random.choice(len(x_train), 10, replace=False)

print(f'shape image \t \t {image_shape}')
print(f'shape input_train \t {input_train.shape}')
print(f'shape input_test \t {input_test.shape}')
```
---
# Section 2: Deeper autoencoder (2D)
The internal representation of shallow autoencoder with 2D latent space is similar to PCA, which shows that the autoencoder is not fully leveraging non-linear capabilities to model data. Adding capacity in terms of learnable parameters takes advantage of non-linear operations in encoding/decoding to capture non-linear patterns in data.
Adding hidden layers enables us to introduce additional parameters, either layerwise or depthwise. The same amount $N$ of additional parameters can be added in a single layer or distributed among several layers. Adding several hidden layers reduces the compression/decompression ratio of each layer.
## Exercise 1: Build deeper autoencoder (2D)
Implement this deeper version of the ANN autoencoder by adding four hidden layers. The number of units per layer in the encoder is the following:
```
784 -> 392 -> 64 -> 2
```
The shallow autoencoder has a compression ratio of **784:2 = 392:1**. The first additional hidden layer has a compression ratio of **2:1**, followed by a hidden layer that sets the bottleneck compression ratio of **32:1**.
The choice of hidden layer size aims to reduce the compression rate in the bottleneck layer while increasing the count of trainable parameters. For example, if the compression rate of the first hidden layer doubles from **2:1** to **4:1**, the count of trainable parameters halves from 667K to 333K.
This deep autoencoder's performance may be further improved by adding additional hidden layers and by increasing the count of trainable parameters in each layer. These improvements have a diminishing return due to challenges associated with training under high parameter count and depth. One option explored in the *Bonus* section is to add a first hidden layer with 2x - 3x the input size. This size increase results in millions of parameters at the cost of longer training time.
Weight initialization is particularly important in deep networks. The availability of large datasets and weight initialization likely drove the deep learning revolution of 2010. We'll implement Kaiming normal as follows:
```
model[:-2].apply(init_weights_kaiming_normal)
```
**Instructions:**
* Add four additional layers and activation functions to the network
* Adjust the definitions of `encoder` and `decoder`
* Check learnable parameter count for this autoencoder by executing the last cell
```
# Exercise scaffold: students add four hidden layers to reach the 2D
# bottleneck (784 -> 392 -> 64 -> 2) and mirror them back in the decoder.
encoding_size = 2
model = nn.Sequential(
    nn.Linear(input_size, int(input_size / 2)),
    nn.PReLU(),
    nn.Linear(int(input_size / 2), encoding_size * 32),
    #################################################
    ## TODO for students: add layers to build deeper autoencoder
    #################################################
    # Add activation function
    # ...,
    # Add another layer
    # nn.Linear(..., ...),
    # Add activation function
    # ...,
    # Add another layer
    # nn.Linear(..., ...),
    # Add activation function
    # ...,
    # Add another layer
    # nn.Linear(..., ...),
    # Add activation function
    # ...,
    # Add another layer
    # nn.Linear(..., ...),
    # Add activation function
    # ....
    )

# Kaiming-normal init for every module except the final Linear + Sigmoid pair.
model[:-2].apply(init_weights_kaiming_normal)
print(f'Autoencoder \n\n {model}\n')

# Adjust the value n_l to split your model correctly
# n_l = ...

# uncomment when you fill the code
# encoder = model[:n_l]
# decoder = model[n_l:]
# print(f'Encoder \n\n {encoder}\n')
# print(f'Decoder \n\n {decoder}')
# to_remove solution
# Deep autoencoder: encoder 784 -> 392 -> 64 -> 2, decoder mirrors it back.
encoding_size = 2
model = nn.Sequential(
    nn.Linear(input_size, int(input_size / 2)),
    nn.PReLU(),
    nn.Linear(int(input_size / 2), encoding_size * 32),
    # Add activation function
    nn.PReLU(),
    # Add another layer
    nn.Linear(encoding_size * 32, encoding_size),
    # Add activation function
    nn.PReLU(),
    # Add another layer
    nn.Linear(encoding_size, encoding_size * 32),
    # Add activation function
    nn.PReLU(),
    # Add another layer
    nn.Linear(encoding_size * 32, int(input_size / 2)),
    # Add activation function
    nn.PReLU(),
    # Add another layer
    nn.Linear(int(input_size / 2), input_size),
    # Add activation function
    nn.Sigmoid()
    )

# Kaiming-normal init for every module except the final Linear + Sigmoid pair.
model[:-2].apply(init_weights_kaiming_normal)
print(f'Autoencoder \n\n {model}\n')

# Adjust the value n_l to split your model correctly
# Encoder ends after the bottleneck Linear and its PReLU (modules 0-5).
n_l = 6

# uncomment when you fill the code
encoder = model[:n_l]
decoder = model[n_l:]
print(f'Encoder \n\n {encoder}\n')
print(f'Decoder \n\n {decoder}')
```
**Helper function:** `print_parameter_count`
Please uncomment the line below to inspect this function.
```
# help(print_parameter_count)
```
## Train the autoencoder
Train the network for `n_epochs=10` epochs with `batch_size=128`, and observe how the internal representation successfully captures additional digit classes.
The encoder map shows well-separated clusters that correspond to the associated digits in the decoder grid. The decoder grid also shows that the network is robust to digit skewness, i.e., digits leaning to the left or the right are recognized in the same digit class.
**Instructions:**
* Please execute the cells below
```
n_epochs = 10
batch_size = 128

runSGD(model, input_train, input_test, n_epochs=n_epochs,
       batch_size=batch_size)

# Reconstructions and latent codes for the test set (inference only).
with torch.no_grad():
    output_test = model(input_test)
    latent_test = encoder(input_test)

# Compare originals vs. reconstructions, then show the 2D latent map.
plot_row([input_test[test_selected_idx], output_test[test_selected_idx]],
         image_shape=image_shape)
plot_latent_generative(latent_test, y_test, decoder, image_shape=image_shape)
```
---
# Section 3: Spherical latent space
The previous architecture generates representations that typically spread in different directions from coordinate $(z_1, z_2)=(0,0)$. This effect is due to the initialization of weights distributed randomly around `0`.
Adding a third unit to the bottleneck layer defines a coordinate $(z_1, z_2, z_3)$ in 3D space. The latent space from such a network will still spread out from $(z_1, z_2, z_3)=(0, 0, 0)$.
Collapsing the latent space on the surface of a sphere removes the possibility of spreading indefinitely from the origin $(0, 0, 0)$ in any direction since this will eventually lead back to the origin. This constraint generates a representation that fills the surface of the sphere.

Projecting to the surface of the sphere is implemented by dividing the coordinates $(z_1, z_2, z_3)$ by their $L_2$ norm.
$(z_1, z_2, z_3)\longmapsto (s_1, s_2, s_3)=(z_1, z_2, z_3)/\|(z_1, z_2, z_3)\|_2=(z_1, z_2, z_3)/ \sqrt{z_1^2+z_2^2+z_3^2}$
This mapping projects to the surface of the [$S_2$ sphere](https://en.wikipedia.org/wiki/N-sphere) with unit radius. (Why?)
## Section 3.1: Build and train autoencoder (3D)
We start by adding one unit to the bottleneck layer and visualize the latent space in 3D.
Please execute the cell below.
```
# Same deep autoencoder, now with a 3D bottleneck (encoding_size = 3).
encoding_size = 3
model = nn.Sequential(
    nn.Linear(input_size, int(input_size / 2)),
    nn.PReLU(),
    nn.Linear(int(input_size / 2), encoding_size * 32),
    nn.PReLU(),
    nn.Linear(encoding_size * 32, encoding_size),
    nn.PReLU(),
    nn.Linear(encoding_size, encoding_size * 32),
    nn.PReLU(),
    nn.Linear(encoding_size * 32, int(input_size / 2)),
    nn.PReLU(),
    nn.Linear(int(input_size / 2), input_size),
    nn.Sigmoid()
    )

# Kaiming-normal init for every module except the final Linear + Sigmoid pair.
model[:-2].apply(init_weights_kaiming_normal)

# Encoder ends after the bottleneck Linear and its PReLU (modules 0-5).
encoder = model[:6]
decoder = model[6:]

print(f'Autoencoder \n\n {model}')
```
## Section 3.2: Train the autoencoder
Train the network for `n_epochs=10` epochs with `batch_size=128`. Observe how the internal representation spreads from the origin and reaches much lower loss due to the additional degree of freedom in the bottleneck layer.
**Instructions:**
* Please execute the cell below
```
# Train the 3D-bottleneck autoencoder with the default BCE criterion.
n_epochs = 10
batch_size = 128

runSGD(model, input_train, input_test, n_epochs=n_epochs,
       batch_size=batch_size)
```
## Section 3.3: Visualize the latent space in 3D
**Helper function**: `plot_latent_3d`
Please uncomment the line below to inspect this function.
```
# help(plot_latent_3d)

# Encode the test set (inference only) and show its 3D latent scatter.
with torch.no_grad():
    latent_test = encoder(input_test)

plot_latent_3d(latent_test, y_test)
```
### Exercise 2: Build deep autoencoder (2D) with latent spherical space
We now constrain the latent space to the surface of a sphere $S_2$.
**Instructions:**
* Add the custom layer `NormalizeLayer` after the bottleneck layer
* Adjust the definitions of `encoder` and `decoder`
* Experiment with keyword `show_text=False` for `plot_latent_3d`
**Helper function**: `NormalizeLayer`
Please uncomment the line below to inspect this function.
```
# help(NormalizeLayer)

# Exercise scaffold: students insert NormalizeLayer after the bottleneck to
# constrain the 3D latent space to the surface of the unit sphere S2.
encoding_size = 3
model = nn.Sequential(
    nn.Linear(input_size, int(input_size / 2)),
    nn.PReLU(),
    nn.Linear(int(input_size / 2), encoding_size * 32),
    nn.PReLU(),
    nn.Linear(encoding_size * 32, encoding_size),
    nn.PReLU(),
    #################################################
    ## TODO for students: add custom normalize layer
    #################################################
    # add the normalization layer
    # ...,
    nn.Linear(encoding_size, encoding_size * 32),
    nn.PReLU(),
    nn.Linear(encoding_size * 32, int(input_size / 2)),
    nn.PReLU(),
    nn.Linear(int(input_size / 2), input_size),
    nn.Sigmoid()
    )

# Kaiming-normal init for every module except the final Linear + Sigmoid pair.
model[:-2].apply(init_weights_kaiming_normal)
print(f'Autoencoder \n\n {model}\n')

# Adjust the value n_l to split your model correctly
# n_l = ...

# uncomment when you fill the code
# encoder = model[:n_l]
# decoder = model[n_l:]
# print(f'Encoder \n\n {encoder}\n')
# print(f'Decoder \n\n {decoder}')
# to_remove solution
# Spherical autoencoder: NormalizeLayer projects the 3D bottleneck onto S2.
encoding_size = 3
model = nn.Sequential(
    nn.Linear(input_size, int(input_size / 2)),
    nn.PReLU(),
    nn.Linear(int(input_size / 2), encoding_size * 32),
    nn.PReLU(),
    nn.Linear(encoding_size * 32, encoding_size),
    nn.PReLU(),
    # add the normalization layer
    NormalizeLayer(),
    nn.Linear(encoding_size, encoding_size * 32),
    nn.PReLU(),
    nn.Linear(encoding_size * 32, int(input_size / 2)),
    nn.PReLU(),
    nn.Linear(int(input_size / 2), input_size),
    nn.Sigmoid()
    )

# Kaiming-normal init for every module except the final Linear + Sigmoid pair.
model[:-2].apply(init_weights_kaiming_normal)
print(f'Autoencoder \n\n {model}\n')

# Adjust the value n_l to split your model correctly
# Encoder now also includes the NormalizeLayer (modules 0-6).
n_l = 7

# uncomment when you fill the code
encoder = model[:n_l]
decoder = model[n_l:]
print(f'Encoder \n\n {encoder}\n')
print(f'Decoder \n\n {decoder}')
```
## Section 3.4: Train the autoencoder
Train the network for `n_epochs=10` epochs with `batch_size=128` and observe how the loss rises again, back to a level comparable to the model with 2D latent space.
**Instructions:**
* Please execute the cell below
```
# Train the sphere-constrained autoencoder.
n_epochs = 10
batch_size = 128

runSGD(model, input_train, input_test, n_epochs=n_epochs,
       batch_size=batch_size)

# Encode the test set (inference only) and show the latent sphere in 3D.
with torch.no_grad():
    latent_test = encoder(input_test)

plot_latent_3d(latent_test, y_test)
```
## Section 3.5: Visualize latent space on surface of $S_2$
The 3D coordinates $(s_1, s_2, s_3)$ on the surface of the unit sphere $S_2$ can be mapped to [spherical coordinates](https://en.wikipedia.org/wiki/Spherical_coordinate_system) $(r, \theta, \phi)$, as follows:
$$
\begin{aligned}
r &= \sqrt{s_1^2 + s_2^2 + s_3^2} \\
\phi &= \arctan \frac{s_2}{s_1} \\
\theta &= \arccos\frac{s_3}{r}
\end{aligned}
$$

What is the domain (numerical range) spanned by ($\theta, \phi)$?
We return to a 2D representation since the angles $(\theta, \phi)$ are the only degrees of freedom on the surface of the sphere. Add the keyword `s2=True` to `plot_latent_generative` to un-wrap the sphere's surface similar to a world map.
Task: Check the numerical range of the plot axis to help identify $\theta$ and $\phi$, and visualize the unfolding of the 3D plot from the previous exercise.
**Instructions:**
* Please execute the cells below
```
# Reconstruct the test set (inference only).
with torch.no_grad():
    output_test = model(input_test)

# Compare originals vs. reconstructions.
plot_row([input_test[test_selected_idx], output_test[test_selected_idx]],
         image_shape=image_shape)

# s2=True unwraps the sphere surface to (theta, phi) like a world map.
plot_latent_generative(latent_test, y_test, decoder,
                       image_shape=image_shape, s2=True)
```
---
# Summary
We learned two techniques to improve representation capacity: adding a few hidden layers and projecting latent space on the sphere $S_2$.
The expressive power of an autoencoder improves with additional hidden layers. Projecting the latent space onto the surface of $S_2$ spreads out digit classes in a more visually pleasing way but may not always produce a lower loss.
**Deep autoencoder architectures have rich internal representations to deal with sophisticated tasks such as the MNIST cognitive task.**
We now have powerful tools to explore how simple algorithms build robust models of the world by capturing relevant data patterns.
```
# @title Video 2: Wrap-up
# Embed the wrap-up video in the notebook output and print a fallback link.
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="GnkmzCqEK3E", width=854, height=480, fs=1)
print("Video available at https://youtube.com/watch?v=" + video.id)
video
```
---
# Bonus
## Deep and thick autoencoder
In this exercise, we first expand the first hidden layer to double the input size, followed by compression to half the input size leading to 3.8M parameters. Please **do not train this network during tutorial** due to long training time.
**Instructions:**
* Please uncomment and execute the cells below
```
# encoding_size = 3
# model = nn.Sequential(
# nn.Linear(input_size, int(input_size * 2)),
# nn.PReLU(),
# nn.Linear(int(input_size * 2), int(input_size / 2)),
# nn.PReLU(),
# nn.Linear(int(input_size / 2), encoding_size * 32),
# nn.PReLU(),
# nn.Linear(encoding_size * 32, encoding_size),
# nn.PReLU(),
# NormalizeLayer(),
# nn.Linear(encoding_size, encoding_size * 32),
# nn.PReLU(),
# nn.Linear(encoding_size * 32, int(input_size / 2)),
# nn.PReLU(),
# nn.Linear(int(input_size / 2), int(input_size * 2)),
# nn.PReLU(),
# nn.Linear(int(input_size * 2), input_size),
# nn.Sigmoid()
# )
# model[:-2].apply(init_weights_kaiming_normal)
# encoder = model[:9]
# decoder = model[9:]
# print_parameter_count(model)
# n_epochs = 5
# batch_size = 128
# runSGD(model, input_train, input_test, n_epochs=n_epochs,
# batch_size=batch_size)
# Visualization
# with torch.no_grad():
# output_test = model(input_test)
# plot_row([input_test[test_selected_idx], output_test[test_selected_idx]],
# image_shape=image_shape)
# plot_latent_generative(latent_test, y_test, decoder,
# image_shape=image_shape, s2=True)
```
| github_jupyter |
```
pip install cached_property
from PIL import Image
from cached_property import cached_property
from skimage import io
from torch.autograd import Variable
from torch.optim import lr_scheduler
from torch.utils.data import Dataset
from torchvision import transforms, datasets
import glob
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
is_cuda = torch.cuda.is_available()
%matplotlib inline
class FashionDataset(Dataset):
    """
    Image/label dataset backed by a CSV of metadata plus a directory of
    <id>.jpg images under root_dir.

    Rows are shuffled deterministically, split into train/test by
    `training_size`, restricted to ids with an existing image file, and
    labels with too few examples are dropped.
    """

    def __init__(self, csv_file, target_column, root_dir, transform=None, is_train=True, training_size=0.8, is_debug=True):
        # csv_file: metadata file name, relative to root_dir.
        # target_column: CSV column holding the label to predict.
        # transform: optional transform applied to each PIL image.
        # is_train: whether this instance serves the train or test split.
        # training_size: fraction of rows assigned to the train split.
        # is_debug: if True, keep only the first 10000 rows for fast iteration.
        self.target_column = target_column
        self.training_size = training_size
        self.csv_file = csv_file
        self.root_dir = root_dir
        self.transform = transform
        self.train = is_train
        self.is_debug = is_debug
        self.train_df, self.test_df = self._get_df()
        # Cache the label list / label set for the active split.
        if self.train:
            self.train_labels = self.train_df[self.target_column].to_list()
            self.labels_set = set(self.train_labels)
            print(f"# of labels: {len(self.labels_set)}")
        else:
            self.test_labels = self.test_df[self.target_column].to_list()
            self.labels_set = set(self.test_labels)
            print(f"# of labels: {len(self.labels_set)}")

    def _get_df(self):
        """Load, clean and split the metadata CSV into train/test frames."""
        # NOTE(review): error_bad_lines/warn_bad_lines are removed in recent
        # pandas (use on_bad_lines='skip' instead) — confirm pandas version.
        df = pd.read_csv(os.path.join(self.root_dir, self.csv_file), error_bad_lines=False, warn_bad_lines=False)
        # Shuffle rows deterministically and drop rows without a label.
        df = df.sample(frac=1, random_state=29, axis="index")
        df = df.dropna(axis=0, subset=[self.target_column])
        if self.is_debug:
            df = df.head(10000)

        # Collect ids of image files that actually exist on disk.
        image_ids = []
        for fd in glob.glob(os.path.join(self.root_dir, "images/*.jpg")):
            image_id = os.path.split(fd)[1][:-4]
            image_ids.append(int(image_id))

        # Take a inner set
        common_ids = set(df.id) & set(image_ids)
        print("Common ids: ", len(common_ids))
        df = df.loc[df.id.isin(common_ids)]

        # Split to training and test
        random_state = np.random.RandomState(29)
        df["is_train"] = random_state.choice([True, False], size=df.shape[0], p=[self.training_size, 1-self.training_size])

        # Keep only train labels with more than 5 examples.
        train_df = df.loc[(df.is_train==True)]
        self.train_df = self.filter_insufficient_labels(train_df, 5)

        # Only keep the labels in the training set.
        train_labels_set = set(self.train_df[self.target_column].to_list())
        test_df = df.loc[(df.is_train==False)]
        test_df = test_df.loc[(test_df[self.target_column].isin(train_labels_set))]
        self.test_df = self.filter_insufficient_labels(test_df, 1)
        return self.train_df, self.test_df

    def filter_insufficient_labels(self, df, thredhold):
        # NOTE(review): "thredhold" is a typo for "threshold" — rename in a
        # follow-up. Drops rows whose label occurs `thredhold` times or fewer.
        count_df = df.groupby(self.target_column).count().id.reset_index(name="counts")
        df = df.merge(count_df, on=self.target_column, how="left")
        return df.loc[(df.counts > thredhold)].reset_index()

    @cached_property
    def train_data(self):
        # Lazily load (and cache) all train-split images as one stacked tensor.
        print(f"is_training: {self.train}")
        self.image_ids = self.train_df.id.to_list()
        return self._read_images()

    @cached_property
    def test_data(self):
        # Lazily load (and cache) all test-split images as one stacked tensor.
        print(f"is_training: {self.train}")
        self.image_ids = self.test_df.id.to_list()
        return self._read_images()

    def _read_images(self):
        """Read self.image_ids as grayscale images, transform and stack them."""
        data = []
        for i in self.image_ids:
            filename = os.path.join(self.root_dir, "images", str(i) + ".jpg")
            img = Image.open(filename).convert("L")
            if self.transform:
                img = self.transform(img)
            data.append(img)
        # torch.stack requires self.transform to yield equally-shaped tensors.
        data = torch.stack(data, dim=0)
        return data

    def __len__(self):
        # Dataset length follows the active split.
        if self.train:
            return len(self.train_df)
        else:
            return len(self.test_df)

    def __getitem__(self, idx):
        # Load a single (image, label) pair from the active split on demand.
        if self.train:
            filename = os.path.join(self.root_dir, "images", str(self.train_df.loc[idx].id) + ".jpg")
            label = self.train_df.loc[idx][self.target_column]
        else:
            filename = os.path.join(self.root_dir, "images", str(self.test_df.loc[idx].id) + ".jpg")
            label = self.test_df.loc[idx][self.target_column]
        sample = Image.open(filename).convert("L")
        if self.transform:
            sample = self.transform(sample)
        return (sample, label)
class TripletDataset(Dataset):
    """
    Train: for each sample (anchor), randomly chooses a positive and a negative sample.
    Test: creates fixed triplets for testing.

    Wraps a dataset exposing ``train``, ``train_labels``/``test_labels`` and
    ``train_data``/``test_data``. ``__getitem__`` returns
    ``((anchor, positive, negative), [])``.
    """
    def __init__(self, dataset):
        self.dataset = dataset
        self.train = self.dataset.train
        if self.train:
            self.train_labels = self.dataset.train_labels
            self.train_data = self.dataset.train_data
            self.labels_set = set(self.train_labels)
            # Map each label to the indices of all samples carrying it.
            self.label_to_indices = {label: np.where(np.array(self.train_labels) == label)[0]
                                     for label in self.labels_set}
        else:
            self.test_labels = self.dataset.test_labels
            self.test_data = self.dataset.test_data
            self.labels_set = set(self.test_labels)
            self.label_to_indices = {label: np.where(np.array(self.test_labels) == label)[0]
                                     for label in self.labels_set}

            random_state = np.random.RandomState(29)
            # Generate fixed triplets [anchor, positive, negative] for testing.
            # BUG FIX: the negative label used to be drawn with the *global*
            # np.random.choice, so the supposedly fixed test triplets changed
            # between runs; all draws now use the seeded random_state.
            triplets = [[i,
                         random_state.choice(self.label_to_indices[self.test_labels[i]]),
                         random_state.choice(self.label_to_indices[
                             random_state.choice(
                                 list(self.labels_set - set([self.test_labels[i]]))
                             )
                         ])
                         ]
                        for i in range(len(self.test_data))]
            self.test_triplets = triplets
    def __getitem__(self, index):
        if self.train:
            # Anchor is the indexed sample itself.
            img1, label1 = self.train_data[index], self.train_labels[index]
            positive_index = index
            # Resample until the positive is a *different* sample with the same label.
            while positive_index == index:
                positive_index = np.random.choice(self.label_to_indices[label1])
            # Negative comes from any other label, chosen uniformly.
            negative_label = np.random.choice(list(self.labels_set - set([label1])))
            negative_index = np.random.choice(self.label_to_indices[negative_label])
            img2 = self.train_data[positive_index]
            img3 = self.train_data[negative_index]
        else:
            img1 = self.test_data[self.test_triplets[index][0]]
            img2 = self.test_data[self.test_triplets[index][1]]
            img3 = self.test_data[self.test_triplets[index][2]]
        # (Anchor, Positive, Negative); empty list keeps the (data, target) shape.
        return (img1, img2, img3), []
    def __len__(self):
        return len(self.dataset)
class EmbeddingNet(nn.Module):
    """CNN mapping a 1-channel 28x28 image to a 2-D embedding vector."""

    def __init__(self):
        super(EmbeddingNet, self).__init__()
        # Two conv/pool stages: 28x28 -> 24x24 -> 12x12 -> 8x8 -> 4x4.
        self.convnet = nn.Sequential(
            nn.Conv2d(1, 32, 5),
            nn.PReLU(),
            nn.MaxPool2d(2, stride=2),
            nn.Conv2d(32, 64, 5),
            nn.PReLU(),
            nn.MaxPool2d(2, stride=2),
        )
        # MLP head projecting the flattened 64*4*4 feature map to 2 dims.
        self.fc = nn.Sequential(
            nn.Linear(64 * 4 * 4, 256),
            nn.PReLU(),
            nn.Linear(256, 256),
            nn.PReLU(),
            nn.Linear(256, 2),  # the embedding space is 2
        )

    def forward(self, x):
        features = self.convnet(x)
        flat = features.view(features.size(0), -1)
        return self.fc(flat)

    def get_embedding(self, x):
        return self.forward(x)
class TripletNet(nn.Module):
    """Embeds the three triplet branches with one shared embedding network."""

    def __init__(self, embedding_net):
        super(TripletNet, self).__init__()
        self.embedding_net = embedding_net

    def forward(self, x1, x2, x3):
        # The same network (shared weights) embeds anchor, positive and negative.
        return (
            self.embedding_net(x1),
            self.embedding_net(x2),
            self.embedding_net(x3),
        )

    def get_embedding(self, x):
        return self.embedding_net(x)
def fit(train_loader, val_loader, model, loss_fn, optimizer, scheduler, n_epochs, is_cuda, log_interval, metrics=None,
        start_epoch=0):
    """
    Trainer: runs ``n_epochs`` of training, validating after every epoch.

    Args:
        train_loader / val_loader: iterators yielding (data, target) batches.
        model: network to optimize.
        loss_fn: loss applied to the model outputs (plus target, if any).
        optimizer: torch optimizer.
        scheduler: LR scheduler, stepped once per epoch.
        n_epochs: total number of epochs.
        is_cuda: move batches to GPU when True.
        log_interval: batches between progress prints inside train_epoch.
        metrics: optional list of metric objects with reset()/name()/value().
        start_epoch: resume point; the scheduler is fast-forwarded to it.
    """
    # BUG FIX: `metrics=[]` was a mutable default argument shared across
    # calls; use None and build a fresh list per call instead.
    if metrics is None:
        metrics = []
    # Fast-forward the scheduler when resuming from a checkpoint.
    for epoch in range(0, start_epoch):
        scheduler.step()
    print("===> Start training...")
    for epoch in range(start_epoch, n_epochs):
        # NOTE(review): stepping the scheduler before the epoch is the legacy
        # (pre-1.1) PyTorch ordering; kept as-is to preserve behavior.
        scheduler.step()
        # Train stage
        train_loss, metrics = train_epoch(train_loader, model, loss_fn, optimizer, is_cuda, log_interval, metrics)
        message = 'Epoch: {}/{}. Train set: Average loss: {:.4f}'.format(epoch + 1, n_epochs, train_loss)
        for metric in metrics:
            message += '\t{}: {}'.format(metric.name(), metric.value())
        # Validation stage (test_epoch returns the summed loss; average it here).
        val_loss, metrics = test_epoch(val_loader, model, loss_fn, is_cuda, metrics)
        val_loss /= len(val_loader)
        message += '\nEpoch: {}/{}. Validation set: Average loss: {:.4f}'.format(epoch + 1, n_epochs, val_loss)
        for metric in metrics:
            message += '\t{}: {}'.format(metric.name(), metric.value())
        print(message)
    print("===> Finish training!")
def train_epoch(train_loader, model, loss_fn, optimizer, is_cuda, log_interval, metrics):
    """Run one training epoch; returns (mean loss over all batches, metrics)."""
    for metric in metrics:
        metric.reset()
    model.train()
    losses = []
    total_loss = 0
    for batch_idx, (data, target) in enumerate(train_loader):
        # Triplet batches carry an empty target list; normalize that to None.
        target = target if len(target) > 0 else None
        if not type(data) in (tuple, list):
            data = (data,)
        if is_cuda:
            data = tuple(d.cuda() for d in data)
            if target is not None:
                target = target.cuda()
        optimizer.zero_grad()
        # data = [len(triplet)=3, batch_size, c, w, h]
        outputs = model(*data)
        if type(outputs) not in (tuple, list):
            outputs = (outputs,)
        # Loss receives the model outputs, plus the target when present.
        loss_inputs = outputs
        if target is not None:
            target = (target,)
            loss_inputs += target
        loss_outputs = loss_fn(*loss_inputs)
        # Some loss functions return (loss, extras); keep only the loss term.
        loss = loss_outputs[0] if type(loss_outputs) in (tuple, list) else loss_outputs
        losses.append(loss.item())
        total_loss += loss.item()
        loss.backward()
        optimizer.step()
        for metric in metrics:
            metric(outputs, target, loss_outputs)
        if batch_idx % log_interval == 0:
            message = 'Train: [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                batch_idx * len(data[0]), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), np.mean(losses))
            for metric in metrics:
                message += '\t{}: {}'.format(metric.name(), metric.value())
            print(message)
            # `losses` only accumulates since the previous log line.
            losses = []
    total_loss /= (batch_idx + 1)
    return total_loss, metrics
def test_epoch(val_loader, model, loss_fn, is_cuda, metrics):
    """Evaluate one pass over val_loader; returns (summed loss, metrics).

    Note: the returned loss is a *sum* over batches; the caller (fit)
    divides by len(val_loader) to obtain the average.
    """
    with torch.no_grad():
        for metric in metrics:
            metric.reset()
        model.eval()
        val_loss = 0
        for batch_idx, (data, target) in enumerate(val_loader):
            # Triplet batches carry an empty target list; normalize to None.
            target = target if len(target) > 0 else None
            if not type(data) in (tuple, list):
                data = (data,)
            if is_cuda:
                data = tuple(d.cuda() for d in data)
                if target is not None:
                    target = target.cuda()
            outputs = model(*data)
            if type(outputs) not in (tuple, list):
                outputs = (outputs,)
            loss_inputs = outputs
            if target is not None:
                target = (target,)
                loss_inputs += target
            loss_outputs = loss_fn(*loss_inputs)
            # Some loss functions return (loss, extras); keep only the loss term.
            loss = loss_outputs[0] if type(loss_outputs) in (tuple, list) else loss_outputs
            val_loss += loss.item()
            for metric in metrics:
                metric(outputs, target, loss_outputs)
    return val_loss, metrics
# Read raw dataset
# mean/std are currently unused because Normalize is commented out below.
mean, std = 0.1307, 0.3081
transform = transforms.Compose([
    transforms.Resize((28, 28)),
    transforms.ToTensor(),
    # transforms.Normalize((mean,), (std,))
])
target_column = "subCategory"
is_debug = True
root_dir="/kaggle/input/fashion-product-images-small/myntradataset"
# Materialize both splits (train_data / test_data are cached properties).
train_dataset = FashionDataset(is_train=True, target_column=target_column, root_dir=root_dir, csv_file="styles.csv", transform=transform, is_debug=is_debug)
print(train_dataset.train_data.shape)
test_dataset = FashionDataset(is_train=False, target_column=target_column, root_dir=root_dir, csv_file="styles.csv", transform=transform, is_debug=is_debug)
print(test_dataset.test_data.shape)
# Construct triplet dataset
triplet_train_dataset = TripletDataset(train_dataset)
triplet_test_dataset = TripletDataset(test_dataset)
# Construct triplet dataset loader
batch_size = 128
# NOTE(review): is_cuda is defined in an earlier cell (not visible here) — verify.
kwargs = {'num_workers': 10, 'pin_memory': True} if is_cuda else {}
triplet_train_loader = torch.utils.data.DataLoader(triplet_train_dataset, batch_size=batch_size, shuffle=True, **kwargs)
triplet_test_loader = torch.utils.data.DataLoader(triplet_test_dataset, batch_size=batch_size, shuffle=False, **kwargs)
# Construct raw dataset loader for embedding and plotting
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False, **kwargs)
# Set up the network
# from networks import EmbeddingNet, TripletNet
embedding_net = EmbeddingNet()
model = TripletNet(embedding_net)
if is_cuda:
    model.cuda()
# Set up the loss function
loss_fn = nn.TripletMarginLoss(margin=1.0)
# Set up the optimizer
optimizer = optim.Adam(model.parameters(), lr=1e-3)
# Decay LR by 10x every 8 epochs.
scheduler = lr_scheduler.StepLR(optimizer, 8, gamma=0.1, last_epoch=-1)
# Set training parameters
n_epochs = 20
log_interval = 50
fit(triplet_train_loader, triplet_test_loader, model, loss_fn, optimizer, scheduler, n_epochs, is_cuda, log_interval)
def plot_embeddings(embeddings, labels, classes, xlim=None, ylim=None):
    """Scatter-plot 2-D embeddings, one color per class.

    Relies on the module-level ``colors`` dict mapping class -> hex color.
    """
    plt.figure(figsize=(8, 8))
    for cls in classes:
        class_rows = np.where(labels == cls)[0]
        plt.scatter(embeddings[class_rows, 0], embeddings[class_rows, 1], alpha=0.5, color=colors[cls])
    if xlim:
        plt.xlim(xlim[0], xlim[1])
    if ylim:
        plt.ylim(ylim[0], ylim[1])
    plt.legend(classes, bbox_to_anchor=(1, 1.))
def extract_embeddings(dataloader, dataset, model):
    """Run the model over a dataloader, collecting 2-D embeddings and labels.

    Relies on the module-level ``is_cuda`` flag. Returns
    (embeddings array, labels array, set of classes).
    """
    with torch.no_grad():
        classes = dataset.labels_set
        model.eval()
        embeddings = np.zeros((len(dataloader.dataset), 2))  # the embedding space is 2
        labels = []
        offset = 0
        for images, targets in dataloader:
            if is_cuda:
                images = images.cuda()
            batch_embeddings = model.get_embedding(images).data.cpu().numpy()
            embeddings[offset:offset + len(images)] = batch_embeddings
            labels.extend(list(targets))
            offset += len(images)
        labels = np.array(labels)
    return embeddings, labels, classes
classes = train_dataset.labels_set
# Assign each class a random (but seeded, hence reproducible) hex color.
random_state = np.random.RandomState(30)
colors = {}
for c in classes:
    r = lambda: random_state.randint(0, 255)
    color = '#%02X%02X%02X' % (r(),r(),r())
    colors[c] = color
# Embed and plot both splits; `classes` is re-bound to each call's result.
train_embeddings, train_labels, classes = extract_embeddings(train_loader, train_dataset, model)
plot_embeddings(train_embeddings, train_labels, classes)
val_embeddings, val_labels, classes = extract_embeddings(test_loader, test_dataset, model)
plot_embeddings(val_embeddings, val_labels, classes)
# Bare expressions: notebook cells displaying the model summaries.
model
embedding_net
```
| github_jupyter |
# Author : Vedanti Ekre
# Email: vedantiekre@gmail.com
## Task 1 : Prediction using Supervised Machine Learning
___
## GRIP @ The Sparks Foundation
____
# Role : Data Science and Business Analytics [Batch May-2021]
## TABLE OF CONTENTS:
1. [Introduction](#intro)
2. [Importing the dependencies](#libs)
3. [Loading the Data](#DL)
4. [Understanding data](#UD)
5. [Splitting data in Test and Train](#split)
6. [Use Simple Linear Regression Model to do prediction](#LR)
7. [Task](#PT)
8. [Evaluate the model using MAE and MSE metrics](#Eval)
9. [Conclusion](#conclu)
## **Introduction**<a class="anchor" id="intro"></a>
● We are given a Student dataset, which has only two features: Hours and Scores.<br>
● Predict the percentage scored by a student based on the number of study hours.<br>
● This is a simple linear regression task as it involves just 2 variables.<br>
● You can use R, Python, SAS Enterprise Miner or any other tool<br>
● Data can be found at http://bit.ly/w-data
## Importing dependencies<a class="anchor" id="libs"></a>
```
#importing packages
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
%matplotlib inline
```
## **Loading the Data**<a class="anchor" id="DL"></a>
```
#importing datasets
# Load the student hours/scores dataset straight from the shortened URL.
url = "http://bit.ly/w-data"
data = pd.read_csv(url)
```
## **Understanding data**<a class="anchor" id="UD"></a>
```
# First/last rows plus shape, dtypes and null counts for a quick overview.
display(data.head(3),data.tail(3))
print(type(data))
print('-'*45)
print('The data set has {} rows and {} columns'.format(data.shape[0],data.shape[1]))
print('-'*45)
print('Data types :')
print(data.dtypes.value_counts())
#print('Total : ',data.dtypes.value_counts().sum())
print('-'*45)
data.info()
# Checking for the missing values
data.isnull().sum()
```
### Observation :<br>
- ```There is no missing or null value & hence we don't need to do data preprocessing```
## **Data Visualization**
```
x = data.iloc[:,:-1].values #splitting data in X (features) & Y (target)
y = data.iloc[:,-1].values
print(x[:5])
print(y[:5])
# Scatter plot: study hours vs. exam score.
plt.xlabel('hours')
plt.ylabel('scores')
plt.scatter(x,y,color='red',marker='+')
plt.grid()
plt.show()
```
### Observation :
- ```From the graph we can safely assume a positive linear relation between the number of hours studied and percentage of score```
## **Splitting data in x_train , x_test and y_train , y_test** <a class="anchor" id="split"></a>
```
from sklearn.model_selection import train_test_split #splitting data in x_train , x_test & y_train , y_test
# Hold out 25% of the rows; the fixed random_state keeps the split reproducible.
x_train,x_test,y_train,y_test = train_test_split(x,y,test_size = 0.25, random_state = 0)
```
## **Apply Linear Regression on train data**
```
from sklearn.linear_model import LinearRegression
# Fit ordinary least squares on the training split.
lin_reg = LinearRegression()
lin_reg.fit(x_train,y_train)
# Training points together with the fitted regression line.
plt.xlabel('hours')
plt.ylabel('scores')
plt.scatter(x_train,y_train,color='purple',marker='+',label='scatter plot')
plt.plot(x_train,lin_reg.predict(x_train),color='green',label='reg_line')
plt.legend()
plt.grid()
plt.show()
```
## **Apply Linear Regression on test data** <a class="anchor" id="LR"></a>
```
# Test points against the line fitted on the training data.
plt.xlabel('hours')
plt.ylabel('scores')
plt.scatter(x_test,y_test,color='blue',marker='+',label='scatter plot')
plt.plot(x_train,lin_reg.predict(x_train),color='purple',label='reg_line')
plt.legend()
plt.grid()
plt.show()
```
### **Coefficents and y-intercept**
```
# Slope and intercept of the fitted line, then predictions on the test set.
print('coefficents : ',lin_reg.coef_)
print('y-intercept : ',lin_reg.intercept_)
y_pred = lin_reg.predict(x_test)
y_pred
```
## Comparing Actual value with Predicted value
```
result = pd.DataFrame({'Actual values':y_test,'Predicted values':y_pred})
result
# Plotting the Bar graph to depict the difference between the actual and predicted value
result.plot(kind='bar',figsize=(9,9))
plt.grid(which='major', linewidth='0.5', color='red')
plt.grid(which='minor', linewidth='0.5', color='blue')
plt.show()
# Absolute prediction error for each test sample.
diff = np.array(np.abs(y_test-y_pred))
diff
```
## Displot distribution of Actual value with Predicted value
```
# Density of the absolute errors; a tight peak near 0 indicates a good fit.
sns.set_style('whitegrid')
sns.kdeplot(diff,shade=True)
plt.show()
```
# **Task** <a class="anchor" id="PT"></a>
## - What will be predicted score if a student studies for 9.25 hrs/ day?
```
import math
# y = mx + c
# Evaluate the fitted regression equation manually at 9.25 study hours.
res = lin_reg.intercept_+9.25*lin_reg.coef_
hr= 9.25
print("If student study for {} hrs/day student will get {}% score in exam".format(hr,math.floor(res[0])))
print('-'*80)
```
# Model Evaluation <a class="anchor" id="Eval"></a>
## MAE :
- MAE measures the differences between prediction and actual observation.<br>
Formula is :<br>

```
from sklearn import metrics
# Mean absolute error between actual and predicted test scores.
print('Mean Absolute Error:',
    metrics.mean_absolute_error(y_test, y_pred))
```
## MSE :
- MSE simply refers to the mean of the squared difference between the predicted value and the observed value.<br>
Formula : <br>
```
from sklearn import metrics
# Mean squared error penalizes larger deviations more strongly than MAE.
print('Mean Squared Error:',
    metrics.mean_squared_error(y_test, y_pred))
```
## **R-Square** :
- R-squared is measure of how close the data are to the fitted regression line.<br>
Formula : <br>
```
from sklearn.metrics import r2_score
# Fraction of score variance explained by the regression line.
r2_score(y_test,y_pred)
```
# **Conclusion :**<br><a class="anchor" id="conclu"></a>
- We have successfully created a Simple Linear Regression model to predict a student's score given the number of hours studied.
- Based on the MAE and MSE, the difference between actual and predicted values is small, meaning the error is low.
- The R-Square score of **0.93** is quite close to **1**.
| github_jupyter |
## Genere un conjunto de datos nuevos a partir de CSV con al menos 50mil registros y prediga cuáles serían las respuestas de una nueva encuestaenel 2018.(Adjunte el análisis y el algoritmo que le permitió generar los registros manteniendo la línea de tendencia con base en las encuestas anteriores)
```
import pandas as pd
import os
# The survey data is sharded into several JSON files; load each shard.
data1 = pd.read_json(r'.\Base_Datos_Encuestas\BD_Encuesta_0_16000.json')
data2 = pd.read_json(r'.\Base_Datos_Encuestas\BD_Encuesta_16000_32000.json')
data3 = pd.read_json(r'.\Base_Datos_Encuestas\BD_Encuesta_32000_48000.json')
data4 = pd.read_json(r'.\Base_Datos_Encuestas\BD_Encuesta_48000_64000.json')
data5 = pd.read_json(r'.\Base_Datos_Encuestas\BD_Encuesta_64000_80000.json')
data6 = pd.read_json(r'.\Base_Datos_Encuestas\BD_Encuesta_80000_96000.json')
data7 = pd.read_json(r'.\Base_Datos_Encuestas\BD_Encuesta_96000_112000.json')
data8 = pd.read_json(r'.\Base_Datos_Encuestas\BD_Encuesta_112000_128000.json')
data9 = pd.read_json(r'.\Base_Datos_Encuestas\BD_Encuesta_128000_144000.json')
data10 = pd.read_json(r'.\Base_Datos_Encuestas\BD_Encuesta_144000_160000.json')
data11 = pd.read_json(r'.\Base_Datos_Encuestas\BD_Encuesta_160000_160111.json')
# Stack every shard into a single DataFrame.
Total = pd.concat([data1, data2, data3, data4, data5, data6, data7, data8, data9, data10, data11], axis=0)
Total.columns.values
Total.head()
```
### Para crear los 50.000 registros del año 2018 se realizarán los siguientes pasos:
1. Asumiendo que el comportamiento de 2018 es similar al comportamiento de 2017, se realiza una descripción general de los datos de 2017.
2. Dado que desconocemos la distribución de probabilidad de la cual se generan los datos, se respetará la distribución de probabilidad empirica de la muestra de 2017.
3. Se crea para cada variable una función generadora de datos que siga la distribución de probabilidad, la cual utiliza como variables de entrada variables continuas de distribución uniforme entre 0 y 1.
4. se crea una función generadora de un registro que toma como insumos las funciones generadoras de datos
5. Se crea una función generadora de varios registros.
## Pasos 1 y 2:
```
# Keep only the 2017 responses; 2018 will mirror their empirical distribution.
data_2017 = Total.loc[Total["Year"]==2017].reset_index()
data_2017.shape
Total.groupby('Year').count()
# Per-column non-null counts over 2017 rows, normalized to probabilities.
Prob_Lang=data_2017.groupby('Year').count()/data_2017.shape[0]
Prob_Lang
```
A continuación se extrae la probabilidad de ocurrencia de los lenguajes de programación en 2017
```
from collections import OrderedDict
# Empirical 2017 occurrence probability of each programming language.
P_JavaScript = Prob_Lang.JavaScript.item()
P_JavaScript
P_AngularJS = Prob_Lang.AngularJS.item()
P_C = Prob_Lang.C.item()
P_CPlusPlus = Prob_Lang.CPlusPlus.item()
P_CSS = Prob_Lang.CSS.item()
P_CSharp = Prob_Lang.CSharp.item()
P_HTML5 = Prob_Lang.HTML5.item()
P_Java = Prob_Lang.Java.item()
P_JavaScript = Prob_Lang.JavaScript.item()
P_Nodejs = Prob_Lang.Nodejs.item()
P_ObjectiveC = Prob_Lang.ObjectiveC.item()
P_PHP = Prob_Lang.PHP.item()
P_Perl = Prob_Lang.Perl.item()
P_Python = Prob_Lang.Python.item()
P_Ruby = Prob_Lang.Ruby.item()
P_SQL = Prob_Lang.SQL.item()
P_SQL_Server = Prob_Lang.SQL_Server.item()
P_TypeScript = Prob_Lang.TypeScript.item()
P_jQuery = Prob_Lang.jQuery.item()
```
Se crean diccionarios con los rangos de probabilidad de ocurrencia de los datos descriptivos de 2017 (dimensiones): país, edad, experiencia, genero, salario
```
# Relative frequency of each value for the descriptive dimensions.
temp1 = data_2017.Id_Country.value_counts()/data_2017.shape[0]
temp2 = data_2017.Id_Age.value_counts()/data_2017.shape[0]
temp3 = data_2017.Id_Experience.value_counts()/data_2017.shape[0]
temp4 = data_2017.Id_Gender.value_counts()/data_2017.shape[0]
temp5 = data_2017.Id_Salary.value_counts()/data_2017.shape[0]
```
La siguiene función toma como insumo la serie de Pandas y la transforma en un diccionario ordenado de Python
```
def dic_other_vars(temp1):
    """Turn a probability Series into an ordered dict of cumulative upper bounds.

    Given a pandas Series mapping category -> probability (as produced by
    ``value_counts() / n``), returns an OrderedDict mapping each category to
    the running (cumulative) probability, preserving the Series order. The
    bounds are consumed by gen_V_Desc for inverse-CDF sampling.

    Args:
        temp1: pandas Series of per-category probabilities.
    Returns:
        OrderedDict of category -> cumulative probability.
    """
    cumulative_bounds = OrderedDict()
    running_total = 0.0  # renamed from `sum`, which shadowed the builtin
    for key, probability in temp1.items():
        running_total += probability
        cumulative_bounds[key] = running_total
    return cumulative_bounds
```
Se crean los diccionarios de probabilidades para las variables descriptivas
```
# Cumulative-probability dictionaries for inverse-CDF sampling of each dimension.
dict_Id_Country = dic_other_vars(temp1)
dict_Id_Age = dic_other_vars(temp2)
dict_Id_Experience = dic_other_vars(temp3)
dict_Id_Gender = dic_other_vars(temp4)
dict_Id_Salary = dic_other_vars(temp5)
dict_Id_Salary
```
Para el año 2017 la variable edad esta en NULL
## Paso 3: Funciones generadoras de datos por variable
Primero la función generadora de datos para los leguajes de programación
```
import random
# Sanity check: one uniform draw, and display the JavaScript probability.
random.random()
P_JavaScript
def gen_L_Prog(p_lang, label):
    """Return ``label`` with probability ``p_lang``, otherwise None.

    Draws a single uniform random number in [0, 1) and compares it against
    the language's empirical occurrence probability, simulating whether a
    synthetic respondent uses that language.
    """
    return label if random.random() < p_lang else None
```
Prueba de la funcion para 10 datos
```
# Smoke test: sample 10 values for JavaScript.
for i in range(10):
    prueba = gen_L_Prog(P_JavaScript, "JavaScript")
    print (prueba)
```
Ahora se definen las funciones para la generación de las otras variables descriptivas: país, edad, experiencia, genero, salario
```
dict_Id_Experience
def gen_V_Desc(Input_Dict):
    """Sample a key from a dict of cumulative probability bounds (inverse CDF).

    ``Input_Dict`` maps keys to ascending cumulative probabilities (the
    output of dic_other_vars). A uniform draw in [0, 1) selects the first
    key whose bound exceeds the draw. Returns None if no bound does (cannot
    happen when the final bound reaches 1.0).
    """
    draw = random.random()
    for key, upper_bound in Input_Dict.items():
        if draw < upper_bound:
            return key
    return None
```
prueba de generación para experiencia
```
# Smoke test: sample 5 experience values.
for i in range(5):
    prueba = gen_V_Desc(dict_Id_Experience)
    print (prueba)
```
prueba de generación para país
```
# Smoke test: sample 5 country values.
for i in range(5):
    prueba = gen_V_Desc(dict_Id_Country)
    print (prueba)
```
## 4. Función que genera un registro aleatorio con la distribución de probabilidad empírica de 2017
```
Total.columns.values
def registro ():
    """Generate one synthetic 2018 survey record as a list of column values.

    Samples each language flag and each descriptive dimension from the 2017
    empirical distributions; Year is fixed at 2018. The append order must
    match the column order of the original DataFrame (see `header` below).
    """
    temp = []
    temp.append(gen_L_Prog(P_AngularJS, "AngularJS"))
    temp.append(gen_L_Prog(P_C, "C"))
    temp.append(gen_L_Prog(P_CPlusPlus, "CPlusPlus"))
    temp.append(gen_L_Prog(P_CSS, "CSS"))
    temp.append(gen_L_Prog(P_CSharp, "CSharp"))
    temp.append(gen_L_Prog(P_HTML5, "HTML5"))
    temp.append(gen_V_Desc(dict_Id_Age))
    temp.append(gen_V_Desc(dict_Id_Country))
    temp.append(gen_V_Desc(dict_Id_Experience))
    temp.append(gen_V_Desc(dict_Id_Gender))
    temp.append(gen_V_Desc(dict_Id_Salary))
    temp.append(gen_L_Prog(P_Java, "Java"))
    temp.append(gen_L_Prog(P_JavaScript, "JavaScript"))
    temp.append(gen_L_Prog(P_Nodejs, "Nodejs"))
    temp.append(gen_L_Prog(P_ObjectiveC, "Objective-C"))
    temp.append(gen_L_Prog(P_PHP, "PHP"))
    temp.append(gen_L_Prog(P_Perl, "Perl"))
    temp.append(gen_L_Prog(P_Python, "Python"))
    temp.append(gen_L_Prog(P_Ruby, "Ruby"))
    temp.append(gen_L_Prog(P_SQL, "SQL"))
    temp.append(gen_L_Prog(P_SQL_Server, "SQL_Server"))
    temp.append(gen_L_Prog(P_TypeScript, "TypeScript"))
    temp.append(2018)
    temp.append(gen_L_Prog(P_jQuery, "jQuery"))
    return temp
print (registro())
```
## 5. Función para generar N registros
```
def n_registros(n):
    """Generate ``n`` synthetic survey records via registro()."""
    return [registro() for _ in range(n)]
# Build 50,000 synthetic 2018 records and wrap them in a DataFrame
# whose columns match the original survey table.
tabla = n_registros(50000)
header = ['AngularJS', 'C', 'CPlusPlus', 'CSS', 'CSharp', 'HTML5', 'Id_Age',
          'Id_Country', 'Id_Experience', 'Id_Gender', 'Id_Salary', 'Java',
          'JavaScript', 'Nodejs', 'ObjectiveC', 'PHP', 'Perl', 'Python',
          'Ruby', 'SQL', 'SQL_Server', 'TypeScript', 'Year', 'jQuery']
tabla_df = pd.DataFrame(tabla, columns = header)
tabla_df.head()
tabla_df.tail()
```
### DATOS TABLA 2017
```
# Reference table: empirical 2017 probabilities.
Prob_Lang=data_2017.groupby('Year').count()/data_2017.shape[0]
Prob_Lang
```
## DATOS SIMULADOS 2018
```
# Simulated 2018 probabilities; should closely track the 2017 table.
datos_tabla_df = tabla_df.groupby('Year').count()/tabla_df.shape[0]
datos_tabla_df
```
| github_jupyter |
```
import run_info_utils

# Load the per-run experiment metadata table.
df = run_info_utils.get_df_run_info()
df.head()
# print(list(df.columns))
experiment_name = 'jordan_cp9_add_sub_maxstep'
df = df.loc[df['experiment_name'] == experiment_name]
# Keep the identifying columns plus the per-carry answer-step statistics.
cols = ['run_id', 'operator', 'rnn_type', 'confidence_prob', 'operand_bits', 'hidden_activation', 'max_steps',
       'dev/last_carry-0_mean_correct_answer_step',
       'dev/last_carry-1_mean_correct_answer_step',
       'dev/last_carry-2_mean_correct_answer_step',
       'dev/last_carry-3_mean_correct_answer_step',
       'dev/last_carry-4_mean_correct_answer_step',
       'dev/last_mean_correct_answer_step', 'dev/last_accuracy']
df = df[cols]
# Restrict to runs that reached perfect dev accuracy.
df = df.loc[df['dev/last_accuracy'] == 1.0]
df.shape
# Slice the qualifying runs by operator / activation / max_steps
# (all with jordan RNN and confidence_prob == 0.9).
df_add_jordan_9_relu_maxstep10 = df.query('operator == "add"').query('rnn_type == "jordan"').query('confidence_prob == 0.9').query('hidden_activation == "relu"').query('max_steps == 10')
df_sub_jordan_9_relu_maxstep10 = df.query('operator == "subtract"').query('rnn_type == "jordan"').query('confidence_prob == 0.9').query('hidden_activation == "relu"').query('max_steps == 10')
df_add_jordan_9_relu_maxstep20 = df.query('operator == "add"').query('rnn_type == "jordan"').query('confidence_prob == 0.9').query('hidden_activation == "relu"').query('max_steps == 20')
df_sub_jordan_9_relu_maxstep20 = df.query('operator == "subtract"').query('rnn_type == "jordan"').query('confidence_prob == 0.9').query('hidden_activation == "relu"').query('max_steps == 20')
df_add_jordan_9_relu_maxstep30 = df.query('operator == "add"').query('rnn_type == "jordan"').query('confidence_prob == 0.9').query('hidden_activation == "relu"').query('max_steps == 30')
df_sub_jordan_9_relu_maxstep30 = df.query('operator == "subtract"').query('rnn_type == "jordan"').query('confidence_prob == 0.9').query('hidden_activation == "relu"').query('max_steps == 30')
df_add_jordan_9_relu_maxstep40 = df.query('operator == "add"').query('rnn_type == "jordan"').query('confidence_prob == 0.9').query('hidden_activation == "relu"').query('max_steps == 40')
df_sub_jordan_9_relu_maxstep40 = df.query('operator == "subtract"').query('rnn_type == "jordan"').query('confidence_prob == 0.9').query('hidden_activation == "relu"').query('max_steps == 40')
df_add_jordan_9_relu_maxstep50 = df.query('operator == "add"').query('rnn_type == "jordan"').query('confidence_prob == 0.9').query('hidden_activation == "relu"').query('max_steps == 50')
df_sub_jordan_9_relu_maxstep50 = df.query('operator == "subtract"').query('rnn_type == "jordan"').query('confidence_prob == 0.9').query('hidden_activation == "relu"').query('max_steps == 50')
df_add_jordan_9_relu_maxstep60 = df.query('operator == "add"').query('rnn_type == "jordan"').query('confidence_prob == 0.9').query('hidden_activation == "relu"').query('max_steps == 60')
df_sub_jordan_9_relu_maxstep60 = df.query('operator == "subtract"').query('rnn_type == "jordan"').query('confidence_prob == 0.9').query('hidden_activation == "relu"').query('max_steps == 60')
df_add_jordan_9_relu_maxstep90 = df.query('operator == "add"').query('rnn_type == "jordan"').query('confidence_prob == 0.9').query('hidden_activation == "relu"').query('max_steps == 90')
df_add_jordan_9_tanh_maxstep10 = df.query('operator == "add"').query('rnn_type == "jordan"').query('confidence_prob == 0.9').query('hidden_activation == "tanh"').query('max_steps == 10')
df_sub_jordan_9_tanh_maxstep10 = df.query('operator == "subtract"').query('rnn_type == "jordan"').query('confidence_prob == 0.9').query('hidden_activation == "tanh"').query('max_steps == 10')
df_add_jordan_9_tanh_maxstep20 = df.query('operator == "add"').query('rnn_type == "jordan"').query('confidence_prob == 0.9').query('hidden_activation == "tanh"').query('max_steps == 20')
df_sub_jordan_9_tanh_maxstep20 = df.query('operator == "subtract"').query('rnn_type == "jordan"').query('confidence_prob == 0.9').query('hidden_activation == "tanh"').query('max_steps == 20')
df_add_jordan_9_tanh_maxstep30 = df.query('operator == "add"').query('rnn_type == "jordan"').query('confidence_prob == 0.9').query('hidden_activation == "tanh"').query('max_steps == 30')
df_sub_jordan_9_tanh_maxstep30 = df.query('operator == "subtract"').query('rnn_type == "jordan"').query('confidence_prob == 0.9').query('hidden_activation == "tanh"').query('max_steps == 30')
df_add_jordan_9_tanh_maxstep40 = df.query('operator == "add"').query('rnn_type == "jordan"').query('confidence_prob == 0.9').query('hidden_activation == "tanh"').query('max_steps == 40')
df_sub_jordan_9_tanh_maxstep40 = df.query('operator == "subtract"').query('rnn_type == "jordan"').query('confidence_prob == 0.9').query('hidden_activation == "tanh"').query('max_steps == 40')
df_add_jordan_9_tanh_maxstep50 = df.query('operator == "add"').query('rnn_type == "jordan"').query('confidence_prob == 0.9').query('hidden_activation == "tanh"').query('max_steps == 50')
df_sub_jordan_9_tanh_maxstep50 = df.query('operator == "subtract"').query('rnn_type == "jordan"').query('confidence_prob == 0.9').query('hidden_activation == "tanh"').query('max_steps == 50')
df_add_jordan_9_tanh_maxstep60 = df.query('operator == "add"').query('rnn_type == "jordan"').query('confidence_prob == 0.9').query('hidden_activation == "tanh"').query('max_steps == 60')
df_sub_jordan_9_tanh_maxstep60 = df.query('operator == "subtract"').query('rnn_type == "jordan"').query('confidence_prob == 0.9').query('hidden_activation == "tanh"').query('max_steps == 60')
# Report how many qualifying runs exist for each configuration.
print(df_add_jordan_9_relu_maxstep10.shape)
print(df_sub_jordan_9_relu_maxstep10.shape)
print(df_add_jordan_9_relu_maxstep20.shape)
print(df_sub_jordan_9_relu_maxstep20.shape)
print(df_add_jordan_9_relu_maxstep30.shape)
print(df_sub_jordan_9_relu_maxstep30.shape)
print(df_add_jordan_9_relu_maxstep40.shape)
print(df_sub_jordan_9_relu_maxstep40.shape)
print(df_add_jordan_9_relu_maxstep50.shape)
print(df_sub_jordan_9_relu_maxstep50.shape)
print(df_add_jordan_9_relu_maxstep60.shape)
print(df_sub_jordan_9_relu_maxstep60.shape)
print(df_add_jordan_9_relu_maxstep90.shape)
print(df_add_jordan_9_tanh_maxstep10.shape)
print(df_sub_jordan_9_tanh_maxstep10.shape)
print(df_add_jordan_9_tanh_maxstep20.shape)
print(df_sub_jordan_9_tanh_maxstep20.shape)
print(df_add_jordan_9_tanh_maxstep30.shape)
print(df_sub_jordan_9_tanh_maxstep30.shape)
print(df_add_jordan_9_tanh_maxstep40.shape)
print(df_sub_jordan_9_tanh_maxstep40.shape)
print(df_add_jordan_9_tanh_maxstep50.shape)
print(df_sub_jordan_9_tanh_maxstep50.shape)
print(df_add_jordan_9_tanh_maxstep60.shape)
print(df_sub_jordan_9_tanh_maxstep60.shape)
```
# Result
```
# Summary statistics for the relu / max_steps=40 configurations.
df_add_jordan_9_relu_maxstep40.describe()
df_sub_jordan_9_relu_maxstep40.describe()
```
# Export as CSV
## Functionalize
```
import pandas as pd
import numpy as np
import data_utils
from utils import create_dir
from os.path import join
def get_csv_df(df, filename, experiment_name):
    """Reshape a runs DataFrame into (mean_anwer_steps, carries) rows and save as CSV.

    One output row per (run, carry count), written to
    result_statistics/<experiment_name>/<filename>.
    NOTE(review): the output column name 'mean_anwer_steps' is misspelled but
    kept as-is, since downstream consumers of the CSVs may depend on it.
    """
    # Get configurations
    n_rows = df.shape[0]
    operator = df['operator'].iloc[0]
    operand_digits = df['operand_bits'].iloc[0]
    # Carry counts available for this operator/digit setting, per the dataset files.
    carry_list = list(data_utils.import_carry_datasets(operand_digits, operator).keys())
    # Gather for each
    csv_df_list = list()
    for carries in carry_list:
        col = 'dev/last_carry-{}_mean_correct_answer_step'.format(carries)
        csv_df = pd.DataFrame(data={'mean_anwer_steps':df[col], 'carries':np.full((n_rows), carries)})
        csv_df_list.append(csv_df)
    csv_df = pd.concat(csv_df_list, ignore_index=True)
    # Change the order of columns
    csv_df = csv_df[['mean_anwer_steps', 'carries']]
    # Create dir
    dir_to_save = join('result_statistics', experiment_name)
    create_dir(dir_to_save)
    # Save the dataframe to a CSV file.
    csv_df.to_csv(join(dir_to_save, filename), index=False)
experiment_name = 'jordan_cp9_add_sub_maxstep'
# Write one CSV per configuration under result_statistics/<experiment_name>/.
get_csv_df(df_add_jordan_9_relu_maxstep10, 'df_add_jordan_9_relu_maxstep10.csv', experiment_name)
get_csv_df(df_sub_jordan_9_relu_maxstep10, 'df_sub_jordan_9_relu_maxstep10.csv', experiment_name)
get_csv_df(df_add_jordan_9_relu_maxstep20, 'df_add_jordan_9_relu_maxstep20.csv', experiment_name)
get_csv_df(df_sub_jordan_9_relu_maxstep20, 'df_sub_jordan_9_relu_maxstep20.csv', experiment_name)
get_csv_df(df_add_jordan_9_relu_maxstep30, 'df_add_jordan_9_relu_maxstep30.csv', experiment_name)
get_csv_df(df_sub_jordan_9_relu_maxstep30, 'df_sub_jordan_9_relu_maxstep30.csv', experiment_name)
get_csv_df(df_add_jordan_9_relu_maxstep40, 'df_add_jordan_9_relu_maxstep40.csv', experiment_name)
get_csv_df(df_sub_jordan_9_relu_maxstep40, 'df_sub_jordan_9_relu_maxstep40.csv', experiment_name)
get_csv_df(df_add_jordan_9_relu_maxstep50, 'df_add_jordan_9_relu_maxstep50.csv', experiment_name)
get_csv_df(df_sub_jordan_9_relu_maxstep50, 'df_sub_jordan_9_relu_maxstep50.csv', experiment_name)
get_csv_df(df_add_jordan_9_relu_maxstep60, 'df_add_jordan_9_relu_maxstep60.csv', experiment_name)
get_csv_df(df_sub_jordan_9_relu_maxstep60, 'df_sub_jordan_9_relu_maxstep60.csv', experiment_name)
get_csv_df(df_add_jordan_9_relu_maxstep90, 'df_add_jordan_9_relu_maxstep90.csv', experiment_name)
get_csv_df(df_add_jordan_9_tanh_maxstep10, 'df_add_jordan_9_tanh_maxstep10.csv', experiment_name)
get_csv_df(df_sub_jordan_9_tanh_maxstep10, 'df_sub_jordan_9_tanh_maxstep10.csv', experiment_name)
get_csv_df(df_add_jordan_9_tanh_maxstep20, 'df_add_jordan_9_tanh_maxstep20.csv', experiment_name)
get_csv_df(df_sub_jordan_9_tanh_maxstep20, 'df_sub_jordan_9_tanh_maxstep20.csv', experiment_name)
get_csv_df(df_add_jordan_9_tanh_maxstep30, 'df_add_jordan_9_tanh_maxstep30.csv', experiment_name)
get_csv_df(df_sub_jordan_9_tanh_maxstep30, 'df_sub_jordan_9_tanh_maxstep30.csv', experiment_name)
get_csv_df(df_add_jordan_9_tanh_maxstep40, 'df_add_jordan_9_tanh_maxstep40.csv', experiment_name)
get_csv_df(df_sub_jordan_9_tanh_maxstep40, 'df_sub_jordan_9_tanh_maxstep40.csv', experiment_name)
get_csv_df(df_add_jordan_9_tanh_maxstep50, 'df_add_jordan_9_tanh_maxstep50.csv', experiment_name)
get_csv_df(df_sub_jordan_9_tanh_maxstep50, 'df_sub_jordan_9_tanh_maxstep50.csv', experiment_name)
get_csv_df(df_add_jordan_9_tanh_maxstep60, 'df_add_jordan_9_tanh_maxstep60.csv', experiment_name)
get_csv_df(df_sub_jordan_9_tanh_maxstep60, 'df_sub_jordan_9_tanh_maxstep60.csv', experiment_name)
```
| github_jupyter |
<p style="font-family: Arial; font-size:3.75em;color:purple; font-style:bold"><br>
Pandas</p><br>
*pandas* is a Python library for data analysis. It offers a number of data exploration, cleaning and transformation operations that are critical in working with data in Python.
*pandas* build upon *numpy* and *scipy* providing easy-to-use data structures and data manipulation functions with integrated indexing.
The main data structures *pandas* provides are *Series* and *DataFrames*. After a brief introduction to these two data structures and data ingestion, the key features of *pandas* this notebook covers are:
* Generating descriptive statistics on data
* Data cleaning using built in pandas functions
* Frequent data operations for subsetting, filtering, insertion, deletion and aggregation of data
* Merging multiple datasets using dataframes
* Working with timestamps and time-series data
**Additional Recommended Resources:**
* *pandas* Documentation: http://pandas.pydata.org/pandas-docs/stable/
* *Python for Data Analysis* by Wes McKinney
* *Python Data Science Handbook* by Jake VanderPlas
Let's get started with our first *pandas* notebook!
<p style="font-family: Arial; font-size:1.75em;color:#2462C0; font-style:bold"><br>
Import Libraries
</p>
```
import pandas as pd
```
<p style="font-family: Arial; font-size:1.75em;color:#2462C0; font-style:bold">
Introduction to pandas Data Structures</p>
<br>
*pandas* has two main data structures it uses, namely, *Series* and *DataFrames*.
<p style="font-family: Arial; font-size:1.75em;color:#2462C0; font-style:bold">
pandas Series</p>
*pandas Series* one-dimensional labeled array.
```
# A Series with an explicit string index (second positional arg).
ser = pd.Series([100, 'foo', 300, 'bar', 500], ['tom', 'bob', 'nancy', 'dan', 'eric'])
ser
ser.index
ser.loc[['nancy','bob']]        # label-based selection
ser[[4, 3, 1]]                  # position-based selection
ser.iloc[2]                     # single element by position
'bob' in ser                    # membership tests the index labels, not values
ser
ser * 2                         # elementwise: ints double, strings repeat ('foofoo')
ser[['nancy', 'eric']] ** 2     # vectorized op on a label-selected subset
```
<p style="font-family: Arial; font-size:1.75em;color:#2462C0; font-style:bold">
pandas DataFrame</p>
*pandas DataFrame* is a 2-dimensional labeled data structure.
<p style="font-family: Arial; font-size:1.25em;color:#2462C0; font-style:bold">
Create DataFrame from dictionary of Python Series</p>
```
# Two Series with partially overlapping indexes; on alignment, missing
# entries become NaN (e.g. 'clock' has no value in column 'two').
d = {'one' : pd.Series([100., 200., 300.], index=['apple', 'ball', 'clock']),
     'two' : pd.Series([111., 222., 333., 4444.], index=['apple', 'ball', 'cerill', 'dancy'])}
df = pd.DataFrame(d)
print(df)
df.index
df.columns
pd.DataFrame(d, index=['dancy', 'ball', 'apple'])          # select/reorder rows
pd.DataFrame(d, index=['dancy', 'ball', 'apple'], columns=['two', 'five'])   # 'five' doesn't exist -> all-NaN column
```
<p style="font-family: Arial; font-size:1.25em;color:#2462C0; font-style:bold">
Create DataFrame from list of Python dictionaries</p>
```
# Each dict is a row; keys missing from a row become NaN in that row.
data = [{'alex': 1, 'joe': 2}, {'ema': 5, 'dora': 10, 'alice': 20}]
pd.DataFrame(data)
pd.DataFrame(data, index=['orange', 'red'])          # custom row labels
pd.DataFrame(data, columns=['joe', 'dora','alice'])  # keep only these columns
```
<p style="font-family: Arial; font-size:1.25em;color:#2462C0; font-style:bold">
Basic DataFrame operations</p>
```
df
df['one']
df['three'] = df['one'] * df['two']     # elementwise product, aligned on index
df
df['flag'] = df['one'] > 250            # derived boolean column
df
three = df.pop('three')                 # remove the column and return it as a Series
three
df
del df['two']                           # remove a column in place
df
df.insert(2, 'copy_of_one', df['one'])  # insert a column at a given position
df
df['one_upper_half'] = df['one'][:2]    # partial assignment; remaining rows become NaN
df
```
<p style="font-family: Arial; font-size:1.75em;color:#2462C0; font-style:bold">
Case Study: Movie Data Analysis</p>
<br>This notebook uses a dataset from the MovieLens website. We will describe the dataset further as we explore with it using *pandas*.
## Download the Dataset
Please note that **you will need to download the dataset**. Although the video for this notebook says that the data is in your folder, the folder turned out to be too large to fit on the edX platform due to size constraints.
Here are the links to the data source and location:
* **Data Source:** MovieLens web site (filename: ml-20m.zip)
* **Location:** https://grouplens.org/datasets/movielens/
Once the download completes, please make sure the data files are in a directory called *movielens* in your *Week-3-pandas* folder.
Let us look at the files in this dataset using the UNIX command ls.
```
# Note: Adjust the name of the folder to match your local directory
!ls ./movielens
!cat ./movielens/movies.csv | wc -l
!head -5 ./movielens/ratings.csv
```
<p style="font-family: Arial; font-size:1.75em;color:#2462C0; font-style:bold">
Use Pandas to Read the Dataset<br>
</p>
<br>
In this notebook, we will be using three CSV files:
* **ratings.csv :** *userId*,*movieId*,*rating*, *timestamp*
* **tags.csv :** *userId*,*movieId*, *tag*, *timestamp*
* **movies.csv :** *movieId*, *title*, *genres* <br>
Using the *read_csv* function in pandas, we will ingest these three files.
```
# Load the three MovieLens CSV files into DataFrames.
movies = pd.read_csv('./movielens/movies.csv', sep=',')
print(type(movies))
movies.head(15)
# Timestamps represent seconds since midnight Coordinated Universal Time (UTC) of January 1, 1970
tags = pd.read_csv('./movielens/tags.csv', sep=',')
tags.head()
# NOTE(review): parse_dates on an integer epoch-seconds column does not produce
# datetimes (read_csv parses date-like strings) — harmless here since the
# column is deleted just below; verify if the column is ever kept.
ratings = pd.read_csv('./movielens/ratings.csv', sep=',', parse_dates=['timestamp'])
ratings.head()
# For current analysis, we will remove timestamp (we will come back to it!)
del ratings['timestamp']
del tags['timestamp']
```
<h1 style="font-size:2em;color:#2467C0">Data Structures </h1>
<h1 style="font-size:1.5em;color:#2467C0">Series</h1>
```
# Extract the 0th row: notice that it is in fact a Series
row_0 = tags.iloc[0]
type(row_0)
print(row_0)
row_0.index            # a row's index is the DataFrame's columns
row_0['userId']
'rating' in row_0      # membership tests the labels, not the values
row_0.name             # a row Series is named after its DataFrame index label
row_0 = row_0.rename('first_row')
row_0.name
```
<h1 style="font-size:1.5em;color:#2467C0">DataFrames </h1>
```
tags.head()
tags.index
tags.columns
# Extract row 0, 11, 2000 from DataFrame
tags.iloc[ [0,11,2000] ]
```
<h1 style="font-size:2em;color:#2467C0">Descriptive Statistics</h1>
Let's look how the ratings are distributed!
```
# Summary statistics over the ratings table.
ratings['rating'].describe()
ratings.describe()
ratings['rating'].mean()
ratings.mean()                 # column-wise means (all remaining columns are numeric here)
ratings['rating'].min()
ratings['rating'].max()
ratings['rating'].std()
ratings['rating'].mode()
ratings.corr()                 # pairwise correlations between numeric columns
# Sanity checks on the rating scale:
filter_1 = ratings['rating'] > 5
print(filter_1)
filter_1.any()                 # True only if some rating exceeds 5
filter_2 = ratings['rating'] > 0
filter_2.all()                 # True only if every rating is positive
```
<h1 style="font-size:2em;color:#2467C0">Data Cleaning: Handling Missing Data</h1>
```
movies.shape
#is any row NULL ?
movies.isnull().any()
```
Thats nice ! No NULL values !
```
ratings.shape
#is any row NULL ?
ratings.isnull().any()
```
Thats nice ! No NULL values !
```
tags.shape
#is any row NULL ?
tags.isnull().any()
```
We have some tags which are NULL.
```
# Drop rows containing NaN values (some tag entries are missing).
tags = tags.dropna()
#Check again: is any row NULL ?
tags.isnull().any()
tags.shape      # row count shrinks by however many rows were dropped
```
Thats nice ! No NULL values ! Notice the number of lines have reduced.
<h1 style="font-size:2em;color:#2467C0">Data Visualization</h1>
```
%matplotlib inline
ratings.hist(column='rating', figsize=(15,10))
ratings.boxplot(column='rating', figsize=(15,20))
```
<h1 style="font-size:2em;color:#2467C0">Slicing Out Columns</h1>
```
tags['tag'].head()
movies[['title','genres']].head()          # select multiple columns with a list
ratings[-10:]                              # last 10 rows
tag_counts = tags['tag'].value_counts()    # frequency of each tag, sorted descending
tag_counts[-10:]                           # the 10 rarest tags
tag_counts[:10].plot(kind='bar', figsize=(15,10))   # the 10 most common tags
```
<h1 style="font-size:2em;color:#2467C0">Filters for Selecting Rows</h1>
```
# Boolean masks select the matching rows.
is_highly_rated = ratings['rating'] >= 4.0
ratings[is_highly_rated][30:50]
is_animation = movies['genres'].str.contains('Animation')   # substring match on the pipe-joined genre string
movies[is_animation][5:15]
movies[is_animation].head(15)
```
<h1 style="font-size:2em;color:#2467C0">Group By and Aggregate </h1>
```
# Aggregate ratings: count of ratings per rating value, and per-movie mean/count.
ratings_count = ratings[['movieId','rating']].groupby('rating').count()
ratings_count
average_rating = ratings[['movieId','rating']].groupby('movieId').mean()
average_rating.head()
# Compute the per-movie rating count once and reuse it for both views
# (the original recomputed the identical groupby before .tail()).
movie_count = ratings[['movieId','rating']].groupby('movieId').count()
movie_count.head()
movie_count.tail()
```
<h1 style="font-size:2em;color:#2467C0">Merge Dataframes</h1>
```
tags.head()
movies.head()
t = movies.merge(tags, on='movieId', how='inner')
t.head()
```
More examples: http://pandas.pydata.org/pandas-docs/stable/merging.html
<p style="font-family: Arial; font-size:1.75em;color:#2462C0; font-style:bold"><br>
Combine aggregation, merging, and filters to get useful analytics
</p>
```
# Per-movie average ratings merged with the movie titles, then filtered.
avg_ratings = ratings.groupby('movieId', as_index=False).mean()
del avg_ratings['userId']      # the mean of userId is meaningless; drop it
avg_ratings.head()
box_office = movies.merge(avg_ratings, on='movieId', how='inner')
box_office.tail()
is_highly_rated = box_office['rating'] >= 4.0
box_office[is_highly_rated][-5:]
is_comedy = box_office['genres'].str.contains('Comedy')
box_office[is_comedy][:5]
box_office[is_comedy & is_highly_rated][-5:]    # both masks combined elementwise
```
<h1 style="font-size:2em;color:#2467C0">Vectorized String Operations</h1>
```
movies.head()
```
<p style="font-family: Arial; font-size:1.35em;color:#2462C0; font-style:bold"><br>
Split 'genres' into multiple columns
<br> </p>
```
movie_genres = movies['genres'].str.split('|', expand=True)
movie_genres[:10]
```
<p style="font-family: Arial; font-size:1.35em;color:#2462C0; font-style:bold"><br>
Add a new column for comedy genre flag
<br> </p>
```
movie_genres['isComedy'] = movies['genres'].str.contains('Comedy')
movie_genres[:10]
```
<p style="font-family: Arial; font-size:1.35em;color:#2462C0; font-style:bold"><br>
Extract year from title e.g. (1995)
<br> </p>
```
# Extract the year from titles like "Toy Story (1995)".
# Raw string avoids the invalid "\(" escape (a SyntaxWarning in modern Python);
# the greedy leading ".*" means the LAST parenthesised group in the title wins.
movies['year'] = movies['title'].str.extract(r'.*\((.*)\).*', expand=True)
movies.tail()
```
<p style="font-family: Arial; font-size:1.35em;color:#2462C0; font-style:bold"><br>
More here: http://pandas.pydata.org/pandas-docs/stable/text.html#text-string-methods
<br> </p>
<h1 style="font-size:2em;color:#2467C0">Parsing Timestamps</h1>
Timestamps are common in sensor data or other time series datasets.
Let us revisit the *tags.csv* dataset and read the timestamps!
```
tags = pd.read_csv('./movielens/tags.csv', sep=',')
tags.dtypes
```
<p style="font-family: Arial; font-size:1.35em;color:#2462C0; font-style:bold">
Unix time / POSIX time / epoch time records
time in seconds <br> since midnight Coordinated Universal Time (UTC) of January 1, 1970
</p>
```
tags.head(5)
tags['parsed_time'] = pd.to_datetime(tags['timestamp'], unit='s')
```
<p style="font-family: Arial; font-size:1.35em;color:#2462C0; font-style:bold">
Data Type datetime64[ns] maps to either <M8[ns] or >M8[ns] depending on the hardware
</p>
```
tags['parsed_time'].dtype
tags.head(2)
```
<p style="font-family: Arial; font-size:1.35em;color:#2462C0; font-style:bold">
Selecting rows based on timestamps
</p>
```
# Datetimes compare directly against an ISO-format date string.
greater_than_t = tags['parsed_time'] > '2015-02-01'
selected_rows = tags[greater_than_t]
tags.shape, selected_rows.shape     # total rows vs. rows after 2015-02-01
```
<p style="font-family: Arial; font-size:1.35em;color:#2462C0; font-style:bold">
Sorting the table using the timestamps
</p>
```
tags.sort_values(by='parsed_time', ascending=True)[:10]
```
<h1 style="font-size:2em;color:#2467C0">Average Movie Ratings over Time </h1>
## Are Movie ratings related to the year of launch?
```
# Join per-movie mean ratings onto movie metadata, then average by release year.
average_rating = ratings[['movieId','rating']].groupby('movieId', as_index=False).mean()
average_rating.tail()
joined = movies.merge(average_rating, on='movieId', how='inner')
joined.head()
joined.corr()   # NOTE(review): relies on non-numeric columns being auto-dropped (older pandas); newer versions require numeric_only=True
yearly_average = joined[['year','rating']].groupby('year', as_index=False).mean()
yearly_average[:10]
yearly_average[-20:].plot(x='year', y='rating', figsize=(15,10), grid=True)   # last 20 years only
```
<p style="font-family: Arial; font-size:1.35em;color:#2462C0; font-style:bold">
Do some years look better for the boxoffice movies than others? <br><br>
Does any data point seem like an outlier in some sense?
</p>
| github_jupyter |
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sb
import string
# Load the Airbnb listings and the Hawaii tourism datasets.
df = pd.read_csv('./Documents/listings.csv')
df.head()
Ht = pd.read_csv('./Documents/Hawaii_Tourisim_Data.csv')
Ht
Ht['Value']=pd.to_numeric(Ht['Value']) # convert the data type to numeric
visitor_month=Ht.groupby('month',sort=False,as_index=False)['Value'].sum() #grouping by month to see the trend of visitors by month
visitor_month=visitor_month[visitor_month.month!='All'] # drop the 'All' aggregate row
plt.bar(x=visitor_month["month"],height=visitor_month["Value"]) # bar chart by month to identify seasonal trends
# Total visitor figures by island (State) and attribute, then each island's
# share of total arrivals. (The original computed the same groupby twice —
# once as a Series that was immediately overwritten.)
island_visitors = Ht.groupby(['State','Attribute'], sort=False)['Value'].sum().reset_index(name='Value')
# .copy() so the Pct assignment below writes to an independent frame instead
# of a view (avoids SettingWithCopyWarning).
island_visitors1 = island_visitors[island_visitors.Attribute=='Visitor arrivals'].copy()
island_visitors1['Pct'] = (island_visitors1.Value/island_visitors1.Value.sum())*100  # percentage of visitors by island
island_visitors1
plt.bar(x=island_visitors1["State"],height=island_visitors1["Pct"])  # visualize each island's share
plt.title("Islands comparision")
fig = plt.gcf()  # enlarge the figure for readability
fig.set_size_inches(11,8)
# Average length of stay by island. (The original's first groupby produced a
# Series that was immediately overwritten — removed as dead code.)
Length_of_stay = Ht.groupby(['State','Attribute'], sort=False)['Value'].mean().reset_index(name='Value')
# BUG FIX: the filter mask was previously built from `island_visitors`, a
# different frame that only happens to share row order; filter on this
# frame's own Attribute column instead.
Length_of_stay = Length_of_stay[Length_of_stay.Attribute=='Length of stay(days)']
Length_of_stay
plt.bar(x=Length_of_stay["State"],height=Length_of_stay["Value"])  # visualize the trend
plt.title("Length of stay comparision")
fig = plt.gcf()
fig.set_size_inches(11,8)
# Count listings per (island, neighbourhood) pair and draw them as a heatmap.
airbnb_reviews = df.groupby(['neighbourhood_group_cleansed','neighbourhood_cleansed'])['host_id'].agg('count') # Groupby islands
# and neighbourhoods to count the airbnbs listed in each
airbnb_reviews = airbnb_reviews.unstack(level=0) # pivot islands into columns to form the heatmap grid
fig, ax = plt.subplots(figsize=(11, 9)) # adjusting the size of the plot for better visibility
sb.heatmap(airbnb_reviews) # generating a heatmap
plt.show()
# Strip currency formatting so price can become numeric.
# regex=False makes '$' a literal match; under the old regex=True default,
# '$' is an end-of-string anchor and nothing would be removed.
df['price'] = df['price'].str.replace('$', '', regex=False)
df['price'] = df['price'].str.replace(',', '', regex=False)
df['price']=pd.to_numeric(df['price']) # convert the data type to numeric
airbnb_price_variations = df.groupby(['neighbourhood_group_cleansed'])['price'].sum().reset_index(name='price')
# grouping by island to understand the price variation across the airbnbs
sb.set(rc={'figure.figsize':(14,6)}) # adjusting the size of the plot
df[['neighbourhood_group_cleansed','price']] # the columns used for the box plot
ax = sb.boxplot(x="neighbourhood_group_cleansed", y="price", data=df[['neighbourhood_group_cleansed','price']],showfliers=False)
# box plot per island with outliers removed
airbnb_price_variations_sub = df.groupby(['neighbourhood_group_cleansed','neighbourhood_cleansed','host_id'])['price'].sum().reset_index(name='price')
# Grouping by island and neighbourhood to see the price variation
airbnb_price_variations_sub = airbnb_price_variations_sub[airbnb_price_variations_sub.neighbourhood_group_cleansed=='Hawaii']
# Filtering to Hawaii island
sb.set(rc={'figure.figsize':(14,6)})
ax = sb.boxplot(x="neighbourhood_cleansed", y="price", data=airbnb_price_variations_sub,showfliers=False)
# Price variation across Hawaii-island neighbourhoods
# Per-host mean review score and review count by neighbourhood.
# FIX: select the two columns with a list ([['a','b']]); indexing a groupby
# with a bare tuple of labels was deprecated and is an error in pandas >= 2.0.
Reviews_check = df.groupby(['neighbourhood_cleansed','host_id'])[['review_scores_value','number_of_reviews']].mean().reset_index()
# Weight the score by review volume to get a single success KPI.
Reviews_check['review_score_normalized'] = Reviews_check.review_scores_value*Reviews_check.number_of_reviews
valuesin = ['Kau','Puna']
Reviews_check = Reviews_check[Reviews_check.neighbourhood_cleansed.isin(valuesin)] # Filtering to Kau and Puna region
Reviews_check = Reviews_check.dropna() # keep only airbnbs that have valid scores
#Reviews_check
sb.set(rc={'figure.figsize':(14,6)})
ax = sb.boxplot(x="neighbourhood_cleansed", y="review_score_normalized", data=Reviews_check,showfliers=False)
# Compare the two regions
```
| github_jupyter |
# What you will learn
- What is a CSV file
- Reading and writing to a csv file
### CSV = Comma separated values
Chances are you have worked with .csv files before. These are simply values separated by commas ...
#### Note
- All files used or created will be stored under week 3 in a folder called "data"
Here is some example output:
If this kinda looks like data in excel that is because excel spread-sheets can be exported as .csv files.
Above each column for a given row has a distinction. The first row shown above are the column headers - i.e. the name of each column. We'll show how to work with this data but first the basics:
## Reading a CSV file
```
import csv
import os

def run():
    """Print the first 10 rows of ./data/example1.csv."""
    # newline="" is required by the csv module docs so the reader handles
    # quoting and embedded newlines correctly on every platform.
    with open("./data/example1.csv", "r", newline="") as csvFile: # the "r" means read
        fileReader = csv.reader(csvFile)
        i = 0
        for row in fileReader:
            print(row)
            i += 1
            if i == 10:  # stop early so all 5001 rows are not printed
                break

if __name__=="__main__":
    run()
```
#### Note
- The "if i == 10" break is just so all 5001 rows are not printed
- the "r" tells the file to be opened for reading only
### What is the "with open"
To operate (meaning to read or to write) on a file that file has to be accessed. This involves opening the file. But what happens when you are done? That file needs to be closed. This is where "with" comes in. It is a context manager meaning that when you fall out of the with indentation the file will automatically be closed.
You cannot write to a file that another program is using - this is like two people trying to use the remote at the same time; chaos and confusion will result. Context managers allow clean closing of files without having to explicitly state that the file closes.
We could write the above code as seen below - but THIS IS BAD FORM!
```
# this is bad code (kept deliberately as a counter-example): without a context
# manager, the file stays open if any exception is raised before .close() runs
csvFile = open("./data/example1.csv")
fileReader = csv.reader(csvFile)
i = 0
for row in fileReader:
    print(row)
    i += 1
    if i == 10:
        break
csvFile.close()  # easy to forget, and skipped entirely on an exception above
```
## Writing to a CSV file
Above we used "r" to read from a file. Well now that becomes "w" to write.
#### Important
If you write to a file that already exists then the file will be overwritten. There will be no warning as with many office tools saying you are about to overwrite something.
If you wish to add something to a csv file, we will cover that below.
```
import csv
import os
import time
import random

HEADERS = ['data', 'timeStamp', 'dataType']

def makeRow():
    """Return one row of random demo data: [random int, current epoch time, label]."""
    return [random.randint(0,101), time.time(), "random Float"]

def makeFile():
    """Write a fresh CSV with a header row and 10 random data rows."""
    # newline="" per the csv docs: without it the writer emits blank
    # lines between rows on Windows.
    with open("./data/example2.csv", "w", newline="") as csvFile: # the "w" means write
        fileWriter = csv.writer(csvFile)
        fileWriter.writerow(HEADERS)
        for i in range(10):
            fileWriter.writerow(makeRow())

def readFile():
    """Print every row of the CSV created by makeFile()."""
    with open("./data/example2.csv", "r", newline="") as csvFile:
        fileReader = csv.reader(csvFile)
        for row in fileReader:
            print(row)

if __name__=="__main__":
    makeFile()
    readFile()
```
#### Note
- If you re-run the above bit of code you will notice that the all the values change
- This is, again, because the "w" mode will overwrite your files!
- Please don't accidentally overwrite the results of an experiment and have to redo it ...
## From csv to list of dicts
Often it is good to organize data from a csv into a list of dicts. This makes the data easier to operate on later.
```
# Turn the rows written by the previous cell into a list of dicts.
def csvToDict(csvData):
    """Convert an iterator of CSV rows (first row = headers) into a list of dicts.

    `csvData` must be an iterator (e.g. a csv.reader); the header row is
    consumed with next() and zipped against each remaining row.
    """
    headers = next(csvData)  # advance past the header row
    # Comprehension instead of map(lambda ...): same pairing, plainer to read.
    return [dict(zip(headers, row)) for row in csvData]

def run(): # read from the csv you created to see it!
    with open("./data/example2.csv", "r", newline="") as csvFile:
        fileReader = csv.reader(csvFile)
        listOfDics = csvToDict(fileReader)
        print(listOfDics)

if __name__=="__main__":
    run()
```
## Appending a csv file
Let's take the csv file you created above and append it with 10 more rows
```
import csv
import os
import time
import random

HEADERS = ['data', 'timeStamp', 'dataType']

def makeRow():
    """Return one row of random demo data: [random int, current epoch time, label]."""
    return [random.randint(0,101), time.time(), "random Float"]

def makeFile():
    """Append 10 random data rows to the existing CSV (creates it if missing)."""
    # newline="" per the csv docs: without it the writer emits blank
    # lines between rows on Windows.
    with open("./data/example2.csv", "a", newline="") as csvFile: # the "a" means append
        fileWriter = csv.writer(csvFile)
        for i in range(10):
            fileWriter.writerow(makeRow())

def readFile():
    """Print every row of the (now longer) CSV."""
    with open("./data/example2.csv", "r", newline="") as csvFile:
        fileReader = csv.reader(csvFile)
        for row in fileReader:
            print(row)

if __name__=="__main__":
    makeFile()
    readFile()
```
#### Note
- Yep, it is that simple to append. Just change "w" to "a"
- If you try to append a file that does not exist then that file will be created for you
#### Note
- There are some other options besides "r", "w", "a" but those three should work for now. Just know ther are a few more.
- See here for details: https://docs.python.org/3/library/functions.html#open
# What you need to do
- Read in the csv file from example1.csv
- Turn it into a list of dicts
- Replace all timeStamps with their equivalent time since epoch value
- Find the mean of the timeStamps
- Discard all data points that have timeStamps lower than that mean
- Write all data points that have a sensorType of "BME280" to a .csv file called "BME280" that is in the "data folder"
- Write all data points that have a sensorType of "ADC1115_Pyra" to a .csv file called "ADC1115_Pyra" that is in the "data folder"
| github_jupyter |
# Simple training tutorial
The objective of this tutorial is to show you the basics of the library and how it can be used to simplify the audio processing pipeline.
This page is generated from the corresponding jupyter notebook, that can be found on [this folder](https://github.com/fastaudio/fastaudio/tree/master/docs)
To install the library, uncomment and run this cell:
```
# !pip install git+https://github.com/fastaudio/fastaudio.git
```
**COLAB USERS: Before you continue and import the lib, go to the `Runtime` menu and select `Restart Runtime`.**
```
from fastai.vision.all import *
from fastaudio.core.all import *
from fastaudio.augment.all import *
```
# ESC-50: Dataset for Environmental Sound Classification
```
#The first time this will download a dataset that is ~650mb
path = untar_data(URLs.ESC50, dest="ESC50")
```
The audio files are inside a subfolder `audio/`
```
(path/"audio").ls()
```
And there's another folder `meta/` with some metadata about all the files and the labels
```
(path/"meta").ls()
```
Opening the metadata file
```
df = pd.read_csv(path/"meta"/"esc50.csv")
df.head()
```
## Datablock and Basic End to End Training
```
# Helper function to split the data
def CrossValidationSplitter(col='fold', fold=1):
    "Split `items` (supposed to be a dataframe) by fold in `col`: rows where `col` == `fold` become the validation set"
    def _inner(o):
        # FIX: the assertion message named "ColSplitter" (the fastai splitter
        # this was adapted from); report this function's actual name instead.
        assert isinstance(o, pd.DataFrame), "CrossValidationSplitter only works when your items are a pandas DataFrame"
        col_values = o.iloc[:,col] if isinstance(col, int) else o[col]
        valid_idx = (col_values == fold).values.astype('bool')
        return IndexSplitter(mask2idxs(valid_idx))(o)
    return _inner
```
Creating the Audio to Spectrogram transform from a predefined config.
```
cfg = AudioConfig.BasicMelSpectrogram(n_fft=512)
a2s = AudioToSpec.from_cfg(cfg)
```
Creating the Datablock
```
# DataBlock wiring: audio files in, category labels out.
auds = DataBlock(blocks=(AudioBlock, CategoryBlock),
                 get_x=ColReader("filename", pref=path/"audio"),  # filename column, prefixed with the audio dir
                 splitter=CrossValidationSplitter(fold=1),        # rows with fold == 1 form the validation set
                 batch_tfms = [a2s],                              # audio -> spectrogram, applied per batch
                 get_y=ColReader("category"))                     # label column
dbunch = auds.dataloaders(df, bs=64)
```
Visualizing one batch of data. Notice that the title of each Spectrogram is the corresponding label.
```
dbunch.show_batch(figsize=(10, 5))
```
# Learner and Training
While creating the learner, we need to pass a special cnn_config to indicate that our input spectrograms only have one channel. Besides that, it's the usual vision learner.
```
# Standard vision learner; only n_in=1 is audio-specific (spectrograms have
# a single channel instead of RGB).
learn = cnn_learner(dbunch,
                    resnet18,
                    config={"n_in":1}, #<- Only audio specific modification here
                    loss_func=CrossEntropyLossFlat(),
                    metrics=[accuracy])
from fastaudio.ci import skip_if_ci

@skip_if_ci
def train():
    # BUG FIX: this function was previously named `learn`, which rebound the
    # global name — `learn.fine_tune(10)` inside it then resolved to the
    # function itself and raised AttributeError. Renamed to `train`.
    learn.fine_tune(10)
```
| github_jupyter |
# Table of Contents
<p><div class="lev1"><a href="#Dependent-Things"><span class="toc-item-num">1 </span>Dependent Things</a></div><div class="lev1"><a href="#Cancer-Example"><span class="toc-item-num">2 </span>Cancer Example</a></div><div class="lev2"><a href="#Question-1"><span class="toc-item-num">2.1 </span>Question 1</a></div><div class="lev2"><a href="#Question-2"><span class="toc-item-num">2.2 </span>Question 2</a></div><div class="lev2"><a href="#Question-3"><span class="toc-item-num">2.3 </span>Question 3</a></div><div class="lev2"><a href="#Question-4"><span class="toc-item-num">2.4 </span>Question 4</a></div><div class="lev2"><a href="#Question-5"><span class="toc-item-num">2.5 </span>Question 5</a></div><div class="lev2"><a href="#Question-6"><span class="toc-item-num">2.6 </span>Question 6</a></div><div class="lev2"><a href="#Question-7"><span class="toc-item-num">2.7 </span>Question 7</a></div><div class="lev1"><a href="#Total-Probability"><span class="toc-item-num">3 </span>Total Probability</a></div><div class="lev1"><a href="#Two-Coins"><span class="toc-item-num">4 </span>Two Coins</a></div><div class="lev2"><a href="#Question-1"><span class="toc-item-num">4.1 </span>Question 1</a></div><div class="lev2"><a href="#Question-2"><span class="toc-item-num">4.2 </span>Question 2</a></div><div class="lev2"><a href="#Question-3"><span class="toc-item-num">4.3 </span>Question 3</a></div><div class="lev2"><a href="#Question-4"><span class="toc-item-num">4.4 </span>Question 4</a></div><div class="lev1"><a href="#Summary"><span class="toc-item-num">5 </span>Summary</a></div>
# Dependent Things
In real life, things depend on each other.
Say you can be born smart or dumb and for the sake of simplicity, let's assume whether you're smart or dumb is just nature's flip of a coin. Now whether you become a professor at Stanford is not entirely independent. I would argue becoming a professor at Stanford is generally not very likely, so the probability might be 0.001, but it also depends on whether you're born smart or dumb. If you are born smart the probability might be larger, whereas if you're born dumb, the probability might be markedly smaller.
Now this just is an example, but if you can think of the most two consecutive coin flips. The first is whether you are born smart or dumb. The second is whether you get a job on a certain time. And now if we take them in these two coin flips, they are not independent anymore. So whereas in our last unit, we assumed that the coin flips were independent, that is, the outcome of the first didn't affect the outcome of the second. From now on, we're going to study the more interesting cases where the outcome of the first does impact the outcome of the second, and to do so you need to use more variables to express these cases.
<img src="images/Screen Shot 2016-04-27 at 8.49.53 AM.png"/>
# Cancer Example
## Question 1
To do so, let's study a medical example--supposed there's a patient in the hospital who might suffer from a medical condition like cancer. Let's say the probability of having this cancer is 0.1. That means you can tell me what's the probability of being cancer free.
**Answer**
- The answer is 0.9 with just 1 minus the cancer.
<img src="images/Screen Shot 2016-04-27 at 8.52.39 AM.png"/>
## Question 2
Of course, in reality, we don't know whether a person suffers cancer, but we can run a test like a blood test. The outcome of it blood test may be positive or negative, but like any good test, it tells me something about the thing I really care about--whether the person has cancer or not.
Let's say, if the person has the cancer, the test comes up positive with the probability of 0.9, and that implies if the person has cancer, the negative outcome will have 0.1 probability and that's because these two things have to add to 1.
I've just given you a fairly complicated notation that says the outcome of the test depends on whether the person has cancer or not. We call this thing over here a conditional probability, and the way to understand this is a very funny notation. There's a bar in the middle, and the bar says what's the probability of the stuff on the left given that we assume the stuff on the right is actually the case.
Now, in reality, we don't know whether the person has cancer or not, and in a later unit, we're going to reason about whether the person has cancer given a certain data set, but for now, we assume we have god-like capabilities. We can tell with absolute certainty that the person has cancer, and we can determine what the outcome of the test is. This is a test that isn't exactly deterministic--it makes mistakes, but it only makes a mistake in 10% of the cases, as illustrated by the 0.1 down here.
Now, it turns out, I haven't fully specified the test. The same test might also be applied to a situation where the person does not have cancer. So this little thing over here is my shortcut of not having cancer. And now, let me say the probability of the test giving me a positive results--a false positive result when there's no cancer is 0.2. You can now tell me what's the probability of a negative outcome in case we know for a fact the person doesn't have cancer, so please tell me.
**Answer**
And the answer is 0.8. As I'm sure you noticed in the case where there is cancer, the possible test outcomes add up to 1. In the where there isn't cancer, the possible test outcomes add up to 1. So 1 - 0.2 = 0.8.
<img src="images/Screen Shot 2016-04-28 at 7.24.34 AM.png"/>
*Screenshot taken from [Udacity](https://classroom.udacity.com/courses/st101/lessons/48729372/concepts/487236390923)*
<!--TEASER_END-->
## Question 3
Look at this, this is very nontrivial but armed with this, we can now build up the truth table for all the cases of the two different variables, cancer and non-cancer and positive and negative tests outcome.
So, let me write down cancer and test and let me go through different possibilities. We could have cancer or not, and the test may come up positive or negative. So, please give me the probability of the combination of those for the very first one, and as a hint, it's kind of the same as before where we multiply two things, but you have to find the right things to multiple in this table over here.
**Answer**
And the answer is probability of cancer is 0.1, probability of test being positive given that he has cancer is the one over here--0.9, multiplying those two together gives us 0.09.
<img src="images/Screen Shot 2016-04-28 at 7.31.47 AM.png"/>
*Screenshot taken from [Udacity](https://classroom.udacity.com/courses/st101/lessons/48729372/concepts/486789140923)*
<!--TEASER_END-->
## Question 4
Moving to the next case--what do you think the probability is that the person does have cancer but the test comes back negative? What's the combined probability of these two cases?
**Answer**
And once again, we'd like to refer to the corresponding numbers over here on the right side: 0.1 for the cancer times the probability of getting a negative result conditioned on having cancer, and that is 0.1 × 0.1, which is 0.01.
<img src="images/Screen Shot 2016-04-28 at 7.34.42 AM.png"/>
*Screenshot taken from [Udacity](https://classroom.udacity.com/courses/st101/lessons/48729372/concepts/486977510923)*
<!--TEASER_END-->
## Question 5
Moving on to the next two, we have:
**Answer**:
- **Cancer (N) - Test (P)**: Here the answer is 0.18 by multiplying the probability of not having cancer, which is 0.9, with the probability of getting a positive test result for a non-cancer patient 0.2. Multiplying 0.9 with 0.2 gives me 0.18.
- **Cancer (N) - Test (N)**: Here you get 0.72, which is the product of not having cancer in the first place 0.9 and the probability of getting a negative test result under the condition of not having cancer.
<img src="images/Screen Shot 2016-04-28 at 7.39.14 AM.png"/>
*Screenshot taken from [Udacity](https://classroom.udacity.com/courses/st101/lessons/48729372/concepts/486987400923)*
<!--TEASER_END-->
## Question 6
Now quickly add all of those probabilities up.
**Answer**
And as usual, the answer is 1. That is, we study in the truth table all possible cases. and when we add up the probabilities, you should always get the answer of 1.
<img src="images/Screen Shot 2016-04-28 at 7.41.53 AM.png"/>
*Screenshot taken from [Udacity](https://classroom.udacity.com/courses/st101/lessons/48729372/concepts/487214940923)*
<!--TEASER_END-->
## Question 7
Now let me ask you a really tricky question. What is the probability of a positive test result? Can you sum or determine, irrespective of whether there's cancer or not, what is the probability you get a positive test result?
**Answer**
And the result, once again, is found in the truth table, which is why this table is so powerful. Let's look at where in the truth table we get a positive test result. I would say it is right here, right here. If you take corresponding probabilities of 0.09 and 0.18, and add them up, we get 0.27.
<img src="images/Screen Shot 2016-04-28 at 7.44.52 AM.png"/>
*Screenshot taken from [Udacity](https://classroom.udacity.com/courses/st101/lessons/48729372/concepts/486987410923)*
<!--TEASER_END-->
# Total Probability
Putting all of this into mathematical notation we've given the probability of having cancer and from there, it follows the probability of not having cancer. And they give me 2 conditional probability that are the test being positive.
One is the probability of the test being positive given that we have cancer, from which we can now derive the probability of the test being negative given cancer. The other is the probability of the test being positive given that we are cancer free, which likewise determines the probability of a negative test result in the cancer-free case. These complements are easily inferred by the 1-minus rule.
Then when we read this, you complete the probability of a positive test result as the sum of a positive test result given cancer times the probability of cancer, which is our truth table entry for the combination of P and C plus the same given we don't have of cancer.
Now this notation is confusing and complicated if we ever dive deep into probability, that's called total probability, but it's useful to know that this is very, very intuitive and to further develop intuition let me just give you another exercise of exactly the same type.
<img src="images/Screen Shot 2016-04-28 at 7.50.03 AM.png"/>
*Screenshot taken from [Udacity](https://classroom.udacity.com/courses/st101/lessons/48729372/concepts/487202500923)*
<!--TEASER_END-->
# Two Coins
## Question 1
This time around, we have a bag, and in the bag are 2 coins, coin 1 and coin 2. And in advance, we know that coin 1 is fair. So P of coin 1 coming up heads is 0.5, whereas coin 2 is loaded, that is, P of coin 2 coming up heads is 0.9. Quickly, give me the following numbers: the probability of coming up tails for coin 1 and for coin 2.
**Answer**
And the answer is 0.5 for coin 1 and 0.1 for coin 2, because these things have to add up to 1 for each of the coins.
<img src="images/Screen Shot 2016-04-28 at 7.57.29 AM.png"/>
*Screenshot taken from [Udacity](https://classroom.udacity.com/courses/st101/lessons/48729372/concepts/487236400923)*
<!--TEASER_END-->
## Question 2
So now what happens is, I'm going to remove one of the coins from this bag, and each coin, coin 1 or coin 2, is being picked with equal probability. Let me now flip that coin once, and I want you to tell me, what's the probability that this coin which could be 50% chance fair coin 1and 50% chance a loaded coin. What's the probability that this coin comes up heads? Again, this is an exercise in conditional probability.
**Answer**
And let’s do the truth table. You have a pick event followed by a flip event
- We can pick coin 1 or coin 2. There is a 0.5 chance for each of the coins. Then we can flip and get heads or tails for the coin we've chosen. Now what are the probabilities?
- I'd argue picking coin 1 has probability 0.5, and once I pick the fair coin, I know that the probability of heads is, once again, 0.5, which makes it 0.25. The same is true for picking the fair coin and getting tails.
- But if we pick the unfair coin with a 0.5 chance, we get a 0.9 chance of heads, so 0.5 times 0.9 gives you 0.45; whereas for the unfair coin the probability of tails is 0.1, which multiplied by the 0.5 probability of picking it gives us 0.05.
- Now when they ask you, what's the probability of heads we'll find that 2 of those cases indeed come up with heads so if you add 0.25 and 0.45 and we get 0.7. So this example is a 0.7 chance that we might generate heads.
<img src="images/Screen Shot 2016-04-28 at 8.03.35 AM.png"/>
*Screenshot taken from [Udacity](https://classroom.udacity.com/courses/st101/lessons/48729372/concepts/486938400923)*
<!--TEASER_END-->
## Question 3
Now let me up the ante by flipping this coin twice. Once again, I'm drawing a coin from this bag, and I pick one at 50% chance. I don't know which one I have picked. It might be fair or loaded. And in flipping it twice, I get first heads, and then tails. What's the probability that if I do the following, I draw a coin at random with the probabilities shown, and then I flip it twice, that same coin. I just draw it once and then flip it twice. What's the probability of seeing heads first and then tails? Again, you might derive this using truth tables.
**Answer**
This is a non-trivial question, and the right way to do this is to go through the truth table, which I've drawn over here. There's 3 different things happening. We've taken initial pick of the coin, which can take coin 1 or coin 2 with equal probability, and then you go flip it for the first time, and there's heads or tails outcomes, and we flip it for the second time with the second outcome. So these different cases summarize my truth table.
I now need to observe just the cases where head is followed by tail. This one right here and over here. Then we compute the probability for those 2 cases.
- The probability of picking coin 1 is 0.5. For the fair coin, we get 0.5 for heads, followed by 0.5 for tails. Multiplied together, these give 0.125.
- Let's do it with the second case. There's a 0.5 chance of taking coin 2. Now that one comes up with heads at 0.9. It comes up with tails at 0.1. So multiply these together, gives us 0.045, a smaller number than up here.
- Adding these 2 things together results in 0.17, which is the right answer to the question over here.
<img src="images/Screen Shot 2016-04-28 at 8.09.25 AM.png"/>
*Screenshot taken from [Udacity](https://classroom.udacity.com/courses/st101/lessons/48729372/concepts/486957990923)*
<!--TEASER_END-->
## Question 4
Let me do this once again. There are 2 coins in the bag, coin 1 and coin 2. And as before, taking coin 1 at 0.5 probability. But now I'm telling you that coin 1 is loaded, so give you heads with probability of 1. Think of it as a coin that only has heads. And coin 2 is also loaded. It gives you heads with 0.6 probability. Now work out for me into this experiment, what's the probability of seeing tails twice?
**Answer**
And the answer is depressing. If you, once again, draw the truth table, you find, for the different combinations, that if you've drawn coin 1, you'd never see tails. So this case over here, which indeed has tails, tails. We have 0 probability.
- We can work this out probability of drawing the first coin at 0.5, but the probability of tails given the first coin must be 0, because the probability of heads is 1, so 0.5 times 0 times 0, that is 0.
- So the only case where you might see tails/tails is when you actually drew coin 2, and this has a probability of 0.5 times the probability of tails given that we drew the second coin, which is 0.4 times 0.4 again, and that's the same as 0.08 would have been the correct answer.
<img src="images/Screen Shot 2016-04-28 at 8.34.08 AM.png"/>
*Screenshot taken from [Udacity](https://classroom.udacity.com/courses/st101/lessons/48729372/concepts/487015560923)*
<!--TEASER_END-->
# Summary
So there're important lessons in what we just learned, the key thing is we talked about conditional probabilities. We said that the outcome in a variable, like a test is actually not like the random coin flip but it depends on something else, like a disease.
When we looked at this, we were able to predict what's the probability of a test outcome even if we don't know whether the person has a disease or not. And we did this using the truth table, and in the truth table, we summarized multiple lines.
- For example, we multiplied the probability of a test outcome condition on this unknown variable, whether the person is diseased multiplied by the probability of the disease being present. Then we added a second row of the truth table, where our unobserved disease variable took the opposite value of not diseased.
- Written this way, it looks really clumsy, but that's effectively what we did when we went to the truth table. So we now understand that certain coin flips are dependent on other coin flips, so if god, for example, flips the coin of us having a disease or not, then the medical test again has a random outcome, but its probability really depends on whether we have the disease or not. We have to consider this when we do probabilistic inference. In the next unit, we're going to ask the real question. Say we really care about whether we have a disease like cancer or not. What do you think the probability is, given that our doctor just gave us a positive test result?
And I can tell you, you will be in for a surprise.
<img src="images/Screen Shot 2016-04-28 at 8.44.56 AM.png"/>
*Screenshot taken from [Udacity](https://classroom.udacity.com/courses/st101/lessons/48729372/concepts/487015560923)*
<!--TEASER_END-->
| github_jupyter |
# 0.0. IMPORTS
```
import pandas as pd
import inflection
import math
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from IPython.display import Image
import datetime
from scipy import stats
```
## 0.1. Helper Functions
```
def cramer_v(x, y):
    """Return the bias-corrected Cramér's V association between two
    categorical variables (Bergsma correction).

    Parameters
    ----------
    x, y : array-like of categorical values, same length.

    Returns
    -------
    float in [0, 1]: 0 = no association, 1 = perfect association.
    """
    # DataFrame.as_matrix() was removed in pandas 1.0 — use .to_numpy().
    cm = pd.crosstab(x, y).to_numpy()
    n = cm.sum()
    r, k = cm.shape

    chi2 = stats.chi2_contingency(cm)[0]

    # Bias corrections; clamp the corrected chi-square at 0 so sampling
    # noise cannot drive it negative (which would make sqrt return NaN).
    chi2_corrected = max(0, chi2 - ((k - 1)*(r - 1))/(n - 1))
    k_corrected = k - ((k - 1)**2)/(n - 1)
    r_corrected = r - ((r - 1)**2)/(n - 1)

    return np.sqrt((chi2_corrected/n) / (min(k_corrected - 1, r_corrected - 1)))
```
## 0.2. Loading Data
```
# Rossmann Kaggle data: daily sales per store plus static store metadata.
# low_memory=False avoids chunked dtype inference on these mixed-type CSVs.
df_sales_raw = pd.read_csv('data/train.csv', low_memory=False)
df_store_raw = pd.read_csv('data/store.csv', low_memory=False)
# merge dataframes
# Left join on 'Store': keep every sales row, attach the store attributes.
df_raw = pd.merge(df_sales_raw, df_store_raw, how='left', on='Store' )
df_raw.sample()
```
# 1.0. DESCRICAO DOS DADOS
```
# Original dataframe copy
# Work on a copy so the raw merge result stays untouched and this section
# can be re-run without reloading the CSVs.
df1 = df_raw.copy()
```
## 1.1. Rename columns
```
df1.columns

# Original column names, exactly as they come from the Kaggle CSVs.
cols_old = ['Store', 'DayOfWeek', 'Date', 'Sales', 'Customers', 'Open', 'Promo',
            'StateHoliday', 'SchoolHoliday', 'StoreType', 'Assortment',
            'CompetitionDistance', 'CompetitionOpenSinceMonth',
            'CompetitionOpenSinceYear', 'Promo2', 'Promo2SinceWeek',
            'Promo2SinceYear', 'PromoInterval']

# Changing write pattern to snakecase (CamelCase -> camel_case via inflection).
snakecase = lambda x: inflection.underscore( x )
cols_new = list( map( snakecase, cols_old ) )

# Renaming — relies on cols_old matching df1's current column order exactly.
df1.columns = cols_new
df1.columns
```
## 1.2. Data Dimension
```
# shape[0] = rows, shape[1] = columns.
# Fix: the original printed "Number of rows" twice; the second line reports columns.
print( 'Number of rows: {}'.format(df1.shape[0]))
print( 'Number of cols: {}'.format(df1.shape[1]))
```
## 1.3. Data Types
```
df1.dtypes
# 'date' is read as a plain object/string column; convert it to datetime64
# so the .dt accessor (month/year/week extraction below) works.
df1['date'] = pd.to_datetime(df1['date'])
df1.dtypes
```
## 1.4. Check NA
```
# Count missing values per column to decide the imputation strategy below.
df1.isna().sum()
```
## 1.5. Fillout NA
```
# First, analyze every variable that has NA values and decide an imputation.

# competition_distance
## Assumption: if it is NA, it is probably because the store has no nearby competitor.
## What has been done: IMPUTE AN EXTREME DISTANCE (well beyond the observed
## maximum) so "no competitor" behaves like "competitor very far away".
df1['competition_distance'].max()
df1['competition_distance'] = df1['competition_distance'].apply( lambda x: 100000 if math.isnan(x) else x )

# competition_open_since_month
## Assumption: two main reasons for NA: (i) the store has no nearby competitor, or
## (ii) it has one but the opening date is unknown (older than the store, or unavailable).
## What has been done: USE THE SALE ROW'S OWN MONTH, which makes the derived
## competition age roughly 0 for these rows.
df1['competition_open_since_month'] = df1.apply( lambda x: x['date'].month if math.isnan(x['competition_open_since_month']) else x['competition_open_since_month'], axis=1)
# Vectorized alternative (much faster than a row-wise apply):
# df1.competition_open_since_month.fillna(df1.date.dt.month, inplace=True)

# competition_open_since_year
## Same idea as the month column above.
df1['competition_open_since_year'] = df1.apply( lambda x: x['date'].year if math.isnan(x['competition_open_since_year']) else x['competition_open_since_year'], axis=1)
# Vectorized alternative (note: must use dt.year here, not dt.month):
# df1.competition_open_since_year.fillna(df1.date.dt.year, inplace=True)
# promo2
## Has no NA values.

# promo2_since_week
## Assumption: NA means the store is not participating in / extending promo2.
## Arguably NA should become 0 ("not participating"); what has actually been
## done is to reuse the sale date, so the derived promo duration becomes ~0.
## BUG FIX: the original filled this *week* column with x['date'].month
## (a 1-12 value in a 1-52 column); use the ISO week of the date instead.
df1['promo2_since_week'] = df1.apply( lambda x: x['date'].isocalendar()[1] if math.isnan(x['promo2_since_week']) else x['promo2_since_week'], axis=1)

# promo2_since_year
## Same logic as above, using the year of the sale date.
df1['promo2_since_year'] = df1.apply( lambda x: x['date'].year if math.isnan(x['promo2_since_year']) else x['promo2_since_year'], axis=1)
# promo_interval
## PromoInterval lists the months in which a store's consecutive promo
## restarts, e.g. 'Jan,Apr,Jul,Oct'. To make it usable we (i) fill NA with 0,
## (ii) map the sale month to its abbreviation, and (iii) flag the sale with
## is_promo = 1 when that abbreviation appears in the interval string.
## BUG FIX: the abbreviations must match the dataset's spellings — the
## original used 'Fev' (Portuguese) for February and 'Sep' for September,
## while store.csv uses 'Feb,May,Aug,Nov' and 'Mar,Jun,Sept,Dec', so those
## months never matched and is_promo was always 0 for them.
month_map = {1: 'Jan', 2: 'Feb', 3: 'Mar', 4: 'Apr', 5: 'May', 6: 'Jun', 7: 'Jul', 8: 'Aug', 9: 'Sept', 10: 'Oct', 11: 'Nov', 12: 'Dec'}

# 0 marks "store does not run consecutive promos".
df1['promo_interval'].fillna(0, inplace=True)

df1['month_map'] = df1['date'].dt.month.map(month_map)

# is_promo = 1 only when the row's month is one of the promo restart months.
df1['is_promo'] = df1[['promo_interval', 'month_map']].apply( lambda x: 0 if x['promo_interval'] == 0 else 1 if x['month_map'] in x['promo_interval'].split(',') else 0, axis=1 )

df1.isna().sum()
df1.sample(5).T
```
## 1.6. Change Types
```
df1.dtypes
# The imputed competition/promo columns came out as float (NaN forces float);
# they are counts (months, weeks, years), so cast them back to int.
df1['competition_open_since_month'] = df1['competition_open_since_month'].astype(int)
df1['competition_open_since_year'] = df1['competition_open_since_year'].astype(int)
df1['promo2_since_week'] = df1['promo2_since_week'].astype(int)
df1['promo2_since_year'] = df1['promo2_since_year'].astype(int)
df1.dtypes
```
## 1.7. Descriptive Statistical
```
# Split columns by dtype: numerical for the stats table and correlations,
# categorical for frequency plots and Cramér's V.
num_attributes = df1.select_dtypes( include=['int64','float64'])
cat_attributes = df1.select_dtypes( exclude=['int64','float64', 'datetime64[ns]'])
num_attributes.sample(5)
```
### 1.7.1. Numerical Attributes
```
# Central tendency - mean, median
ct1 = pd.DataFrame(num_attributes.apply(np.mean)).T
ct2 = pd.DataFrame(num_attributes.apply(np.median)).T

# Dispersion - std, min, max, range, skew, kurtosis
# NOTE: np.std is the population std (ddof=0), unlike DataFrame.std(),
# which defaults to the sample std (ddof=1).
d1 = pd.DataFrame(num_attributes.apply(np.std)).T
d2 = pd.DataFrame(num_attributes.apply(min)).T
d3 = pd.DataFrame(num_attributes.apply(max)).T
d4 = pd.DataFrame(num_attributes.apply(lambda x: x.max() - x.min())).T
d5 = pd.DataFrame(num_attributes.apply(lambda x: x.skew())).T
d6 = pd.DataFrame(num_attributes.apply(lambda x: x.kurtosis())).T

# Concatenate
# The column names below must match the concat order:
# min, max, range, mean, median, std, skew, kurtosis.
n = pd.concat( [d2, d3, d4, ct1, ct2, d1, d5, d6] ).T.reset_index()
n.columns = ['attributes', 'min', 'max', 'range', 'mean', 'median', 'std', 'skew', 'kurtosis']
n

# NOTE(review): sns.distplot is deprecated (removed in seaborn 0.14);
# sns.histplot / sns.displot are the replacements.
sns.distplot( df1['competition_distance'] )
```
### 1.7.2. Categorical Attributes
```
# Number of distinct levels per categorical attribute.
cat_attributes.apply( lambda x: x.unique().shape[0] )

# Keep only actual-holiday rows with real sales, so the boxplots are not
# dominated by '0' (regular day) and closed-store zero-sales rows.
filter1 = df1[(df1['state_holiday'] != '0') & (df1['sales'] > 0)]
sns.boxplot(x='state_holiday', y='sales', data=filter1)

# Sales distribution per level of each categorical attribute, side by side.
plt.subplot(1,3,1)
sns.boxplot(x='state_holiday', y='sales', data=filter1)
plt.subplot(1,3,2)
sns.boxplot(x='store_type', y='sales', data=filter1)
plt.subplot(1,3,3)
sns.boxplot(x='assortment', y='sales', data=filter1)
```
# 2.0. FEATURE ENGINEERING
```
# Feature engineering works on its own copy so section 1 stays reproducible.
df2 = df1.copy()
df2.sample(5)
# Render the hypothesis mind map (external image asset, not generated here).
Image('img/MindMapHypothesis.png')
```
## 2.1. Hypothesis mental map
### 2.1.1. Stores Hypothesis
**1.** Stores with more employees **should** have more sales
**2.** Stores with more product stock **should** have more sales
**3.** Smaller stores **should** have less sales
**4.** Bigger stores **should** have more sales
**5.** Stores with more product options **should** have more sales
### 2.1.2. Product Hypothesis
**1.** Stores with more marketing investment **should** have more sales
**2.** Stores that exposes more the products **should** sell more those products
**3.** Stores with smaller product prices **should** have more sales of those products
**4.** Stores with smaller product prices for the longest possible time **should** have more sales
### 2.1.3. Time Hypothesis
**1.** Stores with more holidays **should** have less sales
**2.** Stores that opens at the first 6 months **should** have more sales
**3.** Stores that opens at the weekends **should** have more sales
## 2.2. Hypothesis priorization
The hypotheses to prioritize are the ones for which data is available at the start of the process.
**1.** Stores with more product options **should** have more sales
**2.** Stores with closer competitors **should** have less sales
**3.** Stores with competitors open for the longest time **should** have more sales
**4.** Stores with more active promotions **should** have more sales
**5.** Stores with more promotion days **should** have more sales
**6.** Stores with consecutive promotions **should** have more sales
**7.** Stores open at Xmas **should** have more sales
**8.** Stores sales **should** grow through the years
**9.** Stores **should** sell more at the second half of the year
**10.** Stores **should** sell more after the tenth day of each month
**11.** Stores **should** sell less at the weekends
**12.** Stores **should** sell less during school holidays
## 2.3 Feature Engineering
```
# year
df2['year'] = df2['date'].dt.year

# month
df2['month'] = df2['date'].dt.month

# day
df2['day'] = df2['date'].dt.day

# week of year
# FIX: Series.dt.weekofyear was deprecated and removed in pandas 2.0;
# isocalendar().week is the replacement. It returns UInt32, so cast back
# to int to keep the original dtype.
df2['week_of_year'] = df2['date'].dt.isocalendar().week.astype(int)

# year week label used for weekly grouping, e.g. '2015-31'
df2['year_week'] = df2['date'].dt.strftime('%Y-%W')

# competition since: assemble a date (day fixed to 1) from the imputed
# year/month columns, then express the elapsed time in 30-day "months".
df2['competition_since'] = df2.apply( lambda x: datetime.datetime(year=x['competition_open_since_year'], month=x['competition_open_since_month'], day=1), axis=1 )
df2['competition_time_month'] = ((df2['date'] - df2['competition_since']) / 30).apply(lambda x: x.days).astype(int)

# promo since: rebuild a date from the year-week pair ('%Y-%W-%w' with
# weekday 1 = Monday), shifted back one week to the promo start, then
# express the elapsed time in weeks.
df2['promo_since'] = df2['promo2_since_year'].astype(str) + '-' + df2['promo2_since_week'].astype(str)
df2['promo_since'] = df2['promo_since'].apply(lambda x: datetime.datetime.strptime(x + '-1', '%Y-%W-%w') - datetime.timedelta(days=7) )
df2['promo_time_week'] = ((df2['date'] - df2['promo_since']) / 7).apply(lambda x: x.days ).astype(int)

# assortment: decode the single-letter codes from store.csv
df2['assortment'] = df2['assortment'].apply( lambda x: 'basic' if x=='a' else 'extra' if x=='b' else 'extended')

# state holiday: decode codes; '0' (no holiday) becomes 'regular_day'
df2['state_holiday'] = df2['state_holiday'].apply( lambda x: 'public_holiday' if x=='a' else 'easter_holiday' if x=='b' else 'christmas' if x=='c' else 'regular_day')

df2.sample(5).T
```
# 3.0. VARIABLE FILTERING
```
# Snapshot before row/column filtering.
df3 = df2.copy()
```
## 3.1. Line Filtering
```
# Keep only open-store days with positive sales: closed days (open == 0) and
# zero-sales rows carry no signal for a sales-prediction model.
df3 = df3[(df3['open'] != 0) & (df3['sales'] > 0)]
```
## 3.2. Column FIltering
```
# 'customers' is unknown at prediction time; 'open' is constant after the row
# filter; 'promo_interval'/'month_map' were only scaffolding for is_promo.
cols_drop = ['customers', 'open', 'promo_interval', 'month_map']
df3 = df3.drop(cols_drop, axis = 1)
```
# 4.0. EXPLORATORY DATA ANALYSIS
```
# EDA works on its own copy of the filtered data.
df4 = df3.copy()
```
## 4.1. Univariate Analysis
### 4.1.1. Response Variable
```
# NOTE(review): sns.distplot is deprecated (removed in seaborn 0.14);
# use sns.histplot(df4['sales'], kde=True) going forward.
sns.distplot(df4['sales'])
# The closer the distribution is to a bell curve, the better: many models
# assume approximate normality, and a log transform of 'sales' would bring
# this right-skewed distribution closer to that shape.
```
### 4.1.2. Numerical Variable
```
# One histogram per numerical attribute, 25 bins each.
num_attributes.hist(bins=25)
plt.show()
```
### 4.1.3 Categorical Variable
```
cat_attributes.sample(5)

# For each categorical attribute: level counts (left) next to the sales
# density per level (right), arranged in a 3x2 grid.

# State holiday
plt.subplot(3, 2, 1)
#a = df4[df4['state_holiday'] != 'regular_day']
sns.countplot(cat_attributes['state_holiday'])
plt.subplot(3, 2, 2)
sns.kdeplot( df4[df4['state_holiday'] == 'public_holiday']['sales'], label='public_holiday', shade=True)
sns.kdeplot( df4[df4['state_holiday'] == 'easter_holiday']['sales'], label='easter_holiday', shade=True)
sns.kdeplot( df4[df4['state_holiday'] == 'christmas']['sales'], label='christmas', shade=True)

# Store type
plt.subplot(3, 2, 3)
sns.countplot(cat_attributes['store_type'])
plt.subplot(3, 2, 4)
sns.kdeplot( df4[df4['store_type'] == 'a']['sales'], label='a', shade=True)
sns.kdeplot( df4[df4['store_type'] == 'b']['sales'], label='b', shade=True)
sns.kdeplot( df4[df4['store_type'] == 'c']['sales'], label='c', shade=True)
sns.kdeplot( df4[df4['store_type'] == 'd']['sales'], label='d', shade=True)

# Assortment
plt.subplot(3, 2, 5)
sns.countplot(df4['assortment'])
plt.subplot(3, 2, 6)
sns.kdeplot( df4[df4['assortment'] == 'extended']['sales'], label='extended', shade=True)
sns.kdeplot( df4[df4['assortment'] == 'basic']['sales'], label='basic', shade=True)
sns.kdeplot( df4[df4['assortment'] == 'extra']['sales'], label='extra', shade=True)

# NOTE(review): cat_attributes was built from df1 (before filtering and before
# state_holiday was decoded), while the kdeplots use df4 — the countplots
# therefore show the raw single-letter codes. Confirm this is intentional.
```
## 4.2. Bivariate Analysis
### Hypothesis **1.** Stores with more product options **should** have more sales
```
# Total sales per assortment level over the whole period...
aux1 = df4[['assortment', 'sales']].groupby('assortment').sum().reset_index()
sns.barplot(x='assortment', y='sales', data= aux1);

# ...and the weekly evolution per assortment, to check whether the ranking
# holds over time.
aux2 = df4[['year_week','assortment', 'sales']].groupby(['year_week','assortment']).sum().reset_index()
aux2.pivot(index='year_week', columns='assortment', values='sales').plot()
#sns.barplot(x='assortment', y='sales', data= aux2);
```
**False** Results shows that the basic assortment store type, sells **more** than a store with more assortment
### Hypothesis **2.** Stores with closer competitors **should** have less sales
```
# Total sales per distinct competition distance.
aux1 = df4[['competition_distance', 'sales']].groupby('competition_distance').sum().reset_index()

# Raw scatter of sales vs. distance.
plt.subplot (1, 3, 1)
bins = list(np.arange(0, 20000, 1000))
aux1['competition_distance_binned'] = pd.cut(aux1['competition_distance'], bins=bins)
#sns.barplot(x='competition_distance_binned', y='sales', data= aux1);
sns.scatterplot(x='competition_distance', y='sales', data=aux1)

# Same data aggregated into 1 km bins; distances >= 20 km fall outside the
# bins, become NaN, and drop out of the bar plot.
plt.subplot(1, 3, 2)
aux2 = aux1[['competition_distance_binned','sales']].groupby('competition_distance_binned').sum().reset_index()
sns.barplot(x='competition_distance_binned', y='sales', data= aux2);
plt.xticks(rotation=90)

# Pearson correlation heatmap; the ylim tweak works around the matplotlib 3.1
# bug that clipped the first and last heatmap rows.
plt.subplot(1, 3, 3)
x = sns.heatmap(aux1.corr(method='pearson'), annot=True)
bottom, top = x.get_ylim()
x.set_ylim(bottom+0.5, top-0.5);
```
**False** Stores with closer competitors actually sells **more** than stores with distant competitors
### Hypothesis **3.** Stores with competitors open for the longest time **should** have more sales
```
# Total sales per month of competition age, restricted to under 10 years and
# excluding 0 (largely the rows whose opening date was imputed from the sale date).
plt.subplot(1, 3, 1)
aux1 = df4[['competition_time_month', 'sales']].groupby('competition_time_month').sum().reset_index()
aux4 = aux1[(aux1['competition_time_month'] < 120 ) & (aux1['competition_time_month'] != 0) ]
sns.barplot(x='competition_time_month', y='sales', data=aux4)
plt.xticks(rotation=90);

# Linear trend of sales vs. competition age.
plt.subplot(1, 3, 2)
sns.regplot(x='competition_time_month', y='sales', data=aux4)

# Pearson correlation heatmap; the ylim tweak works around the matplotlib 3.1
# heatmap clipping bug.
plt.subplot(1, 3, 3)
x = sns.heatmap(aux4.corr(method='pearson'), annot=True)
bottom, top = x.get_ylim()
x.set_ylim(bottom+0.5, top-0.5);
```
**False** stores with longer competition sells **less**. However, the behavior of sales is different before competition starts and after it starts.
## 4.3. Multivariate Analysis
### 4.3.1. Numerical Attributes
```
# Pearson correlation between every pair of numerical attributes.
# NOTE(review): num_attributes was built from df1 (section 1.7), so this
# includes rows filtered out of df4 — confirm that is intended.
correlation = num_attributes.corr(method='pearson')
sns.heatmap(correlation, annot=True, fmt='.2f')
```
### 4.3.2. Categorical Attributes
```
from scipy import stats

# Only the object-typed columns take part in the categorical association test.
categorical = df4.select_dtypes(include='object')
categorical.head()

# NOTE: the original built an unused contingency matrix here with
# DataFrame.as_matrix(), which was removed in pandas 1.0 and would now raise
# AttributeError; cramer_v builds its own crosstab internally, so the dead
# line has been dropped.

# Pairwise bias-corrected Cramér's V between the 3 categorical attributes.
# The diagonal entries (a1, a5, a9) are each attribute against itself.
a1 = cramer_v(categorical['state_holiday'], categorical['state_holiday'])
a2 = cramer_v(categorical['state_holiday'], categorical['store_type'])
a3 = cramer_v(categorical['state_holiday'], categorical['assortment'])
a4 = cramer_v(categorical['store_type'], categorical['state_holiday'])
a5 = cramer_v(categorical['store_type'], categorical['store_type'])
a6 = cramer_v(categorical['store_type'], categorical['assortment'])
a7 = cramer_v(categorical['assortment'], categorical['state_holiday'])
a8 = cramer_v(categorical['assortment'], categorical['store_type'])
a9 = cramer_v(categorical['assortment'], categorical['assortment'])

# Assemble the symmetric association matrix and plot it.
d = pd.DataFrame({'state_holiday': [a1, a2, a3],
                  'store_type': [a4, a5, a6],
                  'assortment': [a7, a8, a9] })
d = d.set_index(d.columns)
d.head()
sns.heatmap(d, annot=True)
```
| github_jupyter |
<h3 align=center> Combining Datasets: Merge and Join</h3>
One essential feature offered by Pandas is its high-performance, in-memory join and merge operations.
If you have ever worked with databases, you should be familiar with this type of data interaction.
The main interface for this is the ``pd.merge`` function, and we'll see few examples of how this can work in practice.
For convenience, we will start by redefining the ``display()`` functionality from the previous section:
```
import pandas as pd
import numpy as np
class display(object):
    """Display HTML representation of multiple objects.

    Each positional argument is the *name* of a variable (as a string); the
    object it refers to is rendered side by side under its name in Jupyter.

    NOTE: this uses eval() on the given strings — only pass trusted
    expressions typed in the notebook itself.
    """
    # HTML shell per object: {0} is the expression text, {1} its rich HTML repr.
    template = """<div style="float: left; padding: 10px;">
    <p style='font-family:"Courier New", Courier, monospace'>{0}</p>{1}
    </div>"""
    def __init__(self, *args):
        self.args = args
    def _repr_html_(self):
        # Jupyter rich-display hook: join the per-object HTML fragments.
        return '\n'.join(self.template.format(a, eval(a)._repr_html_())
                         for a in self.args)
    def __repr__(self):
        # Plain-text fallback: each name followed by the object's repr.
        return '\n\n'.join(a + '\n' + repr(eval(a))
                           for a in self.args)
```
## Relational Algebra
The behavior implemented in ``pd.merge()`` is a subset of what is known as *relational algebra*, which is a formal set of rules for manipulating relational data, and forms the conceptual foundation of operations available in most databases.
The strength of the relational algebra approach is that it proposes several primitive operations, which become the building blocks of more complicated operations on any dataset.
With this lexicon of fundamental operations implemented efficiently in a database or other program, a wide range of fairly complicated composite operations can be performed.
Pandas implements several of these fundamental building-blocks in the ``pd.merge()`` function and the related ``join()`` method of ``Series`` and ``Dataframe``s.
As we will see, these let you efficiently link data from different sources.
## Categories of Joins
The ``pd.merge()`` function implements a number of types of joins: the
1. *one-to-one*,
2. *many-to-one*, and
3. *many-to-many* joins.
All three types of joins are accessed via an identical call to the ``pd.merge()`` interface; the type of join performed depends on the form of the input data.
Here we will show simple examples of the three types of merges, and discuss detailed options further below.
### One-to-one joins
Perhaps the simplest type of merge expression is the one-to-one join, which is in many ways very similar to the column-wise concatenation seen in [Combining Datasets: Concat & Append](03.06-Concat-And-Append.ipynb).
As a concrete example, consider the following two ``DataFrames`` which contain information on several employees in a company:
```
# Two tables keyed one-to-one on "employee": group membership and hire date.
df1 = pd.DataFrame({'employee': ['Raju', 'Rani', 'Ramesh', 'Ram'],
                    'group': ['Accounting', 'Engineering', 'Engineering', 'HR']})
df2 = pd.DataFrame({'employee': ['Raju', 'Rani', 'Ramesh', 'Ram'],
                    'hire_date': [2004, 2008, 2012, 2014]})
# display() (defined above) renders the inputs and the merge side by side.
display('df1', 'df2','pd.merge(df1, df2)')
```
To combine this information into a single ``DataFrame``, we can use the ``pd.merge()`` function:
```
# pd.merge detects the shared "employee" column automatically and joins on it.
df3 = pd.merge(df1, df2)
df3
```
The ``pd.merge()`` function recognizes that each ``DataFrame`` has an "employee" column, and automatically joins using this column as a key.
The result of the merge is a new ``DataFrame`` that combines the information from the two inputs.
Notice that the order of entries in each column is not necessarily maintained: in this case, the order of the "employee" column differs between ``df1`` and ``df2``, and the ``pd.merge()`` function correctly accounts for this.
Additionally, keep in mind that the merge in general discards the index, except in the special case of merges by index (see the ``left_index`` and ``right_index`` keywords, discussed momentarily).
### Many-to-one joins
Many-to-one joins are joins in which one of the two key columns contains duplicate entries.
For the many-to-one case, the resulting ``DataFrame`` will preserve those duplicate entries as appropriate.
Consider the following example of a many-to-one join:
```
# One row per group: merging with df3 (one row per employee) is many-to-one,
# so each supervisor is repeated for every employee in that group.
df4 = pd.DataFrame({'group': ['Accounting', 'Engineering', 'HR'],
                    'supervisor': ['Carly', 'Guido', 'Steve']})
df4
pd.merge(df3, df4)
display('df3', 'df4', 'pd.merge(df3, df4)')
```
The resulting ``DataFrame`` has an additional column with the "supervisor" information, where the information is repeated in one or more locations as required by the inputs.
### Many-to-many joins
Many-to-many joins are a bit confusing conceptually, but are nevertheless well defined.
If the key column in both the left and right array contains duplicates, then the result is a many-to-many merge.
This will be perhaps most clear with a concrete example.
Consider the following, where we have a ``DataFrame`` showing one or more skills associated with a particular group.
By performing a many-to-many join, we can recover the skills associated with any individual person:
```
# Group -> skills table (several skills per group): merging with df1 on the
# duplicated "group" key is a many-to-many join.
# BUG FIX: the last group read 'Hdf4' (a corruption of 'HR'), which silently
# dropped the HR "organization" skill from the merge described in the text.
df5 = pd.DataFrame({'group': ['Accounting', 'Accounting',
                              'Engineering', 'Engineering', 'HR', 'HR'],
                    'skills': ['math', 'spreadsheets', 'coding', 'linux',
                               'spreadsheets', 'organization']})
df5
display('df1', 'df5', "pd.merge(df1, df5)")
pd.merge(df1, df5)
```
These three types of joins can be used with other Pandas tools to implement a wide array of functionality.
But in practice, datasets are rarely as clean as the one we're working with here.
In the following section we'll consider some of the options provided by ``pd.merge()`` that enable you to tune how the join operations work.
## Specification of the Merge Key
We've already seen the default behavior of ``pd.merge()``: it looks for one or more matching column names between the two inputs, and uses this as the key.
However, often the column names will not match so nicely, and ``pd.merge()`` provides a variety of options for handling this.
### The ``on`` keyword
Most simply, you can explicitly specify the name of the key column using the ``on`` keyword, which takes a column name or a list of column names:
```
# Name the join key explicitly instead of relying on column-name detection.
display('df1', 'df2', "pd.merge(df1, df2, on='employee')")
pd.merge(df1, df2, on='employee')
```
This option works only if both the left and right ``DataFrame``s have the specified column name.
### The ``left_on`` and ``right_on`` keywords
At times you may wish to merge two datasets with different column names; for example, we may have a dataset in which the employee name is labeled as "name" rather than "employee".
In this case, we can use the ``left_on`` and ``right_on`` keywords to specify the two column names:
```
# The key column is "employee" on the left but "name" on the right,
# so each side's key must be named separately.
df3 = pd.DataFrame({'name': ['Raju', 'Rani', 'Ramesh', 'Ram'],
                    'salary': [70000, 80000, 120000, 90000]})
display('df1', 'df3', 'pd.merge(df1, df3, left_on="employee", right_on="name")')
```
The result has a redundant column that we can drop if desired–for example, by using the ``drop()`` method of ``DataFrame``s:
```
pd.merge(df1, df3, left_on="employee", right_on="name").drop('name', axis=1)  # drop the duplicate key column
```
### The ``left_index`` and ``right_index`` keywords
Sometimes, rather than merging on a column, you would instead like to merge on an index.
For example, your data might look like this:
```
# Move the "employee" column into the index to demonstrate index-based merges.
df1a = df1.set_index('employee')
df2a = df2.set_index('employee')
display('df1a', 'df2a')
```
You can use the index as the key for merging by specifying the ``left_index`` and/or ``right_index`` flags in ``pd.merge()``:
```
# Join on the indices of both frames rather than on a column.
display('df1a', 'df2a',
        "pd.merge(df1a, df2a, left_index=True, right_index=True)")
pd.merge(df1a, df2a, left_index=True, right_index=True)
```
For convenience, ``DataFrame``s implement the ``join()`` method, which performs a merge that defaults to joining on indices:
```
# DataFrame.join is shorthand for an index-on-index merge.
display('df1a', 'df2a', 'df1a.join(df2a)')
```
If you'd like to mix indices and columns, you can combine ``left_index`` with ``right_on`` or ``left_on`` with ``right_index`` to get the desired behavior:
```
# Mix styles: the left frame joins on its index, the right on its "name" column.
display('df1a', 'df3', "pd.merge(df1a, df3, left_index=True, right_on='name')")
```
All of these options also work with multiple indices and/or multiple columns; the interface for this behavior is very intuitive.
For more information on this, see the ["Merge, Join, and Concatenate" section](http://pandas.pydata.org/pandas-docs/stable/merging.html) of the Pandas documentation.
## Specifying Set Arithmetic for Joins
In all the preceding examples we have glossed over one important consideration in performing a join: the type of set arithmetic used in the join.
This comes up when a value appears in one key column but not the other. Consider this example:
```
# df6 and df7 share only some "name" values (Mary and Paul), so the type of
# join determines what happens to the non-matching rows.
df6 = pd.DataFrame({'name': ['Peter', 'Paul', 'Mary'],
                    'food': ['fish', 'beans', 'bread']},
                   columns=['name', 'food'])
df7 = pd.DataFrame({'name': ['Mary', 'Joseph','Paul'],
                    'drink': ['wine', 'beer','Water']},
                   columns=['name', 'drink'])
display('df6', 'df7', 'pd.merge(df6, df7)')
```
Here we have merged two datasets whose "name" columns only partially overlap: just Mary and Paul appear in both.
By default, the result contains the *intersection* of the two sets of inputs; this is what is known as an *inner join*.
We can specify this explicitly using the ``how`` keyword, which defaults to ``"inner"``:
```
pd.merge(df6, df7, how='inner')  # the default: keep only keys present in both
pd.merge(df6, df7, how='outer')  # union of keys; missing values become NaN
```
Other options for the ``how`` keyword are ``'outer'``, ``'left'``, and ``'right'``.
An *outer join* returns a join over the union of the input columns, and fills in all missing values with NAs:
```
# Outer join: union of the keys, with NaN filling the gaps.
display('df6', 'df7', "pd.merge(df6, df7, how='outer')")
```
The *left join* and *right join* return joins over the left entries and right entries, respectively.
For example:
```
# Left/right joins keep every row of the respective side.
display('df6', 'df7', "pd.merge(df6, df7, how='left')")
display('df6', 'df7', "pd.merge(df6, df7, how='right')")
```
The output rows now correspond to the entries in the left input. Using
``how='right'`` works in a similar manner.
All of these options can be applied straightforwardly to any of the preceding join types.
## Overlapping Column Names: The ``suffixes`` Keyword
Finally, you may end up in a case where your two input ``DataFrame``s have conflicting column names.
Consider this example:
```
# Both frames have a "rank" column, forcing pandas to disambiguate the output names.
df8 = pd.DataFrame({'name': ['Bob', 'Jake', 'Lisa', 'Sue'],
                    'rank': [1, 2, 3, 4]})
df9 = pd.DataFrame({'name': ['Bob', 'Jake', 'Lisa', 'Sue'],
                    'rank': [3, 1, 4, 2]})
display('df8', 'df9', 'pd.merge(df8, df9, on="name")')
```
Because the output would have two conflicting column names, the merge function automatically appends a suffix ``_x`` or ``_y`` to make the output columns unique.
If these defaults are inappropriate, it is possible to specify a custom suffix using the ``suffixes`` keyword:
```
# Replace the default _x/_y suffixes with custom ones.
display('df8', 'df9', 'pd.merge(df8, df9, on="name", suffixes=["_L", "_R"])')
```
These suffixes work in any of the possible join patterns, and work also if there are multiple overlapping columns.
For more information on these patterns, see [Aggregation and Grouping](03.08-Aggregation-and-Grouping.ipynb) where we dive a bit deeper into relational algebra.
Also see the [Pandas "Merge, Join and Concatenate" documentation](http://pandas.pydata.org/pandas-docs/stable/merging.html) for further discussion of these topics.
## Example: US States Data
Merge and join operations come up most often when combining data from different sources.
Here we will consider an example of some data about US states and their populations.
The data files can be found at [DataSet](https://github.com/reddyprasade/Data-Sets-For-Machine-Learnig-and-Data-Science/tree/master/DataSets)
Let's take a look at the three datasets, using the Pandas ``read_csv()`` function:
```
# Load the three source tables for the US-states example.
pop = pd.read_csv('data/state-population.csv')
areas = pd.read_csv('data/state-areas.csv')
abbrevs = pd.read_csv('data/state-abbrevs.csv')
display('pop.head()', 'areas.head()', 'abbrevs.head()')
# Quick sanity checks: sizes and per-column missing-value counts.
pop.shape,areas.shape,abbrevs.shape
pop.isna().sum()
areas.isna().sum()
abbrevs.isna().sum()
```
Given this information, say we want to compute a relatively straightforward result: rank US states and territories by their 2010 population density.
We clearly have the data here to find this result, but we'll have to combine the datasets to find the result.
We'll start with a many-to-one merge that will give us the full state name within the population ``DataFrame``.
We want to merge based on the ``state/region`` column of ``pop``, and the ``abbreviation`` column of ``abbrevs``.
We'll use ``how='outer'`` to make sure no data is thrown away due to mismatched labels.
```
# Many-to-one outer merge: attach the full state name to every population row;
# how='outer' keeps rows whose abbreviation has no match (inspected below).
merged = pd.merge(pop, abbrevs, how='outer',
                  left_on='state/region', right_on='abbreviation')
merged.head()
merged.isna().sum()
merged.tail()
# BUG FIX: the positional axis argument (drop('abbreviation', 1)) was removed
# in pandas 2.0 -- the keyword form is required.
merged = merged.drop('abbreviation', axis=1)  # drop duplicate info
merged.head()
```
Let's double-check whether there were any mismatches here, which we can do by looking for rows with nulls:
```
# Column-wise check for any nulls, then per-column null counts.
merged.isnull().any()
merged.isnull().sum()
```
Some of the ``population`` info is null; let's figure out which these are!
```
# Inspect which rows are missing population or state information.
merged['population'].isnull().sum()
merged['state'].isnull().sum()
merged[merged['population'].isnull()]
merged[merged['state'].isnull()]
```
It appears that all the null population values are from Puerto Rico prior to the year 2000; this is likely due to this data not being available from the original source.
More importantly, we see also that some of the new ``state`` entries are also null, which means that there was no corresponding entry in the ``abbrevs`` key!
Let's figure out which regions lack this match:
```
# Region codes that had no match in the abbreviation key.
merged.loc[merged['state'].isnull(), 'state/region'].unique()
```
We can quickly infer the issue: our population data includes entries for Puerto Rico (PR) and the United States as a whole (USA), while these entries do not appear in the state abbreviation key.
We can fix these quickly by filling in appropriate entries:
```
# Fill in the two full names missing from the abbreviation key, then re-check.
merged.loc[merged['state/region'] == 'PR', 'state'] = 'Puerto Rico'
merged.loc[merged['state/region'] == 'USA', 'state'] = 'United States'
merged.isnull().any()
merged.isnull().sum()
```
No more nulls in the ``state`` column: we're all set!
Now we can merge the result with the area data using a similar procedure.
Examining our results, we will want to join on the ``state`` column in both:
```
# Attach the area data; a left join keeps every population row.
final = pd.merge(merged, areas, on='state', how='left')
final.head()
```
Again, let's check for nulls to see if there were any mismatches:
```
# Check for rows that found no matching area entry.
final.isnull().any()
final.isna().sum()
```
There are nulls in the ``area`` column; we can take a look to see which regions were ignored here:
```
# Which "states" lack an area value (the United States as a whole).
final['state'][final['area (sq. mi)'].isnull()].unique()
```
We see that our ``areas`` ``DataFrame`` does not contain the area of the United States as a whole.
We could insert the appropriate value (using the sum of all state areas, for instance), but in this case we'll just drop the null values because the population density of the entire United States is not relevant to our current discussion:
```
# Drop the remaining null rows (USA-total rows with no area, and the
# pre-2000 Puerto Rico rows with no population).
final.dropna(inplace=True)
final.head()
final.shape
# NOTE(review): final.isnull() is an all-boolean frame, so .info() here only
# prints its dtypes -- this was probably meant to be final.info().
final.isnull().info()
final.isna().sum()
```
Now we have all the data we need. To answer the question of interest, let's first select the portion of the data corresponding with the year 2010, and the total population.
We'll use the ``query()`` function to do this quickly (this requires the ``numexpr`` package to be installed; see [High-Performance Pandas: ``eval()`` and ``query()``](03.12-Performance-Eval-and-Query.ipynb)):
```
# SQL-like row selection: total-population rows for the year 2010.
data2010 = final.query("year == 2010 & ages == 'total'")
data2010.head()
```
Now let's compute the population density and display it in order.
We'll start by re-indexing our data on the state, and then compute the result:
```
# Re-index by state and compute residents per square mile.
data2010.set_index('state', inplace=True)
data2010
density = data2010['population'] / data2010['area (sq. mi)']
density.head()
# BUG FIX: sort densest-first. The original used ascending=True, which
# contradicts the narrative below (head() is described as showing the densest
# region, Washington DC, and tail() the least dense, Alaska).
density.sort_values(ascending=False, inplace=True)
density.head()
```
The result is a ranking of US states plus Washington, DC, and Puerto Rico in order of their 2010 population density, in residents per square mile.
We can see that by far the densest region in this dataset is Washington, DC (i.e., the District of Columbia); among states, the densest is New Jersey.
We can also check the end of the list:
```
density.tail()
# NOTE(review): describe() on the all-boolean isnull() frame only summarises
# True/False counts -- this looks like a leftover debugging line.
final.isnull().describe()
```
Converting the ``DataFrame`` into the pickle file format
```
# Persist the final table and read it back.
# NOTE(review): pd.to_pickle returns None, so `Data` is always None; the
# '.plk' extension (rather than '.pkl') is unusual but used consistently below.
Data = pd.to_pickle(final,'Data/US_States_Data.plk')  # pickle the DataFrame to disk
final.to_csv('Data/US_States_Data.csv')  # also save as CSV
unpickled_df = pd.read_pickle("Data/US_States_Data.plk")
unpickled_df
```
We see that the least dense state, by far, is Alaska, averaging slightly over one resident per square mile.
This type of messy data merging is a common task when trying to answer questions using real-world data sources.
I hope that this example has given you an idea of the ways you can combine tools we've covered in order to gain insight from your data!
| github_jupyter |
<a href="https://colab.research.google.com/github/RugvedKatole/Learning-Single-Camera-Depth-Estimation-using-Dual-Pixels/blob/main/Dual_Pixel_Net.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Dual Pixel Net implementation
Link to Paper: [Learning Single Camera Depth Estimation using Dual Pixels](https://arxiv.org/abs/1904.05822)
Import libraries
```
import keras
import os
import copy
import json
import numpy as np
from matplotlib import pyplot as plt
import tensorflow as tf
from scipy.interpolate import interp2d
import numpy.random as random
from tensorflow.keras.layers import Input, Conv2D ,Conv2DTranspose, MaxPooling2D, concatenate, Add, Dense, Dropout, Activation, Flatten, BatchNormalization, SeparableConv2D, LeakyReLU
from tensorflow.keras.optimizers import Adam
```
Paper uses a Unet Architecture with Residual Blocks.
The U-Net architecture consists of an encoder–decoder network: the encoder downsamples the input images while the decoder upsamples them back.
```
# Encoder block A
def EncoderA(inputs=None, i_filters=32, o=32, s=2, max_pooling=True):
    """
    Residual downsampling block E_a(i, o, s).

    Arguments:
        inputs -- input tensor
        i_filters -- i in E_a(i, o, s): filters of the first two conv layers
        o -- o in E_a(i, o, s): filters of the final 1x1 conv and the shortcut
        s -- stride used for downsampling (1 keeps the spatial size)
        max_pooling -- unused; the shortcut is always max-pooled
                       (NOTE(review): consider removing or honouring this flag)
    Returns:
        next_layer, skip_connection -- block output and the skip tensor
        (the pre-shortcut main path) consumed by the decoder
    """
    # Main path, layer 1: BN -> 3x3 conv (i filters, stride s) -> LeakyReLU
    conv = BatchNormalization()(inputs)
    conv = Conv2D(i_filters,      # i from E(i,o,s)
                  (3,3),          # 3x3 kernel
                  padding='same',
                  strides=(s,s))(conv)  # s from E(i,o,s)
    conv = LeakyReLU(alpha=0.05)(conv)
    # Layer 2: depthwise-separable 3x3 conv with the same filter count.
    conv = BatchNormalization()(conv)
    conv = SeparableConv2D(i_filters,(3,3),
                           padding = 'same')(conv)
    conv = LeakyReLU(alpha=0.05)(conv)
    # Layer 3: 1x1 conv projecting to o filters.
    conv = BatchNormalization()(conv)
    conv = Conv2D(o,(1,1), padding = 'same')(conv)
    conv = LeakyReLU(alpha=0.05)(conv)
    # Shortcut path: BN -> separable 3x3 (o filters) -> LeakyReLU -> max-pool,
    # so its shape matches the strided main path before the residual Add.
    next_layer = BatchNormalization()(inputs)
    next_layer = SeparableConv2D(o,(3,3),
                                 padding = 'same')(next_layer)
    next_layer = LeakyReLU(alpha=0.05)(next_layer)
    next_layer = MaxPooling2D(pool_size=(s,s), strides=(s,s),padding='same')(next_layer)
    next_layer = Add()([conv,next_layer])
    skip_connection = conv
    return next_layer, skip_connection
# Encoder Block B
def EncoderB(inputs=None, o=32, s=2, max_pooling=True):
    """
    Stem block E_b(o, s): one strided 7x7 convolution whose output is
    channel-concatenated with a max-pooled copy of the input.

    Arguments:
        inputs -- input tensor
        o -- number of filters of the 7x7 convolution
        s -- stride (and pool size) used for downsampling
        max_pooling -- unused; pooling is always applied
    Returns:
        next_layer, skip_connection -- concatenated output and the conv branch
    """
    # BN -> 7x7 conv (o filters, stride s) -> LeakyReLU
    conv = BatchNormalization()(inputs)
    conv = Conv2D(o,              # o from E_b(o,s)
                  (7,7),          # 7x7 kernel
                  padding='same',
                  kernel_initializer='he_normal',
                  strides=(s,s))(conv)  # s from E_b(o,s)
    conv = LeakyReLU(alpha=0.05)(conv)
    # Concatenate with the max-pooled raw input so low-level detail
    # survives the stride.
    Pooled_input = MaxPooling2D(pool_size=(s,s), strides=(s,s))(inputs)
    next_layer = concatenate([conv,Pooled_input],axis = 3)
    skip_connection = conv
    return next_layer, skip_connection
```
Now we create a Decoder block for our Network
```
# Decoder Block
def Decoder(expansive_input, contractive_input, i_filters = 32, o = 32):
    """
    Upsampling block D(i, o) with a skip connection.

    Arguments:
        expansive_input -- tensor from the previous (deeper) decoder stage
        contractive_input -- skip tensor from the matching encoder stage
        i_filters -- i in D(i, o): filters of the intermediate layers
        o -- o in D(i, o): filters of the final 1x1 convolution
    Returns:
        next_layer -- output tensor of the block
    """
    # Layer 1: BN -> 4x4 transposed conv (stride 2) doubling the spatial size.
    up = BatchNormalization()(expansive_input)
    up = Conv2DTranspose(
                 i_filters,   # number of filters
                 (4,4),       # kernel size
                 strides=(2,2),
                 padding='same')(up)
    up = LeakyReLU(alpha=0.05)(up)
    # Layer 2: depthwise-separable 3x3 conv.
    up = BatchNormalization()(up)
    up = SeparableConv2D(i_filters,(3,3),
                         padding = 'same')(up)
    up = LeakyReLU(alpha=0.05)(up)
    # Layer 3: 1x1 conv with i filters.
    up = BatchNormalization()(up)
    up = Conv2D(i_filters,(1,1), padding = 'same')(up)
    up = LeakyReLU(alpha=0.05)(up)
    # Layer 4: strided separable 3x3 conv.
    # NOTE(review): the stride-2 here undoes the transposed conv's upsampling;
    # confirm against the paper's D(i, o) definition.
    up = BatchNormalization()(up)
    up = SeparableConv2D(i_filters,(3,3),strides=(2,2),padding = 'same')(up)
    up = LeakyReLU(alpha=0.05)(up)
    # Layer 5: project the skip tensor to i filters and add it residually.
    up = BatchNormalization()(up)
    contractive_input = SeparableConv2D(i_filters,(3,3),
                                        padding = 'same')(contractive_input)
    next_layer = Add()([up,contractive_input])
    next_layer = LeakyReLU(alpha=0.05)(next_layer)
    # Final 1x1 conv to o output filters.
    next_layer = BatchNormalization()(next_layer)
    next_layer = Conv2D(o,(1,1), padding = 'same')(next_layer)
    next_layer = LeakyReLU(alpha=0.05)(next_layer)
    return next_layer
```
Now that we have completed the required encoder and decoder blocks, we can create our model architecture
```
def Unet_model(input_size=(1024,1024,1)):
    """
    Build the dual-pixel U-Net: an EncoderB stem, four EncoderA stages,
    and four Decoder stages fed by the encoder skip connections.

    Arguments:
        input_size -- input shape (H, W, C)
    Returns:
        model -- tf.keras.Model
    """
    inputs = Input(input_size)
    # Encoding. BUG FIX: each stage now consumes the previous stage's output;
    # the original copy-pasted the same input tensor into every call of a
    # stage, so the blocks never chained.
    Block1E_b = EncoderB(inputs, 8, 2)
    Block1E_a = EncoderA(Block1E_b[0], 11, 11, 1)   # E^1_a
    Block2E_a = EncoderA(Block1E_a[0], 16, 32, 2)
    Block2E_a = EncoderA(Block2E_a[0], 16, 32, 1)
    Block2E_a = EncoderA(Block2E_a[0], 16, 32, 1)   # E^2_a
    Block3E_a = EncoderA(Block2E_a[0], 16, 64, 2)
    Block3E_a = EncoderA(Block3E_a[0], 16, 64, 1)
    Block3E_a = EncoderA(Block3E_a[0], 16, 64, 1)   # E^3_a
    Block4E_a = EncoderA(Block3E_a[0], 32, 128, 2)
    Block4E_a = EncoderA(Block4E_a[0], 32, 128, 1)
    Block4E_a = EncoderA(Block4E_a[0], 32, 128, 1)  # E^4_a
    Block5E_a = EncoderA(Block4E_a[0], 32, 128, 2)
    # BUG FIX: the original passed the float literal 32.128 as the filter
    # count here (a typo for the two arguments 32, 128).
    Block5E_a = EncoderA(Block5E_a[0], 32, 128, 1)
    Block5E_a = EncoderA(Block5E_a[0], 32, 128, 1)  # E^5_a
    # Decoding: each Decoder receives the deeper output and the matching
    # encoder stage's skip connection.
    Block4D = Decoder(Block5E_a[0], Block4E_a[1], 32, 128)  # D^4
    Block3D = Decoder(Block4D, Block3E_a[1], 16, 64)        # D^3
    Block2D = Decoder(Block3D, Block2E_a[1], 16, 32)        # D^2
    Block1D = Decoder(Block2D, Block1E_a[1], 8, 8)          # D^1
    model = tf.keras.Model(inputs=inputs, outputs=Block1D)
    return model
# Build the network on 256x256 single-channel inputs and compile with MSE loss.
model=Unet_model((256,256,1))
# NOTE(review): beta_2=0.9 overrides Adam's 0.999 default -- confirm against the paper.
model.compile(optimizer= Adam(beta_2 = 0.9),loss='mean_squared_error',metrics=['mse'])
model.summary()
```
| github_jupyter |
[View in Colaboratory](https://colab.research.google.com/github/Naren-Jegan/Deep-Learning-Keras/blob/master/One_Shot_Classification_V1.ipynb)
# One Shot Learning on Omniglot Dataset
The [Omniglot](https://github.com/brendenlake/omniglot) dataset contains 1623 different handwritten characters from 50 different alphabets.
Each of the 1623 characters was drawn online via Amazon's Mechanical Turk by 20 different people.
This dataset has been the baseline for any one-shot learning algorithm.
Some of the machine learning algorithms used for learning this dataset over the years are listed below in order of accuracy:
* Hierarchical Bayesian Program Learning - 95.2%
* Convolutional Siamese Net - 92.0%
* Affine model - 81.8%
* Hierarchical Deep - 65.2%
* Deep Boltzmann Machine - 62.0%
* Siamese Neural Net - 58.3%
* Simple Stroke - 35.2%
* 1-Nearest Neighbor - 21.7%
This notebook implements a [Convolutional Siamese Neural Network](https://www.cs.cmu.edu/~rsalakhu/papers/oneshot1.pdf) using a background set of 30 alphabets for training, and evaluates on a set of 20 alphabets.
```
from google.colab import auth, drive
auth.authenticate_user()
drive.mount('/content/drive')
%matplotlib inline
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
import math
import os
from PIL import Image, ImageFilter, ImageOps, ImageMath
import numpy.random as rnd
import pickle
from time import sleep
from copy import deepcopy
# from tf.keras.models import Sequential # This does not work!
from tensorflow.python.keras.models import Model, Sequential
from tensorflow.python.keras.layers import InputLayer, Input, Lambda
from tensorflow.python.keras.layers import Reshape, MaxPooling2D, Dropout, BatchNormalization
from tensorflow.python.keras.layers import Conv2D, Dense, Flatten
from tensorflow.python.keras.preprocessing.image import ImageDataGenerator
from tensorflow.python.keras.models import load_model
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.regularizers import l2
from tensorflow.python.keras.initializers import RandomNormal
from tensorflow import test, logging
from keras.wrappers.scikit_learn import KerasClassifier
from keras.wrappers.scikit_learn import GridSearchCV
logging.set_verbosity(tf.logging.ERROR)
test.gpu_device_name()
tf.__version__
one_shot_path = os.path.join("drive", "My Drive", "Colab Notebooks", "One-Shot Classification")
background_path = os.path.join(one_shot_path, "background")
evaluation_path = os.path.join(one_shot_path, "evaluation")
recognition_model_path = os.path.join(one_shot_path, "recognition_model.h5")
##creating training set
# Training tensor: train_data[c][w] is writer w's 105x105 drawing of character
# c (964 background characters x 20 writers); train_alphabets maps each
# alphabet name to the index of its first character.
train_data = np.ndarray(shape=(964, 20, 105, 105))
train_alphabets = dict()
# One-time build of train.pickle from the raw Omniglot folders (kept for reference):
#for alphabet in os.listdir(background_path):
#  alphabet_path = os.path.join(background_path, alphabet)
#  for character in os.listdir(alphabet_path):
#    character_path = os.path.join(alphabet_path, character)
#    for image in os.listdir(character_path):
#      index = int(image[0:4]) - 1
#      writer = int(image[5:7]) - 1
#      train_data[index][writer] = np.array(Image.open(os.path.join(character_path, image)))
#      train_alphabets[alphabet] = index if alphabet not in train_alphabets or train_alphabets[alphabet] > index else train_alphabets[alphabet]
#with open(os.path.join("train.pickle"), 'wb') as f:
#  pickle.dump([train_data, train_alphabets], f, protocol=2)
# Load the prebuilt pickle (protocol 2 / latin1 for Python-2 compatibility).
with open(os.path.join(one_shot_path, "train.pickle"), 'rb') as f:
    train_data, train_alphabets = pickle.load(f, encoding='latin1')
#@title Inputs
# Colab form fields: activations, optimiser settings and training schedule.
conv_activation = 'relu' #@param ['relu', 'softplus', 'tanh', 'sigmoid'] {type:"string"}
dense_activation = 'sigmoid' #@param ['relu', 'softplus', 'tanh', 'sigmoid'] {type:"string"}
learning_rate = 1e-2 #@param {type:"number"}
conv_regularization_parameter = 1e-2 #@param {type:"number"}
dense_regularization_parameter = 1e-4 #@param {type:"number"}
batch_size = 128 #@param {type:"slider", min:0, max:1024, step:16}
batches_per_epoch = 75 #@param {type:"slider", min:0, max:100, step:5}
n_epochs = 200 #@param {type:"slider", min:25, max:500, step:25}
# Guard against the sliders' zero positions.
batch_size = 1 if batch_size == 0 else batch_size
batches_per_epoch = 1 if batches_per_epoch == 0 else batches_per_epoch
#@title Data Augmentation
# Augmentation ranges; transform_image() below draws random parameters from them.
image_size = 105 #@param {type:"slider", min:32, max:512, step:1}
rotation_range = 10 #@param {type:"slider", min:0, max:90, step:1}
width_shift_range = 2 #@param {type:"slider", min:0, max:10, step:0.1}
height_shift_range = 2 #@param {type:"slider", min:0, max:10, step:0.1}
shear_range = 0.3 #@param {type:"slider", min:0, max:1, step:0.1}
zoom_range = 0.2 #@param {type:"slider", min:0, max:1, step:0.01}
# this is the augmentation configuration we will use for training
datagen = ImageDataGenerator()
def transform_image(image):
    """Apply a random affine augmentation (rotation, shift, shear, zoom) to a
    single image and return it shaped (image_size, image_size, 1)."""
    # BUG FIX: Keras zoom factors zx/zy are multiplicative, with 1.0 meaning
    # "no zoom". The original drew them from uniform(-zoom_range, zoom_range),
    # i.e. centred on 0, which collapses/mirrors the image; draw around 1 instead.
    return datagen.apply_transform(image.reshape((image_size, image_size, 1)),
                      transform_parameters =
                          {'theta': rnd.uniform(-rotation_range, rotation_range),
                           'tx'   : rnd.uniform(-width_shift_range, width_shift_range),
                           'ty'   : rnd.uniform(-height_shift_range, height_shift_range),
                           'shear': rnd.uniform(-shear_range, shear_range),
                           'zx'   : rnd.uniform(1 - zoom_range, 1 + zoom_range),
                           'zy'   : rnd.uniform(1 - zoom_range, 1 + zoom_range)
                          })
#generate image pairs [x1, x2] with target y = 1/0 representing same/different
def datagen_flow(datagen, val = False):
    """Yield ([X1, X2], Y) batches of same/different Omniglot pairs.

    Writers 0-11 are used when val is False, writers 12-19 when val is True,
    so validation writers are never seen in training. Each alphabet gets an
    equal share of pair slots; every slot produces one "same character" pair
    (Y=1) and one "different characters" pair (Y=0) from the same alphabet.
    """
    while True:
        X1 = np.ndarray(shape=(batch_size, image_size, image_size, 1))
        X2 = np.ndarray(shape=(batch_size, image_size, image_size, 1))
        Y = np.ndarray(shape=(batch_size,))
        # First character index of each alphabet, in ascending order.
        s_alphabets = sorted(train_alphabets.values())
        a_indices = list(range(len(s_alphabets)))
        # Spread batch_size//2 pair slots evenly over the alphabets, topping
        # up the remainder with randomly chosen alphabets.
        times = batch_size//(2*len(a_indices))
        remainder = (batch_size//2)%len(a_indices)
        aindices = a_indices*times + list(rnd.choice(a_indices, remainder))
        rnd.shuffle(aindices)
        w_range = list(range(12, 20) if val else range(12))
        i = 0
        for a in aindices:
            # Character index range belonging to alphabet a.
            end_index = (len(train_data) if a+1 == len(s_alphabets) else s_alphabets[a+1])
            c_range = list(range(s_alphabets[a], end_index))
            # Positive pair: same character drawn by two (random) writers.
            writers = rnd.choice(w_range, 2)
            same = rnd.choice(c_range)
            X1[2*i] = transform_image(train_data[same, writers[0]])
            X2[2*i] = transform_image(train_data[same, writers[1]])
            Y[2*i] = 1.0
            # Negative pair: two characters from the same alphabet.
            # NOTE(review): rnd.choice(c_range, 2) samples with replacement, so
            # an occasional identical pair gets labelled 0 -- confirm intended.
            writers = rnd.choice(w_range, 2)
            diff = rnd.choice(c_range, 2)
            X1[2*i + 1] = transform_image(train_data[diff[0], writers[0]])
            X2[2*i + 1] = transform_image(train_data[diff[1], writers[1]])
            Y[2*i + 1] = 0.0
            i += 1
        # NOTE(review): if batch_size is odd the last slot is never filled
        # (np.ndarray leaves it uninitialised) -- confirm batch_size is even.
        yield [X1, X2], Y
# Training pairs come from writers 0-11.
train_generator = datagen_flow(datagen)
# Validation pairs use the held-out writers 12-19 of the same alphabets.
train_dev_generator = datagen_flow(datagen, val=True)
# Siamese initialisers: N(0, 0.01) weights and N(0.5, 0.01) biases.
w_init = RandomNormal(mean=0.0, stddev=1e-2)
b_init = RandomNormal(mean=0.5, stddev=1e-2)
input_shape=(image_size, image_size, 1)
# Twin inputs; both are encoded by the same (shared-weight) convnet below.
left_input = Input(input_shape)
right_input = Input(input_shape)
# Start construction of the Keras Sequential model.
convnet = Sequential()
# First convolutional layer with activation, batchnorm and max-pooling.
# NOTE(review): this layer omits kernel_initializer=w_init, unlike layers
# 2-4 -- confirm this is intentional.
convnet.add(Conv2D(kernel_size=10, strides=1, filters=64, padding='valid',
                   input_shape=input_shape, bias_initializer=b_init,
                   activation=conv_activation,
                   name='layer_conv1', kernel_regularizer=l2(conv_regularization_parameter)))
convnet.add(BatchNormalization(axis = 3, momentum=0.5, name = 'bn1'))
convnet.add(MaxPooling2D(pool_size=2, strides=2, name="max_pooling1"))
# Second convolutional layer with activation, batchnorm and max-pooling.
convnet.add(Conv2D(kernel_size=7, strides=1, filters=128, padding='valid',
                   kernel_initializer=w_init, bias_initializer=b_init,
                   activation=conv_activation, name='layer_conv2', kernel_regularizer=l2(conv_regularization_parameter)))
convnet.add(BatchNormalization(axis = 3, name = 'bn2'))
convnet.add(MaxPooling2D(pool_size=2, strides=2, name="max_pooling2"))
# Third convolutional layer with activation, batchnorm and max-pooling.
convnet.add(Conv2D(kernel_size=4, strides=1, filters=128, padding='valid',
                   kernel_initializer=w_init, bias_initializer=b_init,
                   activation=conv_activation, name='layer_conv3', kernel_regularizer=l2(conv_regularization_parameter)))
convnet.add(BatchNormalization(axis = 3, name = 'bn3'))
convnet.add(MaxPooling2D(pool_size=2, strides=2, name="max_pooling3"))
# Fourth convolutional layer with activation, batchnorm and max-pooling.
convnet.add(Conv2D(kernel_size=4, strides=1, filters=256, padding='valid',
                   kernel_initializer=w_init, bias_initializer=b_init,
                   activation=conv_activation, name='layer_conv4', kernel_regularizer=l2(conv_regularization_parameter)))
convnet.add(BatchNormalization(axis = 3, name = 'bn4'))
convnet.add(MaxPooling2D(pool_size=2, strides=2, name="max_pooling4"))
# Flatten the 4-rank conv output into a feature vector.
convnet.add(Flatten())
# 4096-dimensional embedding layer.
convnet.add(Dense(4096, activation=dense_activation,
                  kernel_initializer=w_init, bias_initializer=b_init,
                  name = "dense_1", kernel_regularizer=l2(dense_regularization_parameter)))
convnet.add(BatchNormalization(axis = 1, name = 'bn5'))
# Call the convnet on each input tensor so the twin weights are shared.
encoded_l = convnet(left_input)
encoded_r = convnet(right_input)
# Component-wise L1 distance between the two embeddings.
L1_layer = Lambda(lambda tensors:K.abs(tensors[0] - tensors[1]))
L1_distance = L1_layer([encoded_l, encoded_r])
# Single sigmoid unit: probability that both images show the same character.
prediction = Dense(1,activation='sigmoid',bias_initializer=b_init)(L1_distance)
model = Model(inputs=[left_input,right_input],outputs=prediction)
from tensorflow.python.keras.optimizers import SGD, Adam
#optimizer = SGD(lr=learning_rate, momentum=0.5)
optimizer = Adam(lr=learning_rate)
model.compile(optimizer=optimizer,
              loss='binary_crossentropy',
              metrics=['accuracy'])
# One "epoch" is batches_per_epoch generator batches for both phases.
steps_train = batches_per_epoch
steps_validation = batches_per_epoch
from tensorflow.python.keras.callbacks import ModelCheckpoint, Callback, LearningRateScheduler, ReduceLROnPlateau
# Persist the best model (by validation loss), checked every 10 epochs.
model_checkpoint = ModelCheckpoint(recognition_model_path, monitor='val_loss',
                                   save_best_only=True, period=10)
# Exponential decay: multiply the learning rate by 0.99 each epoch.
lr_scheduler = LearningRateScheduler(lambda epoch, lr: 0.99*lr)
# Halve the learning rate when validation loss plateaus for 5 epochs.
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5,
                              patience=5, min_lr=1e-4)
class LearningRateFinder(Callback):
    """Keras callback that periodically sweeps the learning rate to find
    the value giving the lowest training-batch loss.

    Every `period` epochs the model is snapshotted, the learning rate is
    swept log-linearly over `steps` batches (two decades, starting one
    decade below the current rate), the best rate seen is recorded, and
    the snapshot is restored so the sweep does not disturb training.
    """

    def __init__(self, steps=100, period=10):
        super(LearningRateFinder, self).__init__()
        # Number of batches one sweep spreads its lr schedule over.
        self.steps = steps
        # NOTE(review): `batch_size` is read from the enclosing notebook
        # scope, not from a parameter — confirm it is defined before use.
        self.batch_size=batch_size
        self.period = period
        # Best learning rate / lowest loss observed in the current sweep.
        self.best_lr = 1e-4
        self.best_loss = 1000
        # True while a sweep is in progress for the current epoch.
        self.find_lr = True
        self.current_lr = None
        # Scratch file used to snapshot the model before each sweep.
        self.training_path = os.path.join(one_shot_path, "training_model.h5")
        self.model_weights = None

    def reset_values(self):
        """Apply the best rate found, reset sweep state, reload snapshot."""
        K.set_value(self.model.optimizer.lr, self.best_lr)
        self.best_lr = 1e-4
        self.best_loss = 1000
        # NOTE(review): reassigning self.model swaps out the reference the
        # Keras training loop handed to this callback — confirm intended.
        self.model = load_model(self.training_path)

    def on_train_begin(self, logs={}):
        return

    def on_train_end(self, logs={}):
        return

    def on_epoch_begin(self, epoch, logs={}):
        # Sweep on every `period`-th epoch; report the chosen rate on the
        # following epoch.
        self.find_lr = epoch % self.period == 0
        if epoch % self.period == 1:
            print("Learning Rate: " + "{0:.2g}".format(K.get_value(self.model.optimizer.lr)))
        if(self.find_lr):
            self.current_lr = K.get_value(self.model.optimizer.lr)
            self.model.save(self.training_path)
            self.model_weights = self.model.get_weights()

    def on_epoch_end(self, epoch, logs={}):
        if(self.find_lr):
            self.reset_values()
        return

    def on_batch_begin(self, batch, logs={}):
        if(self.find_lr):
            # Log-linear schedule: two decades over `steps` batches,
            # starting one decade below the pre-sweep rate.
            K.set_value(self.model.optimizer.lr, 10**(2*batch/self.steps + np.log10(self.current_lr) - 1))
        return

    def on_batch_end(self, batch, logs={}):
        if(self.find_lr):
            loss = logs.get('loss')
            if loss < self.best_loss:
                self.best_loss = loss
                self.best_lr = K.get_value(self.model.optimizer.lr)
            elif loss >= 1.25*self.best_loss:
                # Loss diverging: stop the sweep early and roll back.
                self.find_lr = False
                self.reset_values()
                self.model.set_weights(self.model_weights)
        return
# One sweep spans a full epoch; re-run the finder every quarter of training.
# NOTE(review): lr_finder is constructed but NOT passed in `callbacks`
# below — confirm whether the finder was deliberately disabled.
lr_finder = LearningRateFinder(steps=steps_train, period=n_epochs//4)
model.fit_generator(train_generator,
                    steps_per_epoch = steps_train,
                    epochs=n_epochs,
                    validation_data = train_dev_generator,
                    validation_steps = steps_validation,
                    callbacks = [model_checkpoint, lr_scheduler, reduce_lr]
                    )
# Reload the best checkpoint saved by ModelCheckpoint.
model = load_model(recognition_model_path)
##creating test set
# 659 characters x 20 writers, each a 105x105 image (Omniglot-style
# evaluation split — presumably; confirm against the commented loader).
test_data = np.ndarray(shape=(659, 20, 105, 105))
test_alphabets = dict()
#for alphabet in os.listdir(evaluation_path):
#    alphabet_path = os.path.join(evaluation_path, alphabet)
#    for character in os.listdir(alphabet_path):
#        character_path = os.path.join(alphabet_path, character)
#        for image in os.listdir(character_path):
#            index = int(image[0:4]) - 965
#            writer = int(image[5:7]) - 1
#            test_data[index][writer] = np.array(Image.open(os.path.join(character_path, image)))
#            test_alphabets[alphabet] = index if alphabet not in test_alphabets or test_alphabets[alphabet] > index else test_alphabets[alphabet]
#with open(os.path.join("test.pickle"), 'wb') as f:
#    pickle.dump([test_data, test_alphabets], f, protocol=2)
# Load the pre-pickled test split instead of re-reading the image tree.
with open(os.path.join(one_shot_path, "test.pickle"), 'rb') as f:
    test_data, test_alphabets = pickle.load(f, encoding='latin1')
# N-way one-shot evaluation: for each alphabet, pick N characters, show
# one probe image of one of them, and ask the model which of N candidate
# images (drawn by a second writer) matches.
N = 20
st_alphabets = sorted(test_alphabets.values())
correct = 0
show = True
for i in range(len(st_alphabets)):
    # Character index range [start, end) belonging to alphabet i.
    end_index = len(test_data) if i+1 == len(st_alphabets) else st_alphabets[i+1]
    c_range = list(range(st_alphabets[i],end_index))
    for j in range(2):
        # N random characters and two writer indices per trial.
        c_list = rnd.choice(c_range, N)
        w_list = rnd.choice(range(20), 2)
        for c_i in range(N):
            image = test_data[c_list[c_i]][w_list[0]]
            # Probe repeated N times against the N candidates.
            X1 = np.array([image]*N).reshape((N, image_size, image_size, 1))
            X2 = np.array(test_data[c_list][w_list[1]]).reshape((N, image_size, image_size, 1))
            if show and c_i == 2 and i == 3:
                # One-off visual sanity check of a single trial.
                plt.imshow(image)
                plt.show()
                for m in range(N):
                    plt.imshow(test_data[c_list[m]][w_list[1]])
                    plt.show()
            targets = np.zeros((N,))
            targets[c_i] = 1
            predictions = model.predict([X1, X2])
            if show and c_i == 2 and i == 3:
                print(targets)
                print(predictions)
                show = False
            if(np.argmax(predictions) == np.argmax(targets)):
                correct += 1
# Denominator assumes 20 alphabets x 2 repeats x N trials each.
print(str(N) + "-Way Classification Accuracy: " + "{0:.2f}".format(correct/(N*20*2)))
```
| github_jupyter |
```
%reload_ext autoreload
%autoreload 2
%matplotlib inline
import os
os.chdir('../../')
from musicautobot.numpy_encode import *
from musicautobot.utils.file_processing import process_all, process_file
from musicautobot.config import *
from musicautobot.music_transformer import *
from musicautobot.multitask_transformer import *
from musicautobot.utils.stacked_dataloader import StackedDataBunch
from fastai.text import *
```
## MultitaskTransformer Training
Multitask Training is an extension of [MusicTransformer](../music_transformer/Train.ipynb).
Instead of a basic language model that predicts the next word...
We train on multiple tasks
* [Next Word](../music_transformer/Train.ipynb)
* [Bert Mask](https://arxiv.org/abs/1810.04805)
* [Sequence to Sequence Translation](http://jalammar.github.io/illustrated-transformer/)
This gives a more generalized model and also lets you do some really cool [predictions](Generate.ipynb)
## End to end training pipeline
1. Create and encode dataset
2. Initialize Transformer Model
3. Train
4. Predict
```
# Location of your midi files
midi_path = Path('data/midi/examples')
midi_path.mkdir(parents=True, exist_ok=True)
# Location to save dataset
data_path = Path('data/numpy')
data_path.mkdir(parents=True, exist_ok=True)
data_save_name = 'musicitem_data_save.pkl'
s2s_data_save_name = 'multiitem_data_save.pkl'
```
## 1. Gather midi dataset
Make sure all your midi data is in `musicautobot/data/midi` directory
Here's a pretty good dataset with lots of midi data:
https://www.reddit.com/r/datasets/comments/3akhxy/the_largest_midi_collection_on_the_internet/
Download the folder and unzip it to `data/midi`
## 2. Create dataset from MIDI files
```
midi_files = get_files(midi_path, '.mid', recurse=True); len(midi_files)
```
### 2a. Create NextWord/Mask Dataset
```
# Encode each midi file into a single token stream for the
# next-word / mask tasks.
processors = [Midi2ItemProcessor()]
data = MusicDataBunch.from_files(midi_files, data_path, processors=processors,
                                 encode_position=True, dl_tfms=mask_lm_tfm_pitchdur,
                                 bptt=5, bs=2)
data.save(data_save_name)
# Tiny bptt/bs so a single batch is small enough to inspect by eye.
xb, yb = data.one_batch(); xb
```
Key:
* 'msk' = masked input
* 'lm' = next word input
* 'pos' = timestepped positional encoding. This is in addition to relative positional encoding
Note: MultitaskTransformer trains on both the masked input ('msk') and next word input ('lm') at the same time.
The encoder is trained on the 'msk' data, while the decoder is trained on 'lm' data.
### 2b. Create sequence to sequence dataset
```
# Split each midi into separate tracks for the melody<->chord
# sequence-to-sequence translation task.
processors = [Midi2MultitrackProcessor()]
s2s_data = MusicDataBunch.from_files(midi_files, data_path, processors=processors,
                                     preloader_cls=S2SPreloader, list_cls=S2SItemList,
                                     dl_tfms=melody_chord_tfm,
                                     bptt=5, bs=2)
s2s_data.save(s2s_data_save_name)
```
Structure
```
xb, yb = s2s_data.one_batch(); xb
```
Key:
* 'c2m' = chord2melody translation
* enc = chord
* dec = melody
* 'm2c' = next word input
* enc = melody
* dec = chord
* 'pos' = timestepped positional encoding. Gives the model a better reference when translating
Note: MultitaskTransformer trains both translations ('m2c' and 'c2m') at the same time.
## 3. Initialize Model
```
# Load Data
batch_size = 2
bptt = 128
# Next-word/mask dataloader at full batch size.
lm_data = load_data(data_path, data_save_name,
                    bs=batch_size, bptt=bptt, encode_position=True,
                    dl_tfms=mask_lm_tfm_pitchdur)
# NOTE(review): the seq2seq loader uses half the batch size — presumably
# because each S2S item carries two sequences; confirm.
s2s_data = load_data(data_path, s2s_data_save_name,
                     bs=batch_size//2, bptt=bptt,
                     preloader_cls=S2SPreloader, dl_tfms=melody_chord_tfm)
# Combine both dataloaders so we can train multiple tasks at the same time
data = StackedDataBunch([lm_data, s2s_data])
# Create Model
config = multitask_config(); config
learn = multitask_model_learner(data, config.copy())
# learn.to_fp16(dynamic=True) # Enable for mixed precision
learn.model
```
# 4. Train
```
learn.fit_one_cycle(4)
learn.save('example')
```
## Predict
---
See [Generate.ipynb](Generate.ipynb) to use a pretrained model and generate better predictions
---
```
# midi_files = get_files(midi_path, '.mid', recurse=True)
midi_file = Path('data/midi/notebook_examples/single_bar_example.mid'); midi_file
# Three prediction modes from one seed file: next-word continuation,
# chord->melody translation, and masked-note in-filling.
next_word = nw_predict_from_midi(learn, midi_file, n_words=20, seed_len=8); next_word.show()
pred_melody = s2s_predict_from_midi(learn, midi_file, n_words=20, seed_len=4, pred_melody=True); pred_melody.show()
pred_notes = mask_predict_from_midi(learn, midi_file, predict_notes=True); pred_notes.show()
```
| github_jupyter |
```
library(tidyverse)
library(broom) #glance
library(knitr) #kable
library(MASS) #stepAIC
library(data.table) #fread
library(car) #vif
```
Recommend a model to predict the y variable -- “Opening Weekend Gross” with the possible predictors (no interactions) -- Runtime, Production Budget, Critic Rating, Audience Rating, and/or Month of Release.
Explain how you determined your model and why you recommended it over other models.
```
# Load the wide-release movie data and normalize the column names to
# lower_snake_case with parentheses stripped.
data6 <- fread('https://raw.githubusercontent.com/wilsonify/AppliedRegression/master/data/wide_release_movies.csv')
colnames(data6) <- colnames(data6) %>%
  tolower() %>%
  str_replace_all(' ','_') %>%
  str_replace_all('[()]','')
data6 %>% str()
# Keep only the response and the five candidate predictors; drop rows
# with any missing value so every candidate model sees the same data.
subdata6 <- data6[,c('opening_weekend_gross','runtime','production_budget_in_millions','critic_rating','audience_rating','monthofrelease')]
subdata6 <- subdata6 %>% drop_na()
subdata6 %>% str()
# Baseline: full main-effects model with all five predictors.
full_fit <- lm(formula = opening_weekend_gross~
                 runtime+
                 production_budget_in_millions+
                 critic_rating+
                 audience_rating+
                 monthofrelease
               ,data = subdata6)
summary(full_fit)
# Enumerate candidate formulas: interaction() over six copies of the
# predictor set yields every ordered 6-tuple joined by '+'. Repeated
# terms within one formula are collapsed by lm(), so this effectively
# enumerates all predictor subsets (with many duplicate formulas).
models <- interaction( c('runtime','production_budget_in_millions','critic_rating','audience_rating','monthofrelease')
                      ,c('runtime','production_budget_in_millions','critic_rating','audience_rating','monthofrelease')
                      ,c('runtime','production_budget_in_millions','critic_rating','audience_rating','monthofrelease')
                      ,c('runtime','production_budget_in_millions','critic_rating','audience_rating','monthofrelease')
                      ,c('runtime','production_budget_in_millions','critic_rating','audience_rating','monthofrelease')
                      ,c('runtime','production_budget_in_millions','critic_rating','audience_rating','monthofrelease')
                      , sep='+') %>%
  levels() %>%
  paste("opening_weekend_gross ~",.)
# Standardize all variables so coefficients are directly comparable.
scaled <- scale(subdata6) %>% as.data.frame()
scaled %>% cor() %>% round(2)
# Small-sample corrected AIC: AICc = AIC + (2k^2 + 2k) / (n - k - 1),
# where n = number of observations and k = number of predictors
# (intercept excluded).
AICc_from_AIC <- function(AIC, fit) {
  n_obs <- length(fit$residuals)
  n_pred <- length(fit$coefficients) - 1
  AIC + (2 * n_pred^2 + 2 * n_pred) / (n_obs - n_pred - 1)
}
# Fit every candidate formula by OLS on the standardized data and collect
# comparison statistics (adj R^2, AIC, AICc, BIC) into one table.
# NOTE(review): data_frame() is deprecated in newer tidyverse; tibble()
# is the drop-in replacement.
result <- data_frame()
for (form in models) {
  #print(form)
  fit <- lm(data=scaled, formula = as.formula(form))
  glance_of_fit <- glance(fit) %>%
    mutate( model=form
           ,k = length(fit$coefficients) - 1
           ,AICc = AICc_from_AIC(AIC,fit)) %>%
    dplyr::select(c( 'model'
                    ,'k'
                    ,'adj.r.squared'
                    ,'AIC'
                    ,'AICc'
                    ,'BIC')
    )
  result <- rbind(result,glance_of_fit)
}
result
# Best model under each criterion (the criteria may disagree).
rbind(result[which.max(result$adj.r.squared),]
      ,result[which.min(result$AIC),]
      ,result[which.min(result$AICc),]
      ,result[which.min(result$BIC),]
)
# Refit the chosen two-predictor model on the standardized data.
fit <- lm(opening_weekend_gross ~ production_budget_in_millions+audience_rating, scaled)
summary(fit)
# Residual diagnostics: studentized residuals (and their magnitude)
# against fitted values, looking for non-constant variance.
plot(y=studres(fit),x=fit$fitted.values)
plot(y=abs(studres(fit)),x= fit$fitted.values)
# Model |studentized residual| on fitted values to construct weights
# 1/fitted^2 for weighted least squares.
rfit <- lm(abs(studres(fit)) ~ fit$fitted.values)
rfit$fitted.values %>% str()
scaled %>% str()
wfit <- lm(opening_weekend_gross ~ production_budget_in_millions+audience_rating, scaled,weights=(1/(rfit$fitted.values^2)))
summary(wfit)
plot(y=studres(wfit),x=wfit$fitted.values)
# Small-sample corrected AIC (re-declared so this cell runs standalone).
AICc_from_AIC <- function(AIC,fit) {
  n <- length(fit$residuals)
  k <- length(fit$coefficients) - 1
  correction <- (2*k^2 + 2*k) / (n - k - 1)
  return ( AIC + correction )
}
# Re-run the model search with weighted least squares: for each formula,
# fit OLS, model |studentized residual| vs fitted values, refit with
# weights 1/fitted^2, and score the WEIGHTED fit.
result <- data_frame()
for (form in models) {
  #print(form)
  fit <- lm(data=scaled, formula = as.formula(form))
  rfit <- lm(abs(studres(fit)) ~ fit$fitted.values)
  wfit <- lm(data = scaled, as.formula(form), weights=(1/(rfit$fitted.values^2)))
  # BUG FIX: the original called glance(fit) — summarizing the unweighted
  # model — so the weighted fits never influenced the comparison table.
  # Summarize wfit, and take k/AICc from wfit as well.
  glance_of_fit <- glance(wfit) %>%
    mutate( model=form
           ,k = length(wfit$coefficients) - 1
           ,AICc = AICc_from_AIC(AIC,wfit)) %>%
    dplyr::select(c( 'model'
                    ,'k'
                    ,'adj.r.squared'
                    ,'AIC'
                    ,'AICc'
                    ,'BIC')
    )
  result <- rbind(result,glance_of_fit)
}
```
| github_jupyter |
```
import matplotlib
from matplotlib.axes import Axes
from matplotlib.patches import Polygon
from matplotlib.path import Path
from matplotlib.ticker import NullLocator, Formatter, FixedLocator
from matplotlib.transforms import Affine2D, BboxTransformTo, IdentityTransform
from matplotlib.projections import register_projection
import matplotlib.spines as mspines
import matplotlib.axis as maxis
import matplotlib.pyplot as plt
import numpy as np
class TriangularAxes(Axes):
    """
    A custom class for triangular projections.

    Data on the unit square is sheared onto the triangle with vertices
    (0, 0), (1, 0) and (0.5, sqrt(3)/2) — an equilateral simplex suited
    to ternary (three-component) composition plots, where the third
    fraction is implicitly 1 - x - y.
    """
    name = 'triangular'

    def __init__(self, *args, **kwargs):
        Axes.__init__(self, *args, **kwargs)
        # Equal aspect keeps the triangle equilateral on screen.
        self.set_aspect(1, adjustable='box', anchor='SW')
        self.cla()

    def _init_axis(self):
        """Create the axis artists (invoked by the Axes machinery)."""
        self.xaxis = maxis.XAxis(self)
        self.yaxis = maxis.YAxis(self)
        self._update_transScale()

    def cla(self):
        """
        Override to set up some reasonable defaults.
        """
        # Don't forget to call the base class
        Axes.cla(self)
        # Both axes span [0, 1] with major ticks every 0.1, no minors.
        x_min = 0
        y_min = 0
        x_max = 1
        y_max = 1
        x_spacing = 0.1
        y_spacing = 0.1
        self.xaxis.set_minor_locator(NullLocator())
        self.yaxis.set_minor_locator(NullLocator())
        self.xaxis.set_ticks_position('bottom')
        self.yaxis.set_ticks_position('left')
        Axes.set_xlim(self, x_min, x_max)
        Axes.set_ylim(self, y_min, y_max)
        self.xaxis.set_ticks(np.arange(x_min, x_max+x_spacing, x_spacing))
        self.yaxis.set_ticks(np.arange(y_min, y_max+y_spacing, y_spacing))

    def _set_lim_and_transforms(self):
        """
        This is called once when the plot is created to set up all the
        transforms for the data, text and grids.
        """
        # There are three important coordinate spaces going on here:
        #
        #    1. Data space: The space of the data itself
        #
        #    2. Axes space: The unit rectangle (0, 0) to (1, 1)
        #       covering the entire plot area.
        #
        #    3. Display space: The coordinates of the resulting image,
        #       often in pixels or dpi/inch.
        # This function makes heavy use of the Transform classes in
        # ``lib/matplotlib/transforms.py.`` For more information, see
        # the inline documentation there.
        # The goal of the first two transformations is to get from the
        # data space (in this case longitude and latitude) to axes
        # space. It is separated into a non-affine and affine part so
        # that the non-affine part does not have to be recomputed when
        # a simple affine change to the figure has been made (such as
        # resizing the window or changing the dpi).
        # 1) The core transformation from data space into
        # rectilinear space defined in the HammerTransform class.
        # (Comment inherited from the matplotlib custom-projection
        # example; here the non-affine step is the identity.)
        self.transProjection = IdentityTransform()
        # 2) The above has an output range that is not in the unit
        # rectangle, so scale and translate it so it fits correctly
        # within the axes.  The peculiar calculations of xscale and
        # yscale are specific to a Aitoff-Hammer projection, so don't
        # worry about them too much.
        # (Also inherited; this is actually the shear
        # (x, y) -> (x + y/2, y*sqrt(3)/2) that maps the unit square's
        # lower-left triangle onto the equilateral triangle.)
        self.transAffine = Affine2D.from_values(
            1., 0, 0.5, np.sqrt(3)/2., 0, 0)
        # Mirror-image shear used for the right-hand (dependent) spine.
        self.transAffinedep = Affine2D.from_values(
            1., 0, -0.5, np.sqrt(3)/2., 0, 0)
        #self.transAffine = IdentityTransform()
        # 3) This is the transformation from axes space to display
        # space.
        self.transAxes = BboxTransformTo(self.bbox)
        # Now put these 3 transforms together -- from data all the way
        # to display coordinates.  Using the '+' operator, these
        # transforms will be applied "in order".  The transforms are
        # automatically simplified, if possible, by the underlying
        # transformation framework.
        self.transData = \
            self.transProjection + \
            self.transAffine + \
            self.transAxes
        # The main data transformation is set up.  Now deal with
        # gridlines and tick labels.
        # Longitude gridlines and ticklabels.  The input to these
        # transforms are in display space in x and axes space in y.
        # Therefore, the input values will be in range (-xmin, 0),
        # (xmax, 1).  The goal of these transforms is to go from that
        # space to display space.  The tick labels will be offset 4
        # pixels from the equator.
        self._xaxis_pretransform = IdentityTransform()
        self._xaxis_transform = \
            self._xaxis_pretransform + \
            self.transData
        # Tick labels are flattened onto the x axis (scale y by 0) and
        # nudged downward in display pixels.
        self._xaxis_text1_transform = \
            Affine2D().scale(1.0, 0.0) + \
            self.transData + \
            Affine2D().translate(0.0, -20.0)
        self._xaxis_text2_transform = \
            Affine2D().scale(1.0, 0.0) + \
            self.transData + \
            Affine2D().translate(0.0, -4.0)
        # Now set up the transforms for the latitude ticks.  The input to
        # these transforms are in axes space in x and display space in
        # y.  Therefore, the input values will be in range (0, -ymin),
        # (1, ymax).  The goal of these transforms is to go from that
        # space to display space.  The tick labels will be offset 4
        # pixels from the edge of the axes ellipse.
        self._yaxis_transform = self.transData
        yaxis_text_base = \
            self.transProjection + \
            (self.transAffine + \
             self.transAxes)
        self._yaxis_text1_transform = \
            yaxis_text_base + \
            Affine2D().translate(-8.0, 0.0)
        self._yaxis_text2_transform = \
            yaxis_text_base + \
            Affine2D().translate(8.0, 0.0)

    def get_xaxis_transform(self,which='grid'):
        """Return the transform used for x-axis ticks and gridlines."""
        assert which in ['tick1','tick2','grid']
        return self._xaxis_transform

    def get_xaxis_text1_transform(self, pad):
        """Return (transform, vertical align, horizontal align) for
        primary x tick labels."""
        return self._xaxis_text1_transform, 'bottom', 'center'

    def get_xaxis_text2_transform(self, pad):
        """Return placement for secondary x tick labels."""
        return self._xaxis_text2_transform, 'top', 'center'

    def get_yaxis_transform(self,which='grid'):
        """Return the transform used for y-axis ticks and gridlines."""
        assert which in ['tick1','tick2','grid']
        return self._yaxis_transform

    def get_yaxis_text1_transform(self, pad):
        """Return placement for primary y tick labels."""
        return self._yaxis_text1_transform, 'center', 'right'

    def get_yaxis_text2_transform(self, pad):
        """Return placement for secondary y tick labels."""
        return self._yaxis_text2_transform, 'center', 'left'

    def _gen_axes_spines(self):
        """Build the three spines of the triangle: left, bottom, and a
        sheared 'right' spine forming the hypotenuse."""
        dep_spine = mspines.Spine.linear_spine(self,
                                               'right')
        # Fix dependent axis to be transformed the correct way
        dep_spine.set_transform(self.transAffinedep + self.transAxes)
        return {'left':mspines.Spine.linear_spine(self,
                                                  'left'),
                'bottom':mspines.Spine.linear_spine(self,
                                                    'bottom'),
                'right':dep_spine}

    def _gen_axes_patch(self):
        """
        Override this method to define the shape that is used for the
        background of the plot.  It should be a subclass of Patch.
        Any data and gridlines will be clipped to this shape.
        """
        return Polygon([[0,0], [0.5,np.sqrt(3)/2], [1,0]], closed=True)

    # Interactive panning and zooming is not supported with this projection,
    # so we override all of the following methods to disable it.
    def can_zoom(self):
        """
        Return True if this axes support the zoom box
        """
        return False

    def start_pan(self, x, y, button):
        pass

    def end_pan(self):
        pass

    def drag_pan(self, button, key, x, y):
        pass
# Now register the projection with matplotlib so the user can select
# it.
register_projection(TriangularAxes)
import pycalphad.io.tdb_keywords
pycalphad.io.tdb_keywords.TDB_PARAM_TYPES.extend(['EM', 'BULK', 'SHEAR', 'C11', 'C12', 'C44'])
from pycalphad import Database, Model, calculate, equilibrium
import numpy as np
import pycalphad.variables as v
import sympy
from tinydb import where
class ElasticModel(Model):
    """pycalphad Model subclass that additionally reads elastic-property
    parameters (EM, BULK, SHEAR and the cubic constants C11/C12/C44)
    from the TDB database and exposes each as an attribute built from a
    Redlich-Kister sum."""

    def build_phase(self, dbe, phase_name, symbols, param_search):
        phase = dbe.phases[phase_name]
        # Standard CALPHAD Gibbs-energy contributions.
        self.models['ref'] = self.reference_energy(phase, param_search)
        self.models['idmix'] = self.ideal_mixing_energy(phase, param_search)
        self.models['xsmix'] = self.excess_mixing_energy(phase, param_search)
        self.models['mag'] = self.magnetic_energy(phase, param_search)
        # Here is where we add our custom contribution
        # EM, BULK, SHEAR, C11, C12, C44
        for prop in ['EM', 'BULK', 'SHEAR', 'C11', 'C12', 'C44']:
            # Match parameters of this type, for this phase, with a
            # valid constituent array.
            prop_param_query = (
                (where('phase_name') == phase.name) & \
                (where('parameter_type') == prop) & \
                (where('constituent_array').test(self._array_validity))
            )
            prop_val = self.redlich_kister_sum(phase, param_search, prop_param_query)
            # Expose the property as an attribute, e.g. self.EM.
            setattr(self, prop, prop_val)
        # Extra code necessary for compatibility with order-disorder model
        ordered_phase_name = None
        disordered_phase_name = None
        try:
            ordered_phase_name = phase.model_hints['ordered_phase']
            disordered_phase_name = phase.model_hints['disordered_phase']
        except KeyError:
            pass
        if ordered_phase_name == phase_name:
            self.models['ord'] = self.atomic_ordering_energy(dbe,
                                                             disordered_phase_name,
                                                             ordered_phase_name)
dbf = Database('ElasticTi.tdb')
mod = ElasticModel(dbf, ['TI', 'MO', 'NB', 'VA'], 'BCC_A2')
# Substitute the database's symbol definitions into the EM expression.
symbols = dict([(sympy.Symbol(s), val) for s, val in dbf.symbols.items()])
mod.EM = mod.EM.xreplace(symbols)
# 100x100 composition grid over (x_Mo, x_Nb).
x1 = np.linspace(0,1, num=100)
x2 = np.linspace(0,1, num=100)
mesh = np.meshgrid(x1, x2)
X = mesh[0]
Y = mesh[1]
mesh_arr = np.array(mesh)
# Move the component axis last: shape (100, 100, 2).
mesh_arr = np.moveaxis(mesh_arr, 0, 2)
# Dependent fraction: x_Ti = 1 - x_Mo - x_Nb.
dep_col = 1 - np.sum(mesh_arr, axis=-1, keepdims=True)
mesh_arr = np.concatenate((mesh_arr, dep_col), axis=-1)
# Append the VA (vacancy) site fraction, fixed at 1.
mesh_arr = np.concatenate((mesh_arr, np.ones(mesh_arr.shape[:-1] + (1,))), axis=-1)
orig_shape = tuple(mesh_arr.shape[:-1])
mesh_arr = mesh_arr.reshape(-1, mesh_arr.shape[-1])
# Points outside the composition simplex (negative fractions) -> NaN.
mesh_arr[np.any(mesh_arr < 0, axis=-1), :] = np.nan
res = calculate(dbf, ['TI', 'MO', 'NB', 'VA'], 'BCC_A2', T=300, P=101325,
                model=mod, output='EM', points=mesh_arr)
res_EM = res.EM.values.reshape(orig_shape)
%matplotlib inline
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(12,12))
# NOTE(review): fig.gca(projection=...) is removed in newer matplotlib;
# fig.add_subplot(projection='triangular') is the current spelling.
ax = fig.gca(projection='triangular')
CS = ax.contour(X, Y, res_EM, levels=list(range(-10, 310, 10)), linewidths=4, cmap='cool')
ax.clabel(CS, inline=1, fontsize=13, fmt='%1.0f')
#PCM=ax.get_children()[0] #get the mappable, the 1st and the 2nd are the x and y axes
#plt.colorbar(PCM, ax=ax)
ax.set_xlabel('Mole Fraction Mo', fontsize=18)
ax.set_ylabel('Mole Fraction Nb', fontsize=18, rotation=60, labelpad=-180)
ax.tick_params(axis='both', which='major', labelsize=18)
ax.tick_params(axis='both', which='minor', labelsize=18)
fig.savefig('TiMoNb-EM.pdf')
```
| github_jupyter |
## Arkouda example of cosine distance and euclidean distance
- Two random arkouda int64 pdarrays are created then the distance is measured between them...
- The cosine and euclidean distance functions are compared against the scipy variants for correctness
Arkouda functions used:
- `ak.connect`
- `ak.randint`
- `ak.sum`
- `ak.pdarray.__mul__`
- `ak.pdarray.to_ndarray`
- `ak.disconnect`
- `ak.shutdown`
```
import arkouda as ak
import math
from scipy.spatial import distance
import numpy as np
```
### Connect to the Arkouda server
```
# connect to the arkouda server using the connect_url which the server prints out
ak.connect(connect_url="tcp://localhost:5555")
```
### Create two pdarrays and fill with random integers
```
# create two in64 pdarrays and add them
a = ak.randint(0,10,100)
b = ak.randint(0,10,100)
print(a+b)
```
### Check for pdarray
```
def ak_check_pda(u):
    """Raise TypeError unless *u* is an arkouda pdarray."""
    if isinstance(u, ak.pdarray):
        return
    raise TypeError("argument must be a pdarray")
```
### Dot Product of two Arkouda pdarrays
$u \cdot v = \sum_i{u_i v_i}$
```
# define the dot product of two arkouda pdarrays
def ak_dot(u, v):
    """Return sum_i u_i * v_i for two pdarrays."""
    for arr in (u, v):
        ak_check_pda(arr)
    return ak.sum(u * v)
```
### Magnitude ($L_2$ norm) of an Arkouda pdarray
$\|u\|_2 = \sqrt{\sum_i{u_i^2}}$
```
# define the magnitude/L_2-norm of arkouda pdarray
def ak_mag2(u):
ak_check_pda(u)
return math.sqrt(ak_dot(u,u))
```
### Cosine Distance of two Arkouda pdarrays
$D_C = 1 - \cos(\theta) = 1 - \frac{u \cdot v}{\|u\|_2\|v\|_2} = 1 - \frac{\sum_i{u_i v_i}}{\sqrt{\sum_i{u_i^2}}\sqrt{\sum_i{v_i^2}}}$
```
# define the cosine distance of two arkouda pdarrays;
# should function similarly to scipy.spatial.distance.cosine
def ak_cos_dist(u, v):
    """Return 1 - (u . v) / (||u|| * ||v||)."""
    ak_check_pda(u)
    ak_check_pda(v)
    norm_product = ak_mag2(u) * ak_mag2(v)
    return 1.0 - ak_dot(u, v) / norm_product
```
### Euclidean Distance of two Arkouda pdarrays
$D_E = \|u-v\|_2$
```
# define the euclidean distance of two arkouda pdarrays;
# should function similarly to scipy.spatial.distance.euclidean
def ak_euc_dist(u, v):
    """Return ||u - v||, the Euclidean distance between *u* and *v*."""
    ak_check_pda(u)
    ak_check_pda(v)
    difference = u - v
    return ak_mag2(difference)
```
### Measure cosine distance and check against scipy
```
# check the arkouda version against the scipy version
d1 = ak_cos_dist(a,b)
d2 = distance.cosine(a.to_ndarray(), b.to_ndarray())
print(d1,d2)
print(np.allclose(d1,d2))
```
### Measure euclidean distance and check against scipy
```
# check the arkouda version against the scipy version
d1 = ak_euc_dist(a,b)
d2 = distance.euclidean(a.to_ndarray(), b.to_ndarray())
print(d1,d2)
print(np.allclose(d1,d2))
```
### Disconnect from Arkouda server or Shutdown Arkouda server
```
# disconnect from the arkouda server
#ak.disconnect()
# shutdown the arkouda server
#ak.shutdown()
```
| github_jupyter |
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Automobile dataset; the CSV ships without a header row, so supply the
# 26 column names explicitly.
filename = "https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DA0101EN/auto.csv"
headers = ["symboling","normalized-losses","make","fuel-type","aspiration", "num-of-doors","body-style",
           "drive-wheels","engine-location","wheel-base", "length","width","height","curb-weight","engine-type",
           "num-of-cylinders", "engine-size","fuel-system","bore","stroke","compression-ratio","horsepower",
           "peak-rpm","city-mpg","highway-mpg","price"]
df = pd.read_csv(filename, names= headers)
df.head(10)
```
# Table of contents
- Identify and handle missing values
- Identify missing values
- Deal with missing values
- Correct data format
- Data standardization
- Data Normalization (centering/scaling)
- Binning
- Indicator variable
# Identify and handle missing values
```
# The raw file uses "?" as its missing-value marker; convert to NaN so
# pandas missing-data tooling applies.
df.replace("?", np.nan, inplace=True)
df['normalized-losses'].isnull().value_counts()
# loop for counting missing values in each column
for col in df.columns:
    print(df[col].isnull().value_counts())
    print()
```
<h3 id="deal_missing_values">Deal with missing data</h3>
<b>How to deal with missing data?</b>
<ol>
<li>drop data<br>
a. drop the whole row<br>
b. drop the whole column
</li>
<li>replace data<br>
a. replace it by mean<br>
b. replace it by frequency<br>
c. replace it based on other functions
</li>
</ol>
```
# Mean-impute the numeric columns (values are strings, so cast to float
# before averaging).
mean_norm_loss = df['normalized-losses'].astype('float').mean()
df['normalized-losses'].replace(np.nan, mean_norm_loss, inplace=True)
mean_bore_loss = df['bore'].astype('float').mean()
df['bore'].replace(np.nan, mean_bore_loss, inplace=True)
mean_stroke_loss = df['stroke'].astype('float').mean()
df['stroke'].replace(np.nan, mean_stroke_loss, inplace=True)
mean_horsepower_loss = df['horsepower'].astype('float').mean()
df['horsepower'].replace(np.nan, mean_horsepower_loss, inplace=True)
mean_rpm_loss = df['peak-rpm'].astype('float').mean()
df['peak-rpm'].replace(np.nan, mean_rpm_loss, inplace=True)
# Categorical column: impute with the most frequent value ("four").
df['num-of-doors'].value_counts()
df['num-of-doors'].replace(np.nan, 'four', inplace=True)
# With NaNs gone, restore proper numeric dtypes.
df['peak-rpm'] = df['peak-rpm'].astype('int')
df['horsepower'] = df['horsepower'].astype(int)
df['stroke'] = df['stroke'].astype(float)
df['bore'] = df['bore'].astype(float)
df['normalized-losses'] = df['normalized-losses'].astype(int)
df.info()
# price is the target — no sensible imputation; drop those rows instead.
df.dropna(subset=['price'], inplace= True)
df.price = df.price.astype(int)
df.info()
df.to_csv('automobiles.csv')
```
# data standardization
```
# Convert miles-per-gallon to liters per 100 km (L/100km = 235/mpg).
df['city-mpg'] # needs to be converted
df['city-kml'] = 235/df['city-mpg']
df.head()
```
# data normalization or scaling
```
# Simple max-scaling: divide by the column maximum so values fall in (0, 1].
df['peak-rpm'] = df['peak-rpm']/ df['peak-rpm'].max()
df['city-kml'] = df['city-kml'] / df['city-kml'].max()
df.head()
```
# binning
```
# Bin horsepower into three equal-width categories (4 edges = 3 bins).
group_names = ['low','medium','high']
bins = np.linspace(df.horsepower.min(), df.horsepower.max(),4 )
df['hp-binned'] = pd.cut(df.horsepower,bins, labels=group_names,include_lowest=True)
df['hp-binned'].value_counts()
df['hp-binned'].value_counts().plot(kind='bar')
df['price'].plot.hist(bins=5)
```
# indicator variable or dummy variable
```
df['fuel-type'].unique()
# One-hot encode the multi-valued categorical columns.
dummy_var1 = pd.get_dummies(df['fuel-type'])
dummy_var2 = pd.get_dummies(df['make'])
# Merge the dummy variables into df and drop the original columns.
# BUG FIX: the original concatenated dummy_var1[:-1] / dummy_var2[:-1],
# which drops the last ROW of each dummy frame (misaligning the concat
# and injecting NaNs) — not the last column. Use .iloc[:, :-1] to drop
# one redundant dummy COLUMN and avoid the dummy-variable trap.
df = pd.concat([df, dummy_var1.iloc[:, :-1], dummy_var2.iloc[:, :-1]], axis=1)
df.drop(columns=['fuel-type','make'],inplace=True)
df.info()
df['num-of-doors'].nunique() # not necessary to convert to dummy if you have 2 values only
df['engine-type'].nunique()
# Binary column: encode in place as 0/1 instead of creating dummies.
df['num-of-doors'].replace('two',0,inplace=True)
df['num-of-doors'].replace('four',1,inplace=True)
df.head()
pd.get_dummies(df['engine-type'])
```
| github_jupyter |
# Set-up
```
# libraries
import re
import numpy as np
import pandas as pd
from pymongo import MongoClient
# let's connect to the localhost
client = MongoClient()
# let's create a database
db = client.moma
# collection
artworks = db.artworks
# print connection
print("""
Database
==========
{}
Collection
==========
{}
""".format(db, artworks), flush=True
)
```
## Data

```
df = pd.read_csv('https://media.githubusercontent.com/media/MuseumofModernArt/collection/master/Artworks.csv')
df.info()
```
# Loading
```
%%time
# slow loading of data
# One insert_one round-trip per row (O(n) network calls); the bulk
# insert_many path is demonstrated in the aggregation section later.
# Each flat CSV row is restructured into nested subdocuments
# (Artist, Characteristics, Acquisition).
d = {}
for i in df.index:
    d = {
        "_id": str(df.loc[i, "Cataloged"]) + str(df.loc[i, "ObjectID"]),
        "Title": df.loc[i, "Title"],
        "Date": df.loc[i, "Date"],
        "Artist": {
            "Name": df.loc[i, "Artist"],
            "Bio": df.loc[i, "ArtistBio"],
            "Nationality": df.loc[i, "Nationality"],
            "Birth": df.loc[i, "BeginDate"],
            "Death": df.loc[i, "EndDate"],
            "Gender": df.loc[i, "Gender"]
        },
        "Characteristics":{
            "Medium": df.loc[i,'Medium'],
            "Dimensions": df.loc[i,'Dimensions'],
            "Circumference": df.loc[i,'Circumference (cm)'],
            "Depth": df.loc[i,'Depth (cm)'],
            "Diameter": df.loc[i,'Diameter (cm)'],
            "Height": df.loc[i,'Height (cm)'],
            "Length": df.loc[i,'Length (cm)'],
            "Weight": df.loc[i,'Weight (kg)'],
            "Width": df.loc[i,'Width (cm)'],
            "Seat Height": df.loc[i,'Seat Height (cm)'],
            "Duration": df.loc[i,'Duration (sec.)']
        },
        "Acquisition": {
            "Date": df.loc[i, "DateAcquired"],
            "CreditLine": df.loc[i, "CreditLine"],
            "Number": df.loc[i, "AccessionNumber"]
        },
        "Classification": df.loc[i, "Classification"],
        "Department": df.loc[i, "Department"],
        "URL": df.loc[i, "URL"],
        "ThumbnailURL": df.loc[i, "ThumbnailURL"]
    }
    artworks.insert_one(d)
# for further reference https://docs.mongodb.com/manual/reference/command/collStats/
stats = db.command("collstats", "artworks")
# Collection size in MB, kept in s0 to compare against post-cleaning size.
s0 = stats.get('size')/10**6
print("""
Namespace: {}
Document Count: {}
Size: {}
""".format(stats.get('ns'), stats.get('count'), s0), flush=True)
```
## Cleaning
```
# get key names
# Build dotted key paths ("Artist.Name", ...) from the last sample
# document; top-level scalars have no .keys() and fall into the except
# branch, where they are appended as-is.
l = []
for i in d.keys():
    try:
        for b in d.get(str(i)).keys():
            l.append(str(i) + '.' + str(b))
    except:
        # NOTE(review): bare except relies on scalars raising
        # AttributeError on .keys(); it also hides any other error.
        l.append(i)
# unset NaN fields
# (the server-side matcher compares NaN by value, so {field: np.nan}
# matches documents where that field was stored as NaN)
for i in l:
    update = artworks.update_many({str(i):np.nan},{"$unset": {str(i):""}})
    print("""
Key: {}
Matched: {}
Modified: {}
------------
""".format(i, update.matched_count, update.modified_count), flush=True)
# for further reference https://docs.mongodb.com/manual/reference/command/collStats/
stats = db.command("collstats", "artworks")
s1 = stats.get('size')/10**6
print("""
Namespace: {}
Document Count: {}
Size: {}
Var. Size: {}
""".format(stats.get('ns'), stats.get('count'), s1, round(s0-s1, 2)), flush=True)
```
## Further Cleaning
```
# change data type
# Strings of pure digits are converted to integers server-side.
update = artworks.update_many({"Date":{"$regex": '^[0-9]*$'}}, [{ "$set": { "Date": { "$toInt": "$Date" } } }])
print("""
Key: {}
Matched: {}
Modified: {}
------------
""".format("Date", update.matched_count, update.modified_count), flush=True)
# create an array field to store ranges
# Four passes: "YYYY-YYYY" and "YYYY–YYYY" (hyphen vs en dash), then
# "YYYY-YY" / "YYYY–YY" where the short second year inherits the first
# year's century digits.
for i in artworks.find({"Date":{"$regex": '^[0-9]{4}-[0-9]{4}$'}}):
    date = i.get('Date').split('-')
    a = int(date[0])
    b = int(date[1])
    id = i.get('_id')
    update = artworks.update_one({"_id": str(id)},{"$set": {"Date": [a, b]}})
    print(update.matched_count, update.modified_count)
for i in artworks.find({"Date":{"$regex": '^[0-9]{4}–[0-9]{4}$'}}):
    date = i.get('Date').split('–')
    a = int(date[0])
    b = int(date[1])
    id = i.get('_id')
    update = artworks.update_one({"_id": str(id)},{"$set": {"Date": [a, b]}})
    print(update.matched_count, update.modified_count)
for i in artworks.find({"Date": {"$regex": '^[0-9]{4}-[0-9]{2}$'}}, {"Date": 1}):
    date = i.get('Date').split('-')
    a = int(date[0])
    # Prefix the first year's leading two digits onto the short year.
    b = int(date[0][0] + date[0][1] + date[1])
    id = i.get('_id')
    update = artworks.update_one({"_id": str(id)},{"$set": {"Date": [a, b]}})
    print(update.matched_count, update.modified_count)
for i in artworks.find({"Date": {"$regex": '^[0-9]{4}–[0-9]{2}$'}}, {"Date": 1}):
    date = i.get('Date').split('–')
    a = int(date[0])
    b = int(date[0][0] + date[0][1]+ date[1])
    id = i.get('_id')
    update = artworks.update_one({"_id": str(id)},{"$set": {"Date": [a, b]}})
    print(update.matched_count, update.modified_count)
# perform some further cleaning
# "c. YYYY" (circa) collapses to the bare year.
for i in artworks.find({"Date":{"$regex": '^c. [0-9]{4}$'}}):
    date = i.get('Date').split(' ')
    b = int(date[1])
    id = i.get('_id')
    update = artworks.update_one({"_id": str(id)},{"$set": {"Date": b}})
    print(update.matched_count, update.modified_count)
# remove Unknown or n.d.
update = artworks.update_many({"Date": {"$in": ["n.d.", "Unknown", "unknown"]}}, {"$unset": {"Date": ""}})
print("""
Matched: {}
Modified: {}
""".format(update.matched_count, update.modified_count), flush=True)
# Report whatever Date strings remain unparsed.
for i in artworks.find({"Date": {"$type": "string"}}, {"Date":1}):
    print(i)
```
# Aggregation and loading
```
# collection
artw = db.artw
# print connection
print("""
Database
==========
{}
Collection
==========
{}
""".format(db, artw), flush=True
)
# df to dict
# Rename to drop the '.' — MongoDB field names may not contain dots.
df.rename(columns={'Duration (sec.)': 'Duration (sec)'}, inplace=True)
dd = df.to_dict('records')
dd[0]
%%time
# insert array
# Bulk-insert the flat records, then restructure server-side with one
# $project aggregation — the fast alternative to the per-row loop above.
insert = artw.insert_many(dd)
# define the pipeline
pipeline = [
    {"$project":
        {
            "_id": {"$concat": ["$Cataloged", {"$toString": "$ObjectID"}]},
            "Title": "$Title",
            "Date": "$Date",
            "Artist": {
                "Name": "$Artist",
                'Bio': "$ArtistBio",
                'Nationality': "$Nationality",
                "Birth": "$BeginDate",
                "Death": "$EndDate",
                "Gender": "$Gender",
            },
            "Characteristics":{
                "Medium": '$Medium',
                "Dimensions": '$Dimensions',
                "Circumference": '$Circumference (cm)',
                "Depth": '$Depth (cm)',
                "Diameter": '$Diameter (cm)',
                "Height": '$Height (cm)',
                "Length": '$Length (cm)',
                "Weight": '$Weight (kg)',
                "Width": '$Width (cm)',
                "Seat Height": '$Seat Height (cm)',
                "Duration": '$Duration (sec)'
            },
            "Acquisition": {
                "Date": "$DateAcquired",
                "CreditLine": "$CreditLine",
                "Number": "$AccessionNumber"
            },
            "Classification": "$Classification",
            "Department": "$Department",
            "URL": "$URL",
            "ThumbnailURL": "$ThumbnailURL"
        }
    },
    # $out overwrites the collection with the projected documents.
    { "$out" : "artw" }
]
# perform the aggregation
agr = artw.aggregate(pipeline)
# unset field with null values
# (list comprehension used purely for its side effects, reusing the
# dotted key paths in `l` from the cleaning section)
[artw.update_many({str(i):np.nan},{"$unset": {str(i):""}}) for i in l]
```
| github_jupyter |
```
# this is for the data from the three runs where we try
# each of the conflict resolutions
# imports and reading csv
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
import os
import sys

# get correct path for files.  Notebooks do not define __file__, so it is
# faked here to anchor the relative directory walk below.
__file__ = 'conflict_res'
absolutepath = os.path.abspath(__file__)
print(absolutepath)  # should be MQP/app/python_scripts/deep_dives/conflict_res
fileDirectory = os.path.dirname(absolutepath)
print(fileDirectory)  # should be MQP/app/python_scripts/deep_dives
# Path of parent directory
parentDirectory = os.path.dirname(fileDirectory)
print(parentDirectory)  # should be MQP/app/python_scripts
appDirectory = os.path.dirname(parentDirectory)
print(appDirectory)  # should be MQP/app
# Navigate to all_data directory
all_data = os.path.join(appDirectory, 'all_data')
print(all_data)  # should be MQP/app/all_data

# One results file per conflict-resolution strategy (runs 1, 3, 4, 12).
pathOutput1 = os.path.join(all_data, 'data_1.csv')
print(pathOutput1)  # should be MQP/app/all_data/data_1.csv
pathOutput3 = os.path.join(all_data, 'data_3.csv')
print(pathOutput3)  # should be MQP/app/all_data/data_3.csv
pathOutput4 = os.path.join(all_data, 'data_4.csv')
print(pathOutput4)  # should be MQP/app/all_data/data_4.csv
pathOutput12 = os.path.join(all_data, 'data_12.csv')
print(pathOutput12)  # should be MQP/app/all_data/data_12.csv

data1 = pd.read_csv(pathOutput1)
data3 = pd.read_csv(pathOutput3)
data4 = pd.read_csv(pathOutput4)
data12 = pd.read_csv(pathOutput12)

# general initializations: x-axis labels, one per strategy, in run order.
x = ['move, exit, others', 'move, exit and reset, others', 'move, exit', 'none']

# The four runs, in the order matching the labels above.
_runs = [data1, data3, data4, data12]

def _summarize(column):
    """Return (means, stds) of `column` across the four runs, in run order.

    Replaces four nearly identical copy-pasted stanzas per metric in the
    original version of this cell.
    """
    means = [np.mean(run[column]) for run in _runs]
    stds = [np.std(run[column]) for run in _runs]
    return means, stds

# One (mean, std-error-bar) series per metric.  The y1..y5e names are
# kept so anything that references them later keeps working.
y1, y1e = _summarize('total_exit_time')
y2, y2e = _summarize('avg_exit_time')
y3, y3e = _summarize('avg_collisions_total')
y4, y4e = _summarize('total_avg_occ_all_time')
y5, y5e = _summarize('evaluation_metric')

# building the bar graphs: a 2x2 grid of individual metrics plus a
# full-height evaluation-metric panel in the third column.
fig = plt.figure(figsize=(30, 12))
gs = GridSpec(nrows=2, ncols=3)

def _bar_panel(subplot_spec, means, errs, title, ylabel):
    """Add one bar chart with error bars to the current figure."""
    ax = fig.add_subplot(subplot_spec)
    plt.bar(x, means, yerr=errs, align='center', alpha=0.5,
            ecolor='black', capsize=10)
    plt.title(title)
    plt.xlabel('Conflict Resolution Strategy')
    plt.ylabel(ylabel)
    return ax

ax0 = _bar_panel(gs[0, 0], y1, y1e,
                 'Average Total Exit Time Based on Conflict Res',
                 'Average Total Exit Time')
ax1 = _bar_panel(gs[1, 0], y2, y2e,
                 'Mean of Average Exit Time Based on Conflict Res',
                 'Mean of Average Exit Time')
ax2 = _bar_panel(gs[0, 1], y3, y3e,
                 'Mean of Average Collisions Based on Conflict Res',
                 'Mean of Average Collisions')
ax3 = _bar_panel(gs[1, 1], y4, y4e,
                 'Mean of Average Area Occupancy Based on Conflict Res',
                 'Mean of Average Area Occupancy')
# The original reassigned `ax3` here, clobbering the handle to the
# area-occupancy panel above; renamed to ax4.
ax4 = _bar_panel(gs[:, 2], y5, y5e,
                 'Average Evaluation Metric Based on Conflict Res',
                 'Average Evaluation Metric')
```
| github_jupyter |
# Gaussian Process (GP) smoothing
This example deals with the case when we want to **smooth** the observed data points $(x_i, y_i)$ of some 1-dimensional function $y=f(x)$, by finding the new values $(x_i, y'_i)$ such that the new data is more "smooth" (see more on the definition of smoothness through allocation of variance in the model description below) when moving along the $x$ axis.
It is important to note that we are **not** dealing with the problem of interpolating the function $y=f(x)$ at the unknown values of $x$. Such problem would be called "regression" not "smoothing", and will be considered in other examples.
If we assume the functional dependency between $x$ and $y$ is **linear** then, by making the independence and normality assumptions about the noise, we can infer a straight line that approximates the dependency between the variables, i.e. perform a linear regression. We can also fit more complex functional dependencies (like quadratic, cubic, etc), if we know the functional form of the dependency in advance.
However, the **functional form** of $y=f(x)$ is **not always known in advance**, and it might be hard to choose which one to fit, given the data. For example, you wouldn't necessarily know which function to use, given the following observed data. Assume you haven't seen the formula that generated it:
```
%pylab inline
figsize(12, 6);
import numpy as np
import scipy.stats as stats

# Synthetic 1-D data: a smooth nonlinear trend plus unit-variance
# Gaussian noise, sampled at 100 evenly spaced points on [0, 50].
x = np.linspace(0, 50, 100)
y = (np.exp(1.0 + np.power(x, 0.5) - np.exp(x/15.0)) +
     np.random.normal(scale=1.0, size=x.shape))

# plot/xlabel/ylabel/title come from the %pylab namespace.
plot(x, y);
xlabel("x");
ylabel("y");
title("Observed Data");
```
### Let's try a linear regression first
As humans, we see that there is a non-linear dependency with some noise, and we would like to capture that dependency. If we perform a linear regression, we see that the "smoothed" data is less than satisfactory:
```
# Ordinary least-squares fit; overlay the fitted line on the raw data.
plot(x, y);
xlabel("x");
ylabel("y");
lin = stats.linregress(x, y)
plot(x, lin.intercept + lin.slope * x);
title("Linear Smoothing");
```
### Linear regression model recap
The linear regression assumes there is a linear dependency between the input $x$ and output $y$, sprinkled with some noise around it so that for each observed data point we have:
$$ y_i = a + b\, x_i + \epsilon_i $$
where the observation errors at each data point satisfy:
$$ \epsilon_i \sim N(0, \sigma^2) $$
with the same $\sigma$, and the errors are independent:
$$ cov(\epsilon_i, \epsilon_j) = 0 \: \text{ for } i \neq j $$
The parameters of this model are $a$, $b$, and $\sigma$. It turns out that, under these assumptions, the maximum likelihood estimates of $a$ and $b$ don't depend on $\sigma$. Then $\sigma$ can be estimated separately, after finding the most likely values for $a$ and $b$.
### Gaussian Process smoothing model
This model allows departure from the linear dependency by assuming that the dependency between $x$ and $y$ is a Brownian motion over the domain of $x$. This doesn't go as far as assuming a particular functional dependency between the variables. Instead, by **controlling the standard deviation of the unobserved Brownian motion** we can achieve different levels of smoothness of the recovered functional dependency at the original data points.
The particular model we are going to discuss assumes that the observed data points are **evenly spaced** across the domain of $x$, and therefore can be indexed by $i=1,\dots,N$ without the loss of generality. The model is described as follows:
\begin{equation}
\begin{aligned}
z_i & \sim \mathcal{N}(z_{i-1} + \mu, (1 - \alpha)\cdot\sigma^2) \: \text{ for } i=2,\dots,N \\
z_1 & \sim ImproperFlat(-\infty,\infty) \\
y_i & \sim \mathcal{N}(z_i, \alpha\cdot\sigma^2)
\end{aligned}
\end{equation}
where $z$ is the hidden Brownian motion, $y$ is the observed data, and the total variance $\sigma^2$ of each observation is split between the hidden Brownian motion and the noise in proportions of $1 - \alpha$ and $\alpha$ respectively, with parameter $0 < \alpha < 1$ specifying the degree of smoothing.
When we estimate the maximum likelihood values of the hidden process $z_i$ at each of the data points, $i=1,\dots,N$, these values provide an approximation of the functional dependency $y=f(x)$ as $\mathrm{E}\,[f(x_i)] = z_i$ at the original data points $x_i$ only. Therefore, again, the method is called smoothing and not regression.
### Let's describe the above GP-smoothing model in PyMC3
```
import pymc3 as pm
from theano import shared
from pymc3.distributions.timeseries import GaussianRandomWalk
from scipy import optimize
```
Let's create a model with a shared parameter for specifying different levels of smoothing. We use very wide priors for the "mu" and "tau" parameters of the hidden Brownian motion, which you can adjust according to your application.
```
# Scale constant for the intentionally wide priors on mu and tau.
LARGE_NUMBER = 1e5

model = pm.Model()
with model:
    # Shared theano variable so the smoothing level alpha can be changed
    # later without rebuilding the model.
    smoothing_param = shared(0.9)
    # Very wide priors for the drift and precision of the hidden walk.
    mu = pm.Normal("mu", sigma=LARGE_NUMBER)
    tau = pm.Exponential("tau", 1.0/LARGE_NUMBER)
    # Hidden Brownian motion z: receives the (1 - alpha) variance share.
    z = GaussianRandomWalk("z",
                           mu=mu,
                           tau=tau / (1.0 - smoothing_param),
                           shape=y.shape)
    # Observations y: Gaussian around z with the remaining alpha share.
    obs = pm.Normal("obs",
                    mu=z,
                    tau=tau / smoothing_param,
                    observed=y)
```
Let's also make a helper function for inferring the most likely values of $z$:
```
def infer_z(smoothing):
    """Return the MAP estimate of the hidden process ``z``.

    ``smoothing`` is the variance share alpha allocated to the observation
    noise; it is pushed into the shared theano variable before optimizing.
    """
    with model:
        smoothing_param.set_value(smoothing)
        map_estimate = pm.find_MAP(vars=[z], fmin=optimize.fmin_l_bfgs_b)
    return map_estimate['z']
```
Please note that in this example, we are only looking at the MAP estimate of the unobserved variables. We are not really interested in inferring the posterior distributions. Instead, we have a control parameter $\alpha$ which lets us allocate the variance between the hidden Brownian motion and the noise. Other goals and/or different models may require sampling to obtain the posterior distributions, but for our goal a MAP estimate will suffice.
### Exploring different levels of smoothing
Let's try to allocate 50% variance to the noise, and see if the result matches our expectations.
```
# Split the variance 50/50 between the noise and the hidden process.
smoothing = 0.5
z_val = infer_z(smoothing)
plot(x, y);
plot(x, z_val);
title("Smoothing={}".format(smoothing));
```
It appears that the variance is split evenly between the noise and the hidden process, as expected.
Let's try gradually increasing the smoothness parameter to see if we can obtain smoother data:
```
# Allocate 90% of the variance to the noise for a smoother fit.
smoothing = 0.9
z_val = infer_z(smoothing)
plot(x, y);
plot(x, z_val);
title("Smoothing={}".format(smoothing));
```
### Smoothing "to the limits"
By increasing the smoothing parameter, we can gradually make the inferred values of the hidden Brownian motion approach the average value of the data. This is because as we increase the smoothing parameter, we allow less and less of the variance to be allocated to the Brownian motion, so eventually it approaches the process which almost doesn't change over the domain of $x$:
```
# Push smoothing toward 1: the hidden walk gets almost no variance,
# so the inferred curve flattens toward the mean of the data.
fig, axes = subplots(2, 2)
for ax, smoothing in zip(axes.ravel(), [0.95, 0.99, 0.999, 0.9999]):
    z_val = infer_z(smoothing)
    ax.plot(x, y)
    ax.plot(x, z_val)
    ax.set_title('Smoothing={:05.4f}'.format(smoothing))
```
This example originally contributed by: Andrey Kuzmenko, http://github.com/akuz
| github_jupyter |
```
# %load /Users/facai/Study/book_notes/preconfig.py
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
from IPython.display import SVG
```
逻辑回归在scikit-learn中的实现简介
==============================
分析用的代码版本信息:
```bash
~/W/g/scikit-learn ❯❯❯ git log -n 1
commit d161bfaa1a42da75f4940464f7f1c524ef53484f
Author: John B Nelson <jnelso11@gmu.edu>
Date: Thu May 26 18:36:37 2016 -0400
Add missing double quote (#6831)
```
### 0. 总纲
下面是sklearn中逻辑回归的构成情况:
```
# Render the architecture diagram of sklearn's logistic regression.
SVG("./res/sklearn_lr.svg")
```
如[逻辑回归在spark中的实现简介](./spark_ml_lr.ipynb)中分析一样,主要把精力定位到算法代码上,即寻优算子和损失函数。
### 1. 寻优算子
sklearn支持liblinear, sag, lbfgs和newton-cg四种寻优算子,其中lbfgs属于scipy包,liblinear属于LibLinear库,剩下两种由sklearn自己实现。代码很好定位,逻辑也很明了,不多说:
```python
704 if solver == 'lbfgs':
705 try:
706 w0, loss, info = optimize.fmin_l_bfgs_b(
707 func, w0, fprime=None,
708 args=(X, target, 1. / C, sample_weight),
709 iprint=(verbose > 0) - 1, pgtol=tol, maxiter=max_iter)
710 except TypeError:
711 # old scipy doesn't have maxiter
712 w0, loss, info = optimize.fmin_l_bfgs_b(
713 func, w0, fprime=None,
714 args=(X, target, 1. / C, sample_weight),
715 iprint=(verbose > 0) - 1, pgtol=tol)
716 if info["warnflag"] == 1 and verbose > 0:
717 warnings.warn("lbfgs failed to converge. Increase the number "
718 "of iterations.")
719 try:
720 n_iter_i = info['nit'] - 1
721 except:
722 n_iter_i = info['funcalls'] - 1
723 elif solver == 'newton-cg':
724 args = (X, target, 1. / C, sample_weight)
725 w0, n_iter_i = newton_cg(hess, func, grad, w0, args=args,
726 maxiter=max_iter, tol=tol)
727 elif solver == 'liblinear':
728 coef_, intercept_, n_iter_i, = _fit_liblinear(
729 X, target, C, fit_intercept, intercept_scaling, None,
730 penalty, dual, verbose, max_iter, tol, random_state,
731 sample_weight=sample_weight)
732 if fit_intercept:
733 w0 = np.concatenate([coef_.ravel(), intercept_])
734 else:
735 w0 = coef_.ravel()
736
737 elif solver == 'sag':
738 if multi_class == 'multinomial':
739 target = target.astype(np.float64)
740 loss = 'multinomial'
741 else:
742 loss = 'log'
743
744 w0, n_iter_i, warm_start_sag = sag_solver(
745 X, target, sample_weight, loss, 1. / C, max_iter, tol,
746 verbose, random_state, False, max_squared_sum, warm_start_sag)
```
### 2. 损失函数
#### 2.1 二分类
二分类的损失函数和导数由`_logistic_loss_and_grad`实现,运算逻辑和[逻辑回归算法简介和Python实现](./demo.ipynb)是相同的,不多说。
#### 2.2 多分类
sklearn的多分类支持ovr (one vs rest,一对多)和multinomial两种方式。
##### 2.2.0 ovr
默认是ovr,它会对毎个标签训练一个二分类的分类器,即总共$K$个。训练代码在
```python
1230 fold_coefs_ = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
1231 backend=backend)(
1232 path_func(X, y, pos_class=class_, Cs=[self.C],
1233 fit_intercept=self.fit_intercept, tol=self.tol,
1234 verbose=self.verbose, solver=self.solver, copy=False,
1235 multi_class=self.multi_class, max_iter=self.max_iter,
1236 class_weight=self.class_weight, check_input=False,
1237 random_state=self.random_state, coef=warm_start_coef_,
1238 max_squared_sum=max_squared_sum,
1239 sample_weight=sample_weight)
1240 for (class_, warm_start_coef_) in zip(classes_, warm_start_coef))
```
注意,1240L的`for class_ in classes`配合1232L的`pos_class=class`,就是逐个取标签来训练的逻辑。
##### 2.2.1 multinomial
前面讲到ovr会遍历标签,逐个训练。为了兼容这段逻辑,真正的二分类问题需要做变化:
```python
1201 if len(self.classes_) == 2:
1202 n_classes = 1
1203 classes_ = classes_[1:]
```
同样地,multinomial需要一次对全部标签做处理,也需要做变化:
```python
1217 # Hack so that we iterate only once for the multinomial case.
1218 if self.multi_class == 'multinomial':
1219 classes_ = [None]
1220 warm_start_coef = [warm_start_coef]
```
好,接下来,我们看multinomial的损失函数和导数计算代码,它是`_multinomial_loss_grad`这个函数。
sklearn里多分类的代码使用的公式和[逻辑回归算法简介和Python实现](./demo.ipynb)里一致,即:
\begin{align}
L(\beta) &= \log\left(\sum_i e^{\beta_{i0} + \beta_i x}\right) - (\beta_{k0} + \beta_k x) \\
\frac{\partial L}{\partial \beta} &= x \left ( \frac{e^{\beta_{k0} + \beta_k x}}{\sum_i e^{\beta_{i0} + \beta_i x}} - I(y = k) \right ) \\
\end{align}
具体到损失函数:
```python
244 def _multinomial_loss(w, X, Y, alpha, sample_weight):
245 #+-- 37 lines: """Computes multinomial loss and class probabilities.---
282 n_classes = Y.shape[1]
283 n_features = X.shape[1]
284 fit_intercept = w.size == (n_classes * (n_features + 1))
285 w = w.reshape(n_classes, -1)
286 sample_weight = sample_weight[:, np.newaxis]
287 if fit_intercept:
288 intercept = w[:, -1]
289 w = w[:, :-1]
290 else:
291 intercept = 0
292 p = safe_sparse_dot(X, w.T)
293 p += intercept
294 p -= logsumexp(p, axis=1)[:, np.newaxis]
295 loss = -(sample_weight * Y * p).sum()
296 loss += 0.5 * alpha * squared_norm(w)
297 p = np.exp(p, p)
298 return loss, p, w
```
+ 292L-293L是计算$\beta_{i0} + \beta_i x$。
+ 294L是计算 $L(\beta)$。注意,这里防止计算溢出,是在`logsumexp`函数里作的,原理和[逻辑回归在spark中的实现简介](./spark_ml_lr.ipynb)一样。
+ 295L是加总(注意,$Y$毎列是单位向量,所以起了选标签对应$k$的作用)。
+ 296L加上L2正则。
+ 注意,297L是p变回了$\frac{e^{\beta_{k0} + \beta_k x}}{\sum_i e^{\beta_{i0} + \beta_i x}}$,为了计算导数时直接用。
好,再看导数的计算:
```python
301 def _multinomial_loss_grad(w, X, Y, alpha, sample_weight):
302 #+-- 37 lines: """Computes the multinomial loss, gradient and class probabilities.---
339 n_classes = Y.shape[1]
340 n_features = X.shape[1]
341 fit_intercept = (w.size == n_classes * (n_features + 1))
342 grad = np.zeros((n_classes, n_features + bool(fit_intercept)))
343 loss, p, w = _multinomial_loss(w, X, Y, alpha, sample_weight)
344 sample_weight = sample_weight[:, np.newaxis]
345 diff = sample_weight * (p - Y)
346 grad[:, :n_features] = safe_sparse_dot(diff.T, X)
347 grad[:, :n_features] += alpha * w
348 if fit_intercept:
349 grad[:, -1] = diff.sum(axis=0)
350 return loss, grad.ravel(), p
```
+ 345L-346L,对应了导数的计算式;
+ 347L是加上L2的导数;
+ 348L-349L,是对intercept的计算。
#### 2.3 Hessian
注意,sklearn支持牛顿法,需要用到Hessian阵,定义见维基[Hessian matrix](https://en.wikipedia.org/wiki/Hessian_matrix),
\begin{equation}
{\mathbf H}={\begin{bmatrix}{\dfrac {\partial ^{2}f}{\partial x_{1}^{2}}}&{\dfrac {\partial ^{2}f}{\partial x_{1}\,\partial x_{2}}}&\cdots &{\dfrac {\partial ^{2}f}{\partial x_{1}\,\partial x_{n}}}\\[2.2ex]{\dfrac {\partial ^{2}f}{\partial x_{2}\,\partial x_{1}}}&{\dfrac {\partial ^{2}f}{\partial x_{2}^{2}}}&\cdots &{\dfrac {\partial ^{2}f}{\partial x_{2}\,\partial x_{n}}}\\[2.2ex]\vdots &\vdots &\ddots &\vdots \\[2.2ex]{\dfrac {\partial ^{2}f}{\partial x_{n}\,\partial x_{1}}}&{\dfrac {\partial ^{2}f}{\partial x_{n}\,\partial x_{2}}}&\cdots &{\dfrac {\partial ^{2}f}{\partial x_{n}^{2}}}\end{bmatrix}}.
\end{equation}
其实就是各点位的二阶偏导。具体推导就不写了,感兴趣可以看[Logistic Regression - Jia Li](http://sites.stat.psu.edu/~jiali/course/stat597e/notes2/logit.pdf)或[Logistic regression: a simple ANN Nando de Freitas](https://www.cs.ox.ac.uk/people/nando.defreitas/machinelearning/lecture6.pdf)。
基本公式是$\mathbf{H} = \mathbf{X}^T \operatorname{diag}(\pi_i (1 - \pi_i)) \mathbf{X}$,其中$\pi_i = \operatorname{sigm}(x_i \beta)$。
```python
167 def _logistic_grad_hess(w, X, y, alpha, sample_weight=None):
168 #+-- 33 lines: """Computes the gradient and the Hessian, in the case of a logistic loss.
201 w, c, yz = _intercept_dot(w, X, y)
202 #+-- 4 lines: if sample_weight is None:---------
206 z = expit(yz)
207 #+-- 8 lines: z0 = sample_weight * (z - 1) * y---
215 # The mat-vec product of the Hessian
216 d = sample_weight * z * (1 - z)
217 if sparse.issparse(X):
218 dX = safe_sparse_dot(sparse.dia_matrix((d, 0),
219 shape=(n_samples, n_samples)), X)
220 else:
221 # Precompute as much as possible
222 dX = d[:, np.newaxis] * X
223
224 if fit_intercept:
225 # Calculate the double derivative with respect to intercept
226 # In the case of sparse matrices this returns a matrix object.
227 dd_intercept = np.squeeze(np.array(dX.sum(axis=0)))
228
229 def Hs(s):
230 ret = np.empty_like(s)
231 ret[:n_features] = X.T.dot(dX.dot(s[:n_features]))
232 ret[:n_features] += alpha * s[:n_features]
233
234 # For the fit intercept case.
235 if fit_intercept:
236 ret[:n_features] += s[-1] * dd_intercept
237 ret[-1] = dd_intercept.dot(s[:n_features])
238 ret[-1] += d.sum() * s[-1]
239 return ret
240
241 return grad, Hs
```
+ 201L, 206L, 和216L是计算中间的$\pi_i (1 - \pi_i)$。
+ 217L-222L,对中间参数变为对角阵后,预算公式后半部份,配合231L就是整个式子了。
这里我也只知其然,以后有时间再深挖下吧。
### 3. 小结
本文简单介绍了sklearn中逻辑回归的实现,包括二分类和多分类的具体代码和公式对应。
| github_jupyter |
# Building a Fraud Prediction Model with EvalML
In this demo, we will build an optimized fraud prediction model using EvalML. To optimize the pipeline, we will set up an objective function to minimize the percentage of total transaction value lost to fraud. At the end of this demo, we also show you how introducing the right objective during the training is over 4x better than using a generic machine learning metric like AUC.
```
import evalml
from evalml import AutoMLSearch
from evalml.objectives import FraudCost
```
## Configure "Cost of Fraud"
To optimize the pipelines toward the specific business needs of this model, we can set our own assumptions for the cost of fraud. These parameters are
* `retry_percentage` - what percentage of customers will retry a transaction if it is declined?
* `interchange_fee` - how much of each successful transaction do you collect?
* `fraud_payout_percentage` - the percentage of fraud will you be unable to collect
* `amount_col` - the column in the data that represents the transaction amount
Using these parameters, EvalML will attempt to build a pipeline that minimizes the financial loss due to fraud.
```
# Business assumptions for the cost-of-fraud objective; AutoML will
# minimize the fraction of total transaction value lost to fraud.
fraud_objective = FraudCost(retry_percentage=.5,
                            interchange_fee=.02,
                            fraud_payout_percentage=.75,
                            amount_col='amount')
```
## Search for best pipeline
In order to validate the results of the pipeline creation and optimization process, we will save some of our data as the holdout set.
```
# Load a 1000-row sample of the EvalML fraud demo dataset.
X, y = evalml.demos.load_fraud(n_rows=1000)
```
EvalML natively supports one-hot encoding. Here we keep 1 out of the 6 categorical columns to decrease computation time.
```
# Drop 5 of the 6 categorical columns to keep one-hot encoding cheap.
cols_to_drop = ['datetime', 'expiration_date', 'country', 'region', 'provider']
for col in cols_to_drop:
    X.pop(col)
# Hold out 20% of the rows for final validation.
X_train, X_holdout, y_train, y_holdout = evalml.preprocessing.split_data(X, y, problem_type='binary', test_size=0.2, random_seed=0)
print(X.types)
```
Because the fraud labels are binary, we will use `AutoMLSearch(X_train=X_train, y_train=y_train, problem_type='binary')`. When we call `.search()`, the search for the best pipeline will begin.
```
# Search binary-classification pipelines, ranking them by the custom
# fraud-cost objective; AUC/F1/precision are tracked for comparison.
automl = AutoMLSearch(X_train=X_train, y_train=y_train,
                      problem_type='binary',
                      objective=fraud_objective,
                      additional_objectives=['auc', 'f1', 'precision'],
                      max_batches=1,
                      optimize_thresholds=True)
automl.search()
```
### View rankings and select pipelines
Once the fitting process is done, we can see all of the pipelines that were searched, ranked by their score on the fraud detection objective we defined.
```
# Display every searched pipeline, ranked by fraud-cost score.
automl.rankings
```
To select the best pipeline we can call `automl.best_pipeline`.
```
# Best pipeline found by the search (already fitted).
best_pipeline = automl.best_pipeline
```
### Describe pipelines
We can get more details about any pipeline created during the search process, including how it performed on other objective functions, by calling the `describe_pipeline` method and passing the `id` of the pipeline of interest.
```
# Detailed report (parameters, per-objective scores) for the
# second-ranked pipeline.
automl.describe_pipeline(automl.rankings.iloc[1]["id"])
```
## Evaluate on holdout data
Finally, since the best pipeline is already trained, we evaluate it on the holdout data.
Now, we can score the pipeline on the holdout data using both our fraud cost objective and the AUC (Area under the ROC Curve) objective.
```
# Evaluate the fraud-optimized pipeline on the held-out data.
best_pipeline.score(X_holdout, y_holdout, objectives=["auc", fraud_objective])
```
## Why optimize for a problem-specific objective?
To demonstrate the importance of optimizing for the right objective, let's search for another pipeline using AUC, a common machine learning metric. After that, we will score the holdout data using the fraud cost objective to see how the best pipelines compare.
```
# Repeat the search, this time optimizing plain AUC, for comparison
# against the fraud-cost-optimized search above.
automl_auc = AutoMLSearch(X_train=X_train, y_train=y_train,
                          problem_type='binary',
                          objective='auc',
                          additional_objectives=['f1', 'precision'],
                          max_batches=1,
                          optimize_thresholds=True)
automl_auc.search()
```
Like before, we can look at the rankings of all of the pipelines searched and pick the best pipeline.
```
# Rankings of the AUC-optimized search.
automl_auc.rankings
# Score both winners on the same holdout set with both objectives so the
# fraud-cost difference between the two searches is directly comparable.
best_pipeline_auc = automl_auc.best_pipeline
# get the fraud score on holdout data
best_pipeline_auc.score(X_holdout, y_holdout, objectives=["auc", fraud_objective])
# fraud score on fraud optimized again
best_pipeline.score(X_holdout, y_holdout, objectives=["auc", fraud_objective])
```
When we optimize for AUC, we can see that the AUC score from this pipeline is better than the AUC score from the pipeline optimized for fraud cost. However, the losses due to fraud are over 3% of the total transaction amount when optimized for AUC and under 1% when optimized for fraud cost. As a result, we lose more than 2% of the total transaction amount by not optimizing for fraud cost specifically.
This happens because optimizing for AUC does not take into account the user-specified `retry_percentage`, `interchange_fee`, `fraud_payout_percentage` values. Thus, the best pipelines may produce the highest AUC but may not actually reduce the amount lost due to your specific type of fraud.
This example highlights how performance in the real world can diverge greatly from machine learning metrics.
| github_jupyter |
# Basic functionality tests.
If the notebook cells complete with no exception the tests have passed.
The tests must be run in the full `jupyter notebook` or `jupyter lab` environment.
*Note:* I couldn't figure out how to make the validation tests run correctly
at top level cell evaluation using `Run all`
because the widgets initialize after later cells have executed, causing spurious
failures. Consequently the automated validation steps involve an extra round trip using
a widget at the bottom of the notebook which is guaranteed to render last.
```
# Some test artifacts used below:
import jp_proxy_widget
from jp_proxy_widget import notebook_test_helpers

# Collects (widget, validator) pairs; run_all_in_widget at the bottom of
# the notebook executes the validators after all widgets have rendered.
validators = notebook_test_helpers.ValidationSuite()
import time
class PythonClass:
    """Target object for the widget callback tests.

    The JavaScript side mutates both the class attribute and an instance
    attribute through the two setter callbacks below.
    """

    class_attribute = "initial class attribute value"

    def __init__(self):
        # Route through the setter so the same code path as the widget
        # callback is exercised.
        self.set_instance_attribute("initial instance attribute value")

    def set_instance_attribute(self, value):
        """Store ``value`` on this instance."""
        self.instance_attribute = value

    @classmethod
    def set_class_attribute(cls, value):
        """Store ``value`` on the class itself (shared by all instances)."""
        cls.class_attribute = value
# Bare expressions: in a notebook these simply display the module reprs.
notebook_test_helpers
jp_proxy_widget

# Shared instance mutated by the widget callbacks below.
python_instance = PythonClass()

def python_function(value1, value2):
    # Record that the callback fired, and with which arguments.
    python_instance.new_attribute = "value1=%s and value2=%s" % (value1, value2)
```
# pong: test that a proxy widget can call back to Python
```
import jp_proxy_widget

# Widget whose JS init code immediately calls back into Python.
pong = jp_proxy_widget.JSProxyWidget()

def validate_pong():
    """Assert that all three JS->Python callbacks fired with the right args."""
    # check that the Python callbacks were called.
    assert python_instance.instance_attribute == "instance"
    assert PythonClass.class_attribute == "class"
    assert python_instance.new_attribute == 'value1=1 and value2=3'
    assert pong.error_msg == 'No error'
    print ("pong says", pong.error_msg)
    print ("Pong callback test succeeded!")

# The JS fragment runs when the widget renders and invokes each of the
# Python callables passed in as keyword arguments.
pong.js_init("""
//debugger;
instance_method("instance");
class_method("class");
python_function(1, 3);
element.html("<b>Callback test widget: nothing interesting to see here</b>")
//validate()
""",
instance_method=python_instance.set_instance_attribute,
class_method=PythonClass.set_class_attribute,
python_function=python_function,
#validate=validate_pong
)
#widget_validator_list.append([pong, validate_pong])
validators.add_validation(pong, validate_pong)
#pong.debugging_display()
pong

# set the mainloop check to True if running cells one at a time
mainloop_check = False
if mainloop_check:
    # At this time this fails on "run all"
    validate_pong()
```
# pingpong: test that Python can call in to a widget
... use a widget callback to pass the value back
```
# Mutated in place by the JS->Python callback below.
pingpong_list = "just some strings".split()

def pingpong_python_fn(argument1, argument2):
    """Callback invoked from the widget; records the received arguments."""
    print("called pingpong_python_fn") # this print goes nowhere?
    pingpong_list[:] = [argument1, argument2]

def validate_pingpong():
    """Assert the Python->JS->Python round trip delivered the test values."""
    # check that the callback got the right values
    assert pingpong_list == ["testing", 123]
    print ("ping pong test callback got ", pingpong_list)
    print ("ping pong test succeeded!")

pingpong = jp_proxy_widget.JSProxyWidget()
# Expose a JS entry point on the element that echoes its arguments
# straight back to Python.
pingpong.js_init("""
element.html("<em>Ping pong test -- no call yet.</em>")
element.call_in_to_the_widget = function (argument1, argument2) {
element.html("<b> Call in sent " + argument1 + " and " + argument2 + "</b>")
call_back_to_python(argument1, argument2);
}
element.validate = validate;
""", call_back_to_python=pingpong_python_fn, validate=validate_pingpong)
#widget_validator_list.append([pingpong, validate_pingpong])
validators.add_validation(pingpong, validate_pingpong)
#pingpong.debugging_display()
pingpong

# call in to javascript
pingpong.element.call_in_to_the_widget("testing", 123)
# call in to javascript and back to python to validate
pingpong.element.validate()
if mainloop_check:
    validate_pingpong()
```
# roundtrip: datatype round trip
Test that values can be passed in to the proxy widget and back out again.
```
# Representative value of each transferable data type.
binary = bytearray(b"\x12\xff binary bytes")
string_value = "just a string"
int_value = -123
float_value = 45.6
json_dictionary = {"keys": None, "must": 321, "be": [6, 12], "strings": "values", "can": ["be", "any json"]}
list_value = [9, string_value, json_dictionary]

# Filled in by the widget callback with the values sent back from JS.
roundtrip_got_values = []

from jp_proxy_widget import hex_codec
from pprint import pprint

def get_values_back(binary, string_value, int_value, float_value, json_dictionary, list_value):
    """JS->Python callback: capture the round-tripped values for validation."""
    # NOTE: binary values must be converted explicitly from hex string encoding!
    binary = hex_codec.hex_to_bytearray(binary)
    roundtrip_got_values[:] = [binary, string_value, int_value, float_value, json_dictionary, list_value]
    print ("GOT VALUES BACK")
    pprint(roundtrip_got_values)

roundtrip_names = "binary string_value int_value float_value json_dictionary list_value".split()

def validate_roundtrip():
    """Assert every value survived the Python->JS->Python round trip intact."""
    #assert roundtrip_got_values == [string_value, int_value, float_value, json_dictionary, list_value]
    expected_values = [binary, string_value, int_value, float_value, json_dictionary, list_value]
    if len(expected_values) != len(roundtrip_got_values):
        print ("bad lengths", len(expected_values), len(roundtrip_got_values))
        pprint(expected_values)
        pprint(roundtrip_got_values)
    assert len(expected_values) == len(roundtrip_got_values)
    for (name, got, expected) in zip(roundtrip_names, roundtrip_got_values, expected_values):
        if (got != expected):
            print(name, "BAD MATCH got")
            pprint(got)
            print(" ... expected")
            pprint(expected)
        assert got == expected, "values don't match: " + repr((name, got, expected))
    print ("roundtrip values match!")

roundtrip = jp_proxy_widget.JSProxyWidget()
# Display each received value's JS type, then send everything back.
roundtrip.js_init(r"""
element.all_values = [binary, string_value, int_value, float_value, json_dictionary, list_value];
html = ["<pre> Binary values sent as bytearrays appear in Javascript as Uint8Arrays"]
for (var i=0; i<names.length; i++) {
html.push(names[i]);
var v = element.all_values[i];
if (v instanceof Uint8Array) {
html.push(" Uint8Array")
} else {
html.push(" type: " + (typeof v))
}
html.push(" value: " + v);
}
html.push("</pre>");
element.html(html.join("\n"));
// send the values back
callback(binary, string_value, int_value, float_value, json_dictionary, list_value);
""",
binary=binary,
string_value=string_value,
int_value=int_value,
float_value=float_value,
json_dictionary=json_dictionary,
list_value=list_value,
names=roundtrip_names,
callback=get_values_back,
# NOTE: must up the callable level!
callable_level=4
)
roundtrip.debugging_display()

validators.add_validation(roundtrip, validate_roundtrip)
if mainloop_check:
    validate_roundtrip()
#validate_roundtrip()
```
# loadCSS -- test load of simple CSS file.
We want to load this css file
```
from jp_proxy_widget import js_context

# CSS file (bundled with jp_proxy_widget) to load; print its text for reference.
style_fn="js/simple.css"
print(js_context.get_text_from_file_name(style_fn))
loadCSS = jp_proxy_widget.JSProxyWidget()
# load the file
loadCSS.load_css(style_fn)
# dictionary the callback below fills with the styled element's computed color
loadCSSstyle = {}
def color_callback(color):
    """Record the computed CSS color reported from the browser side."""
    loadCSSstyle.update(color=color)
# initialize the element using the loaded style, then read the computed color
# back into Python via the callback so it can be validated.
loadCSS.js_init("""
element.html('<div><em class="random-style-for-testing" id="loadCSSelement">Styled widget element.</em></div>')
var e = document.getElementById("loadCSSelement");
var style = window.getComputedStyle(e);
color_callback(style["color"]);
""", color_callback=color_callback)
def validate_loadCSS():
    """Assert the element picked up the color defined by the loaded stylesheet."""
    expect = 'rgb(216, 50, 61)'
    observed = loadCSSstyle["color"]
    assert expect == observed, repr((expect, loadCSSstyle))
    print ("Loaded CSS color is correct!")
# Display the widget; register its validation and, in mainloop-check mode,
# run the check immediately.
loadCSS
validators.add_validation(loadCSS, validate_loadCSS)
if mainloop_check:
    validate_loadCSS()
```
# loadJS -- load a javascript file (once only per interpreter)
We want to load this javascript file:
```
# Javascript file to load (once per interpreter unless force=True); print it for reference.
js_fn="js/simple.js"
print(js_context.get_text_from_file_name(js_fn))
loadJS = jp_proxy_widget.JSProxyWidget()
# load the file
loadJS.load_js_files([js_fn], force=True)
# dictionary the callback below fills with the value reported by the loaded script
loadJSinfo = {}
def answer_callback(answer):
    """Store the value reported by the loaded javascript file."""
    loadJSinfo.update(answer=answer)
# Show the loaded script's value in the element and report it back to Python.
loadJS.js_init("""
element.html('<b>The answer is ' + window.the_answer + '</b>')
answer_callback(window.the_answer);
""", answer_callback=answer_callback, js_fn=js_fn)
def validate_loadJS():
    """Assert the loaded javascript file set window.the_answer to 42."""
    expect = 42
    observed = loadJSinfo["answer"]
    assert expect == observed, repr((expect, loadJSinfo))
    print ("Loaded JS value is correct!")
# Display the widget, register its validation, and (optionally) run it now.
loadJS
validators.add_validation(loadJS, validate_loadJS)
if mainloop_check:
    validate_loadJS()
loadJS.print_status()
# Run every registered validation inside the widgets after a short delay.
delay_ms = 1000
validators.run_all_in_widget(delay_ms=delay_ms)
```
| github_jupyter |
# MNLI Diagnostic Example
## Setup
#### Install dependencies
```
%%capture
!git clone https://github.com/jiant-dev/jiant.git
%%capture
# This Colab notebook already has its CUDA-runtime compatible versions of torch and torchvision installed
# Drop the pinned torch/torchvision lines so pip keeps the preinstalled builds.
!sed -e /"torch==1.5.0"/d -i jiant/requirements.txt
!sed -e /"torchvision==0.6.0"/d -i jiant/requirements.txt
!pip install -r jiant/requirements.txt
```
#### Download data
```
%%capture
# Download/preprocess MNLI and RTE data
!wget https://raw.githubusercontent.com/huggingface/transformers/master/utils/download_glue_data.py
!python download_glue_data.py \
    --data_dir ./raw_data \
    --tasks "MNLI,diagnostic"
# Export into jiant's expected folder layout.
# NOTE(review): the task configs below expect ./tasks/data/glue_diagnostics --
# confirm that task name "glue_diagnostic" here produces that folder name.
!PYTHONPATH=/content/jiant python jiant/jiant/scripts/preproc/export_glue_data.py \
    --input_base_path=./raw_data \
    --output_base_path=./tasks/ \
    --task_name_ls "mnli,glue_diagnostic"
```
## `jiant` Pipeline
```
import sys
sys.path.insert(0, "/content/jiant")
import jiant.proj.main.tokenize_and_cache as tokenize_and_cache
import jiant.proj.main.export_model as export_model
import jiant.proj.main.scripts.configurator as configurator
import jiant.proj.main.runscript as main_runscript
import jiant.shared.caching as caching
import jiant.utils.python.io as py_io
import jiant.utils.display as display
import os
import torch
```
#### Task config
```
# Write MNLI task config
py_io.write_json({
    "task": "mnli",
    "name": "mnli",
    "paths": {
        "train": "/content/tasks/data/mnli/train.jsonl",
        "val": "/content/tasks/data/mnli/val.jsonl",
    },
}, path="./tasks/configs/mnli_config.json")
# Write MNLI-mismatched task config
# (reuses the "mnli" task implementation under a distinct name; val split only)
py_io.write_json({
    "task": "mnli",
    "name": "mnli_mismatched",
    "paths": {
        "val": "/content/tasks/data/mnli/val_mismatched.jsonl",
    },
}, path="./tasks/configs/mnli_mismatched_config.json")
# Write GLUE diagnostic task config (test split only -- it has no training data)
py_io.write_json({
    "task": "glue_diagnostics",
    "name": "glue_diagnostics",
    "paths": {
        "test": "/content/tasks/data/glue_diagnostics/test.jsonl",
    },
}, path="./tasks/configs/glue_diagnostics_config.json")
```
#### Download model
```
# Download roberta-base weights, config, and tokenizer into ./models.
export_model.lookup_and_export_model(
    model_type="roberta-base",
    output_base_path="./models/roberta-base",
)
```
#### Tokenize and cache
```
# Tokenize and cache each task.
# (The path strings below carried extraneous f-string prefixes with no
# placeholders -- removed; the runtime strings are unchanged.)
tokenize_and_cache.main(tokenize_and_cache.RunConfiguration(
    task_config_path="./tasks/configs/mnli_config.json",
    model_type="roberta-base",
    model_tokenizer_path="./models/roberta-base/tokenizer",
    output_dir="./cache/mnli",
    phases=["train", "val"],
))
tokenize_and_cache.main(tokenize_and_cache.RunConfiguration(
    task_config_path="./tasks/configs/mnli_mismatched_config.json",
    model_type="roberta-base",
    model_tokenizer_path="./models/roberta-base/tokenizer",
    output_dir="./cache/mnli_mismatched",
    phases=["val"],
))
tokenize_and_cache.main(tokenize_and_cache.RunConfiguration(
    task_config_path="./tasks/configs/glue_diagnostics_config.json",
    model_type="roberta-base",
    model_tokenizer_path="./models/roberta-base/tokenizer",
    output_dir="./cache/glue_diagnostics",
    phases=["test"],
))
# Sanity-check one cached example per task (token ids and token strings).
row = caching.ChunkedFilesDataCache("./cache/mnli/train").load_chunk(0)[0]["data_row"]
print(row.input_ids)
print(row.tokens)
row = caching.ChunkedFilesDataCache("./cache/mnli_mismatched/val").load_chunk(0)[0]["data_row"]
print(row.input_ids)
print(row.tokens)
row = caching.ChunkedFilesDataCache("./cache/glue_diagnostics/test").load_chunk(0)[0]["data_row"]
print(row.input_ids)
print(row.tokens)
```
#### Writing a run config
```
# Assemble a single multi-task run config: train on MNLI, evaluate on both
# dev sets, and produce test predictions for the diagnostic set.
jiant_run_config = configurator.SimpleAPIMultiTaskConfigurator(
    task_config_base_path="./tasks/configs",
    task_cache_base_path="./cache",
    train_task_name_list=["mnli"],
    val_task_name_list=["mnli", "mnli_mismatched"],
    test_task_name_list=["glue_diagnostics"],
    train_batch_size=8,
    eval_batch_size=16,
    epochs=0.1,  # fraction of an epoch -- short demonstration run
    num_gpus=1,
).create_config()
display.show_json(jiant_run_config)
```
Configure all three tasks to use an `mnli` head.
```
# Route all three tasks through the single trained "mnli" head: the
# mismatched dev set and the diagnostic test set have no training data, so
# they must reuse MNLI's classifier. (The original mapped glue_diagnostics
# to its own head, contradicting the surrounding text and leaving that head
# untrained.)
jiant_run_config["taskmodels_config"]["task_to_taskmodel_map"] = {
    "mnli": "mnli",
    "mnli_mismatched": "mnli",
    "glue_diagnostics": "mnli",
}
os.makedirs("./run_configs/", exist_ok=True)
py_io.write_json(jiant_run_config, "./run_configs/jiant_run_config.json")
```
#### Start training
```
# Fine-tune RoBERTa-base on MNLI, validating on the matched and mismatched
# dev sets and writing test predictions for the diagnostic set.
run_args = main_runscript.RunConfiguration(
    jiant_task_container_config_path="./run_configs/jiant_run_config.json",
    output_dir="./runs/run1",
    model_type="roberta-base",
    model_path="./models/roberta-base/model/roberta-base.p",
    model_config_path="./models/roberta-base/model/roberta-base.json",
    model_tokenizer_path="./models/roberta-base/tokenizer",
    learning_rate=1e-5,
    eval_every_steps=500,
    do_train=True,
    do_val=True,
    do_save=True,
    write_test_preds=True,
    force_overwrite=True,
)
main_runscript.run_loop(run_args)
# Load the diagnostic-set predictions written during the run.
test_preds = torch.load("./runs/run1/test_preds.p")
test_preds["glue_diagnostics"]
```
| github_jupyter |
# MNIST Digit Classification Neural Network
---
A neural network is a system of interconnected nodes, or artificial neurons that perform some task by learning from a dataset and incrementally improving its own performance. These artificial neurons are organised into multiple layers including an input layer, where data is fed forward through the network's successive layers, until it produces some output in the final layer.
Networks "learn" by analyzing a dataset of training inputs, where each training example is classified by a label. Through a process called backpropagation, the network adjusts the "weights" connecting each neuron (which can be thought of as the synapses connecting neurons in a human brain) based on how close the output produced from traning examples, which classifies each training example, is to the actual classification of those examples. Biases for each neuron are also updated accordingly.
### The MNIST Dataset
This project produces a neural network that classifies images of handwritten digits ranged from 0-9. These images are gathered from the MNIST database - a large set of images of handwritten digits commonly used for training neural networks like this one. This is my first attempt at building a neural network from scratch and I plan to continually update this project as I improve my code.
Each image is input as a 784-dimensional vector, with each vector component representing the greyscale value of a pixel in the image. The network has one hidden layer composed of 25 neurons and a final output layer of 10 neurons. Output in the network can be viewed as the "activation" of these output neurons, or the degree to which a neuron is affected by the input of the system. For example, with an input representing the digit 0, the output neuron of index 0 (so, the first neuron) would have a higher value (or activation) associated with it, while other neurons would have comparably lower activations.
Here are some other important features about my network:
- It uses the sigmoid activation function
- The number of epochs (a mini-batch of 100 training examples) and learning rates can be customised. These values are set to 800 and 1 by default.
- Currently, my network has an average training accuracy of 85%.
---
The following code implements my neural network
```
import numpy as np
import math
# Sigmoid activation function returns a value between 0 and 1
# based on the degree to which the input varies from 0
# Sigmoid activation function returns a value between 0 and 1
# based on the degree to which the input varies from 0
def sigmoid(x):
    """Logistic sigmoid 1 / (1 + e^-x), applied elementwise.

    Accepts a numpy scalar (size-1) or 1-D array; returns a Python float for
    the scalar case and an ndarray otherwise, matching the original contract.
    """
    if x.size == 1:
        return 1 / (1 + math.exp(-x))
    # Vectorized: one C-level pass instead of a per-element Python loop, and
    # np.exp saturates (to 0/1) for large |x| where math.exp raises OverflowError.
    return 1 / (1 + np.exp(-x))
def sigmoid_derivative(x):
    """Derivative of the logistic sigmoid, s(x) * (1 - s(x)), elementwise.

    Accepts a numpy scalar (size-1) or 1-D array; returns a Python float for
    the scalar case and an ndarray otherwise. The s*(1-s) form is algebraically
    equal to e^-x / (1 + e^-x)^2 but is vectorized and saturates to 0 for
    large |x| instead of overflowing in a per-element math.exp loop.
    """
    if x.size == 1:
        return math.exp(-x) / ((1 + math.exp(-x))**2)
    s = 1 / (1 + np.exp(-x))
    return s * (1 - s)
class NNetwork:
    """784-25-10 fully-connected network for MNIST digit classification.

    Architecture: 784 inputs (28x28 greyscale pixels), one hidden layer of
    25 sigmoid units, and 10 sigmoid outputs (one per digit class).
    Trained with minibatch gradient descent on batches of 100 examples.
    """

    # The network is initialised with the training and testing sets as input
    def __init__(self, X_train, Y_train, X_test, Y_test):
        self.X_train = X_train
        self.Y_train = Y_train
        self.X_test = X_test
        self.Y_test = Y_test
        self.input = np.zeros(784)   # current example's pixel vector
        self.output = np.zeros(10)   # output-layer activations
        self.y = np.zeros(10)        # one-hot target for the current example
        # Weights and biases are initialised as random values between -1 and 1
        self.weights2 = np.random.uniform(low=-1.0, high=1.0, size=(25,784))
        self.weights3 = np.random.uniform(low=-1.0, high=1.0, size=(10,25))
        self.bias2 = np.random.uniform(low=-1.0, high=1.0, size=25)
        self.bias3 = np.random.uniform(low=-1.0, high=1.0, size=10)

    def train(self, epochs, lr):
        """Run `epochs` minibatches of 100 consecutive examples at rate `lr`.

        NOTE(review): minibatch i uses rows i*100 .. i*100+99, so X_train
        must contain at least epochs*100 rows -- no wraparound is performed.
        """
        for i in range(epochs):
            d_weights2 = np.zeros(self.weights2.shape)
            d_weights3 = np.zeros(self.weights3.shape)
            d_bias2 = np.zeros(self.bias2.shape)
            d_bias3 = np.zeros(self.bias3.shape)
            for j in range(100):
                self.input = self.X_train[(i * 100) + j,:]
                self.y[self.Y_train[(i * 100) + j]] = 1
                self.feedforward()
                updates = self.backprop() # The gradient of the cost function
                d_weights2 += updates[0]
                d_weights3 += updates[1]
                d_bias2 += updates[2]
                d_bias3 += updates[3]
                self.y = np.zeros(10)
            d_weights2 /= 100
            d_weights3 /= 100
            d_bias2 /= 100
            d_bias3 /= 100
            # The average negative value of the change in the cost with respect to the change
            # in each weight & bias for 100 training examples is calculated and added to the
            # current value of each weight and bias
            self.weights2 += -1 * lr * d_weights2
            self.weights3 += -1 * lr * d_weights3
            self.bias2 += -1 * lr * d_bias2
            self.bias3 += -1 * lr * d_bias3
        print("Training complete!")

    # This function classifies a single image
    def classify(self, x):
        """Return the predicted digit (0-9) for a single 784-pixel vector."""
        self.input = x
        self.feedforward()
        return np.argmax(self.output)

    def test(self):
        """Print classification accuracy over this instance's test set.

        Fix: the original read the module-level globals X_test/Y_test instead
        of self.X_test/self.Y_test and hard-coded 10000 rows; it now uses the
        instance's own test set and its actual length.
        """
        acc = 0
        n = len(self.X_test)
        for i in range(n):
            x = self.X_test[i,:]
            y = self.Y_test[i]
            yHAT = self.classify(x)
            if y == yHAT:
                acc += 1
        print("Testing accuracy: " + str((acc / n) * 100) + "%")

    # This function uses the sigmoid activation function to
    # feed an input forward, producing the values of the neurons
    # in the second layer and the final layer
    def feedforward(self):
        self.layer2 = sigmoid(np.dot(self.input, self.weights2.T) + self.bias2)
        self.output = sigmoid(np.dot(self.layer2, self.weights3.T) + self.bias3)

    # This function calculates the gradient of the cost function, where each
    # component of the cost gradient is associated with a single weight or bias
    def backprop(self):
        d_weights2 = np.zeros(self.weights2.shape)
        d_weights3 = np.zeros(self.weights3.shape)
        d_bias2 = np.zeros(self.bias2.shape)
        d_bias3 = np.zeros(self.bias3.shape)
        # Chain-rule expressions for the squared-error cost (the 2*(output-y)
        # factor); kept as single vectorized statements to match the derivation.
        d_weights2 = self.input * (sigmoid_derivative(np.dot(self.input, self.weights2.T) + self.bias2)[:, np.newaxis] * np.sum((self.weights3.T * (sigmoid_derivative(np.dot(self.layer2, self.weights3.T) + self.bias3)) * 2 * (self.output - self.y)), axis=1)[:, np.newaxis])
        d_weights3 = np.tile(self.layer2,(10,1)) * sigmoid_derivative(np.dot(self.layer2, self.weights3.T) + self.bias3)[:, np.newaxis] * (2 * (self.output - self.y))[:, np.newaxis]
        d_bias2 = sigmoid_derivative(np.dot(self.input, self.weights2.T) + self.bias2) * (d_bias2 + np.sum((self.weights3.T * (sigmoid_derivative(np.dot(self.layer2, self.weights3.T) + self.bias3)) * 2 * (self.output - self.y)), axis=1))
        d_bias3 = sigmoid_derivative(np.dot(self.layer2, self.weights3.T) + self.bias3) * (d_bias3 + 2 * (self.output - self.y))
        return d_weights2, d_weights3, d_bias2, d_bias3
```
The following code downloads the mnist dataset and converts it to input for the network. This code is based on hsjeong5's github project [MNIST-for-Numpy](https://github.com/hsjeong5/MNIST-for-Numpy).
```
# Download (if needed) and load MNIST as flat numpy arrays, then scale the
# greyscale pixel values from 0-255 into [0, 1].
import mnist
mnist.init()
X_train, Y_train, X_test, Y_test = mnist.load()
X_train = X_train / 255
X_test = X_test / 255
```
The following code uses the above input data to train & test the accuracy of a neural network
```
# Build and train the network: 600 minibatches of 100 examples, learning rate 1.
# NOTE(review): the introduction says the epoch count defaults to 800 -- confirm
# which value is intended.
network = NNetwork(X_train, Y_train, X_test, Y_test)
network.train(600, 1)
network.test()
```
Run the code below to test my network on four random images
```
import matplotlib.pyplot as plt

# Pick four random test images, display them in a 2x2 grid, and print the
# network's classification for each alongside the true label.
imgIndex = np.random.randint(low=0, high=10000, size=4)
fig = plt.figure(figsize=(8,6))
fig.subplots_adjust(wspace=0.3, hspace=0.3)
for position, idx in enumerate(imgIndex):
    axis = fig.add_subplot(2, 2, position + 1)
    axis.set_title("Testing image index " + str(idx))
    plt.imshow(X_test[idx].reshape(28, 28), cmap='gray')
for idx in imgIndex:
    print("Image " + str(idx) + " value: " + str(Y_test[idx]) + " Classified by network as: " + str(network.classify(X_test[idx,:])))
print()
plt.show()
```
| github_jupyter |
```
import numpy as np
import pandas as pd
from sklearn import metrics
from sklearn import model_selection
from sklearn import preprocessing
import dask.dataframe as ddf
import dask
import tensorflow as tf
```
### Load NYC Taxi fare prepped data
```
# Load the prepped NYC taxi-fare training parquet and take an 80% random sample.
train_df = (pd
    .read_parquet("../../datasets/kaggle/new-york-city-taxi-fare-prediction/train_full_2")
    .sample(frac=0.80, random_state=42)
)
print(train_df.shape)
print(train_df.columns)
print(train_df.info())
# Feature columns used for training; commented-out entries were evidently
# tried and dropped during feature selection.
cols = [
    "distance_miles",
    "passenger_count",
    "is_pickup_JFK_new",
    "is_dropoff_JFK_new",
    #"is_pickup_EWR_new",
    "is_dropoff_EWR_new",
    #"is_pickup_LGA_new",
    #"is_dropoff_LGA_new",
    #"is_to_from_JFK_new",
    "distance_to_center",
    "distance_to_center_dropoff",
    "year_2009",
    "year_2010",
    "year_2011",
    "year_2012",
    "year_2013",
    "year_2014",
    "year_2015",
    #"hour_period_0",
    #"hour_period_1",
    #"hour_period_2",
    #"hour_period_3",
    "hour_period_4",
    #"hour_period_5",
    "pickup_zone_0",
    "pickup_zone_1",
    "pickup_zone_2",
    "pickup_zone_3",
    "pickup_zone_4",
    "pickup_zone_5",
    "pickup_zone_6",
    "dropoff_zone_0",
    "dropoff_zone_1",
    "dropoff_zone_2",
    "dropoff_zone_3",
    "dropoff_zone_4",
    "dropoff_zone_5",
    "dropoff_zone_6",
]
# Extract feature matrix and target, then free the dataframe before the next
# large allocation.
x_train = train_df[cols].values
y_train = train_df[['fare_amount']].values
import gc
del train_df
gc.collect()
#scaler = preprocessing.StandardScaler()
scaler = preprocessing.MinMaxScaler()
# Fit the scaler on training data only; it is reused for val/test below.
x_train_norm = scaler.fit_transform(x_train)
del x_train
gc.collect()
# Single numeric feature column wrapping the whole normalized matrix.
feature_columns = [
    tf.feature_column.numeric_column('x', shape=np.array(x_train_norm).shape[1:])]
from datetime import datetime
print(datetime.now())
global_step = tf.Variable(0, trainable=False)
train_input_fn = tf.estimator.inputs.numpy_input_fn(
    x={'x': x_train_norm}, y=y_train, batch_size=10000, num_epochs=5, shuffle=True)
# Four-hidden-layer DNN regressor with batch normalization, trained with Adam.
regressor = tf.estimator.DNNRegressor(
    feature_columns=feature_columns, hidden_units=[1000, 250, 60, 15],
    optimizer=tf.train.AdamOptimizer(learning_rate=0.0005, beta1=0.9, beta2=0.999, epsilon=0.1),
    batch_norm=True)
regressor.train(input_fn=train_input_fn)
print(datetime.now())
# NOTE(review): this "validation" set is an independent 20% sample drawn with
# a different seed, so it almost certainly overlaps the 80% training sample --
# the reported MSE is optimistic. A disjoint split would be safer.
val_df = (pd
    .read_parquet("../../datasets/kaggle/new-york-city-taxi-fare-prediction/train_full_2")
    .sample(frac=0.20, random_state=420)
)
print(val_df.shape)
print(val_df.columns)
print(val_df.info())
x_val = val_df[cols].values
y_val = val_df[['fare_amount']].values
del val_df
gc.collect()
# Reuse the training scaler -- transform only, no refit.
x_val_norm = scaler.transform(x_val)
del x_val
gc.collect()
val_input_fn = tf.estimator.inputs.numpy_input_fn(
    x={'x': x_val_norm}, y=y_val, num_epochs=1, shuffle=False)
scores = regressor.evaluate(input_fn=val_input_fn)
print('MSE (tensorflow): {0:f}'.format(scores['average_loss']))
predictions = regressor.predict(input_fn=val_input_fn)
y_predicted = np.array(list(p['predictions'] for p in predictions))
y_predicted = y_predicted.reshape(np.array(y_val).shape)
score_sklearn = metrics.mean_squared_error(y_predicted, y_val)
print('MSE (sklearn): {0:f}'.format(score_sklearn))
y_predicted.shape
y_predicted[0:2]
# Score the competition test set and build the submission file.
test_df = (pd
    .read_parquet("../../datasets/kaggle/new-york-city-taxi-fare-prediction/test_full_2")
)
x_test = test_df[cols].values
x_test_norm = scaler.transform(x_test)
test_input_fn = tf.estimator.inputs.numpy_input_fn(
    x={'x': x_test_norm}, num_epochs=1, shuffle=False)
test_predictions = regressor.predict(input_fn=test_input_fn)
y_predicted = np.array(list(p['predictions'] for p in test_predictions))
# NOTE(review): 9914 is the hard-coded size of this competition's test set.
y_predicted = y_predicted.reshape((9914,1))
#y_predicted = np.array(list(test_predictions))
#y_predicted = y_predicted.reshape((9914,1))
y_predicted.shape
preds = [p[0] for p in y_predicted]
preds
# Write the predictions to a CSV file which we can submit to the competition.
submission = pd.DataFrame(
    {'key': test_df.key, 'fare_amount': preds},
    columns = ['key', 'fare_amount'])
submission.to_csv('../../datasets/kaggle/new-york-city-taxi-fare-prediction/submission.csv', index = False)
submission.describe()
```
| github_jupyter |
## 範例重點
* 學習如何在 keras 中加入 EarlyStop
* 知道如何設定監控目標
* 比較有無 earlystopping 對 validation 的影響
```
import os
from tensorflow import keras

# NOTE(review): the original (Chinese) comment says "this example needs no
# GPU; set GPU to none", but "0" actually selects GPU 0 -- an empty string ""
# would hide all GPUs. Left as-is to preserve behavior.
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
train, test = keras.datasets.cifar10.load_data()
## Data preprocessing
def preproc_x(x, flatten=True):
    """Scale pixel values into [0, 1] and, by default, flatten each sample."""
    scaled = x / 255.
    if flatten:
        return scaled.reshape((len(scaled), -1))
    return scaled
def preproc_y(y, num_classes=10):
    """One-hot encode integer class labels; pass through anything already encoded."""
    if y.shape[-1] != 1:
        return y
    return keras.utils.to_categorical(y, num_classes)
x_train, y_train = train
x_test, y_test = test

# Preprocessing - scale and flatten X
x_train = preproc_x(x_train)
x_test = preproc_x(x_test)

# Preprocessing - convert Y to one-hot
y_train = preproc_y(y_train)
y_test = preproc_y(y_test)
from tensorflow.keras.layers import BatchNormalization

def build_mlp(input_shape, output_units=10, num_neurons=(256, 128, 64)):
    """Build an MLP with a BatchNormalization layer after every hidden Dense layer.

    Args:
        input_shape: shape of one flattened input sample.
        output_units: size of the softmax output layer.
        num_neurons: widths of the hidden relu layers, in order.

    Returns:
        An uncompiled keras Model.
    """
    # The original duplicated the Dense+BN construction in an if/else on the
    # first index; a single loop threading `x` through each pair is equivalent.
    # (The mutable-list default was also replaced with a tuple.)
    input_layer = keras.layers.Input(input_shape)
    x = input_layer
    for i, n_units in enumerate(num_neurons):
        x = keras.layers.Dense(units=n_units,
                               activation="relu",
                               name="hidden_layer"+str(i+1))(x)
        x = BatchNormalization()(x)
    out = keras.layers.Dense(units=output_units, activation="softmax", name="output")(x)
    model = keras.models.Model(inputs=[input_layer], outputs=[out])
    return model
## Hyperparameter settings
LEARNING_RATE = 1e-3
EPOCHS = 50
BATCH_SIZE = 1024
MOMENTUM = 0.95
"""
# 載入 Callbacks, 並將 monitor 設定為監控 validation loss
"""
# Load the EarlyStopping callback with monitor set to validation loss:
# training stops once val_loss has not improved for `patience` epochs.
from tensorflow.keras.callbacks import EarlyStopping
earlystop = EarlyStopping(monitor="val_loss",
                          patience=5,
                          verbose=1
                          )
model = build_mlp(input_shape=x_train.shape[1:])
model.summary()
optimizer = keras.optimizers.SGD(lr=LEARNING_RATE, nesterov=True, momentum=MOMENTUM)
model.compile(loss="categorical_crossentropy", metrics=["accuracy"], optimizer=optimizer)
model.fit(x_train, y_train,
          epochs=EPOCHS,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          shuffle=True,
          callbacks=[earlystop]
          )
# Collect results
train_loss = model.history.history["loss"]
valid_loss = model.history.history["val_loss"]
train_acc = model.history.history["accuracy"]
valid_acc = model.history.history["val_accuracy"]

import matplotlib.pyplot as plt
%matplotlib inline

# Loss curves: with early stopping, training ends shortly after val_loss plateaus.
plt.plot(range(len(train_loss)), train_loss, label="train loss")
plt.plot(range(len(valid_loss)), valid_loss, label="valid loss")
plt.legend()
plt.title("Loss")
plt.show()

plt.plot(range(len(train_acc)), train_acc, label="train accuracy")
plt.plot(range(len(valid_acc)), valid_acc, label="valid accuracy")
plt.legend()
plt.title("Accuracy")
plt.show()
```
## Work
1. 試改變 monitor "Validation Accuracy" 並比較結果
2. 調整 earlystop 的等待次數至 10, 25 並比較結果
| github_jupyter |
# Automatic Differentiation
:label:`chapter_autograd`
In machine learning, we *train* models, updating them successively
so that they get better and better as they see more and more data.
Usually, *getting better* means minimizing a *loss function*,
a score that answers the question "how *bad* is our model?"
This question is more subtle than it appears.
Ultimately, what we really care about
is producing a model that performs well on data
that we have never seen before.
But we can only fit the model to data that we can actually see.
Thus we can decompose the task of fitting models into two key concerns:
*optimization*, the process of fitting our models to observed data
and *generalization*, the mathematical principles and practitioners' wisdom
that guide us as to how to produce models whose validity extends
beyond the exact set of datapoints used to train it.
This section addresses the calculation of derivatives,
a crucial step in nearly all deep learning optimization algorithms.
With neural networks, we typically choose loss functions
that are differentiable with respect to our model's parameters.
Put simply, this means that for each parameter,
we can determine how rapidly the loss would increase or decrease,
were we to *increase* or *decrease* that parameter
by an infinitesimally small amount.
While the calculations for taking these derivatives are straightforward,
requiring only some basic calculus,
for complex models, working out the updates by hand
can be a pain (and often error-prone).
The autograd package expedites this work
by automatically calculating derivatives.
And while many other libraries require
that we compile a symbolic graph to take automatic derivatives,
`autograd` allows us to take derivatives
while writing ordinary imperative code.
Every time we pass data through our model,
`autograd` builds a graph on the fly,
tracking which data combined through
which operations to produce the output.
This graph enables `autograd`
to subsequently backpropagate gradients on command.
Here, *backpropagate* simply means to trace through the compute graph,
filling in the partial derivatives with respect to each parameter.
If you are unfamiliar with some of the math,
e.g., gradients, please refer to :numref:`chapter_math`.
```
from mxnet import autograd, np, npx
# Switch MXNet's ndarray to its NumPy-compatible interface.
npx.set_np()
```
## A Simple Example
As a toy example, say that we are interested
in differentiating the mapping
$y = 2\mathbf{x}^{\top}\mathbf{x}$
with respect to the column vector $\mathbf{x}$.
To start, let's create the variable `x` and assign it an initial value.
```
x = np.arange(4)
x
```
Note that before we even calculate the gradient
of ``y`` with respect to ``x``,
we will need a place to store it.
It's important that we do not allocate new memory
every time we take a derivative with respect to a parameter
because we will often update the same parameters
thousands or millions of times
and could quickly run out of memory.
Note also that a gradient with respect to a vector $x$
is itself vector-valued and has the same shape as $x$.
Thus it is intuitive that in code,
we will access a gradient taken with respect to `x`
as an attribute of the `ndarray` `x` itself.
We allocate memory for an `ndarray`'s gradient
by invoking its ``attach_grad()`` method.
```
x.attach_grad()
```
After we calculate a gradient taken with respect to `x`,
we will be able to access it via the `.grad` attribute.
As a safe default, `x.grad` initializes as an array containing all zeros.
That's sensible because our most common use case
for taking gradient in deep learning is to subsequently
update parameters by adding (or subtracting) the gradient
to maximize (or minimize) the differentiated function.
By initializing the gradient to $\mathbf{0}$,
we ensure that any update accidentally executed
before a gradient has actually been calculated
will not alter the variable's value.
```
x.grad
```
Now let's calculate ``y``.
Because we wish to subsequently calculate gradients
we want MXNet to generate a computation graph on the fly.
We could imagine that MXNet would be turning on a recording device
to capture the exact path by which each variable is generated.
Note that building the computation graph
requires a nontrivial amount of computation.
So MXNet will only build the graph when explicitly told to do so.
We can invoke this behavior by placing our code
inside a ``with autograd.record():`` block.
```
# Record the computation so autograd can build the graph for backward().
with autograd.record():
    y = 2.0 * np.dot(x, x)
y
```
Since `x` is an `ndarray` of length 4,
`np.dot` will perform an inner product of `x` and `x`,
yielding the scalar output that we assign to `y`.
Next, we can automatically calculate the gradient of `y`
with respect to each component of `x`
by calling `y`'s `backward` function.
```
y.backward()
```
If we recheck the value of `x.grad`, we will find its contents overwritten by the newly calculated gradient.
```
x.grad
```
The gradient of the function $y = 2\mathbf{x}^{\top}\mathbf{x}$
with respect to $\mathbf{x}$ should be $4\mathbf{x}$.
Let's quickly verify that our desired gradient was calculated correctly.
If the two `ndarray`s are indeed the same,
then their difference should consist of all zeros.
```
x.grad - 4 * x
```
If we subsequently compute the gradient of another variable
whose value was calculated as a function of `x`,
the contents of `x.grad` will be overwritten.
```
# A second backward pass overwrites x.grad with the new gradient.
with autograd.record():
    y = x.sum()
y.backward()
x.grad
```
## Backward for Non-scalar Variable
Technically, when `y` is not a scalar,
the most natural interpretation of the gradient of `y` (a vector of length $m$)
with respect to `x` (a vector of length $n$) is the Jacobian (an $m\times n$ matrix).
For higher-order and higher-dimensional $y$ and $x$,
the Jacobian could be a gnarly high order tensor
and complex to compute (refer to :numref:`chapter_math`).
However, while these more exotic objects do show up
in advanced machine learning (including in deep learning),
more often when we are calling backward on a vector,
we are trying to calculate the derivatives of the loss functions
for each constituent of a *batch* of training examples.
Here, our intent is not to calculate the Jacobian
but rather the sum of the partial derivatives
computed individually for each example in the batch.
Thus when we invoke backwards on a vector-valued variable,
MXNet assumes that we want the sum of the gradients.
In short, MXNet, will create a new scalar variable
by summing the elements in `y`,
and compute the gradient of that variable with respect to `x`.
```
# backward() on a vector y sums its elements first, so x.grad here should
# equal the gradient of the explicit scalar v = (u * u).sum() computed below.
with autograd.record(): # y is a vector
    y = x * x
y.backward()

u = x.copy()
u.attach_grad()
with autograd.record(): # v is scalar
    v = (u * u).sum()
v.backward()

x.grad - u.grad
```
## Advanced Autograd
Already you know enough to employ `autograd` and `ndarray`
successfully to develop many practical models.
While the rest of this section is not necessary just yet,
we touch on a few advanced topics for completeness.
### Detach Computations
Sometimes, we wish to move some calculations
outside of the recorded computation graph.
For example, say that `y` was calculated as a function of `x`.
And that subsequently `z` was calculated as a function of both `y` and `x`.
Now, imagine that we wanted to calculate
the gradient of `z` with respect to `x`,
but wanted for some reason to treat `y` as a constant,
and only take into account the role
that `x` played after `y` was calculated.
Here, we can call `u = y.detach()` to return a new variable
that has the same values as `y` but discards any information
about how `u` was computed.
In other words, the gradient will not flow backwards through `u` to `x`.
This will provide the same functionality as if we had
calculated `u` as a function of `x` outside of the `autograd.record` scope,
yielding a `u` that will be treated as a constant in any call to `backward`.
The following backward computes $\partial (u \odot x)/\partial x$
instead of $\partial (x \odot x \odot x) /\partial x$,
where $\odot$ stands for element-wise multiplication.
```
# u is detached, so backward treats it as a constant: x.grad becomes u
# (the gradient of u * x w.r.t. x) rather than the gradient of x**3.
with autograd.record():
    y = x * x
    u = y.detach()
    z = u * x
z.backward()
x.grad - u
```
Since the computation of $y$ was recorded,
we can subsequently call `y.backward()` to get $\partial y/\partial x = 2x$.
```
# y itself was recorded, so its own gradient (2x) is still available.
y.backward()
x.grad - 2*x
```
## Attach Gradients to Internal Variables
Attaching gradients to a variable `x` implicitly calls `x=x.detach()`.
If `x` is computed based on other variables,
this part of computation will not be used in the backward function.
```
# attach_grad() on the internal variable u implicitly detaches it, cutting
# the backward path from z through u back to x and y.
y = np.ones(4) * 2
y.attach_grad()
with autograd.record():
    u = x * y
    u.attach_grad()  # implicitly run u = u.detach()
    z = u + x
z.backward()
print(x.grad, '\n', u.grad, '\n', y.grad)
```
## Head gradients
Detaching allows to breaks the computation into several parts. We could use chain rule :numref:`chapter_math` to compute the gradient for the whole computation. Assume $u = f(x)$ and $z = g(u)$, by chain rule we have $\frac{dz}{dx} = \frac{dz}{du} \frac{du}{dx}.$ To compute $\frac{dz}{du}$, we can first detach $u$ from the computation and then call `z.backward()` to compute the first term.
```
# Detach u into v but keep u's own graph, so the chain-rule factors dz/dv
# (here) and du/dx (next cell) can be computed separately.
y = np.ones(4) * 2
y.attach_grad()
with autograd.record():
    u = x * y
    v = u.detach()  # u still keeps the computation graph
    v.attach_grad()
    z = v + x
z.backward()
print(x.grad, '\n', y.grad)
```
Subsequently, we can call `u.backward()` to compute the second term,
but pass the first term as the head gradients to multiply both terms
so that `x.grad` will contain $\frac{dz}{dx}$ instead of $\frac{du}{dx}$.
```
# Pass dz/dv as the head gradient so the chain-rule product dz/du * du/dx lands in x.grad.
u.backward(v.grad)
print(x.grad, '\n', y.grad)
```
## Computing the Gradient of Python Control Flow
One benefit of using automatic differentiation
is that even if building the computational graph of a function
required passing through a maze of Python control flow
(e.g. conditionals, loops, and arbitrary function calls),
we can still calculate the gradient of the resulting variable.
In the following snippet, note that
the number of iterations of the `while` loop
and the evaluation of the `if` statement
both depend on the value of the input `a`.
```
def f(a):
    """Piecewise-linear toy function whose control flow depends on the value of `a`.

    Doubles the input until its L1 norm reaches 1000, then returns either the
    result itself or the result scaled by 100, depending on the sign of its sum.
    """
    doubled = a * 2
    # Keep doubling until the absolute sum crosses the threshold.
    while np.abs(doubled).sum() < 1000:
        doubled = doubled * 2
    # The sign of the sum selects the output scale.
    return doubled if doubled.sum() > 0 else 100 * doubled
```
Again to compute gradients, we just need to `record` the calculation
and then call the `backward` function.
```
a = np.random.normal()  # random scalar input
a.attach_grad()  # request df/da
with autograd.record():
d = f(a)  # the control-flow path taken depends on the value of a
d.backward()
```
We can now analyze the `f` function defined above.
Note that it is piecewise linear in its input `a`.
In other words, for any `a` there exists some constant
such that for a given range `f(a) = g * a`.
Consequently `d / a` allows us to verify that the gradient is correct:
```
print(a.grad == (d / a))
```
## Training Mode and Prediction Mode
As we have seen, after we call `autograd.record`,
MXNet logs the operations in the following block.
There is one more subtle detail to be aware of.
Additionally, `autograd.record` will change
the running mode from *prediction* mode to *training* mode.
We can verify this behavior by calling the `is_training` function.
```
print(autograd.is_training())  # False: prediction mode outside record()
with autograd.record():
print(autograd.is_training())  # True: record() switches to training mode
```
When we get to complicated deep learning models,
we will encounter some algorithms where the model
behaves differently during training and
when we subsequently use it to make predictions.
The popular neural network techniques *dropout* :numref:`chapter_dropout`
and *batch normalization* :numref:`chapter_batch_norm`
both exhibit this characteristic.
In other cases, our models may store auxiliary variables in *training* mode
for the purpose of making gradient computation easier
that are not necessary at prediction time.
We will cover these differences in detail in later chapters.
## Summary
* MXNet provides an `autograd` package to automate the calculation of derivatives. To use it, we first attach gradients to those variables with respect to which we desire partial derivatives. We then record the computation of our target value, execute its backward function, and access the resulting gradient via our variable's `grad` attribute.
* We can detach gradients and pass head gradients to the backward function to control which part of the computation will be used in the backward function.
* The running modes of MXNet include *training mode* and *prediction mode*. We can determine the running mode by calling `autograd.is_training()`.
## Exercises
1. Try to run `y.backward()` twice.
1. In the control flow example where we calculate the derivative of `d` with respect to `a`, what would happen if we changed the variable `a` to a random vector or matrix. At this point, the result of the calculation `f(a)` is no longer a scalar. What happens to the result? How do we analyze this?
1. Redesign an example of finding the gradient of the control flow. Run and analyze the result.
1. In a second-price auction (such as in eBay or in computational advertising), the winning bidder pays the second-highest price. Compute the gradient of the final price with respect to the winning bidder's bid using `autograd`. What does the result tell you about the mechanism? If you are curious to learn more about second-price auctions, check out this paper by [Edelman, Ostrovski and Schwartz, 2005](https://www.benedelman.org/publications/gsp-060801.pdf).
1. Why is the second derivative much more expensive to compute than the first derivative?
1. Derive the head gradient relationship for the chain rule. If you get stuck, use the ["Chain rule" article on Wikipedia](https://en.wikipedia.org/wiki/Chain_rule).
1. Assume $f(x) = \sin(x)$. Plot $f(x)$ and $\frac{df(x)}{dx}$ on a graph, where you computed the latter without any symbolic calculations, i.e. without exploiting that $f'(x) = \cos(x)$.
## Scan the QR Code to [Discuss](https://discuss.mxnet.io/t/2318)

| github_jupyter |
## Rover Project Test Notebook
This notebook contains the functions from the lesson and provides the scaffolding you need to test out your mapping methods. The steps you need to complete in this notebook for the project are the following:
* First just run each of the cells in the notebook, examine the code and the results of each.
* Run the simulator in "Training Mode" and record some data. Note: the simulator may crash if you try to record a large (longer than a few minutes) dataset, but you don't need a ton of data, just some example images to work with.
* Change the data directory path (2 cells below) to be the directory where you saved data
* Test out the functions provided on your data
* Write new functions (or modify existing ones) to report and map out detections of obstacles and rock samples (yellow rocks)
* Populate the `process_image()` function with the appropriate steps/functions to go from a raw image to a worldmap.
* Run the cell that calls `process_image()` using `moviepy` functions to create video output
* Once you have mapping working, move on to modifying `perception.py` and `decision.py` to allow your rover to navigate and map in autonomous mode!
**Note: If, at any point, you encounter frozen display windows or other confounding issues, you can always start again with a clean slate by going to the "Kernel" menu above and selecting "Restart & Clear Output".**
**Run the next cell to get code highlighting in the markdown cells.**
```
%%HTML
<style> code {background-color : orange !important;} </style>
# NOTE(review): %%HTML and the Python code below look like separate notebook
# cells merged in this dump; %%HTML normally applies to an entire cell.
import sys
print(sys.executable)  # show which Python interpreter the kernel runs
%matplotlib inline
#%matplotlib qt # Choose %matplotlib qt to plot to an interactive window (note it may show up behind your browser)
# Make some of the relevant imports
import cv2 # OpenCV for perspective transform
import numpy as np
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import scipy.misc # For saving images as needed
import glob # For reading in a list of images from a folder
import imageio
# NOTE(review): imageio.plugins.ffmpeg.download() was removed in newer imageio
# releases (ffmpeg now ships via the imageio-ffmpeg package) -- confirm version.
imageio.plugins.ffmpeg.download()
```
## Quick Look at the Data
There's some example data provided in the `test_dataset` folder. This basic dataset is enough to get you up and running but if you want to hone your methods more carefully you should record some data of your own to sample various scenarios in the simulator.
Next, read in and display a random image from the `test_dataset` folder
```
path = '../training_dataset/IGM/IMG/*'
# NOTE(review): the absolute path below immediately overwrites the relative one
# above (dead assignment); 'IGM' also looks like a typo for 'IMG' -- confirm
# against the actual dataset folder layout.
path = '/Users/aslanfeng/Documents/Lifelong Learning/robotics/RoboND-Rover-Project-master/training_dataset/IGM/IMG/*'
img_list = glob.glob(path)
# Grab a random image and display it
# NOTE(review): randint's upper bound is exclusive, so the last image is never picked.
idx = np.random.randint(0, len(img_list)-1)
image = mpimg.imread(img_list[idx])
plt.imshow(image)
```
## Calibration Data
Read in and display example grid and rock sample calibration images. You'll use the grid for perspective transform and the rock image for creating a new color selection that identifies these samples of interest.
```
# In the simulator you can toggle on a grid on the ground for calibration
# You can also toggle on the rock samples with the 0 (zero) key.
# Here's an example of the grid and one of the rocks
example_grid = '../calibration_images/example_grid1.jpg'
example_rock = '../calibration_images/example_rock1.jpg'
grid_img = mpimg.imread(example_grid)
rock_img = mpimg.imread(example_rock)
# Show the grid and rock calibration images side by side
fig = plt.figure(figsize=(12,3))
plt.subplot(121)
plt.imshow(grid_img)
plt.subplot(122)
plt.imshow(rock_img)
```
## Perspective Transform
Define the perspective transform function from the lesson and test it on an image.
```
# Define a function to perform a perspective transform
# I've used the example grid image above to choose source points for the
# grid cell in front of the rover (each grid cell is 1 square meter in the sim)
# Define a function to perform a perspective transform
def perspect_transform(img, src, dst):
    """Warp `img` from the rover camera view to a top-down view.

    Returns the warped image plus a binary mask marking the camera's
    field of view after warping (same height/width as the input).
    """
    transform = cv2.getPerspectiveTransform(src, dst)
    out_size = (img.shape[1], img.shape[0])  # keep same size as input image
    warped = cv2.warpPerspective(img, transform, out_size)
    fov_mask = cv2.warpPerspective(np.ones_like(img[:, :, 0]), transform, out_size)
    return warped, fov_mask
# Define calibration box in source (actual) and destination (desired) coordinates
# These source and destination points are defined to warp the image
# to a grid where each 10x10 pixel square represents 1 square meter
# The destination box will be 2*dst_size on each side
dst_size = 5
# Set a bottom offset to account for the fact that the bottom of the image
# is not the position of the rover but a bit in front of it
# this is just a rough guess, feel free to change it!
bottom_offset = 6
# Source points were hand-picked from the grid calibration image above.
source = np.float32([[14, 140], [301 ,140],[200, 96], [118, 96]])
destination = np.float32([[image.shape[1]/2 - dst_size, image.shape[0] - bottom_offset],
[image.shape[1]/2 + dst_size, image.shape[0] - bottom_offset],
[image.shape[1]/2 + dst_size, image.shape[0] - 2*dst_size - bottom_offset],
[image.shape[1]/2 - dst_size, image.shape[0] - 2*dst_size - bottom_offset],
])
warped, mask = perspect_transform(grid_img, source, destination)
# Displaying the field-of-view mask here (use `warped` to see the warped grid)
plt.imshow(mask, cmap='gray')
#scipy.misc.imsave('../output/warped_example.jpg', warped)
```
## Color Thresholding
Define the color thresholding function from the lesson and apply it to the warped image
**TODO:** Ultimately, you want your map to not just include navigable terrain but also obstacles and the positions of the rock samples you're searching for. Modify this function or write a new function that returns the pixel locations of obstacles (areas below the threshold) and rock samples (yellow rocks in calibration images), such that you can map these areas into world coordinates as well.
**Hints and Suggestion:**
* For obstacles you can just invert your color selection that you used to detect ground pixels, i.e., if you've decided that everything above the threshold is navigable terrain, then everything below the threshold must be an obstacle!
* For rocks, think about imposing a lower and upper boundary in your color selection to be more specific about choosing colors. You can investigate the colors of the rocks (the RGB pixel values) in an interactive matplotlib window to get a feel for the appropriate threshold range (keep in mind you may want different ranges for each of R, G and B!). Feel free to get creative and even bring in functions from other libraries. Here's an example of [color selection](http://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_imgproc/py_colorspaces/py_colorspaces.html) using OpenCV.
* **Beware However:** if you start manipulating images with OpenCV, keep in mind that it defaults to `BGR` instead of `RGB` color space when reading/writing images, so things can get confusing.
```
# Identify pixels above the threshold
# Threshold of RGB > 160 does a nice job of identifying ground pixels only
def color_thresh(img, rgb_thresh=(160, 160, 160)):
    """Return a binary image: 1 where all three RGB channels exceed rgb_thresh.

    A threshold of RGB > 160 does a nice job of selecting only the light,
    navigable ground pixels.
    """
    r_min, g_min, b_min = rgb_thresh
    # Boolean mask: pixel must beat the threshold in every channel.
    above = (
        (img[:, :, 0] > r_min)
        & (img[:, :, 1] > g_min)
        & (img[:, :, 2] > b_min)
    )
    # Single-channel output, same xy size as the input, 1 where selected.
    selected = np.zeros_like(img[:, :, 0])
    selected[above] = 1
    return selected
threshed = color_thresh(warped)
plt.imshow(threshed, cmap='gray')  # white = pixels classified as navigable terrain
#scipy.misc.imsave('../output/warped_threshed.jpg', threshed*255)
```
## Coordinate Transformations
Define the functions used to do coordinate transforms and apply them to an image.
```
# Define a function to convert from image coords to rover coords
def rover_coords(binary_img):
    """Convert nonzero pixel positions in a binary image to rover-centric coords.

    The rover sits at the centre-bottom of the image with x pointing up the
    image (forward) and y pointing left.

    Args:
        binary_img: 2-D array where nonzero marks pixels of interest.

    Returns:
        (x_pixel, y_pixel): float64 arrays of rover-centric coordinates.
    """
    # Identify nonzero pixels
    ypos, xpos = binary_img.nonzero()
    # Calculate pixel positions with reference to the rover position being at the
    # center bottom of the image.
    # Fix: np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin float (float64) is the drop-in replacement.
    x_pixel = -(ypos - binary_img.shape[0]).astype(float)
    y_pixel = -(xpos - binary_img.shape[1]/2).astype(float)
    return x_pixel, y_pixel
# Define a function to convert to radial coords in rover space
def to_polar_coords(x_pixel, y_pixel):
    """Convert rover-centric (x, y) pixel coordinates to polar (distance, angle).

    Angles are measured away from the rover's forward (x) axis.
    """
    radial = np.sqrt(x_pixel ** 2 + y_pixel ** 2)
    theta = np.arctan2(y_pixel, x_pixel)
    return radial, theta
# Define a function to map rover space pixels to world space
def rotate_pix(xpix, ypix, yaw):
    """Rotate rover-space pixel coordinates by the rover's yaw (in degrees)."""
    theta = yaw * np.pi / 180  # degrees -> radians
    cos_t = np.cos(theta)
    sin_t = np.sin(theta)
    # Standard 2-D counter-clockwise rotation.
    return xpix * cos_t - ypix * sin_t, xpix * sin_t + ypix * cos_t
def translate_pix(xpix_rot, ypix_rot, xpos, ypos, scale):
    """Scale rotated rover-space pixels to map resolution and shift to the
    rover's world position."""
    world_x = xpix_rot / scale + xpos
    world_y = ypix_rot / scale + ypos
    return world_x, world_y
# Define a function to apply rotation and translation (and clipping)
# Once you define the two functions above this function should work
def pix_to_world(xpix, ypix, xpos, ypos, yaw, world_size, scale):
    """Map rover-centric pixels to integer world-map coordinates.

    Applies the yaw rotation, then scaling and translation to the rover's
    world position, and finally clips the result to the map bounds.
    """
    rot_x, rot_y = rotate_pix(xpix, ypix, yaw)
    tran_x, tran_y = translate_pix(rot_x, rot_y, xpos, ypos, scale)
    upper = world_size - 1
    x_pix_world = np.clip(np.int_(tran_x), 0, upper)
    y_pix_world = np.clip(np.int_(tran_y), 0, upper)
    return x_pix_world, y_pix_world
# Grab another random image
idx = np.random.randint(0, len(img_list)-1)
image = mpimg.imread(img_list[idx])
warped, mask = perspect_transform(image, source, destination)
threshed = color_thresh(warped)
# Calculate pixel values in rover-centric coords and distance/angle to all pixels
xpix, ypix = rover_coords(threshed)
dist, angles = to_polar_coords(xpix, ypix)
mean_dir = np.mean(angles)  # mean angle of navigable pixels = candidate steering direction
# Do some plotting
fig = plt.figure(figsize=(12,9))
plt.subplot(221)
plt.imshow(image)
plt.subplot(222)
plt.imshow(warped)
plt.subplot(223)
plt.imshow(threshed, cmap='gray')
plt.subplot(224)
plt.plot(xpix, ypix, '.')
plt.ylim(-160, 160)
plt.xlim(0, 160)
arrow_length = 100
x_arrow = arrow_length * np.cos(mean_dir)
y_arrow = arrow_length * np.sin(mean_dir)
# Red arrow visualizes the mean navigable-terrain direction
plt.arrow(0, 0, x_arrow, y_arrow, color='red', zorder=2, head_width=10, width=2)
def find_rocks(img, rgb_thresh=(110, 110, 50)):
    """Return a binary map of yellow rock-sample pixels.

    Yellow shows up as high red and green but low blue, so a pixel is a
    rock candidate when R and G exceed the first two thresholds while B
    stays below the third.
    """
    r_lo, g_lo, b_hi = rgb_thresh
    is_rock = (
        (img[:, :, 0] > r_lo)
        & (img[:, :, 1] > g_lo)
        & (img[:, :, 2] < b_hi)
    )
    rock_map = np.zeros_like(img[:, :, 0])
    rock_map[is_rock] = 1
    return rock_map
rock_map = find_rocks(rock_img)
# Show the calibration rock image next to the detected rock pixels
fig = plt.figure(figsize=(12, 3))
plt.subplot(121)
plt.imshow(rock_img)
plt.subplot(122)
plt.imshow(rock_map, cmap='gray')
```
## Read in saved data and ground truth map of the world
The next cell is all setup to read your saved data into a `pandas` dataframe. Here you'll also read in a "ground truth" map of the world, where white pixels (pixel value = 1) represent navigable terrain.
After that, we'll define a class to store telemetry data and pathnames to images. When you instantiate this class (`data = Databucket()`) you'll have a global variable called `data` that you can refer to for telemetry and map data within the `process_image()` function in the following cell.
```
# Import pandas and read in csv file as a dataframe
import pandas as pd
# Change the path below to your data directory
# If you are in a locale (e.g., Europe) that uses ',' as the decimal separator
# change the '.' to ','
path = '/Users/aslanfeng/Documents/Lifelong Learning/robotics/RoboND-Rover-Project-master/training_dataset/robot_log.csv'
# Fix: the original read the relative '../training_dataset/robot_log.csv' first
# and then immediately overwrote the result; the dead (and possibly failing)
# first read has been removed.
df = pd.read_csv(path, delimiter=';', decimal='.')
csv_img_list = df["Path"].tolist() # Create list of image pathnames
# Read in ground truth map and create a 3-channel image with it
ground_truth = mpimg.imread('../calibration_images/map_bw.png')
# Fix: np.float was removed in NumPy 1.24; the builtin float (float64) is equivalent.
ground_truth_3d = np.dstack((ground_truth*0, ground_truth*255, ground_truth*0)).astype(float)
# Creating a class to be the data container
# Will read in saved data from csv file and populate this object
# Worldmap is instantiated as 200 x 200 grids corresponding
# to a 200m x 200m space (same size as the ground truth map: 200 x 200 pixels)
# This encompasses the full range of output position values in x and y from the sim
class Databucket():
    """Container for the telemetry read from the CSV plus the running worldmap.

    Worldmap is instantiated as 200 x 200 grids corresponding to a 200m x 200m
    space (same size as the ground truth map: 200 x 200 pixels), which
    encompasses the full range of output position values from the simulator.
    """
    def __init__(self):
        self.images = csv_img_list              # image pathnames, one per telemetry row
        self.xpos = df["X_Position"].values
        self.ypos = df["Y_Position"].values
        self.yaw = df["Yaw"].values
        self.count = 0                          # This will be a running index
        # Fix: np.float was removed in NumPy 1.24; builtin float (float64) is equivalent.
        self.worldmap = np.zeros((200, 200, 3)).astype(float)
        self.ground_truth = ground_truth_3d     # Ground truth worldmap
# Instantiate a Databucket().. this will be a global variable/object
# that you can refer to in the process_image() function below
data = Databucket()
```
## Write a function to process stored images
Modify the `process_image()` function below by adding in the perception step processes (functions defined above) to perform image analysis and mapping. The following cell is all set up to use this `process_image()` function in conjunction with the `moviepy` video processing package to create a video from the images you saved taking data in the simulator.
In short, you will be passing individual images into `process_image()` and building up an image called `output_image` that will be stored as one frame of video. You can make a mosaic of the various steps of your analysis process and add text as you like (example provided below).
To start with, you can simply run the next three cells to see what happens, but then go ahead and modify them such that the output video demonstrates your mapping process. Feel free to get creative!
```
# Define a function to pass stored images to
# reading rover position and yaw angle from csv file
# This function will be used by moviepy to create an output video
def process_image(img):
# Build one video frame: map the camera image `img` into the worldmap and
# compose a mosaic (camera view, warped view, worldmap/ground-truth overlay).
# Uses the global `data` (Databucket) for telemetry and accumulated state;
# mutates data.worldmap and data.count as side effects.
# Example of how to use the Databucket() object defined above
# to print the current x, y and yaw values
# print(data.xpos[data.count], data.ypos[data.count], data.yaw[data.count])
# TODO:
# 1) Define source and destination points for perspective transform
# 2) Apply perspective transform
# 3) Apply color threshold to identify navigable terrain/obstacles/rock samples
# 4) Convert thresholded image pixel values to rover-centric coords
# 5) Convert rover-centric pixel values to world coords
# 6) Update worldmap (to be displayed on right side of screen)
# Example: data.worldmap[obstacle_y_world, obstacle_x_world, 0] += 1
# data.worldmap[rock_y_world, rock_x_world, 1] += 1
# data.worldmap[navigable_y_world, navigable_x_world, 2] += 1
warped, mask = perspect_transform(img, source, destination)
threshed = color_thresh(warped)
# Obstacles = everything NOT navigable, restricted to the camera's field of view
obs_map = np.absolute(np.float32(threshed) - 1) * mask
xpix, ypix = rover_coords(threshed)
world_size = data.worldmap.shape[0]
scale = 2 * dst_size  # 10 rover-space pixels per meter -> 1 worldmap pixel per meter
xpos = data.xpos[data.count]
ypos = data.ypos[data.count]
yaw = data.yaw[data.count]
x_world, y_world = pix_to_world(xpix, ypix, xpos, ypos, yaw, world_size, scale)
obsxpix, obsypix = rover_coords(obs_map)
obs_x_world, obs_y_world = pix_to_world(obsxpix, obsypix, xpos, ypos, yaw, world_size, scale)
# Blue channel marks navigable terrain, red channel marks obstacles
data.worldmap[y_world, x_world, 2] = 255
data.worldmap[obs_y_world, obs_x_world, 0] = 255
# Where terrain was ever seen as navigable, clear any obstacle marking
nav_pix = data.worldmap[:,:,2] > 0
data.worldmap[nav_pix,0] = 0
rock_map = find_rocks(warped)
if rock_map.any():
rock_x, rock_y = rover_coords(rock_map)
rock_x_world, rock_y_world = pix_to_world(rock_x, rock_y, xpos, ypos, yaw, world_size, scale)
# Rocks are drawn white (all three channels)
data.worldmap[rock_y_world, rock_x_world,:] = 255
# 7) Make a mosaic image, below is some example code
# First create a blank image (can be whatever shape you like)
output_image = np.zeros((img.shape[0] + data.worldmap.shape[0], img.shape[1]*2, 3))
# Next you can populate regions of the image with various output
# Here I'm putting the original image in the upper left hand corner
output_image[0:img.shape[0], 0:img.shape[1]] = img
# Let's create more images to add to the mosaic, first a warped image
# Add the warped image in the upper right hand corner
output_image[0:img.shape[0], img.shape[1]:] = warped
# Overlay worldmap with ground truth map
map_add = cv2.addWeighted(data.worldmap, 1, data.ground_truth, 0.5, 0)
# Flip map overlay so y-axis points upward and add to output_image
output_image[img.shape[0]:, 0:data.worldmap.shape[1]] = np.flipud(map_add)
# Then putting some text over the image
cv2.putText(output_image,"Populate this image with your analyses to make a video!", (20, 20),
cv2.FONT_HERSHEY_COMPLEX, 0.4, (255, 255, 255), 1)
if data.count < len(data.images) - 1:
data.count += 1 # Keep track of the index in the Databucket()
return output_image
```
## Make a video from processed image data
Use the [moviepy](https://zulko.github.io/moviepy/) library to process images and create a video.
```
# Import everything needed to edit/save/watch video clips
from moviepy.editor import VideoFileClip
from moviepy.editor import ImageSequenceClip
# Define pathname to save the output video
output = '../output/test_mapping.mp4'
data = Databucket() # Re-initialize data in case you're running this cell multiple times
clip = ImageSequenceClip(data.images, fps=60) # Note: output video will be sped up because
# recording rate in simulator is fps=25
new_clip = clip.fl_image(process_image) #NOTE: this function expects color images!!
# %time reports how long writing the video takes
%time new_clip.write_videofile(output, audio=False)
```
### This next cell should function as an inline video player
If this fails to render the video, try running the following cell (alternative video rendering method). You can also simply have a look at the saved mp4 in your `/output` folder
```
from IPython.display import HTML
# Embed the saved mp4 in an HTML5 <video> tag for inline playback
HTML("""
<video width="960" height="540" controls>
<source src="{0}">
</video>
""".format(output))
```
### Below is an alternative way to create a video in case the above cell did not work.
```
import io
import base64
# Read the mp4 back and embed it base64-encoded in a data URI
# NOTE(review): 'r+b' opens read/write; plain 'rb' would suffice here.
video = io.open(output, 'r+b').read()
encoded_video = base64.b64encode(video)
HTML(data='''<video alt="test" controls>
<source src="data:video/mp4;base64,{0}" type="video/mp4" />
</video>'''.format(encoded_video.decode('ascii')))
```
| github_jupyter |
# Binary Classifier on Single records
### Most basic example.
This notebook will show how to set-up learning features (i.e. fields we want to use for modeling) and read them from a CSV file. Then create a very simple feed-forward Neural Net to classify fraud vs. non-fraud, train the model and test it.
Throughout these notebooks we will not explain how Neural Nets work, which loss functions exists, how NN's are optimized, what stochastic gradient decent is etc... There are some excellent resources online which do this in great detail.
#### The math
One thing we will quickly do is recap what a FeedForward (aka Linear Layer) is, just to build some intuition and to contrast this with other layers which will be explained later.
Linear Layers are often presented as shown below on the left, basically as weights that connect one layer to the next. Each node in the second layer is a weighted sum of the input values to which a bias term is added. So for nodes $h_j$ for h=1->3; $h_j= (\sum_{i=1}^4 x_i*w_{ij})+\beta_{j}$.
If we look more in detail what happens on the right hand size we see that for a single node we effectively take the weighted sum, add a bias and then perform an activation function. The activation is needed so the model can learn non-linearity. If all NN's did was stacking linear operations, the end-result would be a linear combination of the input, ideally we would want models to learn more complex relations, non-linear activations enable that. In most DeepLearning cases the `ReLU` function is used for activation.
Taking the weighted sum on a large scale for multiple nodes in one go, is nothing more or less than taking matrix dot-product. Same for adding the bias, that is just a matrix element wise addition. Another way of looking at this is considering the input __I__ to be a vector of size (1,4) and the hidden layer __H__ a vector of size (1,3), if we set-up a weight matrix __W__ of size (4,3), and a bias vector __$\beta$__ of size (1,3) then $H = act(I \odot W + \beta)$. The dot product of the input array with the weight matrix plus a bias vector which is then 'activated'
The hidden layer would typically be connected to a next layer, and a next and a next... until we get to an output layer. The formula for the output $O$ would thus be something like; $O = act(act(act(I \odot W_1 + \beta_1) \odot W_2 + \beta_2) \odot W_3 + \beta_3)$ if we had 3 hidden layers.

#### The Intuition
We can think of the __dot products and weights__ as taking bit of each input and combining that into a __hidden__ 'feature'. For instance if $X_1$ indicates 'age' category *elder* and $X_4$ indicates 'gender' *male*. If the inputs are 0 and 1 for yes/no, then using a positive weight $w_1$ and $w_4$ and 0 for the other weights, then we'd have a feature which 'activates' for an elder male customer.
We can think of the __bias__ as setting a minimal barrier or a reinforcement of the feature. For instance in above feature if we wanted our *elder male* hidden feature to only activate as it reaches .7 we could add a -.7 bias. If we wanted the feature to activate easier, we could add a positive bias.
Our Neural Nets will learn which weights and biases work best in order to solve a specific task. In Feedforward NN's they are the *'learnable'* parameters
---
#### Note on the data set
The data set used here is not particularly complex and/or big. It's not really all that challenging to find the fraud. In an ideal world we'd be using more complex data sets to show the real power of Deep Learning. There are a bunch of PCA'ed data sets available, but the PCA obfuscates some of the elements that are useful.
*These examples are meant to show the possibilities, it's not so useful to interpret their performance on this data set*
## Imports
```
import torch
import numpy as np
import gc
import d373c7.features as ft
import d373c7.engines as en
import d373c7.pytorch as pt
import d373c7.pytorch.models as pm
import d373c7.plot as pl
```
## Set a random seed for Numpy and Torch
> Will make sure we always sample in the same way. Makes it easier to compare results. At some point it should be removed to test the model stability.
```
# Numpy
np.random.seed(42)
# Torch
torch.manual_seed(42)
torch.backends.cudnn.deterministic = True  # force deterministic cuDNN kernels
torch.backends.cudnn.benchmark = False  # disable the cuDNN autotuner for reproducibility
```
## Define base feature and read the File
The base features are features found in the input file.
Below code snipped follows a structure we'll see a lot.
- First we define features, these are fields we'll want to use. In this case we're defining 'Source' features, features that are in the data source, here the file. As parameters we provide the name as found in the first row of the file and a type.
- Then we bundle them in a `TensorDefinition`, this is essentially a group of features. We give it a name and list of features as input.
- Lastly we set up and engine of type `EnginePandasNumpy` and call the `from_csv` method with the TensorDefinition and the `file` name. This `from_csv` method will read the file and return a Pandas DataFrame object. The `inference` parameter specifies that we are in training mode, so any stats the feature needs to use will be gathered.
All this will return a Pandas DataFrame with __594643 rows__ (the number of transactions) and __6 columns__ (the number of features we defined), it can be use to perform basic data analysis.
```
# Change this to read from another location
file = '../../../../data/bs140513_032310.csv'
# Declare the source features: CSV header column name plus a feature type
age = ft.FeatureSource('age', ft.FEATURE_TYPE_CATEGORICAL)
gender = ft.FeatureSource('gender', ft.FEATURE_TYPE_CATEGORICAL)
merchant = ft.FeatureSource('merchant', ft.FEATURE_TYPE_CATEGORICAL)
category = ft.FeatureSource('category', ft.FEATURE_TYPE_CATEGORICAL)
amount = ft.FeatureSource('amount', ft.FEATURE_TYPE_FLOAT)
fraud = ft.FeatureSource('fraud', ft.FEATURE_TYPE_INT_8)
# Bundle the features into a named TensorDefinition
base_features = ft.TensorDefinition(
'base',
[
age,
gender,
merchant,
category,
amount,
fraud
])
# Read the CSV into a Pandas DataFrame; inference=False means training mode,
# so any statistics the features need are gathered while reading
with en.EnginePandasNumpy() as e:
df = e.from_csv(base_features, file, inference=False)
df
```
## Define some derived features
After we've defined and read the source features, we can define some __derived__ features. Derived features apply a form of transformation to the source features, depending on the type of feature.
In this example 3 transformations are used;
- `FeatureNormalizeScale` The amount is scaled between 0 and 1.
- `FeatureOneHot` The categorical features are turned into one-hot encoded fields.
- `FeatureLabelBinary` The Fraud field is marked as Label. This is not really a transformation, it's just so the model knows which label to use.
We apply above transformations because Neural Nets prefer data that is in Binary ranges 0->1 or normally distributed
This will create a total of 78 features we can use in the model. We create a second list with the label.
```
amount_scale = ft.FeatureNormalizeScale('amount_scale', ft.FEATURE_TYPE_FLOAT_32, amount)  # scale amount to [0, 1]
age_oh = ft.FeatureOneHot('age_one_hot', ft.FEATURE_TYPE_INT_8, age)
gender_oh = ft.FeatureOneHot('gender_one_hot', ft.FEATURE_TYPE_INT_8, gender)
merchant_oh = ft.FeatureOneHot('merchant_one_hot', ft.FEATURE_TYPE_INT_8, merchant)
category_oh = ft.FeatureOneHot('category_one_hot', ft.FEATURE_TYPE_INT_8, category)
fraud_label = ft.FeatureLabelBinary('fraud_label', ft.FEATURE_TYPE_INT_8, fraud)  # marks the training label
features = ft.TensorDefinition(
'features',
[
age_oh,
gender_oh,
merchant_oh,
category_oh,
amount_scale,
])
label = ft.TensorDefinition('label', [fraud_label])
model_features = ft.TensorDefinitionMulti([features, label])
with en.EnginePandasNumpy() as e:
# NOTE(review): this rebinds `ft`, shadowing the `d373c7.features as ft`
# module alias -- any later use of ft.Feature* in this notebook would break.
ft = e.from_csv(features, file, inference=False)
lb = e.from_csv(label, file, inference=False)
# Only the last bare expression (`lb`) is rendered by the notebook; the
# `ft` line on its own has no visible effect.
ft
lb
```
## Convert to Numpy
Now we convert the panda DataFrame to a list of Numpy arrays (which can be used for training). The `NumpyList` will have an entry for each of the Learning types. It will split out the *Binary*, *Continuous*, *Categorical* and *Label* Learning type __features__. Each Learning type will have a list entry in the `NumpyList` object
This step is needed so the models understand how to use the various features in the learning and testing processes.
In this case we have a first list with 77 binary (one-hot-encoded) features, the second is a list with 1 continuous feature (the amount) and the last list is the label (Fraud or Non-Fraud).
```
with en.EnginePandasNumpy() as e:
# Split the DataFrames into one numpy array per learning type
ft_np = e.to_numpy_list(features, ft)
lb_np = e.to_numpy_list(label, lb)
data_list = en.NumpyList(ft_np.lists + lb_np.lists)
print(data_list.shapes)
print(data_list.dtype_names)
```
## Wrangle the data
Time to split the data. For time series data it is very important to keep the order of the data. Below split will start from the end and work its way to the front of the data. Doing so the training, validation and test data are nicely colocated in time. You almost *never* want to plain shuffle time based data.
> 1. Split out a test-set of size `test_records`. This is used for model testing.
> 2. Split out a validation-set of size `validation_records`. It will be used to monitor overfitting during training
> 3. All the rest is considered training data.
__Important__; please make sure the data is ordered in ascending fashion on a date(time) field. The split function does not order the data, it assumes the data is in the correct order.

```
test_records = 100000
val_records = 30000
# Order-preserving split: the test set is taken from the end, validation just
# before it, and everything earlier is training data (no shuffling of
# time-ordered records)
train_data, val_data, test_data = data_list.split_time(val_records, test_records)
print(f'Training Data shapes {train_data.shapes}')
print(f'Validation Data shapes {val_data.shapes}')
print(f'Test Data shapes {test_data.shapes}')
# Free the intermediate structures to reclaim memory
del ft, lb
del ft_np, lb_np
del data_list
gc.collect()
print('Done')
```
## Set-up devices
```
device, cpu = pt.init_devices()
```
# Define Model
The training data set has to be balanced for Neural Nets. A balanced data set has a more or less equal amount of each class of the label. In our case fraud vs. non-fraud classes. Failing to balance the data can have dramatic results, Neural Nets are lazy, in extreme cases they might just plain always predict the majority class.
Fraud data-sets always have more non-fraud than fraud records. In this example the fraud class will be aggressively upsampled in the training phase by a custom `ClassSampler`. It oversamples the minority label until it matches the majority label in quantity. This may not be a good idea for a really large data sets.
> 1. First set-up a NumpyListDataSet for both the training data-set and validation data-set. A NumpyListDataSet is a specialized `Pytorch Dataset` which keeps the data as numpy arrays in memory and converts on the fly to `Pytorch Tensors`
> 2. Set-up a sampler for the training set only. The sampler will over-sample the '1'/fraud class. Note that this means the training and validation sets are balanced *differently*. This is important when interpreting the plots.
> 3. Wrap the dataset in a Pytorch Dataloader. `Dataloaders` allow the training loop to iterate over `Datasets`
> 4. Create a model. Here the most basic __GeneratedClassifier__ is used. __The GeneratedClassifier__ will create a model using the information it has about the features. *We are defining it to have 1 hidden layer of size 16*.
```
# Setup Pytorch Datasets for the training and validation
batch_size = 128
train_ds = pt.NumpyListDataSetMulti(model_features, train_data)
val_ds = pt.NumpyListDataSetMulti(model_features, val_data)
# Over-sample the minority (fraud) class in the TRAINING set only; the
# validation set keeps its natural class balance.
train_sampler = pt.ClassSamplerMulti(model_features, train_data).over_sampler()
# Wrap them in a Pytorch Dataloader
train_dl = train_ds.data_loader(cpu, batch_size, num_workers=2, sampler=train_sampler)
val_dl = val_ds.data_loader(cpu, batch_size, num_workers=2)
# Create a Model. GeneratedClassifier builds the network from the feature
# metadata; here: a single hidden linear layer of size 16.
m = pm.GeneratedClassifier(model_features, linear_layers=[16])
print(m)
```
The generated model consists of one stream -as there is one TensorDefinition containing modeling features-. That stream has a layer of type __TensorDefinitionHead__ which will in this case just concatenate our *Binary* (77 one hot) and our Continuous (amount) features into a 78 shape tensor.
Which is consequently processed through the __Tail__. As we have a binary label, the tail is binary, it will process using the requested 16 size hidden layer and then output a final output layer of size 1. The output will be a score, ranging from 0 to 1 (because of the sigmoid), indicating the likelihood of Fraud. The higher, the more certain the model is that it is bad.
Graphically this network looks like below (Some of the layers have been omitted for simplicity)

# Start Training
### First find a decent Learning Rate.
> Create a trainer and run the find_lr function and plot. This function iterates over the batches, gradually increasing the learning rate from a minimum to a maximum learning rate. It tends to show where we can find a good learning rate. In this case at around __3e-3__ we start a very steep descent. The model does not learn at lower than __1e-3__ learning rates. Beyond __1e-2__ it flattens out, stops learning at around __2e-2__ and explodes later. This exploding can be validated by running with a higher number of iterations and a higher upper bound. A good learning rate is a location where the curve has a steep descent, but not too far down the curve. In this case around __5e-3__
```
t = pt.Trainer(m, device, train_dl, val_dl)
r = t.find_lr(1e-4, 1e-1, 200)
pl.TrainPlot().plot_lr(r)
```
## Start Training and plot the results
In our examples we will use a one_cycle logic. This is a training logic which starts at a learning rate lower than the specified learning rate, over the course of training works its way up to the specified learning rate and decreases again towards the end of the learning cycle. Proposed by [Leslie N. Smith, A DISCIPLINED APPROACH TO NEURAL NETWORK HYPER-PARAMETERS](https://arxiv.org/pdf/1803.09820.pdf)
> We train for __10 epochs__ and __learning rate 5e-3__. That means we run over the total training data set a total of 10 times/epochs where the model learns, after each epoch we use the trained model and perform a test run on the validation set.
> The result graph plots the accuracy and loss evolution at each Epoch for both the training and the validation set. We see the model behaves fairly well during training. The loss goes up slightly in the middle of the training. This is the one_cycle logic which is reaching the max learning rate.
```
t = pt.Trainer(m, device, train_dl, val_dl)
h = t.train_one_cycle(10, 5e-3)
pl.TrainPlot().plot_history(h, fig_size=(10,10))
```
## Test the model on the test data
> Test the model on the test set, it is data that was not seen during training and allows us to validate model results. This model behaves fairly OK. It is really good at catching the fraud. It has a fairly low False Negative rate. (Lower left corner of the Confusion Matrix). But it also has a fairly large False Positive rate (Upper right corner of the Confusion Matrix).
Some research would be needed but this is likely at least partially due to the oversampling. The model saw much more fraud during the training, so it might not be surprising it gets relatively good at predicting it.
```
# Evaluate on the held-out test set (never seen during training).
test_ds = pt.NumpyListDataSetMulti(model_features, test_data)
test_dl = test_ds.data_loader(cpu, 128, num_workers=2)
ts = pt.Tester(m, device, test_dl)
pr = ts.test_plot()
tp = pl.TestPlot()
# Classification report plus confusion-matrix, ROC and PR curve plots.
tp.print_classification_report(pr)
tp.plot_confusion_matrix(pr, fig_size=(6,6))
tp.plot_roc_curve(pr, fig_size=(6,6))
tp.plot_precision_recall_curve(pr, fig_size=(6,6))
```
# Conclusion
This is a first example that showed how we can read a file, set-up some features (fields), test and train a really simple Feed-Forward Neural Net model.
| github_jupyter |
### Before running:
Please set the file structure as follows:
* parent folder
* backgrounds - includes all background pictures
* dataset - generated images and masks will be placed here
* images
* masks
* strawberry autolabelling.ipynb
* strawberry.png
* strawberry2.png
```
from tqdm import tqdm
import os
import numpy as np
import matplotlib.pyplot as plt
import cv2
import pandas as pd
DATASET_PATH = "backgrounds"
NEW_DATASET_PATH = "dataset"
files = os.listdir(DATASET_PATH)
len(files), files[0]
MAX_SCALE = 1.5 # Max scale of strawberries during insertation
```
#### Load strawberry crops
```
# Load the strawberry cutouts (with alpha channel) used for pasting.
# NOTE(review): the setup text above lists 'strawberry.png' and
# 'strawberry2.png', but this reads 'strawberry1.png' — confirm filenames.
image_to_insert_options = []
for file in ["strawberry1.png", "strawberry2.png"]:
    # IMREAD_UNCHANGED keeps the alpha channel; convert BGRA -> RGBA and
    # rotate the crop upright.
    strawberry = cv2.imread(file, flags=cv2.IMREAD_UNCHANGED)
    strawberry = cv2.cvtColor(strawberry, cv2.COLOR_BGRA2RGBA)
    strawberry = cv2.rotate(strawberry, cv2.ROTATE_180)
    image_to_insert_options.append(strawberry)
# Nominal crop size and the border padding that keeps a crop scaled up to
# MAX_SCALE fully inside the background image.
image_to_insert_size = 100
x_pad, y_pad = image_to_insert_size * MAX_SCALE, image_to_insert_size * MAX_SCALE # Insertion paddings
```
#### Modify colors in hsv space
```
def change_hsv(img, h_change=0, s_change=0, v_change=0):
    """Shift the hue/saturation/value of an 8-bit image.

    Bug fix: OpenCV HSV channels are uint8, so the original ``cv2.add``
    calls saturated at 0/255 and the subsequent range checks
    (``h > 180``, ``h < 0``, ``s < 0`` ...) could never fire on uint8
    arrays. In particular, negative hue shifts clamped to 0 instead of
    wrapping around the hue circle. We now do the arithmetic in int16:
    hue wraps modulo 180 (OpenCV's 8-bit hue range), saturation and
    value clip to [0, 255].

    NOTE(review): the conversion constants say BGR, but the caller in this
    notebook passes RGB arrays — the empirically tuned shift constants
    presumably account for that; confirm before reusing elsewhere.

    Args:
        img: 3-channel uint8 image.
        h_change: hue shift (wraps modulo 180).
        s_change: saturation shift (clipped to [0, 255]).
        v_change: value shift (clipped to [0, 255]).

    Returns:
        The shifted image as a 3-channel uint8 array.
    """
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV).astype(np.int16)
    h, s, v = cv2.split(hsv)
    h = (h + h_change) % 180          # hue is circular in [0, 180)
    s = np.clip(s + s_change, 0, 255)
    v = np.clip(v + v_change, 0, 255)
    final_hsv = cv2.merge((h, s, v)).astype(np.uint8)
    img = cv2.cvtColor(final_hsv, cv2.COLOR_HSV2BGR)
    return img
```
#### Do augmentations on the image
```
def modify_image(image_to_insert):
    """Randomly augment one RGBA strawberry crop.

    Applies a random rotation (±30°), a horizontal flip with p=0.5 and a
    random scale in [0.5, 1.5), then recolours the crop in HSV space so
    roughly a third of generated strawberries stay red, a third become
    half-red and a third become green.

    Args:
        image_to_insert: RGBA uint8 crop; alpha == 255 marks strawberry
            pixels.

    Returns:
        (rgb_crop, mask): the augmented RGB crop and a boolean mask of the
        strawberry pixels within it.
    """
    red2green = -57 # shift between colors in hue space
    # Rotate
    def rotate_image(image, angle):
        # Bug fix: the original closed over `image_to_insert` and ignored
        # its `image` argument; use the parameter throughout.
        image_center = tuple(np.array(image.shape[1::-1]) / 2)
        rot_mat = cv2.getRotationMatrix2D(image_center, angle, 1.0)
        return cv2.warpAffine(image, rot_mat, image.shape[1::-1], flags=cv2.INTER_LINEAR)
    image_to_insert = rotate_image(image_to_insert, angle=np.random.randint(-30, 30))
    # Flip
    if np.random.random() > 0.5:
        image_to_insert = cv2.flip(image_to_insert, 1)
    # Scale: random factor in [0.5, 1.5), bounded by the MAX_SCALE padding
    scale = np.random.random() + 0.5
    image_to_insert_resized = cv2.resize(image_to_insert, None, fx=scale, fy= scale)
    # Take the mask: fully opaque alpha pixels are strawberry
    mask_strawberry = (image_to_insert_resized[:, :, 3] == 255)
    image_to_insert_resized = cv2.cvtColor(image_to_insert_resized, cv2.COLOR_RGBA2RGB)
    # Change color of strawberry: pick one of three colour regimes
    r = np.random.random()
    if r > 0.66:
        # Keep red
        h_change = np.random.randint(-5, 5)
        s_change = np.random.randint(-25, -15)
        v_change = np.random.randint(-25, -15)
    elif r > 0.33:
        # Make half red
        h_change = np.random.randint(red2green + 30, red2green + 40)
        s_change = np.random.randint(-5, 5)
        v_change = np.random.randint(-5, 5)
    else:
        # Make green
        h_change = np.random.randint(red2green + 20, red2green + 25)
        s_change = np.random.randint(-150, -100)
        v_change = np.random.randint(30, 40)
    image_to_insert_resized = change_hsv(image_to_insert_resized,
                                         h_change=h_change, s_change=s_change, v_change=v_change)
    return image_to_insert_resized, mask_strawberry
backgrounds = os.listdir(os.path.join(DATASET_PATH))
len(backgrounds)
```
#### Generate new dataset
```
# Compose synthetic training images: paste augmented strawberry crops onto
# the green regions of each background and emit per-image instance masks.
for filename in tqdm(backgrounds):
    # Prepare background: load, convert to RGB and resize so the longest
    # dimension is 1000 px.
    background = cv2.imread(os.path.join(DATASET_PATH, filename))
    background = cv2.cvtColor(background, cv2.COLOR_BGR2RGB)
    max_dimension = max(background.shape)
    scale = 1000/max_dimension
    background = cv2.resize(background, None, fx=scale, fy=scale)
    # Find green regions to insert the strawberries there
    background_hsv = cv2.cvtColor(background, cv2.COLOR_RGB2HSV)
    mask_green = cv2.inRange(background_hsv, (36, 25, 25), (86, 255,255)) # Green color range (36,25,25) ~ (86, 255,255)
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (15, 15)) # Clean up the mask
    mask_green_closed = cv2.morphologyEx(mask_green, cv2.MORPH_CLOSE, kernel)
    mask_green_clean = cv2.morphologyEx(mask_green_closed, cv2.MORPH_OPEN, kernel)
    # Get green pixel indices (row, col)
    X, Y = np.where(mask_green_clean == 255)
    X, Y = zip(*[(x, y) for x, y in zip(X, Y) # Keep pixels within distance from borders
                 if 0 < x < background.shape[0] - x_pad and 0 < y < background.shape[1] - y_pad])
    for iteration in range(3): # Use one background image to generate several (3) new images for dataset
        # Choose insertion anchor pixels at random among the green pixels
        n_strawberries_to_insert = 5
        pixels_to_insert_ind = np.random.randint(0, len(X), n_strawberries_to_insert)
        # Insert strawberries; mask value i+1 identifies instance i
        new_image = background.copy()
        segmentation_masks = np.zeros(background.shape[0:2])
        bboxes = []
        for i, ind in enumerate(pixels_to_insert_ind): # Insert every strawberry
            image_to_insert = image_to_insert_options[np.random.randint(0, len(image_to_insert_options))]
            # Modify image (random rotation/flip/scale/recolour)
            image_to_insert_resized, mask_strawberry = modify_image(image_to_insert)
            # Insert crop: copy only strawberry pixels into the background view
            x, y = X[ind], Y[ind]
            image_crop = new_image[x: x + image_to_insert_resized.shape[0], y: y + image_to_insert_resized.shape[1], :]
            image_crop[np.where(mask_strawberry)] = image_to_insert_resized[np.where(mask_strawberry)]
            # Create labels (instance mask and [x, y, h, w] bounding box).
            # NOTE(review): bboxes is built but never saved — confirm whether
            # the box labels are intentionally unused.
            segmentation_masks[x: x + image_to_insert_resized.shape[0],
                               y: y + image_to_insert_resized.shape[1]][mask_strawberry] = i + 1
            bboxes.append([x, y, image_to_insert_resized.shape[0], image_to_insert_resized.shape[1]])
        # Save new image and mask.
        # NOTE(review): the mask file is written with np.save (NumPy .npy
        # format) despite the .png extension — downstream readers must use
        # np.load. Also, the "(unknown)" stem looks like a templating
        # artifact; presumably it should embed the background's filename.
        new_image = cv2.cvtColor(new_image, cv2.COLOR_RGB2BGR)
        cv2.imwrite(os.path.join(NEW_DATASET_PATH, "images", f"(unknown)_modified_{iteration}.png"), new_image)
        with open(os.path.join(NEW_DATASET_PATH, "masks", f"(unknown)_modified_{iteration}.png"), "wb") as f:
            np.save(f, segmentation_masks.astype(np.int8))
    #
```
| github_jupyter |
```
%matplotlib inline
from matplotlib import style
style.use('fivethirtyeight')
import matplotlib.pyplot as plt
from scipy import stats as st
import numpy as np
import pandas as pd
import datetime as dt
```
# Reflect Tables into SQLAlchemy ORM
```
# Python SQL toolkit and Object Relational Mapper
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
# Connect to the SQLite database of Hawaii weather observations.
engine = create_engine("sqlite:///Resources/hawaii.sqlite")
conn=engine.connect()
# reflect an existing database into a new model
Base = automap_base()
Base.prepare(engine, reflect=True)
# reflect the tables (lists the mapped class names)
Base.classes.keys()
# Save references to each table
Measurement=Base.classes.measurement
Station=Base.classes.station
# Create our session (link) from Python to the DB
session = Session(engine)
```
# Exploratory Climate Analysis
```
# Design a query to retrieve the last 12 months of precipitation data and plot the results
# I've made the decision to limit my search to the station in Honolulu so I'm not plotting multiple values on the same date.
# Calculate the date 1 year ago from the last data point in the database.
# The inner SELECT finds the latest observation date for the Honolulu station;
# 365 days are subtracted from it to get the window start.
YearStart=dt.datetime.date(dt.datetime.fromisoformat(engine.execute("SELECT max(date) FROM measurement WHERE station in (SELECT station FROM station WHERE name like '%HONOLULU%')").fetchall()[0][0]))- dt.timedelta(days=365)
print(f"The date the year of data will start on is {YearStart}.")
# Perform a query to retrieve the date and precipitation scores (preview only)
# Since the start date is 2014-10-30, we know there isn't a leap day in our year of data.
engine.execute(f"SELECT date, prcp FROM measurement WHERE (date >= '{YearStart}') AND (station in (SELECT station FROM station WHERE name like '%HONOLULU%')) ORDER BY date LIMIT 10").fetchall()
# Save the query results as a Pandas DataFrame and set the index to the date column
df = pd.read_sql(f"SELECT date, prcp FROM measurement WHERE (date >= '{YearStart}') AND (station in (SELECT station FROM station WHERE name like '%HONOLULU%')) ORDER BY date", conn)
df.set_index('date', inplace=True)
# Already sorted by date via ORDER BY in the SQL above
df.head()
# Use Pandas Plotting with Matplotlib to plot the data
x_axis=df.index
bars=df.prcp
plt.figure(figsize=(10,5))
plt.bar(x_axis,bars,width=2)
# Label roughly one tick per month (every 30th observation)
plt.xticks([x for i,x in enumerate(x_axis) if i%30==0],rotation = 45)
plt.ylim(-.05,1.41)
plt.xlabel("Date")
plt.ylabel("Precipitation (in)")
plt.title("Precipitation by Date in Honolulu")
plt.show()
# Use Pandas to calculate the summary statistics for the precipitation data
df.describe()
# Design a query to show the number of stations available in this dataset.
Station_Count=engine.execute("SELECT COUNT(distinct station) FROM measurement").fetchall()[0][0]
print(f"The number of unique stations available in the dataset is {Station_Count}.")
# What are the most active stations? (i.e. what stations have the most rows)?
# List the stations and the counts in descending order.
engine.execute("SELECT station, COUNT(prcp) AS Records_Count, (SELECT name FROM station WHERE measurement.station = station.station) AS station_name FROM measurement GROUP BY station_name ORDER BY Records_Count DESC").fetchall()
# Using the station id from the previous query, calculate the lowest temperature recorded,
# highest temperature recorded, and average temperature of the most active station.
Station_info=session.query(func.min(Measurement.tobs), func.max(Measurement.tobs), func.avg(Measurement.tobs)).filter(Measurement.station=='USC00519281').all()[0]
print(f"The temp. info for station USC00519281 is the following:\nLow: {Station_info[0]}\nHigh: {Station_info[1]}\nAvg: {round(Station_info[2],2)}")
# Choose the station with the highest number of temperature observations.
engine.execute("SELECT station, COUNT(tobs) AS Records_Count, (SELECT name FROM station WHERE measurement.station = station.station) AS station_name FROM measurement GROUP BY station_name ORDER BY Records_Count DESC").fetchall()
# Query the last 12 months of temperature observation data for this station and plot the results as a histogram
# Calculate the date 1 year ago from the last data point in the database
YearStart=dt.datetime.date(dt.datetime.fromisoformat(engine.execute("SELECT max(date) FROM measurement WHERE station = 'USC00519281'").fetchall()[0][0]))- dt.timedelta(days=365)
print(f"The date the year of data will start on is {YearStart}.")
# Save the query results as a Pandas DataFrame and set the index to the date column
Temp_df = pd.read_sql(f"SELECT date, tobs as temp FROM measurement WHERE (date >= '{YearStart}') AND (station = 'USC00519281') ORDER BY date", conn)
Temp_df.set_index('date', inplace=True)
# Sort the dataframe by date
Temp_df.head()
#Plot the histogram
Temp_df["temp"].plot.hist(title="Temperature Distribution in Waihee", color="red",bins=12)
plt.xlabel("Temp (F)")
plt.show()
```
## Bonus Challenge
## Hawaii is reputed to enjoy mild weather all year. Is there a meaningful difference between the temperature in June and December?
```
#Determine the average temp in June and December across all stations and all years of the data set.
# NOTE(review): the variance subquery centers on the average over the WHOLE
# measurement table (the inner SELECT AVG has no month filter), not the
# month-specific mean — verify this is the intended variance.
June_Avg=engine.execute("SELECT avg(tobs), count(tobs), (SELECT SUM((tobs-(SELECT AVG(tobs) FROM measurement))*(tobs-(SELECT AVG(tobs) FROM measurement)) ) / (COUNT(tobs)-1)) AS Variance FROM measurement WHERE date LIKE '%-06-%' ORDER BY date").fetchall()[0]
Dec_Avg=engine.execute("SELECT avg(tobs), count(tobs), (SELECT SUM((tobs-(SELECT AVG(tobs) FROM measurement))*(tobs-(SELECT AVG(tobs) FROM measurement)) ) / (COUNT(tobs)-1)) AS Variance FROM measurement WHERE date LIKE '%-12-%' ORDER BY date").fetchall()[0]
print('(average , count, variance )')
#To use the independent t-test to compare 2 means we need to see if the count in each sample is similar and if the variances are 'close', meaning one isn't more than twice the other.
print(June_Avg)
print(Dec_Avg)
#We need to show that these samples are normally distributed; from the histograms below, it is clear that they are.
pd.read_sql("SELECT tobs FROM measurement WHERE date LIKE '%-06-%' ORDER BY date", conn).plot.hist(title="June Temperature Distribution", color="red", legend=None)
plt.xlabel("Temp (F)")
plt.show()
pd.read_sql("SELECT tobs FROM measurement WHERE date LIKE '%-12-%' ORDER BY date", conn).plot.hist(title="December Temperature Distribution", color="red", legend=None)
plt.xlabel("Temp (F)")
plt.show()
# Flatten the single-column result sets into 1-D arrays for scipy
June=np.ravel(engine.execute("SELECT tobs FROM measurement WHERE date LIKE '%-06-%' ORDER BY date").fetchall())
December=np.ravel(engine.execute("SELECT tobs FROM measurement WHERE date LIKE '%-12-%' ORDER BY date").fetchall())
print(June,len(June))
print(December,len(December))
#Use the t-test to determine whether the difference in the means, if any, is statistically significant. We use an unpaired test because the samples are independent of each other.
st.ttest_ind(June,December)
```
According to the test the means are different and the p-value being so low indicates that with any other sample, this is extremely likely to be the outcome again.
## Bonus part 2
```
# This function called `calc_temps` will accept start date and end date in the format '%Y-%m-%d'
# and return the minimum, average, and maximum temperatures for that range of dates
def calc_temps(start_date, end_date):
    """TMIN, TAVG, and TMAX over an inclusive range of dates.

    Args:
        start_date (string): A date string in the format %Y-%m-%d
        end_date (string): A date string in the format %Y-%m-%d

    Returns:
        A list with one (TMIN, TAVG, TMAX) tuple
    """
    aggregates = [
        func.min(Measurement.tobs),
        func.avg(Measurement.tobs),
        func.max(Measurement.tobs),
    ]
    query = session.query(*aggregates)
    query = query.filter(Measurement.date >= start_date)
    query = query.filter(Measurement.date <= end_date)
    return query.all()
# function usage example
print(calc_temps('2012-02-28', '2012-03-05'))
# Use your previous function `calc_temps` to calculate the tmin, tavg, and tmax
# for your trip using the previous year's data for those same dates.
def VacEst(VS,VE):
    """Estimate trip temperatures from the same dates one year earlier.

    VS / VE are '%Y-%m-%d' date strings; returns (tmin, tavg, tmax)
    computed by calc_temps over the prior-year date range.
    """
    prior_year_start = f"{int(VS[:4]) - 1}{VS[4:]}"
    prior_year_end = f"{int(VE[:4]) - 1}{VE[4:]}"
    [(tmin, tavg, tmax)] = calc_temps(prior_year_start, prior_year_end)
    return (tmin, tavg, tmax)
Vacation_Start='2016-04-03'
Vacation_End='2016-04-13'
(tmin,tavg,tmax)=VacEst(Vacation_Start,Vacation_End)
# Plot the results from your previous query as a bar chart.
# Use "Trip Avg Temp" as your Title
# Use the average temperature for the y value
# Use the peak-to-peak (tmax-tmin) value as the y error bar (yerr)
plt.bar([""],[tavg],width=.33, color='red',yerr=tmax-tmin)
plt.ylabel("Average Temperature (F)")
plt.title("Trip Avg Temp")
plt.xlim(-.5,.5)
plt.ylim(0,100)
plt.show()
# Calculate the total amount of rainfall per weather station for your trip dates using the previous year's matching dates.
# Sort this in descending order by precipitation amount and list the station, name, latitude, longitude, and elevation
LY_Vacation_Start=str(int(Vacation_Start[:4])-1)+Vacation_Start[4:]
LY_Vacation_End=str(int(Vacation_End[:4])-1)+Vacation_End[4:]
engine.execute(f"SELECT m.date, ROUND(SUM(m.prcp),2) as TotalRainfall, m.station, s.name, s.latitude, s.longitude, s.elevation FROM measurement m INNER JOIN station s ON s.station = m.station WHERE m.date <= '{LY_Vacation_End}' and m.date >= '{LY_Vacation_Start}' GROUP BY m.station ORDER BY TotalRainfall DESC").fetchall()
# Create a query that will calculate the daily normals
# (i.e. the averages for tmin, tmax, and tavg for all historic data matching a specific month and day)
def daily_normals(date):
    """Daily Normals.

    Args:
        date (str): A date string in the format '%m-%d'

    Returns:
        A list of tuples containing the daily normals, tmin, tavg, and tmax
    """
    stats = (
        func.min(Measurement.tobs),
        func.avg(Measurement.tobs),
        func.max(Measurement.tobs),
    )
    matches_month_day = func.strftime("%m-%d", Measurement.date) == date
    return session.query(*stats).filter(matches_month_day).all()
daily_normals("01-01")
# calculate the daily normals for your trip
# push each tuple of calculations into a list called `normals`
# Set the start and end date of the trip
Vacation_Start='2016-04-03'
Vacation_End='2016-04-13'
# Use the start and end date to create a range of dates.
# Bug fix: the original hand-rolled string increment only bumped the day
# digits and never rolled over month/year ends (a trip crossing April 30
# would have produced '04-31', '04-32', ...). datetime arithmetic handles
# any date range correctly and yields the same output for this trip.
trip_start = dt.date.fromisoformat(Vacation_Start)
trip_end = dt.date.fromisoformat(Vacation_End)
n_days = (trip_end - trip_start).days + 1  # inclusive of both endpoints
# Strip off the year and save a list of %m-%d strings
dateRange = [(trip_start + dt.timedelta(days=d)).strftime('%m-%d')
             for d in range(n_days)]
print(dateRange)
# Loop through the list of %m-%d strings and calculate the normals for each date
TripNormals=[]
for date in dateRange:
TripNormals.append(daily_normals(date)[0])
print (TripNormals)
# Load the previous query results into a Pandas DataFrame and add the `trip_dates` range as the `date` index
TripMin=[x[0] for x in TripNormals]
TripAvg=[round(x[1],2) for x in TripNormals]
TripMax=[x[2] for x in TripNormals]
Trip_df=pd.DataFrame({
"Date": dateRange,
"Min": TripMin,
"Avg": TripAvg,
"Max": TripMax
})
Trip_df.set_index('Date', inplace=True)
Trip_df.head()
# Plot the daily normals as an area plot with `stacked=False`
Trip_df.plot.area(stacked=False)
plt.ylabel("Temperature (F)")
plt.title("Daily Normals for Hawaii Trip")
plt.show()
#Although this is what was asked, using an area chart instead of a line chart here is a mistake. There is no value added by filling in the area.
```
| github_jupyter |
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
plt.style.use('classic')
%matplotlib inline
```
# Homework 5
**Instructions:** Complete the notebook below. Download the completed notebook in HTML format. Upload assignment using Canvas.
**Due:** Feb. 20 at **12:30pm.**
## Exercise: AR(1) Process Simulation
For each of the AR(1) processes defined below:
1. Compute $y$ for $201$ periods (i.e., $t=0,\ldots, 200$).
2. Construct a well-labeled plot of $y$ for each simulation.
3. Print the mean and standard deviation of the simulated process.
You may use the function from the Notebook for Lecture 10, write your own, or take a more brute force approach.
```
# Define a function for computing the first-order difference equation. CELL NOT PROVIDED
def ar1_sim(rho,sigma,y0,T,):
    """Simulate T periods of the AR(1) process y[t] = rho*y[t-1] + epsilon[t].

    Args:
        rho (float): Autoregressive coefficient on y[t-1]
        sigma (float): Standard deviation of the epsilon shocks
        y0 (float): Value of the process in period 0
        T (int): Total number of simulated periods, including period 0

    Returns:
        NumPy ndarray of length T
    """
    shocks = np.random.normal(scale=sigma, size=T)
    path = np.empty(T)
    path[0] = y0
    for t in range(1, T):
        path[t] = rho * path[t - 1] + shocks[t]
    return path
# Create variable 'T' equal to number of simulation periods
T = 201
# Set NumPy RNG seed to 126 st that simulations look the same each time I run the Notebook
np.random.seed(126)
```
1. $y_{t} = 0.33 y_{t-1} + \epsilon_t, \; \; \; \; \epsilon \sim \mathcal{N}(0,1), \; \; \; \; y_0 = 0$
```
rho=0.33
sigma = 1
y0 = 0
y = ar1_sim(rho,sigma,y0,T)
plt.plot(y,lw=3)
plt.title('AR(1): $\\rho='+str(rho)+'$ and $\\sigma='+str(sigma)+'$')
plt.xlabel('$t$',fontsize=15)
plt.ylabel('$y_t$',fontsize=15)
plt.grid()
# print mean of y
print('mean of y: ',round(y.mean(),4))
# Print standard deviation of y
print('standard deviation of y: ',round(y.std(),4))
```
2. $y_{t} = y_{t-1} + \epsilon_t, \; \; \; \; \epsilon \sim \mathcal{N}(0,1), \; \; \; \; y_0 = 0$
```
rho=1
sigma = 1
y0 = 0
y = ar1_sim(rho,sigma,y0,T)
plt.plot(y,lw=3)
plt.title('AR(1): $\\rho='+str(rho)+'$ and $\\sigma='+str(sigma)+'$')
plt.xlabel('$t$',fontsize=15)
plt.ylabel('$y_t$',fontsize=15)
plt.grid()
# print mean of y
print('mean of y: ',round(y.mean(),4))
# Print standard deviation of y
print('standard deviation of y:',round(y.std(),4))
```
3. $y_{t} = -0.95 y_{t-1} + \epsilon_t, \; \; \; \; \epsilon \sim \mathcal{N}(0,1), \; \; \; \; y_0 = 0$
```
rho=-0.95
sigma = 1
y0 = 0
y = ar1_sim(rho,sigma,y0,T)
plt.plot(y,lw=3)
plt.title('AR(1): $\\rho='+str(rho)+'$ and $\\sigma='+str(sigma)+'$')
plt.xlabel('$t$',fontsize=15)
plt.ylabel('$y_t$',fontsize=15)
plt.grid()
# print mean of y
print('mean of y: ',round(y.mean(),4))
# Print standard deviation of y
print('standard deviation of y: ',round(y.std(),4))
```
4. $y_{t} = 0.75 y_{t-1} + \epsilon_t , \; \; \; \; \epsilon \sim \mathcal{N}(0,4), \; \; \; \; y_0 = 20$
```
rho=0.75
sigma = 2
y0 = 20
y = ar1_sim(rho,sigma,y0,T)
plt.plot(y,lw=3)
plt.title('AR(1): $\\rho='+str(rho)+'$ and $\\sigma='+str(sigma)+'$')
plt.xlabel('$t$',fontsize=15)
plt.ylabel('$y_t$',fontsize=15)
plt.grid()
# print mean of y
print('mean of y: ',round(y.mean(),4))
# Print standard deviation of y
print('standard deviation of y:',round(y.std(),4))
```
5. $y_{t} = 0.9 y_{t-1} + \epsilon_t , \; \; \; \; \epsilon \sim \mathcal{N}(0,3), \; \; \; \; y_0 = 0$
```
rho=0.9
sigma = np.sqrt(3)
y0 = 0
y = ar1_sim(rho,sigma,y0,T)
plt.plot(y,lw=3)
plt.title('AR(1): $\\rho='+str(rho)+'$ and $\\sigma='+str(round(sigma,4))+'$')
plt.xlabel('$t$',fontsize=15)
plt.ylabel('$y_t$',fontsize=15)
plt.grid()
# print mean of y
print('mean of y: ',round(y.mean(),4))
# Print standard deviation of y
print('standard deviation of y: ',round(y.std(),4))
```
## Exercise: Stochastic Growth
We've seen in class that total factor productivity (TFP) in the US fluctuates over the business cycle around a long-term growth trend. That is, total factor productivity is characterized by *stochastic growth*. In this problem, you will simulate a model of TFP with stochastic growth.
### Background
Suppose that the trend component of TFP $A^{trend}$ grows smoothly at the constant rate $g$:
\begin{align}
A^{trend}_t & = (1+g)A_{t-1}^{trend}, \tag{1}
\end{align}
where $A_0$ is given. The log-deviations of actual TFP $A$ from trend TFP are determined according to an AR(1) process:
\begin{align}
\log\left(A_t/A^{trend}_t\right) & = \rho \log \left(A_{t-1} / A^{trend}_{t-1}\right) + \epsilon_t, \tag{2}
\end{align}
where $A_0$ is given and $\epsilon_t \sim \mathcal{N}(0,\sigma^2)$. Solve equation (2) for $A_t$ to get an expression for $A_t$ in terms of $A_{t-1}$, $A^{trend}_{t}$, $A^{trend}_{t-1}$, and $\epsilon_t$:
\begin{align}
A_t & = A^{trend}_t \displaystyle e^{\rho \log \left(A_{t-1} / A^{trend}_{t-1}\right) + \epsilon_t} \tag{3}
\end{align}
### Simulation
To simulate this model:
1. Specify values for $A_0$ and $A^{trend}_0$.
2. For $t = 0, 1, \ldots... T$, compute $A^{trend}_{t+1}$ using equation (1) and then compute $A_{t+1}$ using equation (3)
Use the following prameter values for this simulation
| $A^{trend}_0$ | $A_0$ | $g$ | $\rho$ | $\sigma$ | $T$ |
|---------------|-------|------|--------|----------|-----|
| 1 | 1 | 0.02 | 0.7 | 0.025 | 101 |
In the follwing cell, simulate $A^{trend}$ and $A$ using the parameter values provided.
```
# Simulate the stochastic growth model of TFP
T = 101
g = 0.02
rho = 0.7
sigma = 0.025
epsilon = np.random.normal(scale=sigma,size=T)
a_trend = np.ones(T)*1
a = np.ones(T)*1
for t in range(T-1):
a_trend[t+1] = (1+g)*a_trend[t]
a[t+1] = a_trend[t+1]*np.exp(rho*np.log(a[t]/a_trend[t])+epsilon[t])
```
Contruct a well-labeled plot of the simulated values of $A$ and $A^{trend}$
```
# Construct a plot of simulated TFP with its trend with:
# 1. Actual line: blue with lw=1, alpha=0.7, label = 'actual'
# 2. Trend line: red with lw=3, alpha=0.25, label = 'trend'
plt.plot(a_trend,'r',lw=3,alpha=0.25,label='Trend')
plt.plot(a,label='Actual')
plt.title('Simulated TFP with Stochastic Growth')
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.grid()
```
Contruct a well-labeled plot of the simulated values of $\log \left(A/A^{trend}\right)$
```
# Construct a plot of simulated log deviation of TFP from trend
plt.plot(np.log(a/a_trend),lw=3)
plt.title('Simulated $\log A_t/A^{trend}$')
plt.grid()
```
Compute the mean and standard deviation of $\log \left(A/A^{trend}\right)$
```
# print mean of log(A/A trend)
print('mean of log(A/A trend): ',round((np.log(a/a_trend)).mean(),4))
# Print standard deviation of log(A/A trend)
print('standard deviation of log(A/A trend): ',round((np.log(a/a_trend)).std(),4))
```
## Exercise: Questions for Prescott (1983) Reading
The following questions are about Edward Prescott's 1986 article "Theory Ahead of Business Cycle Measurement" from the Fall 1986 issue of the Minneapolis Fed's *Quarterly Review* (link to article: [https://www.minneapolisfed.org/research/qr/qr1042.pdf](https://www.minneapolisfed.org/research/qr/qr1042.pdf)). The article is a summary of the research agenda of the author, his main collaborator, Finn Kydland, and others. That agenda entailed incorporating stochastic growth of TFP into the neoclassical growth model (essentially a Solow model but with the saving rate determined as a consequence a of utility maximization problem) as a way of modeling business cycle fluctuations. This line of research would later be called *real business cycle* theory.
The below questions are specifically about:
* Pages 9-11 (including the table on page 12)
* The final paragraph on page 21
**Question:** On page 10, Prescott writes that the "models constructed within this theoretical framework are necessarily highly abstract. Consequently they are necessarily false." What does Prescott mean by this? In what sense are abstract models necessarily false?
**Answer**
All theoretical models are approximations of reality and therefore will not relfect the complexity of the natural environment. But that doesn't mean that abstract models can't be useful. In fact, abstraction helps us understand complicated phenomena by emphasizing only the most relevant aspects of the issue. <!-- answer -->
**Question:** On page 10, Prescott lists two reasons for why he doesn't like the term *business cycle*. What is the first reason and what does it mean?
**Answer**
The first reason that Prescott doesn't like the term *business cyle* is that it implies that short-term fluctuations are the result of economic forces that are *independent* of long-term growth. In Prescott's view, short-term fluctuations and long-term growth *are a consequence of the same process*: stochastic TFP growth. That is, TFP grows, but at a random and unpredictable rate.<!-- answer -->
**Question:** Table 1 on page 12 shows that household consumption measures in the US fluctuate less than investment measures. What is the intuition for why consumption varies less than investment?
**Answer**
People like to smooth their consumption over time as much as they can. When houseohlds have higher-than-average income, they save most for the future and so investment rises more than consumption when incomes are high. And when household incomes are lower-than-average, households primarily reduce saving and investment falls more than consumption when incomes are high.<!-- answer -->
**Question:** In the last paragraph of page 21, to what does Prescott attribute economic fluctuations? Can you think of any reasons why this statement was, and still is, controversial?
**Answer**
"Economic fluctuations are the optimal responses to uncertainty in the rate of technological change." The statement is controversial for several reasons. <!-- answer -->
* First, the argument attributes *all* economic fluctuations to TFP variation and disregards other plausible candidates like, for example, exogenous variation in monetary policy. <!-- answer -->
* Second, Prescott's claim that fluctuations are *optimal* implies that the economy is *always* doing the best it can with what it has: When unemployment is high, it's because TFP fell relative to trend and households decided to take more leisure. <!-- answer -->
* Third, the statement implies that, even in principle, policymakers cannot improve the welfare of the public by stabilizing the business cycle. <!-- answer -->
| github_jupyter |
```
from pathlib import Path
from tqdm import tqdm
import pandas as pd
import numpy as np
import matplotlib as matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
import os
import datetime as dt
from shapely import wkt
from shapely.geometry import Point, Polygon
import geopandas as gpd
# import rtree, pygeos, fiona
# import netCDF4
import xarray as xr
# import dask
# Local repository root and output directory for cleaned data.
repo_path = Path('/Users/etriesch/dev/ocean-carbon-sampling/')
write_path = repo_path / 'data/clean/'
# WGS84 coordinate reference system used for the GeoDataFrames below.
crs = 'epsg:4326'
# Northeast Pacific mask
# NOTE: longitudes are on the 0-360 convention used by the OISST grid,
# hence 360 - <degrees West>.
p_min_lat, p_max_lat = 29, 48
p_min_lon, p_max_lon = 360-140, 360-116
#Northwest Atlantic mask
a_min_lat, a_max_lat = 20, 48
a_min_lon, a_max_lon = 360-97, 360-60
```
# Read in data
```
# Read every OISST NetCDF file and accumulate sea-surface temperature
# observations for the Pacific and Atlantic study regions.
# get filenames (NetCDF only), sorted for a deterministic read order
temp_path = repo_path / 'data/raw/oisst/'
t_files = sorted(f for f in os.listdir(temp_path) if f.endswith('.nc'))
print('files to read:', len(t_files))
# read in data
t_pac = pd.DataFrame()
t_atl = pd.DataFrame()
for f in t_files:
    # BUG FIX: temp_path is a pathlib.Path, so `temp_path + f` raises
    # TypeError. Join with the / operator instead.
    t = xr.open_dataset(temp_path / f)
    # create pacific and atlantic lat/lon box masks
    p_mask_lon = (t.lon >= p_min_lon) & (t.lon <= p_max_lon)
    p_mask_lat = (t.lat >= p_min_lat) & (t.lat <= p_max_lat)
    a_mask_lon = (t.lon >= a_min_lon) & (t.lon <= a_max_lon)
    a_mask_lat = (t.lat >= a_min_lat) & (t.lat <= a_max_lat)
    pac = t.where(p_mask_lon & p_mask_lat, drop=True)
    atl = t.where(a_mask_lon & a_mask_lat, drop=True)
    # convert each masked Dataset to a flat dataframe and prepend
    pac = pac.sst.to_dataframe().reset_index()
    atl = atl.sst.to_dataframe().reset_index()
    t_pac = pd.concat([pac, t_pac])
    t_atl = pd.concat([atl, t_atl])
```
# Clean and subset
```
# make single df for cleaning; `pacific` flags which basin each row is from
t_pac['pacific'] = True
t_atl['pacific'] = False
t_raw = pd.concat([t_pac, t_atl])
# derive calendar fields from the NetCDF time coordinate
t_raw['date'] = pd.to_datetime(t_raw.time).dt.date
t_raw['year'] = pd.to_datetime(t_raw.time).dt.year
# clean temperature values
# set outlier as 5 times the 75th percentile
# (the 75th percentile is computed per year and per basin)
pctl, outl = 0.75, 5
pctl_colname = 'pctl'+str(int(pctl*100))
t_raw[pctl_colname] = t_raw.groupby(['year', 'pacific'])['sst'].transform(lambda x: x.quantile(pctl))
# report the fraction of rows that exceed the outlier cutoff in each basin
print('Outliers dropped (Pacific):', (t_raw.loc[t_raw.pacific].sst > outl * t_raw.loc[t_raw.pacific, pctl_colname]).sum() / t_raw.loc[t_raw.pacific].shape[0])
print('Outliers dropped (Atlantic):', (t_raw.loc[~t_raw.pacific].sst > outl * t_raw.loc[~t_raw.pacific, pctl_colname]).sum() / t_raw.loc[~t_raw.pacific].shape[0])
# keep only non-outlier rows; NOTE(review): rows with NaN sst are also
# dropped here because the comparison is False for NaN — confirm intended.
t = t_raw.loc[t_raw.sst <= outl * t_raw[pctl_colname]].reset_index()
```
# Create analytical columns
* Max SST
* Min SST
* Average annual SST
* Average standard deviation of SST
```
pd.set_option('display.max_rows', 200)
# get mean, max, std, min SST per year at each location
# (comment previously said "chlorophyll"; this notebook processes SST)
t_s = t.groupby(['pacific', 'lat', 'lon', 'year'])['sst'].agg(['std', 'mean', 'max', 'min']).reset_index()
# average the annual statistics across years for each location
t_s = t_s.groupby(['pacific', 'lat', 'lon']).agg('mean').reset_index()
```
# Write data to computer
```
filename = 'oisst.csv'
(write_path / filename).parent.mkdir(parents=True, exist_ok=True)
t_s.to_csv(filepath, index=False)
```
# Visualize
## temperature over time
```
# Visualize SST at a randomly chosen lat/lon over time
# (comment previously said "chlorophyll"; this notebook handles SST)
i = np.random.randint(0,t_s.shape[0])
# all daily observations for the chosen grid cell, in date order
sub = t.loc[(t.lat == t_s.lat.iloc[i]) & (t.lon == t_s.lon.iloc[i])].sort_values('date')
fig, ax = plt.subplots(figsize=(15,5))
plt.plot(sub['date'], sub['sst'])
# red horizontal line marks the cell's long-run mean SST
plt.hlines(y=sub.sst.mean(), xmin=sub.date.min(), xmax=sub.date.max(), color='red')
# print info and chart
print('index:', i)
print('location:', round(t_s.lat.iloc[i], 4), round(t_s.lon.iloc[i], 4))
plt.show()
```
## Map of temperature property
```
world = gpd.read_file(gpd.datasets.get_path('naturalearth_lowres'))
# world_snip = gpd.clip(world, gp)
# Visualize
# temperature - pacific mean
geometry = [Point(xy) for xy in zip(t_s['lon'], t_s['lat'])]
gp = gpd.GeoDataFrame(t_s, crs=crs, geometry=geometry)
fig,ax = plt.subplots(figsize=(15,10))
gp.plot(ax=ax, column='max', markersize=1, legend=True)
plt.title('Mean')
plt.show()
```
| github_jupyter |
```
# Mount Google Drive so the competition CSVs are readable from Colab.
from google.colab import drive
drive.mount('/content/drive')
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import tensorflow as tf
import tensorflow.keras.layers as tfl
from tensorflow import keras
# Fail fast if the Colab runtime was started without a GPU accelerator.
device_name = tf.test.gpu_device_name()
if device_name != '/device:GPU:0':
    raise SystemError('GPU device not found')
print('Found GPU at: {}'.format(device_name))
!nvidia-smi
from keras import layers
from keras import Input
from keras import optimizers
from keras.models import Model
from keras.layers import Dropout, Flatten, Conv1D,LSTM,Bidirectional
import random
from sklearn.preprocessing import StandardScaler
from platform import python_version
from keras.constraints import maxnorm
# train.csv holds per-timestamp rows with targets Y1/Y2; test.csv the same
# features without targets.
data = pd.read_csv("/content/drive/MyDrive/techniche/train.csv")
test = pd.read_csv("/content/drive/MyDrive/techniche/test.csv")
def one_hot_encoder(df, features):
    """One-hot encode the given columns of `df` (first level dropped)
    and return a new frame with the source columns replaced."""
    # Encode only the requested columns; drop_first avoids the dummy trap.
    encoded = pd.get_dummies(df[features], drop_first=True)
    # Append the dummy columns, then discard the original categoricals.
    combined = pd.concat([df, encoded], axis=1)
    return combined.drop(features, axis=1)
# One-hot encode every object-dtype column in both train and test.
categorical_cols = list(data.select_dtypes(include=["object"]).columns)
data_onehot = one_hot_encoder(data, categorical_cols)
test_onehot = one_hot_encoder(test, categorical_cols)
# CATEGORY_1 levels D and E appear only in the test set, so the train
# frame gets zero-filled columns for them, inserted at position 25 to
# match the test frame's column order.
cols_train = list(data_onehot.columns[:25]) + ['CATEGORY_1_D', 'CATEGORY_1_E'] + list(data_onehot.columns[25:])
cols_train
data_onehot['CATEGORY_1_D']=0
data_onehot['CATEGORY_1_E']=0
data_onehot = data_onehot[cols_train]
data_onehot.head()
# Targets: one (Y1, Y2) pair per customer (mean over that customer's rows).
Y_train = data_onehot.groupby('UNIQUE_IDENTIFIER').mean()[['Y1', 'Y2']]
Y_train.head()
# Features: everything except the targets (still one row per timestamp).
X_train = data_onehot.drop(["Y1", "Y2"], axis=1)
X_train.head()
def input_generator(df_input):
    """Pack per-customer rows (assumed contiguous in `df_input`) into a
    3-D array of shape (n_customers, max_rows_per_customer, n_columns)."""
    ids = df_input['UNIQUE_IDENTIFIER'].unique()
    # rows per customer, in first-appearance order
    counts = df_input['UNIQUE_IDENTIFIER'].value_counts()[ids].values
    # slice boundaries into the (contiguous) frame: [0, c0, c0+c1, ...]
    bounds = np.hstack((0, np.cumsum(counts, axis=0)))
    width = np.max(counts)
    # initialise with -1; shorter customers are padded below with
    # trailing rows via np.pad (whose "after" constant is 0)
    packed = np.ones((ids.shape[0], width, df_input.shape[1])) * (-1)
    for k in range(bounds.shape[0] - 1):
        chunk = df_input.iloc[bounds[k]:bounds[k + 1], :].values
        packed[k, :, :] = np.pad(
            chunk,
            ((0, width - chunk.shape[0]), (0, 0)),
            mode='constant',
            constant_values=(-1, 0),
        )
    return packed
# Standardize features; the scaler is fit on train only and reused on test.
scaler2 = StandardScaler()
X2_train_scaled = pd.DataFrame(scaler2.fit_transform(X_train), columns=X_train.columns)
X2_test_scaled = pd.DataFrame(scaler2.transform(test_onehot), columns=test_onehot.columns)
# Pack each customer's time series into a fixed-width 3-D tensor.
X2_train = input_generator(X2_train_scaled)
X2_test = input_generator(X2_test_scaled)
# Drop the first channel — the (scaled) UNIQUE_IDENTIFIER column, which
# is an ID rather than a feature.
X2_train = X2_train[:, :, 1:]
X2_test = X2_test[:, :, 1:]
print(X2_train.shape)
print(X2_test.shape)
```
# DL Model
CONV1D + BI-LSTM
```
# Functional-API graph: two Conv1D feature extractors over the time axis,
# a bidirectional LSTM summarizer, then a small dense regression head.
input_ = tfl.Input(shape=(X2_train.shape[1], X2_train.shape[2]))
print(input_.shape)
#1st layer
x = tfl.Conv1D(filters=16, kernel_size=3,padding = 'same')(input_)
x = tfl.ReLU()(x)
#x = tfl.BatchNormalization()(x)
# pool size 1 is a no-op; kept from earlier experiments
x = tfl.MaxPool1D(1)(x)
#2nd layer
x = tfl.Conv1D(filters=32, kernel_size=3, padding = 'same')(x)
x = tfl.ReLU()(x)
#x = tfl.BatchNormalization()(x)
x = tfl.MaxPool1D(1)(x)
#3nd layer
#x = tfl.Conv1D(filters=64, kernel_size=3, padding = 'same')(x)
#x = tfl.ReLU()(x)
#x = tfl.BatchNormalization()(x)
#x = tfl.MaxPool1D(1)(x)
print(x.shape)
# BiLSTM returns only the final state (return_sequences defaults False),
# collapsing the time dimension; input_shape here is redundant but harmless.
x=tfl.Bidirectional(tfl.LSTM(64, input_shape=(x.shape[1], x.shape[2])))(x)
print(x.shape)
#3rd layer
#x = tfl.Flatten()(x)
#print(x.shape)
#x = tfl.Dense(450,activation='selu',kernel_initializer='he_uniform',kernel_constraint=maxnorm(5))(x)
#x = tfl.Dropout(0.7)(x)
#x = tfl.ELU(alpha=1)(x)
# Dense head with selu activations, max-norm weight constraint and heavy
# dropout to fight the overfitting seen in earlier runs.
x = tfl.Dense(200,activation='selu',kernel_constraint=maxnorm(5))(x)
x = tfl.Dropout(0.8)(x)
#x = tfl.ELU(alpha=1)(x)
x = tfl.Dense(20,activation='selu',kernel_constraint=maxnorm(5))(x)
x = tfl.Dropout(0.5)(x)
#x = tfl.ELU(alpha=1)(x)
# single linear output: this model regresses target Y2
output = tfl.Dense(1)(x)
model = tf.keras.Model(inputs=input_, outputs=output)
```
CONV2D + BI-LSTM
```
# Disabled experiment kept for reference (CONV2D + BI-LSTM). The whole
# cell is a triple-quoted string, so nothing here executes.
'''
input_ = tfl.Input(shape=(X2_train.shape[1], X2_train.shape[2]))
print(input_.shape)
x=tf.keras.layers.Reshape((X2_train.shape[1], X2_train.shape[2],1), input_shape=(X2_train.shape[1], X2_train.shape[2]))(input_)
print(x.shape)
#1st layer
x = tfl.Conv2D(filters=16, kernel_size=3,padding = 'same')(x)
x = tfl.ReLU()(x)
#x = tfl.BatchNormalization()(x)
#x = tfl.MaxPool2D(2)(x)
print(x.shape)
#2nd layer
x = tfl.Conv2D(filters=32, kernel_size=3, padding = 'same')(x)
x = tfl.ReLU()(x)
#x = tfl.BatchNormalization()(x)
#x = tfl.MaxPool2D(2)(x)
print(x.shape)
x = tfl.Reshape([x.shape[1],x.shape[2]*x.shape[3]])(x)
print(x.shape)
x=tfl.Bidirectional(tfl.LSTM(256, return_sequences=True), input_shape=(x.shape[1], x.shape[2]))(x)
print(x.shape)
#3rd layer
x = tfl.Flatten()(x)
print(x.shape)
x = tfl.Dense(1500,activation='selu',kernel_initializer='he_uniform',kernel_constraint=maxnorm(5))(x)
x = tfl.Dropout(0.5)(x)
#x = tfl.ELU(alpha=1)(x)
x = tfl.Dense(800,activation='selu',kernel_constraint=maxnorm(5))(x)
x = tfl.Dropout(0.5)(x)
#x = tfl.ELU(alpha=1)(x)
print(x.shape)
x = tfl.Dense(400,activation='selu',kernel_constraint=maxnorm(5))(x)
x = tfl.Dropout(0.5)(x)
x = tfl.Dense(100,activation='selu',kernel_constraint=maxnorm(5))(x)
x = tfl.Dropout(0.5)(x)
x = tfl.Dense(50,activation='selu',kernel_constraint=maxnorm(5))(x)
x = tfl.Dropout(0.5)(x)
print(x.shape)
#x = tfl.ELU(alpha=1)(x)
output = tfl.Dense(1)(x)
model = tf.keras.Model(inputs=input_, outputs=output)
'''
```
201 MSE AND REAL SCORE OF 72
```
# After 40 epochs this was over fitting
# Disabled experiment kept for reference (smaller CONV2D + BI-LSTM).
# The whole cell is a triple-quoted string, so nothing here executes.
'''
input_ = tfl.Input(shape=(X2_train.shape[1], X2_train.shape[2]))
print(input_.shape)
x=tf.keras.layers.Reshape((X2_train.shape[1], X2_train.shape[2],1), input_shape=(X2_train.shape[1], X2_train.shape[2]))(input_)
print(x.shape)
#1st layer
x = tfl.Conv2D(filters=8, kernel_size=3,padding = 'same')(x)
x = tfl.ReLU()(x)
#x = tfl.BatchNormalization()(x)
#x = tfl.MaxPool2D(2)(x)
print(x.shape)
#2nd layer
x = tfl.Conv2D(filters=16, kernel_size=3, padding = 'same')(x)
x = tfl.ReLU()(x)
#x = tfl.BatchNormalization()(x)
#x = tfl.MaxPool2D(2)(x)
print(x.shape)
x = tfl.Reshape([x.shape[1],x.shape[2]*x.shape[3]])(x)
print(x.shape)
x=tfl.Bidirectional(tfl.LSTM(64, return_sequences=True), input_shape=(x.shape[1], x.shape[2]))(x)
print(x.shape)
#3rd layer
x = tfl.Flatten()(x)
print(x.shape)
x = tfl.Dense(1000,activation='selu',kernel_initializer='he_uniform',kernel_constraint=maxnorm(5))(x)
x = tfl.Dropout(0.5)(x)
#x = tfl.ELU(alpha=1)(x)
x = tfl.Dense(500,activation='selu',kernel_constraint=maxnorm(5))(x)
x = tfl.Dropout(0.5)(x)
#x = tfl.ELU(alpha=1)(x)
print(x.shape)
x = tfl.Dense(200,activation='selu',kernel_constraint=maxnorm(5))(x)
x = tfl.Dropout(0.5)(x)
x = tfl.Dense(50,activation='selu',kernel_constraint=maxnorm(5))(x)
x = tfl.Dropout(0.5)(x)
print(x.shape)
#x = tfl.ELU(alpha=1)(x)
output = tfl.Dense(1)(x)
model = tf.keras.Model(inputs=input_, outputs=output)
'''
```
1D-CONVNET + BI-LSTM , gave MSE=193
```
'''
def conv1D_full(inputs):
inputs=tf.expand_dims(inputs,axis=-1)
#print(inputs.shape)
x = Conv1D(filters=8, kernel_size=3, activation='selu', padding='same')(inputs)
x = Conv1D(filters=16, kernel_size=3, activation='selu', padding='same')(x)
#print(x.shape)
#x = layers.MaxPooling1D(2)(x)
# x = Conv1D(filters=32, kernel_size=3, activation='selu', padding='same')(x)
# x = Conv1D(filters=64, kernel_size=3, activation='selu', padding='same')(x)
# x = layers.MaxPooling1D(2)(x)
#x = layers.Flatten()(x)
#x = layers.Dense(200, activation='selu')(x)
#print(x.shape)
return x
'''
'''
def multiple_cnn1D(nb):
inputs = Input(shape=(30, nb))
outputs = conv1D_full(inputs[:,:,0])
for i in range(1,nb):
x_i = conv1D_full(inputs[:,:,i])
outputs = tf.concat([ outputs , x_i ] , 2 )
#print(outputs.shape)
x=Bidirectional(LSTM(64, return_sequences=True), input_shape=(outputs.shape[1],outputs.shape[2]))(outputs)
#print(x.shape)
x = Dropout(0.5)(x)
x = layers.Flatten()(x)
#print(x.shape)
x = layers.Dense(1000, activation='selu',kernel_initializer='he_uniform',kernel_constraint=maxnorm(5))(x)
x = Dropout(0.5)(x)
x = layers.Dense(500, activation='selu',kernel_constraint=maxnorm(5))(x)
x = Dropout(0.5)(x)
x = layers.Dense(100,activation='selu',kernel_constraint=maxnorm(5))(x)
x = Dropout(0.5)(x)
x = layers.Dense(20,activation='selu',kernel_constraint=maxnorm(5))(x)
x = Dropout(0.5)(x)
answer = layers.Dense(1)(x)
#print(answer.shape)
model = Model(inputs, answer)
opt = keras.optimizers.Adam(learning_rate=0.0001)
model.compile(loss='mse', optimizer=opt, metrics=[tf.keras.metrics.MeanSquaredError()])
print(model.summary())
#dot_img_file = 'model_LSTM.png'
#tf.keras.utils.plot_model(model, to_file=dot_img_file, show_shapes=True, rankdir='LR')
return model
'''
#model = multiple_cnn1D(39)
# Render the architecture diagram of the Conv1D+BiLSTM model built above.
tf.keras.utils.plot_model(model, show_shapes=True)
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.001), loss='mse',metrics=[tf.keras.metrics.MeanSquaredError()])
#over_fitting after 30 epochs
# Train on target Y2 only; model1 below handles Y1.
model.fit(x=X2_train, y=Y_train['Y2'].values, validation_split=0.1, epochs=25, batch_size=128)
# Plot the loss curves recorded during training.
pd.DataFrame(model.history.history).plot()
predict_y2 = model.predict(X2_test)
predict_y2
predict_y2.shape
```
# Simple Model
```
# For the simple model: collapse each customer's time series to the mean
# of every feature (one row per customer).
X1_train = X_train.groupby('UNIQUE_IDENTIFIER').mean()
X1_test = test_onehot.groupby('UNIQUE_IDENTIFIER').mean()
X1_train.head()
# Separate scaler for the aggregated features; fit on train only.
scaler1 = StandardScaler()
X1_scaled = scaler1.fit_transform(X1_train)
X1_test_scaled = scaler1.transform(X1_test)
# MLP regressor for target Y1: two Dense+BN+ELU+Dropout stages and a
# linear output, all with l2 regularization.
input1 = tfl.Input(shape=(X1_scaled.shape[1]))
#1st layer
x = tfl.Dense(256, kernel_initializer="glorot_normal", kernel_regularizer="l2")(input1)
x = tfl.BatchNormalization()(x)
x = tfl.ELU(alpha=1)(x)
x = tfl.Dropout(0.5)(x)
# 2nd layer
x = tfl.Dense(128, kernel_initializer="glorot_normal", kernel_regularizer="l2")(x)
x = tfl.BatchNormalization()(x)
x = tfl.ELU(alpha=1)(x)
x = tfl.Dropout(0.5)(x)
# # 3rd layer
# x = tfl.Dense(64, kernel_initializer="glorot_normal", kernel_regularizer="l2")(x)
# x = tfl.BatchNormalization()(x)
# x = tfl.ELU(alpha=1)(x)
# #4th layer
# x = tfl.Dense(32, kernel_initializer="glorot_normal", kernel_regularizer="l2")(x)
# x = tfl.BatchNormalization()(x)
# x = tfl.ELU(alpha=1)(x)
# 5th layer
output1 = tfl.Dense(1, kernel_initializer="glorot_normal", kernel_regularizer="l2")(x)
model1 = tf.keras.Model(inputs=input1, outputs=output1)
model1.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001), loss="mse")
model1.fit(x=X1_scaled, y=Y_train['Y1'].values, epochs=100, batch_size=512, validation_split=0.1)
# Plot model1's training curves.
pd.DataFrame(model1.history.history).plot()
predict_y1 = model1.predict(X1_test_scaled)
predict_y1
# One submission row per customer, in the order customers appear in test.
customer_index = test_onehot['UNIQUE_IDENTIFIER'].unique()
customer_index = np.expand_dims(customer_index, axis=1)
customer_index
# Assemble the submission: id, Y1 (simple model), Y2 (Conv1D+BiLSTM).
predictions = pd.DataFrame(np.hstack((customer_index, predict_y1, predict_y2)), columns=['UNIQUE_IDENTIFIER', 'Y1', 'Y2'])
predictions
# hstack promoted the ids to float; restore the integer dtype
predictions['UNIQUE_IDENTIFIER'] = predictions['UNIQUE_IDENTIFIER'].astype("int64")
predictions
# Training-set RMSE sanity check: column 0 of `exact` is the Y2 model's
# output, column 1 the Y1 model's, matched against Y_train accordingly.
predict=pd.DataFrame(model.predict(X2_train))
predict1=pd.DataFrame(model1.predict(X1_scaled))
pred=pd.concat([predict, predict1], axis=1)
exact=pd.DataFrame(pred)
from sklearn.metrics import mean_squared_error
m1 = np.sqrt(mean_squared_error(Y_train.iloc[:,1],exact.iloc[:,0]))
m2 = np.sqrt(mean_squared_error(Y_train.iloc[:,0],exact.iloc[:,1]))
print((m1+m2)/2)
predictions.to_csv("1D_Bi_LSTM_61_modified.csv", index=False)
```
| github_jupyter |
### Домашняя работа №3
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import itertools
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn.metrics import precision_recall_curve, roc_curve, roc_auc_score, confusion_matrix, log_loss
from sklearn.pipeline import Pipeline, make_pipeline, FeatureUnion
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.preprocessing import StandardScaler
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from scipy.sparse import hstack
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.
    """
    # BUG FIX: normalize *before* drawing. Previously imshow was called
    # first, so with normalize=True the heatmap colors reflected raw
    # counts while the cell text showed rates.
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')
    print(cm)
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)
    # white text on dark cells, black on light ones
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, cm[i, j],
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
class ColumnSelector(BaseEstimator, TransformerMixin):
    """Pipeline step that picks a single column out of a DataFrame.

    Returns a Series (1-D), which suits downstream per-column encoders.
    """

    def __init__(self, key):
        # name of the column to extract
        self.key = key

    def fit(self, X, y=None):
        # stateless transformer: nothing to learn
        return self

    def transform(self, X):
        # Series selection, e.g. X['gender']
        return X[self.key]
class NumberSelector(BaseEstimator, TransformerMixin):
    """Pipeline step that picks a single numeric column as a DataFrame.

    Unlike ColumnSelector this keeps the result 2-D (``X[[key]]``),
    which is the shape scalers such as StandardScaler expect.
    """

    def __init__(self, key):
        # name of the numeric column to extract
        self.key = key

    def fit(self, X, y=None):
        # stateless transformer: nothing to learn
        return self

    def transform(self, X):
        # single-column DataFrame selection, e.g. X[['age']]
        return X[[self.key]]
class OHEEncoder(BaseEstimator, TransformerMixin):
    """One-hot encode a single column, remembering the dummy columns seen
    at fit time so transform always emits the same columns in the same
    order (unseen test categories are dropped, missing ones zero-filled)."""
    def __init__(self, key):
        self.key = key
        self.columns = []
    def fit(self, X, y=None):
        # remember the dummy columns produced on the training data
        self.columns = [col for col in pd.get_dummies(X, prefix=self.key).columns]
        return self
    def transform(self, X):
        X = pd.get_dummies(X, prefix=self.key)
        # BUG FIX: the original loop iterated the *test* columns and
        # zero-filled categories unseen at fit time (pointless — they are
        # dropped by the selection below) while raising KeyError whenever
        # a training-time category was absent from X. Zero-fill the
        # missing *training* columns instead.
        for col_ in self.columns:
            if col_ not in X.columns:
                X[col_] = 0
        # emit exactly the training-time columns, in training order
        return X[self.columns]
```
В рамках конкурса вам нужно предсказать наличие сердечно-сосудистых заболеваний по результатам классического врачебного осмотра. Датасет сформирован из 100.000 реальных клинических анализов, и в нём используются признаки, которые можно разбить на 3 группы:
Объективные признаки:
- Возраст
- Рост
- Вес
- Пол
Результаты измерения:
- Артериальное давление верхнее и нижнее
- Холестерин
- Глюкоза
Субъективные признаки:
- Курение
- Употребление Алкоголя
- Физическая активность
Возраст дан в днях. Значения показателей холестерина и глюкозы представлены одним из трех классов: норма, выше нормы, значительно выше нормы. Значения субъективных признаков — бинарны.
Все показатели даны на момент осмотра.
```
from google.colab import drive
drive.mount('/gdrive')
# FIX: pass sep and axis by keyword — the positional forms used before
# were deprecated and removed in pandas 2.0.
df = pd.read_csv('train_case2.csv', sep=';')
df.head()
# split the data into train/test (default 75/25)
X_train, X_test, y_train, y_test = train_test_split(df.drop(columns='cardio'),
                                                    df['cardio'], random_state=0)
```
К полям:
- gender, cholesterol применим OHE-кодирование
- age, height, weight, ap_hi, ap_lo - standardScaler
- gluc, smoke, alco, active - оставим пока как есть
```
# Feature groups: continuous -> standardize; categorical -> one-hot;
# binary/ordinal -> pass through unchanged.
continuos_cols = ['age', 'height', 'weight', 'ap_hi', 'ap_lo']
cat_cols = ['gender', 'cholesterol']
base_cols = ['gluc', 'smoke', 'alco', 'active']
continuos_transformers = []
cat_transformers = []
base_transformers = []
# One (name, pipeline) pair per column, ready for FeatureUnion.
for cont_col in continuos_cols:
    transfomer = Pipeline([
        ('selector', NumberSelector(key=cont_col)),
        ('standard', StandardScaler())
    ])
    continuos_transformers.append((cont_col, transfomer))
for cat_col in cat_cols:
    cat_transformer = Pipeline([
        ('selector', ColumnSelector(key=cat_col)),
        ('ohe', OHEEncoder(key=cat_col))
    ])
    cat_transformers.append((cat_col, cat_transformer))
for base_col in base_cols:
    # passthrough: selection only, no scaling or encoding
    base_transformer = Pipeline([
        ('selector', NumberSelector(key=base_col))
    ])
    base_transformers.append((base_col, base_transformer))
```
Теперь объединим все наши трансформеры с помощью FeatureUnion
```
# Concatenate all per-column pipelines into one feature matrix.
feats = FeatureUnion(continuos_transformers+cat_transformers+base_transformers)
feature_processing = Pipeline([('feats', feats)])
# Smoke-test the feature pipeline on the training data.
feature_processing.fit_transform(X_train)
```
## LogisticRegression
```
# Full pipeline: feature processing followed by logistic regression.
classifier = Pipeline([
    ('features',feats),
    ('classifier', LogisticRegression(random_state = 42)),
])
# run cross-validation (7-fold, ROC AUC)
cv_scores = cross_val_score(classifier, X_train, y_train, cv=7, scoring='roc_auc')
cv_score = np.mean(cv_scores)
cv_score_std = np.std(cv_scores)
print('CV score is {}+-{}'.format(cv_score, cv_score_std))
# fit the pipeline on the whole training set
classifier.fit(X_train, y_train)
# predicted probability of the positive class on the held-out test set
y_score = classifier.predict_proba(X_test)[:, 1]
```
Посчитаем precision/recall/f_score
```
# b is the beta of the F-beta score; b=1 gives the standard F1.
b=1
precision, recall, thresholds = precision_recall_curve(y_test.values, y_score)
fscore = (1+b**2)*(precision * recall) / (b**2*precision + recall)
# locate the index of the largest f score
ix = np.argmax(fscore)
print('Best Threshold=%f, F-Score=%.3f, Precision=%.3f, Recall=%.3f' % (thresholds[ix],
                                                                        fscore[ix],
                                                                        precision[ix],
                                                                        recall[ix]))
```
Нарисуем roc auc кривую (кстати, наверное неплохо бы ее вынести в отдельную функцию)
```
# ROC curve for the logistic-regression pipeline.
sns.set(font_scale=1.5)
sns.set_color_codes("muted")
plt.figure(figsize=(10, 8))
fpr, tpr, thresholds_ = roc_curve(y_test, y_score, pos_label=1)
lw = 2
plt.plot(fpr, tpr, lw=lw, label='ROC curve ')
# diagonal = no-skill baseline
plt.plot([0, 1], [0, 1])
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC curve')
plt.savefig("ROC.png")
plt.show()
# Summary metrics on the test set.
roc_auc_score_lr = roc_auc_score(y_true=y_test, y_score=classifier.predict_proba(X_test)[:,1])
log_loss_lr = log_loss(y_true=y_test, y_pred=classifier.predict_proba(X_test)[:,1])
print("roc auc score: {}".format(roc_auc_score_lr))
print("log loss score: {}".format(log_loss_lr))
```
Посмотрим на матрицу ошибок
```
# we already found the "optimal" threshold above while maximizing f_score
font = {'size' : 15}
plt.rc('font', **font)
# binarize the scores at that threshold and plot the confusion matrix
cnf_matrix = confusion_matrix(y_test, y_score>thresholds[ix])
plt.figure(figsize=(10, 8))
plot_confusion_matrix(cnf_matrix, classes=['cardio_0', 'cardio_1'],
                      title='Confusion matrix')
plt.savefig("conf_matrix.png")
plt.show()
```
Посчитаем FPR, TPR
```
# Unpack the confusion matrix: rows are true labels, columns predictions.
TN = cnf_matrix[0][0]
FN = cnf_matrix[1][0]
TP = cnf_matrix[1][1]
FP = cnf_matrix[0][1]
# sensitivity, fall-out and specificity
TPR = TP/(TP+FN)
FPR = FP/(FP+TN)
TNR = TN/(FP+TN)
TPR, FPR, TNR
```
False Positive Rate довольно высокий ~ 0.45.
Это означает, что 45 процентов всех пациентов получат метку 1 при том, что они на самом деле здоровы
### Random Forest
```
# Same feature pipeline, random-forest classifier.
classifier_tree = Pipeline([
    ('features',feats),
    ('classifier', RandomForestClassifier(random_state = 42)),
])
# run cross-validation (7-fold, ROC AUC)
cv_scores_tree = cross_val_score(classifier_tree, X_train, y_train, cv=7, scoring='roc_auc')
cv_score_tree = np.mean(cv_scores_tree)
cv_score_std_tree = np.std(cv_scores_tree)
print('CV score is {}+-{}'.format(cv_score_tree, cv_score_std_tree))
# fit the pipeline on the whole training set
classifier_tree.fit(X_train, y_train)
y_score_tree = classifier_tree.predict_proba(X_test)[:, 1]
# NOTE(review): b=2 (F2, recall-weighted) here, while the LR and AdaBoost
# sections use b=1 — confirm the asymmetry is intentional.
b=2
precision_tree, recall_tree, thresholds_tree = precision_recall_curve(y_test.values, y_score_tree)
fscore_tree = (1+b**2)*(precision_tree * recall_tree) / (b**2*precision_tree + recall_tree)
# locate the index of the largest f score
ix_tree = np.argmax(fscore_tree)
print('Best Threshold=%f, F-Score=%.3f, Precision=%.3f, Recall=%.3f' % (thresholds_tree[ix_tree],
                                                                        fscore_tree[ix_tree],
                                                                        precision_tree[ix_tree],
                                                                        recall_tree[ix_tree]))
# ROC curve for the random-forest pipeline.
sns.set(font_scale=1.5)
sns.set_color_codes("muted")
plt.figure(figsize=(10, 8))
# BUG FIX: roc_curve previously overwrote `thresholds_tree` from
# precision_recall_curve, so the later `thresholds_tree[ix_tree]` lookup
# indexed the wrong array. Bind the ROC thresholds to their own name.
fpr, tpr, roc_thresholds_tree = roc_curve(y_test, y_score_tree, pos_label=1)
lw = 2
plt.plot(fpr, tpr, lw=lw, label='ROC curve ')
plt.plot([0, 1], [0, 1])
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC curve')
plt.savefig("ROC.png")
plt.show()
# Summary metrics for the random forest on the test set.
roc_auc_score_tree = roc_auc_score(y_true=y_test, y_score=classifier_tree.predict_proba(X_test)[:,1])
log_loss_tree = log_loss(y_true=y_test, y_pred=classifier_tree.predict_proba(X_test)[:,1])
print("roc auc score: {}".format(roc_auc_score_tree))
print("log loss score: {}".format(log_loss_tree))
font = {'size' : 15}
plt.rc('font', **font)
# BUG FIX: the original thresholded `y_score` (the logistic-regression
# scores) instead of the random-forest scores `y_score_tree`.
cnf_matrix_tree = confusion_matrix(y_test, y_score_tree>thresholds_tree[ix_tree])
plt.figure(figsize=(10, 8))
plot_confusion_matrix(cnf_matrix_tree, classes=['cardio_0', 'cardio_1'],
                      title='Confusion matrix')
plt.savefig("conf_matrix_tree.png")
plt.show()
# Unpack the random-forest confusion matrix (rows: truth, cols: prediction).
TN_tree = cnf_matrix_tree[0][0]
FN_tree = cnf_matrix_tree[1][0]
TP_tree = cnf_matrix_tree[1][1]
FP_tree = cnf_matrix_tree[0][1]
# sensitivity, fall-out and specificity
TPR_tree = TP_tree/(TP_tree+FN_tree)
FPR_tree = FP_tree/(FP_tree+TN_tree)
TNR_tree = TN_tree/(FP_tree+TN_tree)
TPR_tree, FPR_tree, TNR_tree
# Same feature pipeline, AdaBoost classifier.
classifier_ada = Pipeline([
    ('features',feats),
    ('classifier', AdaBoostClassifier(random_state = 42)),
])
# run cross-validation (7-fold, ROC AUC)
cv_scores_ada = cross_val_score(classifier_ada, X_train, y_train, cv=7, scoring='roc_auc')
cv_score_ada = np.mean(cv_scores_ada)
cv_score_std_ada = np.std(cv_scores_ada)
print('CV score is {}+-{}'.format(cv_score_ada, cv_score_std_ada))
# fit the pipeline on the whole training set
classifier_ada.fit(X_train, y_train)
y_score_ada = classifier_ada.predict_proba(X_test)[:, 1]
# b=1 -> standard F1 when searching for the best threshold
b=1
precision_ada, recall_ada, thresholds_ada = precision_recall_curve(y_test.values, y_score_ada)
fscore_ada = (1+b**2)*(precision_ada * recall_ada) / (b**2*precision_ada + recall_ada)
# locate the index of the largest f score
ix_ada = np.argmax(fscore_ada)
print('Best Threshold=%f, F-Score=%.3f, Precision=%.3f, Recall=%.3f' % (thresholds_ada[ix_ada],
                                                                        fscore_ada[ix_ada],
                                                                        precision_ada[ix_ada],
                                                                        recall_ada[ix_ada]))
# ROC curve for the AdaBoost pipeline.
sns.set(font_scale=1.5)
sns.set_color_codes("muted")
plt.figure(figsize=(10, 8))
# BUG FIX: roc_curve previously overwrote `thresholds_ada` from
# precision_recall_curve, corrupting the later `thresholds_ada[ix_ada]`
# lookup. Bind the ROC thresholds to their own name.
fpr, tpr, roc_thresholds_ada = roc_curve(y_test, y_score_ada, pos_label=1)
lw = 2
plt.plot(fpr, tpr, lw=lw, label='ROC curve ')
plt.plot([0, 1], [0, 1])
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC curve')
plt.savefig("ROC.png")
plt.show()
# Summary metrics for AdaBoost on the test set.
roc_auc_score_ada = roc_auc_score(y_true=y_test, y_score=classifier_ada.predict_proba(X_test)[:,1])
log_loss_ada = log_loss(y_true=y_test, y_pred=classifier_ada.predict_proba(X_test)[:,1])
print("roc auc score: {}".format(roc_auc_score_ada))
print("log loss score: {}".format(log_loss_ada))
font = {'size' : 15}
plt.rc('font', **font)
# BUG FIX: the original thresholded `y_score` (the logistic-regression
# scores) instead of the AdaBoost scores `y_score_ada`.
cnf_matrix_ada = confusion_matrix(y_test, y_score_ada>thresholds_ada[ix_ada])
plt.figure(figsize=(10, 8))
plot_confusion_matrix(cnf_matrix_ada, classes=['cardio_0', 'cardio_1'],
                      title='Confusion matrix')
plt.savefig("conf_matrix_ada.png")
plt.show()
# Unpack the AdaBoost confusion matrix (rows: truth, cols: prediction).
TN_ada = cnf_matrix_ada[0][0]
FN_ada = cnf_matrix_ada[1][0]
TP_ada = cnf_matrix_ada[1][1]
FP_ada = cnf_matrix_ada[0][1]
# sensitivity, fall-out and specificity
TPR_ada = TP_ada/(TP_ada+FN_ada)
FPR_ada = FP_ada/(FP_ada+TN_ada)
TNR_ada = TN_ada/(FP_ada+TN_ada)
TPR_ada, FPR_ada, TNR_ada
# Side-by-side summary of all three models at their best thresholds.
print(f'\t\tLogisticRegression \n')
print('F-Score=%.3f, Precision=%.3f, Recall=%.3f, ROC AUC=%.3f' % (fscore[ix],
                                                                   precision[ix],
                                                                   recall[ix], roc_auc_score_lr))
print(f'TPR {TPR} FPR {FPR} TNR {TNR}')
print(f'\n\t\tRandomForest \n')
print('F-Score=%.3f, Precision=%.3f, Recall=%.3f, ROC AUC=%.3f' % (fscore_tree[ix_tree],
                                                                   precision_tree[ix_tree],
                                                                   recall_tree[ix_tree], roc_auc_score_tree))
print(f'TPR {TPR_tree} FPR {FPR_tree} TNR {TNR_tree}')
print(f'\n\t\tAdaBoosting \n')
print('F-Score=%.3f, Precision=%.3f, Recall=%.3f, ROC AUC=%.3f' % (fscore_ada[ix_ada],
                                                                   precision_ada[ix_ada],
                                                                   recall_ada[ix_ada], roc_auc_score_ada))
print(f'TPR {TPR_ada} FPR {FPR_ada} TNR {TNR_ada}')
```
Делая вывод по данной работе, хочу сказать, что по моему мнению самая лучшая модель - модель обученная с помощью AdaBoosting. RandomForest выдает немного странные результаты, не понимаю почему идет такой сильный дисбаланс классов ответа.
При сильном дисбалансе классов roc_auc_curve подходит больше, потому что True Positive Rate и False Positive Rate устойчивы при несбалансированных данных.
```
```
| github_jupyter |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.