text stringlengths 2.5k 6.39M | kind stringclasses 3
values |
|---|---|
# Structure Data Example: Automobile dataset
https://archive.ics.uci.edu/ml/machine-learning-databases/autos/imports-85.data
```
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
# We're using pandas to read the CSV file. This is easy for small datasets, but for large and complex datasets,
# tensorflow parsing and processing functions are more powerful
import pandas as pd
import numpy as np
# TensorFlow
import tensorflow as tf
print('please make sure that version >= 1.2:')
print(tf.__version__)
print('@monteirom: I made changes so it also works with 1.1.0 that is the current pip install version')
print('@monteirom: The lines that were changed have @1.2 as comment')
# Layers that will define the features
#
# real_value_column: real values, float32
# sparse_column_with_hash_bucket: Use this when your sparse features are in string or integer format,
# but you don't have a vocab file that maps each value to an integer ID.
# output_id = Hash(input_feature_string) % bucket_size
# sparse_column_with_keys: Look up logic is as follows:
# lookup_id = index_of_feature_in_keys if feature in keys else default_value.
# You should use this when you know the vocab file for the feature
# one_hot_column: Creates an _OneHotColumn for a one-hot or multi-hot repr in a DNN.
# The input can be a _SparseColumn which is created by `sparse_column_with_*`
# or crossed_column functions
from tensorflow.contrib.layers import real_valued_column, sparse_column_with_keys, sparse_column_with_hash_bucket
from tensorflow.contrib.layers import one_hot_column
```
# Please Download
**https://archive.ics.uci.edu/ml/machine-learning-databases/autos/imports-85.data
And move it to data/**
**So: data/imports-85.data is expected to exist!**
# Preparing the data
```
# The CSV file ships without a header row, so we supply the column names
# ourselves, in the order the fields appear in imports-85.data.
names = (
    'symboling normalized-losses make fuel-type aspiration num-of-doors '
    'body-style drive-wheels engine-location wheel-base length width height '
    'curb-weight engine-type num-of-cylinders engine-size fuel-system bore '
    'stroke compression-ratio horsepower peak-rpm city-mpg highway-mpg price'
).split()
# We also have to specify a dtype per column: np.int32/np.float32 for numeric
# fields, plain str for categorical ones. Kept as an ordered list of pairs so
# the column order matches the file layout, then turned into a dict.
_COLUMN_TYPES = [
    ('symboling', np.int32),
    ('normalized-losses', np.float32),
    ('make', str),
    ('fuel-type', str),
    ('aspiration', str),
    ('num-of-doors', str),
    ('body-style', str),
    ('drive-wheels', str),
    ('engine-location', str),
    ('wheel-base', np.float32),
    ('length', np.float32),
    ('width', np.float32),
    ('height', np.float32),
    ('curb-weight', np.float32),
    ('engine-type', str),
    ('num-of-cylinders', str),
    ('engine-size', np.float32),
    ('fuel-system', str),
    ('bore', np.float32),
    ('stroke', np.float32),
    ('compression-ratio', np.float32),
    ('horsepower', np.float32),
    ('peak-rpm', np.float32),
    ('city-mpg', np.float32),
    ('highway-mpg', np.float32),
    ('price', np.float32),
]
dtypes = dict(_COLUMN_TYPES)
# Read the file.
# na_values='?' maps the dataset's '?' placeholder to NaN at parse time.
df = pd.read_csv('data/imports-85.data', names=names, dtype=dtypes, na_values='?')
# Some rows don't have price data, we can't use those.
# price is the regression target, so rows without it are unusable.
df = df.dropna(axis='rows', how='any', subset=['price'])
```
## Dealing with NaN
There are many possible approaches for NaN values in the data; here we just change them to "" or 0 depending on the data type. This is the simplest way, but it is certainly not the best in most cases, so in practice you should try other ways to handle the NaN data. Some approaches are:
* use the mean of the row
* use the mean of the column
* if/else substitution (e.g. if there are a lot of NaN values do this, else do this other thing)
* ...
* google others
```
# Fill missing values in continuous columns with zeros instead of NaN.
# NOTE: the original passed axis='columns' to fillna; a scalar fill value
# already applies to every selected column, and combining value with axis
# is rejected by newer pandas versions, so the argument is dropped.
float_columns = [k for k, v in dtypes.items() if v == np.float32]
df[float_columns] = df[float_columns].fillna(value=0.)
# Fill missing values in string columns with '' instead of NaN
# (NaN mixed with strings is very bad for us).
string_columns = [k for k, v in dtypes.items() if v == str]
df[string_columns] = df[string_columns].fillna(value='')
```
## Standardize features
```
# We have too many variables, let's just use some of them.
# .copy() so later assignments mutate an independent frame, not a view of df.
df = df[['num-of-doors', 'num-of-cylinders', 'horsepower', 'make', 'price', 'length', 'height', 'width']].copy()
# Since we're possibly dealing with parameters of different units and scales,
# we need to rescale the data. There are two main ways to do it:
# * Normalization: scales all numeric variables into the range [0, 1].
# * Standardization: transforms each variable to zero mean and unit variance.
# Which is better? It depends on your data and your features.
# One disadvantage of normalization over standardization is that it loses
# some information in the data, which can make it harder for gradient
# descent to converge, so we'll use standardization.
# In practice: please analyse your data and see what gives you better results.
def std(x):
    """Standardize a numeric Series to zero mean and unit (sample) variance."""
    return (x - x.mean()) / x.std()

# Positional iloc[0] instead of label lookup df.length[0]: after dropping
# rows above, relying on label 0 still existing is fragile.
before = df['length'].iloc[0]
# Bracket assignment (df['col'] = ...) instead of attribute assignment, which
# pandas warns about and may silently ignore on copies.
df['length'] = std(df['length'])
df['width'] = std(df['width'])
df['height'] = std(df['height'])
df['horsepower'] = std(df['horsepower'])
after = df['length'].iloc[0]
print('before:', before, 'after:', after)
```
## Separating training data from testing data
```
TRAINING_DATA_SIZE = 160
TEST_DATA_SIZE = 10  # also used as the size of the eval slice below
LABEL = 'price'
# Split the data into a training set, an eval set and a test set.
# .copy() so pop() below mutates independent frames instead of views of df
# (avoids pandas' SettingWithCopyWarning / silently ineffective writes).
training_data = df[:TRAINING_DATA_SIZE].copy()
eval_data = df[TRAINING_DATA_SIZE: TRAINING_DATA_SIZE + TEST_DATA_SIZE].copy()
test_data = df[TRAINING_DATA_SIZE + TEST_DATA_SIZE:].copy()
# Separate input features from labels.
training_label = training_data.pop(LABEL)
eval_label = eval_data.pop(LABEL)
test_label = test_data.pop(LABEL)
```
# Using Tensorflow
## Defining input function
```
BATCH_SIZE = 64
# Input function for training:
# num_epochs=None -> will cycle through input data forever
# shuffle=True    -> randomize order of input data
training_input_fn = tf.estimator.inputs.pandas_input_fn(x=training_data,
                                                        y=training_label,
                                                        batch_size=BATCH_SIZE,
                                                        shuffle=True,
                                                        num_epochs=None)
# Input function for evaluation (shuffle=False -> do not randomize input data).
eval_input_fn = tf.estimator.inputs.pandas_input_fn(x=eval_data,
                                                    y=eval_label,
                                                    batch_size=BATCH_SIZE,
                                                    shuffle=False)
# Input function for testing (shuffle=False -> do not randomize input data).
# BUG(fixed): the original assigned this to eval_input_fn, silently shadowing
# the evaluation input function above. It is now named test_input_fn; the
# alias below reproduces the original final binding so later cells that call
# evaluate/predict with input_fn=eval_input_fn still run on the test set
# exactly as before.
test_input_fn = tf.estimator.inputs.pandas_input_fn(x=test_data,
                                                    y=test_label,
                                                    batch_size=1,
                                                    shuffle=False)
eval_input_fn = test_input_fn
```
## Defining a Linear Estimator
```
# Describe how the model should interpret the inputs. The names of the feature columns have to match the names
# of the series in the dataframe.
# @1.2.0 tf.feature_column.numeric_column -> tf.contrib.layers.real_valued_column
# Continuous features are fed as real-valued (float32) columns.
horsepower = real_valued_column('horsepower')
width = real_valued_column('width')
height = real_valued_column('height')
length = real_valued_column('length')
# @1.2.0 tf.feature_column.categorical_column_with_hash_bucket -> tf.contrib.layers.sparse_column_with_hash_bucket
# 'make' is treated as having no fixed vocabulary: hash its strings into 50 buckets.
make = sparse_column_with_hash_bucket('make', 50)
# @1.2.0 tf.feature_column.categorical_column_with_vocabulary_list -> tf.contrib.layers.sparse_column_with_keys
# These categoricals have a known vocabulary, so the keys are listed explicitly.
fuel_type = sparse_column_with_keys('fuel-type', keys=['diesel', 'gas'])
num_of_doors = sparse_column_with_keys('num-of-doors', keys=['two', 'four'])
num_of_cylinders = sparse_column_with_keys('num-of-cylinders', ['eight', 'five', 'four', 'six', 'three', 'twelve', 'two'])
# NOTE(review): fuel_type is defined but not included in linear_features, and
# the 'fuel-type' column was dropped from df earlier — confirm it is intentionally unused.
linear_features = [horsepower, make, num_of_doors, num_of_cylinders, length, width, height]
regressor = tf.contrib.learn.LinearRegressor(feature_columns=linear_features, model_dir='tensorboard/linear_regressor/')
```
## Training
```
# Train the linear model for 10k steps; checkpoints and TensorBoard events
# are written to tensorboard/linear_regressor/.
regressor.fit(input_fn=training_input_fn, steps=10000)
```
## Evaluating
```
# NOTE(review): eval_input_fn was reassigned to the *test*-set input function
# in the cell that defined the input functions, so this actually evaluates on
# the test split, not the eval split — confirm intent.
regressor.evaluate(input_fn=eval_input_fn)
```
## Predicting
```
# predict() returns a generator; materialize it so we can index into it.
# NOTE(review): at this point eval_input_fn holds the test-set input function
# (it was shadowed in the input-function cell), which is why these predictions
# line up with test_label below.
preds = list(regressor.predict(input_fn=eval_input_fn))
for i in range(TEST_DATA_SIZE):
    print('prediction:', preds[i], 'real value:', test_label.iloc[i])
```
## Defining a DNN Estimator
```
# @1.2.0 tf.feature_column.indicator_column -> tf.contrib.layers.one_hot_column(tf.contrib.layers.sparse_column_with_keys(...))
# Feature set for the DNN: numeric columns are fed directly, while sparse
# categorical columns must be densified (one-hot) before entering a DNN.
dnn_features = [
    # numerical features
    length, width, height, horsepower,
    # densify categorical features:
    one_hot_column(make),
    one_hot_column(num_of_doors)
    # NOTE(review): num_of_cylinders is used by the linear model but omitted
    # here — possibly intentional, confirm.
]
dnnregressor = tf.contrib.learn.DNNRegressor(feature_columns=dnn_features,
                                             hidden_units=[50, 30, 10], model_dir='tensorboard/DNN_regressor/')
```
## Training
```
# Train the DNN for 10k steps; events go to tensorboard/DNN_regressor/.
dnnregressor.fit(input_fn=training_input_fn, steps=10000)
```
## Evaluating
```
# NOTE(review): eval_input_fn holds the test-set input function here (it was
# shadowed in the input-function cell), so this evaluates on the test split —
# confirm intent.
dnnregressor.evaluate(input_fn=eval_input_fn)
```
## Predicting
```
# predict() returns a generator; materialize it so we can index into it.
# NOTE(review): as above, eval_input_fn actually holds the test-set input
# function, which is why predictions are compared against test_label.
preds = list(dnnregressor.predict(input_fn=eval_input_fn))
for i in range(TEST_DATA_SIZE):
    print('prediction:', preds[i], 'real value:', test_label.iloc[i])
```
### Creating an Experiment
```
# @1.2.0 experiment_fn(run_config, params) - > experiment_fn(output_dir)
def experiment_fn(output_dir):
    """Build an Experiment bundling a LinearRegressor with its train/eval inputs.

    Passed to learn_runner.run below, which drives training and evaluation.
    """
    # This function makes an Experiment, containing an Estimator and inputs for training and evaluation.
    # You can use params and config here to customize the Estimator depending on the cluster or to use
    # hyperparameter tuning.
    # Collect information for training
    # @1.2.0 config=run_config -> ''
    return tf.contrib.learn.Experiment(estimator=tf.contrib.learn.LinearRegressor(
        feature_columns=linear_features, model_dir=output_dir),
        train_input_fn=training_input_fn,
        train_steps=10000,
        eval_input_fn=eval_input_fn)

import shutil
# @1.2.0 tf.contrib.learn.learn_runner(exp, run_config=tf.contrib.learn.RunConfig(model_dir="/tmp/output_dir")
# -> tf.contrib.learn.python.learn.learm_runner.run(exp, output_dir='/tmp/output_dir')
# Start from a clean output directory so stale checkpoints don't interfere.
shutil.rmtree("/tmp/output_dir", ignore_errors=True)
from tensorflow.contrib.learn.python.learn import learn_runner
learn_runner.run(experiment_fn, output_dir='/tmp/output_dir')
```
| github_jupyter |
<h6>Summary<span class="summary"></span></h6><a href="#code.eq"><span class="module-name-text-summary" style="white-space: nowrap;">code.eq</span></a><span class="module-name-text-summary">, </span><a href="#code.nat"><span class="module-name-text-summary" style="white-space: nowrap;">code.nat</span></a><span class="module-name-text-summary">, </span><a href="#code.product"><span class="module-name-text-summary" style="white-space: nowrap;">code.product</span></a><span class="module-name-text-summary">, </span><a href="#code.neg"><span class="module-name-text-summary" style="white-space: nowrap;">code.neg</span></a><span class="module-name-text-summary">, </span><a href="#code.bool"><span class="module-name-text-summary" style="white-space: nowrap;">code.bool</span></a><span class="module-name-text-summary">, </span><a href="#code.bool.op"><span class="module-name-text-summary" style="white-space: nowrap;">code.bool.op</span></a><span class="module-name-text-summary">, </span><a href="#code.nat.leq"><span class="module-name-text-summary" style="white-space: nowrap;">code.nat.leq</span></a><span class="module-name-text-summary">, </span><a href="#code.bool.leq"><span class="module-name-text-summary" style="white-space: nowrap;">code.bool.leq</span></a><span class="module-name-text-summary">, </span><a href="#code.decidable"><span class="module-name-text-summary" style="white-space: nowrap;">code.decidable</span></a><span class="module-name-text-summary">, </span><a href="#code.nat.leq.decidable"><span class="module-name-text-summary" style="white-space: nowrap;">code.nat.leq.decidable</span></a><span class="module-name-text-summary">, </span><a href="#code.nat.leq.decidable"><span class="module-name-text-summary" style="white-space: nowrap;">code.nat.leq.decidable</span></a><span class="module-name-text-summary">, </span><a href="#code.aexp"><span class="module-name-text-summary" style="white-space: nowrap;">code.aexp</span></a><span 
class="module-name-text-summary">, </span><a href="#code.aexp.env"><span class="module-name-text-summary" style="white-space: nowrap;">code.aexp.env</span></a><span class="module-name-text-summary">, </span><a href="#code.aexp.env"><span class="module-name-text-summary" style="white-space: nowrap;">code.aexp.env</span></a><span class="module-name-text-summary">, </span><a href="#code.aexp.sem"><span class="module-name-text-summary" style="white-space: nowrap;">code.aexp.sem</span></a><span class="module-name-text-summary">, </span><a href="#code.aexp.sem"><span class="module-name-text-summary" style="white-space: nowrap;">code.aexp.sem</span></a><span class="module-name-text-summary">, </span><a href="#code.aexp.test"><span class="module-name-text-summary" style="white-space: nowrap;">code.aexp.test</span></a><span class="module-name-text-summary">, </span><a href="#code.aexp.small-steps"><span class="module-name-text-summary" style="white-space: nowrap;">code.aexp.small-steps</span></a><span class="module-name-text-summary">, </span><a href="#code.aexp.termination"><span class="module-name-text-summary" style="white-space: nowrap;">code.aexp.termination</span></a><span class="module-name-text-summary">, </span><a href="#code.aexp.termination"><span class="module-name-text-summary" style="white-space: nowrap;">code.aexp.termination</span></a><span class="module-name-text-summary">, </span><a href="#code.aexp.size"><span class="module-name-text-summary" style="white-space: nowrap;">code.aexp.size</span></a><span class="module-name-text-summary">, </span><a href="#code.aexp.normalisation"><span class="module-name-text-summary" style="white-space: nowrap;">code.aexp.normalisation</span></a><span class="module-name-text-summary">, </span><a href="#code.aexp.normalisation"><span class="module-name-text-summary" style="white-space: nowrap;">code.aexp.normalisation</span></a><span class="module-name-text-summary">, </span><a href="#code.aexp.big-steps"><span 
class="module-name-text-summary" style="white-space: nowrap;">code.aexp.big-steps</span></a><span class="module-name-text-summary">, </span><a href="#code.bexp"><span class="module-name-text-summary" style="white-space: nowrap;">code.bexp</span></a><span class="module-name-text-summary">, </span><a href="#code.bexp.sem"><span class="module-name-text-summary" style="white-space: nowrap;">code.bexp.sem</span></a><span class="module-name-text-summary">, </span><a href="#code.bexp.sem"><span class="module-name-text-summary" style="white-space: nowrap;">code.bexp.sem</span></a><span class="module-name-text-summary">, </span><a href="#code.imp"><span class="module-name-text-summary" style="white-space: nowrap;">code.imp</span></a><span class="module-name-text-summary">, </span><a href="#code.imp.det"><span class="module-name-text-summary" style="white-space: nowrap;">code.imp.det</span></a><span class="module-name-text-summary">, </span><a href="#code.imp.det"><span class="module-name-text-summary" style="white-space: nowrap;">code.imp.det</span></a><span class="module-name-text-summary">, </span><a href="#code.imp.loop"><span class="module-name-text-summary" style="white-space: nowrap;">code.imp.loop</span></a><span class="module-name-text-summary">, </span><a href="#code.imp.loop"><span class="module-name-text-summary" style="white-space: nowrap;">code.imp.loop</span></a><span class="module-name-text-summary">, </span><a href="#code.imp.small-steps"><span class="module-name-text-summary" style="white-space: nowrap;">code.imp.small-steps</span></a><span class="module-name-text-summary">, </span><a href="#code.imp.small-steps.trans"><span class="module-name-text-summary" style="white-space: nowrap;">code.imp.small-steps.trans</span></a><span class="module-name-text-summary">, </span><a href="#code.imp.small-steps.trans"><span class="module-name-text-summary" style="white-space: nowrap;">code.imp.small-steps.trans</span></a><span class="module-name-text-summary">, 
</span><a href="#code.imp.small-steps.chains"><span class="module-name-text-summary" style="white-space: nowrap;">code.imp.small-steps.chains</span></a><span class="module-name-text-summary">, </span><a href="#code.imp.small-steps.lemma1"><span class="module-name-text-summary" style="white-space: nowrap;">code.imp.small-steps.lemma1</span></a><span class="module-name-text-summary">, </span><a href="#code.imp.small-steps.lemma1"><span class="module-name-text-summary" style="white-space: nowrap;">code.imp.small-steps.lemma1</span></a><span class="module-name-text-summary">, </span><a href="#code.imp.big2small"><span class="module-name-text-summary" style="white-space: nowrap;">code.imp.big2small</span></a><span class="module-name-text-summary">, </span><a href="#code.imp.big2small"><span class="module-name-text-summary" style="white-space: nowrap;">code.imp.big2small</span></a><span class="module-name-text-summary">, </span><a href="#code.imp.small-vs-big-try"><span class="module-name-text-summary" style="white-space: nowrap;">code.imp.small-vs-big-try</span></a><span class="module-name-text-summary">, </span><a href="#code.imp.small-steps.gas"><span class="module-name-text-summary" style="white-space: nowrap;">code.imp.small-steps.gas</span></a><span class="module-name-text-summary">, </span><a href="#code.imp.small-steps.lemma2"><span class="module-name-text-summary" style="white-space: nowrap;">code.imp.small-steps.lemma2</span></a><span class="module-name-text-summary">, </span><a href="#code.imp.small-steps.lemma2"><span class="module-name-text-summary" style="white-space: nowrap;">code.imp.small-steps.lemma2</span></a><span class="module-name-text-summary">, </span><a href="#code.imp.small-steps.skip"><span class="module-name-text-summary" style="white-space: nowrap;">code.imp.small-steps.skip</span></a><span class="module-name-text-summary">, </span><a href="#code.imp.small-steps.skip"><span class="module-name-text-summary" style="white-space: 
nowrap;">code.imp.small-steps.skip</span></a><span class="module-name-text-summary">, </span><a href="#code.wf"><span class="module-name-text-summary" style="white-space: nowrap;">code.wf</span></a><span class="module-name-text-summary">, </span><a href="#code.imp.small2big"><span class="module-name-text-summary" style="white-space: nowrap;">code.imp.small2big</span></a><span class="module-name-text-summary">, </span><a href="#code.imp.small2big"><span class="module-name-text-summary" style="white-space: nowrap;">code.imp.small2big</span></a>
````
-- Propositional equality and equational-reasoning combinators.
module code.eq where
infix 4 _≡_
-- x ≡ y has the single constructor refl, usable only when both sides
-- are definitionally equal.
data _≡_ {A : Set} (x : A) : A → Set where
refl : x ≡ x
{-# BUILTIN EQUALITY _≡_ #-}
-- Symmetry, transitivity, congruence and substitution, each by matching refl.
sym : {A : Set} {x y : A} → x ≡ y → y ≡ x
sym refl = refl
trans : {A : Set} {x y z : A} → x ≡ y → y ≡ z → x ≡ z
trans refl refl = refl
cong : {A B : Set} (f : A → B) {x y : A} → x ≡ y → f x ≡ f y
cong f refl = refl
subst : ∀ {A : Set} {x y : A} (P : A → Set) → x ≡ y → P x → P y
subst P refl px = px
-- equational reasoning
-- begin_ opens a chain, _≡⟨_⟩_ adds a justified step, _∎ closes it.
infix 1 begin_
infixr 2 _≡⟨⟩_ _≡⟨_⟩_
infix 3 _∎
begin_ : {A : Set} {x y : A} → x ≡ y → x ≡ y
begin x≡y = x≡y
_≡⟨⟩_ : {A : Set} (x : A) {y : A} → x ≡ y → x ≡ y
x ≡⟨⟩ x≡y = x≡y
_≡⟨_⟩_ : {A : Set} (x : A) {y z : A} → x ≡ y → y ≡ z → x ≡ z
x ≡⟨ x≡y ⟩ y≡z = trans x≡y y≡z
_∎ : {A : Set} (x : A) → x ≡ x
x ∎ = refl
````
##### Natural numbers
````
-- Natural numbers: arithmetic, the ≤ ordering, and assorted order lemmas.
module code.nat where
open import code.eq public
data ℕ : Set where
zero : ℕ
suc : ℕ → ℕ
{-# BUILTIN NATURAL ℕ #-}
-- Addition, by recursion on the first argument.
infixl 5 _+_
_+_ : ℕ → ℕ → ℕ
zero + m = m
(suc n) + m = suc (n + m)
-- Multiplication, defined via addition.
infixl 6 _*_
_*_ : ℕ → ℕ → ℕ
zero * m = zero
(suc n) * m = m + (n * m)
-- Commutativity of + is postulated here rather than proved.
postulate +-comm : (m n : ℕ) → m + n ≡ n + m
+-comm-auto : ∀ {m n} → m + n ≡ n + m
+-comm-auto {m} {n} = +-comm m n
-- Inductive ordering on ℕ.
infix 4 _≤_
data _≤_ : ℕ → ℕ → Set where
0≤n : {n : ℕ} → 0 ≤ n
s≤s : {m n : ℕ} → m ≤ n → suc m ≤ suc n
≤-trans : ∀ {o m n} → o ≤ m → m ≤ n → o ≤ n
≤-trans 0≤n m≤n = 0≤n
≤-trans (s≤s o≤m) (s≤s m≤n) = s≤s (≤-trans o≤m m≤n)
-- A summand is bounded by the sum, from either side.
≤-+-left : ∀ {m1 m2} → m1 ≤ m1 + m2
≤-+-left {zero} = 0≤n
≤-+-left {suc m1} = s≤s ≤-+-left
≤-+-right : ∀ {m1 m2} → m2 ≤ m1 + m2
≤-+-right {m1} {m2} = subst (m2 ≤_) (sym (+-comm m1 m2)) (≤-+-left {m2} {m1})
suc-lemma : (m n : ℕ) → suc (m + n) ≡ m + suc n
suc-lemma 0 n =
suc (0 + n) ≡⟨⟩
suc n ≡⟨⟩
0 + suc n ∎
suc-lemma (suc m) n =
suc (suc m + n) ≡⟨⟩
suc (suc (m + n)) ≡⟨ cong suc (suc-lemma m n) ⟩
suc (m + suc n) ≡⟨⟩
suc m + suc n ∎
-- Agda 2.5.3 has a problem with these ones
-- Agda 2.6 works OK
-- (hence the postulates; the intended proofs are kept in comments below)
postulate ≤-suc2 : ∀ {m n} → suc m ≤ n → m ≤ n
--≤-suc2 sm≤n = ≤-trans ≤-+-right sm≤n
postulate ≤-+-cong-1 : ∀ {a b c : ℕ} → a ≤ c → a + b ≤ c + b
--≤-+-cong-1 0≤n = ≤-+-right
--≤-+-cong-1 (s≤s a≤c) = s≤s (≤-+-cong-1 a≤c)
postulate ≤-+-cong-2 : ∀ {a b c : ℕ} → b ≤ c → a + b ≤ a + c
{-≤-+-cong-2 {a} {b} {c} b≤c with +-comm b a | +-comm c a
... | p | q =
subst (a + b ≤_) q
(subst (_≤ c + a) p (≤-+-cong-1 {b = a} b≤c)) -}
postulate ≤-+-cong : ∀ {a b c d : ℕ} → a ≤ c → b ≤ d → a + b ≤ c + d
{- ≤-+-cong a≤c b≤d = ≤-trans (≤-+-cong-1 a≤c) (≤-+-cong-2 b≤d) -}
-- Strict order and its flip, defined from ≤.
infix 4 _<_
_<_ : ℕ → ℕ → Set
m < n = suc m ≤ n
infix 4 _>_
_>_ : ℕ → ℕ → Set
m > n = n < m
````
##### Dependent product
````
-- Dependent pairs (Σ), non-dependent conjunction, and an ∃ syntax.
module code.product where
infixr 4 _,_
record Σ {l} (A : Set l) (B : A → Set) : Set l where
constructor _,_
field
fst : A
snd : B fst
-- non-dependent product
infixr 2 _∧_
_∧_ : Set → Set → Set
A ∧ B = Σ A (λ _ → B)
-- ∃[ x ] B is sugar for Σ A (λ x → B).
infix 0 thereExists
thereExists : ∀ {l} {A : Set l} (B : A → Set) → Set l
thereExists {_} {A} B = Σ A B
syntax thereExists (λ x → B) = ∃[ x ] B
````
##### Negation
````
-- The empty type ⊥, its eliminator, and negation defined as A → ⊥.
module code.neg where
data ⊥ : Set where
⊥-elim : {A : Set} → ⊥ → A
⊥-elim ()
infix 3 ¬_ -- higher priority than ∨ and ∧
¬_ : Set → Set
¬ A = A → ⊥
````
# Introduction
In this tutorial we study a simple imperative language.
We begin by introducing two tools which will be used in the following:
First, we define the domain of [boolean values](#Boolean-values) and some common boolean functions.
Then, we show that the ordering `_≤_` on natural numbers is a [decidable](#Decidables) property.
We then introduce a simple language of [arithmetic](#Arithmetic-expressions) and [boolean expressions](#Boolean-expressions).
For arithmetic expressions, we consider [denotational](#Exercise:-⟦_⟧A_)
and operational semantics, both [small](#*Small-steps-operational-semantics) and [big-steps](#*Big-steps-operational-semantics).
We then turn our attention to a simple language of [imperative programs](#Imperative-programs). The main result will be to establish that its [big-steps](#Big-steps-operational-semantics) and [small-steps](#Small-steps-operational-semantics) operational semantics are equivalent.
# Boolean values
````
-- Boolean values (the 𝔹 data type is declared in the next code block).
module code.bool where
````
We define the domain of boolean values,
which will be useful later in order to give semantics to boolean expressions.
````
-- The two boolean constants.
data 𝔹 : Set where
true : 𝔹
false : 𝔹
````
## **Exercise**: Computation on `𝔹`
Define the semantic operations of conjunction, disjunction, and negation on boolean values.
```
{-# OPTIONS --allow-unsolved-metas #-}
-- Exercise module: every ? below is a hole for the reader to fill.
module code.bool.op where
open import code.bool public
-- Conjunction on 𝔹 (exercise).
infix 5 _&&_
_&&_ : 𝔹 → 𝔹 → 𝔹
true && true = ?
true && false = ?
false && true = ?
false && false = ?
-- Disjunction on 𝔹 (exercise).
infix 6 _||_
_||_ : 𝔹 → 𝔹 → 𝔹
b || c = ?
-- Negation on 𝔹 (exercise).
infix 4 ~_
~_ : 𝔹 → 𝔹
~ b = ?
```
# Decidables
## Order on `ℕ`
Consider again the inductive definition of `≤` for natural numbers:
```agda
--data _≤_ : ℕ → ℕ → Set where
--0≤n : {n : ℕ} → 0 ≤ n
--s≤s : {m n : ℕ} → m ≤ n → suc m ≤ suc n
```
<!--The essence of this definition is that evidence of `m ≤ n`
can be used to produce evidence of `suc m ≤ suc n`. -->
While the definition of `≤` can be used to establish `m ≤ n`
for concrete numbers `m` and `n`,
it does not directly yield an algorithm for checking whether `m ≤ n` holds.
(And for a good reason. As we will see below, inductive definitions such as `≤` can encode two-counter Minsky machines, and therefore such relations are undecidable in general.)
### **Exercise**: `≤ᵇ`
Write a program that returns `true` or `false` depending on whether `m ≤ n` holds or not.
*Hint:* Proceed by recursion on both arguments.
```
-- Exercise: boolean-valued comparison m ≤ᵇ n on ℕ (hole to fill).
module code.nat.leq where
open import code.nat public
open import code.bool public
infix 15 _≤ᵇ_
_≤ᵇ_ : ℕ → ℕ → 𝔹
m ≤ᵇ n = ?
```
The function `_≤ᵇ_` *computes* the answer to whether `m ≤ n` holds.
This means that `m ≤ n` is a *decidable* property of the natural numbers.
However, `_≤ᵇ_` does not itself compute a *proof* that `m ≤ n` holds,
and in fact there may well be bugs in the definition of `_≤ᵇ_`.
## Decidable properties
We show that we can develop `_≤ᵇ_` and the proof of its correctness in a single definition.
````
-- Decidable properties: a constructive yes/no answer carrying evidence.
module code.decidable where
open import code.neg public
open import code.nat public
````
We can combine a decision procedure for a property
together with the construction of the evidence that the property holds
with the introduction of the class of *decidable properties*.
````
-- Dec A: either `yes` with a proof of A, or `no` with a refutation of A.
data Dec (A : Set) : Set where
yes : A → Dec A
no : ¬ A → Dec A
````
The intuition is that evidence for `Dec A` has one of two possible forms:
Either it is of the form `yes p` where `p` is a proof that `A` holds,
or it is of the form `no ¬p` where `¬p` is a proof that `¬ A` holds.
The crucial improvement is the introduction of evidence also when the property does not hold.
### **Exercise:** `_≤_` is decidable
Show that the order `_≤_` on natural numbers is decidable.
This is an instance of *internal verification*,
whereby an algorithm and its proof of correctness are developed together.
```
{-# OPTIONS --allow-unsolved-metas #-}
-- Exercise: a decision procedure for _≤_ (holes to fill).
module code.nat.leq.decidable where
open import code.decidable public
-- Inversion: peel one suc off both sides of a ≤ proof.
≤-suc : {m n : ℕ} → suc m ≤ suc n → m ≤ n
≤-suc (s≤s x) = x
_≤?_ : (m n : ℕ) → Dec (m ≤ n)
0 ≤? _ = ?
(suc _) ≤? 0 = ?
(suc m) ≤? (suc n) with m ≤? n
... | yes m≤n = ?
... | no ¬m≤n = ?
```
# Arithmetic expressions
````
{-# OPTIONS --allow-unsolved-metas #-}
-- Arithmetic expressions: root module re-exporting the shared prerequisites.
module code.aexp where
open import code.eq public
open import code.nat public
open import code.decidable public
open import code.product public
````
We develop an eager denotational and operational semantics
for a simple language of arithmetic expressions,
and we prove that they agree.
## Variables
We represent *variable names* as natural numbers.
Any countable domain with decidable equality (such as strings) would work here.
````
-- Variable names are represented by natural numbers.
Var = ℕ
````
### Decidable equality
We show that variables have a decidable equality,
which boils down to show that the same property holds for natural numbers.
````
-- Decidable equality on variables, by simultaneous recursion on both numbers.
var-inv : ∀ {x y} → suc x ≡ suc y → x ≡ y
var-inv refl = refl
infix 5 _≡Var?_
_≡Var?_ : (x y : Var) → Dec (x ≡ y)
0 ≡Var? 0 = yes refl
0 ≡Var? suc _ = no λ ()
suc _ ≡Var? 0 = no λ ()
suc x ≡Var? suc y with x ≡Var? y
... | yes refl = yes refl
... | no neq = no λ eq → neq (var-inv eq)
````
## Syntax of expressions
We define a minimalistic language of arithmetic expressions
comprising variables and a let assignment construct.
````
-- Abstract syntax: numerals, variables, addition, and let-binding.
data AExp : Set where
num : ℕ → AExp
var : Var → AExp
plus : AExp → AExp → AExp
let-exp : Var → AExp → AExp → AExp
````
For example,
the following expression adds one to a variable (called 10).
This is pure syntax so far, no calculation is being performed.
````
-- Example term: (var 10) + 1, pure syntax — no computation yet.
add-one : AExp
add-one = plus (var 10) (num 1)
````
## Environments
In order to represent the value of free variables,
we use environments.
````
-- An environment maps every variable name to a number.
Env = Var → ℕ
````
The following environment assigns value `200` to the variable named `10`,
and value `40` to every other variable.
````
-- Example environment: variable 10 ↦ 200, every other variable ↦ 40.
ρ0 : Env
ρ0 10 = 200
ρ0 _ = 40
````
### **Exercise**: `Env` updates
Given an environment `ρ`, a variable `x` and a new value `m` for this variable,
let `ρ [ x ↦ m ]` be a new environment which assigns `m` to `x`,
and agrees with `ρ` elsewhere.
Complete the definition for the environment update function `_[_↦_]`.
```
{-# OPTIONS --allow-unsolved-metas #-}
-- Exercise: pointwise environment update ρ [ x ↦ m ] (holes to fill).
module code.aexp.env where
open import code.aexp public
infixr 30 _[_↦_]
_[_↦_] : Env → Var → ℕ → Env
(ρ [ x ↦ m ]) y with x ≡Var? y
... | yes _ = ?
... | no _ = ?
```
## **Exercise**: `⟦_⟧A_`
We are now ready to give denotational semantics to arithmetic expressions.
Complete the definition of `⟦_⟧A_` below.
```
{-# OPTIONS --allow-unsolved-metas #-}
-- Exercise: denotational semantics of AExp (hole; the solution follows below).
module code.aexp.sem where
open import code.aexp.env public
infix 10 ⟦_⟧A_
⟦_⟧A_ : AExp → Env → ℕ
⟦ e ⟧A ρ = ?
```
The solution is provided in order to be able to continue with the code below even without solving the exercise above.
````
-- Solution: evaluate by structural recursion; let-exp evaluates the bound
-- expression eagerly and extends the environment for the body.
module code.aexp.sem where
open import code.aexp.env public
infix 10 ⟦_⟧A_
⟦_⟧A_ : AExp → Env → ℕ
⟦ num n ⟧A ρ = n
⟦ var x ⟧A ρ = ρ x
⟦ plus e f ⟧A ρ = ⟦ e ⟧A ρ + ⟦ f ⟧A ρ
⟦ let-exp x e f ⟧A ρ = ⟦ f ⟧A (ρ [ x ↦ ⟦ e ⟧A ρ ])
````
With our denotational semantics for expressions we can check (by computation) the value of concrete expressions.
````
-- Sanity check: the example evaluates to 201 by computation (refl).
module code.aexp.test where
open import code.aexp.sem public
_ : ⟦ add-one ⟧A ρ0 ≡ 201
_ = refl
````
## *Small-steps operational semantics
````
-- Small-step operational semantics for AExp.
module code.aexp.small-steps where
open import code.aexp.sem public
````
We use global environments and eager semantics.
````
-- One-step reduction ρ ⊢ e ↝ e' (eager, with a global environment).
infix 4 _⊢_↝_
data _⊢_↝_ : Env → AExp → AExp → Set where
-- a variable steps to its value in ρ
↝-var : ∀ {ρ x} →
ρ ⊢ var x ↝ num (ρ x)
-- a fully-evaluated sum collapses to a numeral
↝-plus-stop : ∀ {ρ m n} →
ρ ⊢ plus (num m) (num n) ↝ num (m + n)
-- either summand may step (the relation is non-deterministic)
↝-plus-left : ∀ {ρ e f e'} →
ρ ⊢ e ↝ e' →
ρ ⊢ plus e f ↝ plus e' f
↝-plus-right : ∀ {ρ e f f'} →
ρ ⊢ f ↝ f' →
ρ ⊢ plus e f ↝ plus e f'
-- let: discard the binding once both parts are numerals…
↝-let-stop : ∀ {ρ x m n} →
ρ ⊢ let-exp x (num m) (num n) ↝ num n
-- …step the body under the extended environment…
↝-let-1 : ∀ {ρ x m f f'} →
ρ [ x ↦ m ] ⊢ f ↝ f' →
ρ ⊢ let-exp x (num m) f ↝ let-exp x (num m) f'
-- …or step the bound expression first.
↝-let-2 : ∀ {ρ x e e' f} →
ρ ⊢ e ↝ e' →
ρ ⊢ let-exp x e f ↝ let-exp x e' f
````
### Preservation
````
-- Preservation: a single reduction step does not change the denotation.
↝-resp-⟦⟧ : ∀ {ρ e f} → ρ ⊢ e ↝ f → ⟦ e ⟧A ρ ≡ ⟦ f ⟧A ρ
↝-resp-⟦⟧ {ρ} .{var _} .{_} ↝-var = refl
↝-resp-⟦⟧ {ρ} .{plus (num _) (num _)} .{num _} ↝-plus-stop = refl
↝-resp-⟦⟧ {ρ} {plus e f} {plus e' .f} (↝-plus-left step) =
cong (_+ ⟦ f ⟧A ρ) (↝-resp-⟦⟧ {ρ} {e} {e'} step)
↝-resp-⟦⟧ {ρ} {plus e f} {plus .e f'} (↝-plus-right step) =
cong (⟦ e ⟧A ρ +_) (↝-resp-⟦⟧ {ρ} {f} {f'} step)
↝-resp-⟦⟧ {ρ} .{let-exp _ (num _) (num _)} .{num _} ↝-let-stop = refl
↝-resp-⟦⟧ {ρ} {let-exp x (num m) e} {let-exp .x (num .m) f} (↝-let-1 step) =
↝-resp-⟦⟧ {ρ [ x ↦ m ]} {e} {f} step
↝-resp-⟦⟧ {ρ} {let-exp x e f} {let-exp .x e' .f} (↝-let-2 step)
with ↝-resp-⟦⟧ {ρ} {e} {e'} step
... | eq = cong (λ m → ⟦ f ⟧A (ρ [ x ↦ m ])) eq
````
### Transitive closure
We define the transitive closure of the small-step operational semantics.
````
-- Reflexive-transitive closure of the one-step relation.
data _⊢_↝*_ : Env → AExp → AExp → Set where
stop : ∀ {ρ e} → ρ ⊢ e ↝* e
one : ∀ {ρ e f g} → ρ ⊢ e ↝ f → ρ ⊢ f ↝* g → ρ ⊢ e ↝* g
````
We can indeed show that `_⊢_↝*_` is transitive with a standard induction.
````
-- Transitivity of the closure, by induction on the first derivation.
↝*-trans : ∀ {ρ e f g} → ρ ⊢ e ↝* f → ρ ⊢ f ↝* g → ρ ⊢ e ↝* g
↝*-trans stop d2 = d2
↝*-trans (one step d1) d2 = one step (↝*-trans d1 d2)
````
An easy induction based on `↝-resp-⟦⟧`
shows that the denotational semantics is preserved by the transitive closure.
````
-- Preservation lifted to multi-step reduction, via equational reasoning.
↝*-resp-⟦⟧ : ∀ {ρ e f} → ρ ⊢ e ↝* f → ⟦ e ⟧A ρ ≡ ⟦ f ⟧A ρ
↝*-resp-⟦⟧ {ρ} {e} {.e} stop = refl
↝*-resp-⟦⟧ {ρ} {e} {g} (one {f = f} step der) =
begin
⟦ e ⟧A ρ ≡⟨ ↝-resp-⟦⟧ {ρ} {e} {f} step ⟩
⟦ f ⟧A ρ ≡⟨ ↝*-resp-⟦⟧ {ρ} {f} {g} der ⟩
⟦ g ⟧A ρ
∎
````
This immediately implies that the small-step semantics,
if it terminates producing a number `m`,
then this is the right result.
````
-- If reduction reaches a numeral, it is exactly the denotational value.
↝*-agree-⟦⟧ : ∀ {ρ e m} → ρ ⊢ e ↝* num m → m ≡ ⟦ e ⟧A ρ
↝*-agree-⟦⟧ der = sym (↝*-resp-⟦⟧ der)
````
### Deterministic values
Notice that small-step semantics is a non-deterministic relation:
In general there may be several ways to reduce an expression
(as witnessed by the rules `↝-plus-left` and `↝-plus-right` for instance).
However, as an immediate consequence of preservation
we have that if two numerical values are eventually produced,
then they necessarily are the same number.
````
-- Any two numeric results of multi-step reduction coincide
-- (a direct consequence of preservation).
↝*-det : ∀ ρ e m n → ρ ⊢ e ↝* num m → ρ ⊢ e ↝* num n → m ≡ n
↝*-det ρ e m n der1 der2
with ↝*-resp-⟦⟧ der1 | ↝*-resp-⟦⟧ der2
... | eq1 | eq2 =
begin
m ≡⟨ sym eq1 ⟩
⟦ e ⟧A ρ ≡⟨ eq2 ⟩
n
∎
````
### Congruence
We show that the transitive closure `_⊢_↝*_` respects subexpressions.
````
-- Multi-step reduction is a congruence for plus and let subterms.
↝*-plus-cong-1 : ∀ {ρ e e' f} → ρ ⊢ e ↝* e' → ρ ⊢ plus e f ↝* plus e' f
↝*-plus-cong-1 stop = stop
↝*-plus-cong-1 (one x d) = one (↝-plus-left x) (↝*-plus-cong-1 d)
↝*-plus-cong-2 : ∀ {ρ e f f'} → ρ ⊢ f ↝* f' → ρ ⊢ plus e f ↝* plus e f'
↝*-plus-cong-2 stop = stop
↝*-plus-cong-2 (one x d) = one (↝-plus-right x) (↝*-plus-cong-2 d)
↝*-let-cong-1 : ∀ {ρ x e e' f} →
ρ ⊢ e ↝* e' →
ρ ⊢ let-exp x e f ↝* let-exp x e' f
↝*-let-cong-1 stop = stop
↝*-let-cong-1 (one x d) = one (↝-let-2 x) (↝*-let-cong-1 d)
-- The body may be reduced once the bound expression is a numeral.
↝*-let-cong-2 : ∀ {ρ x m f f'} →
ρ [ x ↦ m ] ⊢ f ↝* f' →
ρ ⊢ let-exp x (num m) f ↝* let-exp x (num m) f'
↝*-let-cong-2 stop = stop
↝*-let-cong-2 (one x d) = one (↝-let-1 x) (↝*-let-cong-2 d)
````
### Relational reasoning
We introduce some syntactic sugaring to conveniently write on chains of small steps.
````
-- Relational-reasoning sugar for writing chains of ↝ / ↝* steps.
infixr 2 _↝*⟨⟩_ _↝*⟨_⟩_ _↝⟨_⟩_
infix 3 _↝*∎
_↝*⟨⟩_ : ∀ {ρ} e {f} → ρ ⊢ e ↝* f → ρ ⊢ e ↝* f
e ↝*⟨⟩ e↝*f = e↝*f
_↝*⟨_⟩_ : ∀ {ρ} e {f g} → ρ ⊢ e ↝* f → ρ ⊢ f ↝* g → ρ ⊢ e ↝* g
e ↝*⟨ e↝*f ⟩ f↝*g = ↝*-trans e↝*f f↝*g
_↝⟨_⟩_ : ∀ {ρ} e {f g} → ρ ⊢ e ↝ f → ρ ⊢ f ↝* g → ρ ⊢ e ↝* g
e ↝⟨ e↝f ⟩ f↝*g = e ↝*⟨ one e↝f stop ⟩ f↝*g
_↝*∎ : ∀ {ρ} e → ρ ⊢ e ↝* e
e ↝*∎ = stop
````
### **Exercise**: Termination
So far we have shown that the small-step semantics produces the right result,
*if it produces any result at all*.
In fact, an operational semantics that never reaches a numerical value
trivially satisfies this condition.
We prove that the small-step operational semantics always reaches some numerical value after a finite number of steps. In other words, we prove below that the rewriting is weakly normalising.
```
{-# OPTIONS --allow-unsolved-metas #-}
-- Exercise: weak normalisation — every expression reduces to its value.
module code.aexp.termination where
open import code.aexp.small-steps public
↝*-terminates : ∀ {ρ} → (e : AExp) → ρ ⊢ e ↝* num (⟦ e ⟧A ρ)
↝*-terminates (num n) = ?
↝*-terminates (var x) = ?
↝*-terminates {ρ} (plus e f) = ?
↝*-terminates {ρ} (let-exp x e f) = ?
```
### **Exercise**: Strong normalisation
````
module code.aexp.size where
open import code.aexp.small-steps public
````
The termination property above is also called *weak normalisation*,
which means that there exists a finite reduction sequence `e ↝* f`
reaching a normal form (i.e. a value) `f ≡ num m`.
We show below even *strong normalisation*, namely,
*every* reduction sequence starting from `e` is finite.
To this end, we introduce a notion of size of arithmetic expressions.
````
-- Size measure on arithmetic expressions, used below to prove strong
-- normalisation: every small step strictly decreases this measure.
size : AExp → ℕ
size (num x) = 0
size (var x) = 1
size (plus e f) = 1 + size e + size f
size (let-exp x e f) = 1 + size e + size f
````
In the lemma below we show that the size of an expression decreases at each step, which implies strong normalisation.
```
{-# OPTIONS --allow-unsolved-metas #-}
module code.aexp.normalisation where
open import code.aexp.size public
↝*-SN : ∀ {ρ e f} → ρ ⊢ e ↝ f → size e > size f
↝*-SN ↝-var = ?
↝*-SN ↝-plus-stop = ?
↝*-SN (↝-plus-left de) = ?
↝*-SN (↝-plus-right de) = ?
↝*-SN ↝-let-stop = ?
↝*-SN (↝-let-1 de) = ?
↝*-SN (↝-let-2 de) = ?
```
## Big-steps operational semantics
````
module code.aexp.big-steps where
open import code.aexp.sem public
````
````
infix 4 _,_⇒_
-- Big-step evaluation: e , ρ ⇒ n means that expression e evaluates in
-- environment ρ to the number n in one "big" step.
data _,_⇒_ : AExp → Env → ℕ → Set where
  ⇒-num : ∀ {n ρ} → num n , ρ ⇒ n
  ⇒-var : ∀ {x ρ} → var x , ρ ⇒ ρ x
  ⇒-plus : ∀ {e f m n ρ} → e , ρ ⇒ m → f , ρ ⇒ n → plus e f , ρ ⇒ m + n
  -- the body f is evaluated in the environment extended with x ↦ m
  ⇒-let : ∀ {x e f m n ρ} → e , ρ ⇒ m → f , ρ [ x ↦ m ] ⇒ n
    → let-exp x e f , ρ ⇒ n
````
### Evaluation is deterministic
````
-- Big-step evaluation is deterministic: two derivations for the same
-- expression and environment yield equal results.
⇒-det : ∀ {e ρ m n} → e , ρ ⇒ m → e , ρ ⇒ n → m ≡ n
⇒-det ⇒-num ⇒-num = refl
⇒-det ⇒-var ⇒-var = refl
⇒-det (⇒-plus x x₁) (⇒-plus y y₁)
  with ⇒-det x y | ⇒-det x₁ y₁
... | refl | refl = refl
-- sequential with-abstractions: the second call needs the unification
-- performed by the first (see the remark following this proof).
⇒-det (⇒-let ⇒₁-e ⇒₁-f) (⇒-let ⇒₂-e ⇒₂-f)
  with ⇒-det ⇒₁-e ⇒₂-e
... | refl
  with ⇒-det ⇒₁-f ⇒₂-f
... | refl = refl
````
Note that in the `⇒-let` case we cannot perform the two with-abstractions in parallel because in order to apply the second one `⇒-det ⇒₁-f ⇒₂-f`
we need the result of the first one.
### Agreement of the semantics
The following lemma shows that the big-steps operational semantics agrees with the denotational semantics.
````
-- Agreement with the denotational semantics: every expression
-- big-steps to its denotation.  Together with determinism (⇒-det)
-- this characterises the big-step result completely.
⇒-agree-⟦⟧ : ∀ {e ρ} → e , ρ ⇒ ⟦ e ⟧A ρ
⇒-agree-⟦⟧ {num x} = ⇒-num
⇒-agree-⟦⟧ {var x} = ⇒-var
⇒-agree-⟦⟧ {plus e e₁} = ⇒-plus ⇒-agree-⟦⟧ ⇒-agree-⟦⟧
⇒-agree-⟦⟧ {let-exp x e f} = ⇒-let ⇒-agree-⟦⟧ ⇒-agree-⟦⟧
````
# Boolean expressions
````
module code.bexp where
open import code.aexp.sem public
open import code.nat.leq.decidable public
open import code.bool.op public
````
We define a simple language of boolean expressions.
## Syntax of expressions
An element in `BExp` is a boolean combination
of atomic expressions of the form `leq e f`,
where `e` and `f` are arithmetic expressions.
````
-- Boolean expressions: closed under the usual connectives, with
-- comparisons `leq e f` over arithmetic expressions as atoms.
data BExp : Set where
  tt : BExp                  -- constant true
  ff : BExp                  -- constant false
  and : BExp → BExp → BExp   -- conjunction
  or : BExp → BExp → BExp    -- disjunction
  not : BExp → BExp          -- negation
  leq : AExp → AExp → BExp   -- comparison of two arithmetic expressions
````
## **Exercise**: `⟦_⟧B_`
Define the denotational semantics of boolean expressions.
*Hint:* In the `leq` case you will need `_≤?_`.
```
{-# OPTIONS --allow-unsolved-metas #-}
module code.bexp.sem where
open import code.bexp public
infix 10 ⟦_⟧B_
⟦_⟧B_ : BExp → Env → 𝔹
⟦ b ⟧B ρ = ?
```
# Imperative programs
````
module code.imp where
open import code.bexp.sem public
infixr 20 _⨟_
infix 25 _≔_
````
## Syntax of programs
We define a simple imperative language,
leveraging on arithmetic and boolean expressions defined above.
````
data Cmd : Set where
skip : Cmd
_≔_ : Var → AExp → Cmd
_⨟_ : Cmd → Cmd → Cmd
if_then_else_ : BExp → Cmd → Cmd → Cmd
while_do:_ : BExp → Cmd → Cmd
````
For example, the following is a valid program (piece of syntax).
````
-- The canonical diverging program: the guard is the constant true and
-- the body does nothing.
loop : Cmd
loop = while tt do: skip
````
(We added a colon in the syntax of `while_do:_`
because `do` is a reserved keyword.)
## Denotational semantics
It may come as a surprise,
but it is not possible to define the semantics of imperative programs as an Agda function,
because Agda allows only terminating (i.e., total) functions.
On the other hand, imperative programs, such as `loop` above, may not terminate.
Therefore, we resort to an operational semantics.
## Big-steps operational semantics
We begin with the notion of state,
which is the same as environments as of now,
but need not be.
````
State = Var → ℕ
````
The definition of the operational semantics of imperative programs
follows a case analysis.
````
infix 4 _,_⇒_
data _,_⇒_ : Cmd → State → State → Set where
````
### `skip`
The `skip` command terminates immediately without changing the state.
````
⇒-skip : ∀ {s} →
skip , s ⇒ s
````
### `x ≔ e`
The assignment command modifies the state
by updating the value of `x` to the value of `e` in the current state.
````
⇒-assign : ∀ {s x e} →
x ≔ e , s ⇒ s [ x ↦ ⟦ e ⟧A s ]
````
### `c ⨟ d`
Sequencing two commands amounts to threading the state change through both.
````
⇒-seq : ∀ {c d s s' s''} →
c , s ⇒ s' →
d , s' ⇒ s'' →
c ⨟ d , s ⇒ s''
````
### `if b then c else d`
For conditionals, there are two cases to consider,
depending on whether the condition evaluates to true or to false.
````
⇒-if-tt : ∀ {b s c s' d} →
⟦ b ⟧B s ≡ true →
c , s ⇒ s' →
if b then c else d , s ⇒ s'
⇒-if-ff : ∀ {b s c s' d} →
⟦ b ⟧B s ≡ false →
d , s ⇒ s' →
if b then c else d , s ⇒ s'
````
### `while b do: c`
Similarly, for while loops there are two cases to consider.
````
⇒-while-tt : ∀ {b s c s' s''} →
⟦ b ⟧B s ≡ true →
c , s ⇒ s' →
while b do: c , s' ⇒ s'' →
while b do: c , s ⇒ s''
⇒-while-ff : ∀ {b s c} →
⟦ b ⟧B s ≡ false →
while b do: c , s ⇒ s
````
This concludes the definition of the operational semantics ` c , s ⇒ s'`.
## **Exercise**: `do: c while b`
Extend the syntax and semantics of imperative programs
by adding a new construct
````
-- do: c while b
````
where `c` is a command and `b` a boolean expression.
The informal semantics of the do-while construct
is to first execute `c`,
and then evaluate the guard `b`:
If it evaluates to true, then we repeat the process,
otherwise we exit the loop.
## **Exercise**: Determinism
Since we cannot encode the semantics of imperative programs as a function,
we immediately lose some properties which are for free for functions.
One example is deterministic evaluation.
Consequently, we need to prove separately
that evaluation of programs is deterministic.
Show that evaluation of imperative programs is deterministic.
```
{-# OPTIONS --allow-unsolved-metas #-}
module code.imp.det where
open import code.imp public
-- convenient to rule out some impossible cases.
false≡true : {A : Set} → false ≡ true → A
false≡true ()
⇒-det : ∀ {c s s' s''} → c , s ⇒ s' → c , s ⇒ s'' → s' ≡ s''
⇒-det ⇒-skip ⇒-skip = ?
⇒-det ⇒-assign ⇒-assign = ?
⇒-det (⇒-seq der1 der2) (⇒-seq der3 der4) = {! ⇒-det der1 der2 !}
⇒-det (⇒-if-tt _ der1) (⇒-if-tt _ der2) = ?
⇒-det (⇒-if-tt x _) (⇒-if-ff y _) = ?
⇒-det (⇒-if-ff x _) (⇒-if-tt y _) = ?
⇒-det (⇒-if-ff _ der1) (⇒-if-ff _ der2) = ?
⇒-det (⇒-while-tt x der1 der2) (⇒-while-tt y der3 der4) = ?
⇒-det (⇒-while-tt x _ _) (⇒-while-ff y) = ?
⇒-det (⇒-while-ff x) (⇒-while-tt y _ _) = ?
⇒-det (⇒-while-ff _) (⇒-while-ff _) = ?
```
## **Exercise**: `loop`
Show that the program `loop` introduced above never stops.
```
module code.imp.loop where
open import code.imp.det public
loop-⊥ : ∀ {s s'} → ¬ (loop , s ⇒ s')
loop-⊥ = ?
```
## Small-steps operational semantics
````
module code.imp.small-steps where
open import code.imp.det public
open import code.product public
````
We provide an alternative small-steps semantics for imperative programs.
### Configurations
````
Conf = Cmd ∧ State -- Cartesian product
````
We treat configurations of the form `skip , s` as final,
hence there is no rule for the `skip` command.
````
infix 3 _↝_
data _↝_ : Conf → Conf → Set where
````
### `x ≔ e`
In the rule for assignment,
we just evaluate the arithmetic expression `e`
and update the state accordingly.
(We could have used the operational semantics of arithmetic expressions here,
but we avoid it for simplicity.
A similar remark applies to boolean expressions below.)
````
↝-assign : ∀ {x e s} →
x ≔ e , s ↝ skip , s [ x ↦ ⟦ e ⟧A s ]
````
### `c ⨟ d`
In the case of sequencing `c ⨟ d`,
there are two cases.
In the case `↝-seq-left`, we evaluate one step of `c`.
In the case `↝-seq-right`, `c` has finished and we continue with `d`.
````
↝-seq-left : ∀ {c s c' s' d} →
c , s ↝ c' , s' →
c ⨟ d , s ↝ c' ⨟ d , s'
↝-seq-right : ∀ {d s} →
skip ⨟ d , s ↝ d , s
````
### `if b then c else d`
The conditional has two symmetric cases,
depending on whether the condition evaluates to `true` or `false`.
````
↝-if-tt : ∀ {b s c d} →
⟦ b ⟧B s ≡ true →
if b then c else d , s ↝ c , s
↝-if-ff : ∀ {b s c d} →
⟦ b ⟧B s ≡ false →
if b then c else d , s ↝ d , s
````
### `while b do: c`
The while looping construct also has two cases.
If the condition `b` evaluates to true,
then the command rewrites to `c ⨟ while b do: c`.
Otherwise, it terminates rewriting to `skip`.
````
↝-while-tt : ∀ {b c s} →
⟦ b ⟧B s ≡ true →
while b do: c , s ↝ c ⨟ while b do: c , s
↝-while-ff : ∀ {b c s} →
⟦ b ⟧B s ≡ false →
while b do: c , s ↝ skip , s
````
## Transitive closure `↝*`
In order to formalise this, we first need to be able to combine many small steps together, i.e., we take the transitive closure of `_,_↝_,_`.
````
infix 3 _↝*_
data _↝*_ : Conf → Conf → Set where
stop : ∀ {x} → x ↝* x
one : ∀ {x y z} → x ↝ y → y ↝* z → x ↝* z
````
### **Exercise**: Transitivity
Show that the relation `↝*` on configurations is indeed transitive.
*Hint*: Do induction on `der1`.
```
{-# OPTIONS --allow-unsolved-metas #-}
module code.imp.small-steps.trans where
open import code.imp.small-steps public
↝*-trans : ∀ {x y z} → x ↝* y → y ↝* z → x ↝* z
↝*-trans der1 der2 = ?
```
### Notation for transitive closure
````
module code.imp.small-steps.chains where
open import code.imp.small-steps.trans public
````
The following suggestive notation will be useful for simplifying proofs involving `↝*`.
They are entirely analogous to the chain of equalities constructed with `≡⟨⟩`.
````
infix 1 start_
infixr 2 _↝*⟨⟩_ _↝⟨_⟩_ _↝*⟨_⟩_
infix 3 _end
start_ : ∀ {x y} → x ↝* y → x ↝* y
start x↝*y = x↝*y
_↝*⟨⟩_ : ∀ x {y} → x ↝* y → x ↝* y
x ↝*⟨⟩ x↝*y = x↝*y
_↝⟨_⟩_ : ∀ x {y z} → x ↝ y → y ↝* z → x ↝* z
x ↝⟨ x↝y ⟩ y↝*z = one x↝y y↝*z
_↝*⟨_⟩_ : ∀ x {y z} → x ↝* y → y ↝* z → x ↝* z
x ↝*⟨ x↝*y ⟩ y↝*z = ↝*-trans x↝*y y↝*z
_end : ∀ x → x ↝* x
x end = stop
````
### **Exercise**: A lemma about `⨟`
Our objective is to show that small steps and big steps semantics agree.
Before showing this,
we need to prove a lemma
connecting sequencing `⨟` and `↝*`.
```
{-# OPTIONS --allow-unsolved-metas #-}
module code.imp.small-steps.lemma1 where
open import code.imp.small-steps.chains public
⨟-lemma-1 : ∀ {c d s s' s''} →
c , s ↝* skip , s'' →
d , s'' ↝* skip , s' →
c ⨟ d , s ↝* skip , s'
⨟-lemma-1 stop der2 = ?
⨟-lemma-1 {c} {d} {s} {s'} {s''} (one {y = c' , s'''} step der1) der2 = ?
```
## **Exercise**: `big→small`
Once we have two alternative semantics for imperative programs,
the question arises as to whether they are equivalent.
We show that the big steps and the small steps operational semantics of imperative programs yield the same results.
There are two directions to show.
We begin in this section with the easier direction `big→small`:
the big step semantics implies the small step one.
```
{-# OPTIONS --allow-unsolved-metas #-}
module code.imp.big2small where
open import code.imp.small-steps.lemma1 public
big→small : ∀ c s s' → c , s ⇒ s' → c , s ↝* skip , s'
big→small = ?
```
## From small to big steps: first attempt
````
module code.imp.small-vs-big-try where
open import code.imp.big2small public
````
We turn now our attention to the other direction of the equivalence between small and big steps operational semantics,
namely
````
small→big : ∀ c s s' → c , s ↝* skip , s' → c , s ⇒ s'
````
A natural starting point is the converse of `⨟-lemma-1` above.
````
postulate ⨟-lemma-2 : ∀ {c d s s'} → c ⨟ d , s ↝* skip , s' → ∃[ s'' ] c , s ↝* skip , s'' ∧ d , s'' ↝* skip , s'
````
However, it turns out that the statement `small→big` above
creates problems for the termination checker in the first `while` case
(all the other cases go through):
````
small→big (while b do: c) s s' (one (↝-while-tt b≡true) ↝*-der)
with ⨟-lemma-2 ↝*-der
... | s'' , ↝*-der1 , ↝*-der2
with small→big c s s'' ↝*-der1 |
? {- small→big (while b do: c) s'' s' ↝*-der2 -}
... | ⇒-der1 | ⇒-der2 = ⇒-while-tt b≡true ⇒-der1 ⇒-der2
small→big _ _ _ _ = ?
````
The issue with the commented code above
is that no argument of the call
````
-- small→big (while b do: c) s'' s' ↝*-der2
````
is structurally smaller than the original call.
What is missing here is an argument to convince the termination checker
that `↝*-der2` is "smaller" than ` ↝*-der`.
In order to formalise this, we need to refine the `↝*` relation
in order to take into account the *number* of derivation steps.
## Transitive closure with gas
````
module code.imp.small-steps.gas where
open import code.imp.big2small public
````
The idea is to enrich the transitive closure relation
with the information about how many derivation steps have been made so far.
````
infix 3 _↝*_#_
data _↝*_#_ : Conf → Conf → ℕ → Set where
stop : ∀ {x} → x ↝* x # 0
one : ∀ {x y z n} → x ↝ y → y ↝* z # n → x ↝* z # suc n
````
In the base case `stop` we terminate the computation immediately with `0` number of steps,
and in the inductive case `one` we add an extra step
to the number of steps `n` performed inductively.
### **Exercise**: A second lemma about `⨟`
We can now prove the converse of `⨟-lemma-1` above
in the richer setting offered by `_↝*_#_`.
(Also `⨟-lemma-1` can be generalised to `_↝*_#_`,
but we won't need it here.)
```
{-# OPTIONS --allow-unsolved-metas #-}
module code.imp.small-steps.lemma2 where
open import code.imp.small-steps.gas public
⨟-lemma-2 : ∀ {c d s s' m} →
c ⨟ d , s ↝* skip , s' # m →
∃[ s'' ] ∃[ m1 ] ∃[ m2 ]
c , s ↝* skip , s'' # m1 ∧
d , s'' ↝* skip , s' # m2 ∧
suc (m1 + m2) ≡ m
⨟-lemma-2 (one (↝-seq-left step) ↝-der) = ?
⨟-lemma-2 {s = s} (one {n = n} ↝-seq-right ↝-der) = ?
```
### **Exercise**: `skip` and `↝*`
Show that executing the `skip` command necessarily terminates in `0` steps.
*Hint*: Convince Agda that only the case `stop` needs to be considered.
```
{-# OPTIONS --allow-unsolved-metas #-}
module code.imp.small-steps.skip where
open import code.imp.small-steps.gas public
↝*-skip : ∀ {s c s' m} → skip , s ↝* c , s' # m → c ≡ skip ∧ s ≡ s' ∧ m ≡ 0
↝*-skip der = ?
```
## Well-founded induction
````
module code.wf where
open import code.nat public
````
We now need a [technique](https://stackoverflow.com/questions/19642921/assisting-agdas-termination-checker) to convince the termination checker of Agda
that recursively calling `small→big` on derivations of strictly smaller length will eventually terminate.
This is called *well-founded induction*
(a.k.a. *complete induction*, *course of values induction*, or *strong induction*),
a general technique that can be applied to many other settings.
First of all we define a data structure that expresses the fact that a natural number is *accessible*.
````
-- Accessibility predicate for well-founded induction on _<_.
-- Acc and Pre are mutually defined, hence the forward declarations.
data Acc (n : ℕ) : Set
Pre : ℕ → Set
data Acc n where
  acc : Pre n → Acc n
-- n is accessible when every strictly smaller m is accessible.
Pre n = ∀ m → m < n → Acc m
````
Intuitively, a number `n` is accessible
if every strictly smaller number `m < n` is also accessible.
In this way, well-founded induction becomes a simple *structural induction*
on the evidence that `Acc n` holds.
We are ready to state and prove that the strict order `_<_` is *well-founded*:
````
-- Well-foundedness of _<_ : every natural number is accessible.
<-wf : ∀ n → Acc n
-- zero has no predecessors, so the premise of acc holds vacuously.
<-wf zero = acc λ _ ()
<-wf (suc n) with <-wf n
... | acc f = acc g
  where
  -- Predecessors of suc n: zero is vacuously accessible; for suc m
  -- with m < n, reuse the accessibility of n via transitivity of ≤.
  g : Pre (suc n)
  g zero _ = acc λ _ ()
  g (suc m) (s≤s m<n) = acc λ o o≤m → f o (≤-trans o≤m m<n)
````
<!--In the base case, `zero` is trivially accessible since it has no predecessors.
In the inductive step, we have to show that `suc n` is accessible,
and we inductively assume that `n` is accessible.-->
## **Exercise**: `small→big`
We are now ready to prove that the small step semantics implies the big step one.
```
module code.imp.small2big where
open import code.imp.small-steps.skip public
open import code.imp.small-steps.lemma2 public
open import code.wf public
small→big : ∀ c s s' n → c , s ↝* skip , s' # n → c , s ⇒ s'
small→big c s s' n ↝*-der = go c s s' n ↝*-der (<-wf n) where
go : ∀ c s s' n → c , s ↝* skip , s' # n → Acc n → c , s ⇒ s'
go = ?
```
| github_jupyter |
___
<a href='http://www.pieriandata.com'> <img src='../Pierian_Data_Logo.png' /></a>
___
# Python Crash Course Exercises
This is an optional exercise to test your understanding of Python Basics. If you find this extremely challenging, then you probably are not ready for the rest of this course yet and don't have enough programming experience to continue. I would suggest you take another course more geared towards complete beginners, such as [Complete Python Bootcamp](https://www.udemy.com/complete-python-bootcamp/?couponCode=PY20)
## Exercises
Answer the questions or complete the tasks outlined in bold below, use the specific method described if applicable.
** What is 7 to the power of 4?**
** Split this string:**
s = "Hi there Sam!"
**into a list. **
** Given the variables:**
planet = "Earth"
diameter = 12742
** Use .format() to print the following string: **
The diameter of Earth is 12742 kilometers.
```
planet = "Earth"
diameter = 12742
```
** Given this nested list, use indexing to grab the word "hello" **
```
lst = [1,2,[3,4],[5,[100,200,['hello']],23,11],1,7]
```
** Given this nested dictionary grab the word "hello". Be prepared, this will be annoying/tricky **
```
d = {'k1':[1,2,3,{'tricky':['oh','man','inception',{'target':[1,2,3,'hello']}]}]}
```
** What is the main difference between a tuple and a list? **
```
# Tuple is immutable
```
** Create a function that grabs the email website domain from a string in the form: **
user@domain.com
**So for example, passing "user@domain.com" would return: domain.com**
```
domainGet('user@domain.com')
```
** Create a basic function that returns True if the word 'dog' is contained in the input string. Don't worry about edge cases like a punctuation being attached to the word dog, but do account for capitalization. **
```
findDog('Is there a dog here?')
```
** Create a function that counts the number of times the word "dog" occurs in a string. Again ignore edge cases. **
```
countDog('This dog runs faster than the other dog dude!')
```
** Use lambda expressions and the filter() function to filter out words from a list that don't start with the letter 's'. For example:**
seq = ['soup','dog','salad','cat','great']
**should be filtered down to:**
['soup','salad']
```
seq = ['soup','dog','salad','cat','great']
```
### Final Problem
**You are driving a little too fast, and a police officer stops you. Write a function
to return one of 3 possible results: "No ticket", "Small ticket", or "Big Ticket".
If your speed is 60 or less, the result is "No Ticket". If speed is between 61
and 80 inclusive, the result is "Small Ticket". If speed is 81 or more, the result is "Big Ticket". Unless it is your birthday (encoded as a boolean value in the parameters of the function) -- on your birthday, your speed can be 5 higher in all
cases. **
```
def caught_speeding(speed, is_birthday):
    """Classify a speeding offence.

    Speed <= 60 gives "No Ticket", 61-80 gives "Small Ticket", and
    81 or more gives "Big Ticket".  On your birthday every threshold
    is raised by 5.
    """
    # birthday allowance shifts both thresholds upward by 5
    allowance = 5 if is_birthday else 0
    if speed <= 60 + allowance:
        return 'No Ticket'
    if speed <= 80 + allowance:
        return 'Small Ticket'
    return 'Big Ticket'
caught_speeding(81,True)
caught_speeding(81,False)
```
# Great job!
| github_jupyter |
```
import uuid
import json
roiIndex = 1
cellIndex = 1
def get_annotation(filename):
    """Load a VoTT-style annotation JSON file and return its 'regions' list.

    The context manager closes the file; the explicit f.close() of the
    original was redundant and has been removed.
    """
    with open(filename) as f:
        data = json.load(f)
    return data['regions']
def get_rois(regions, tagGroup, formatAnnotationTagsLookup):
    """Build ROI dicts for every region whose first tag starts with tagGroup.

    Increments the module-level ``roiIndex`` counter so ROI ids stay unique
    across calls.  Unknown tags map to an empty ``annotationTags`` string.
    """
    global roiIndex
    rois = []
    index = 0  # position of the ROI within this tag group
    for region in regions:
        tag = str(region['tags'][0])
        if not tag.startswith(tagGroup):
            continue
        box = region['boundingBox']
        top = int(box['top'])
        left = int(box['left'])
        rois.append({
            # dict.get replaces the original try/except KeyError fallback
            "annotationTags": formatAnnotationTagsLookup.get(tag, ""),
            "extractionMethod": "NUMERIC_CLASSIFICATION",
            "roiId": str(roiIndex),
            "index": index,
            "rect": {
                "top": top,
                "left": left,
                "bottom": top + int(box['height']),
                "right": left + int(box['width'])
            }
        })
        index = index + 1
        roiIndex = roiIndex + 1
    return rois
def get_cells(regions, tagGroups, formatLookup, formatNameLookup):
    """Build one cell entry per tag group, each holding its matching ROIs.

    Uses the module-level counter ``cellIndex`` for globally unique cell
    ids.  NOTE(review): this reads the module-level dict
    ``formatAnnotationTagsLookup`` (defined later in the notebook) instead
    of taking it as a parameter; this works because the dict exists by the
    time the function is first called.
    """
    global cellIndex
    cells_data = []
    renderIndex = 1
    for tagGroup in tagGroups:
        # a single try covers both lookups: if either key is missing,
        # both fields fall back to the empty string (original behaviour)
        try:
            formatValue = formatLookup[str(tagGroup)]
            formatName = formatNameLookup[str(tagGroup)]
        except KeyError:
            formatValue = ""
            formatName = ""
        cells_data.append({
            "cellId": str(cellIndex),
            "rois": get_rois(regions, tagGroup, formatAnnotationTagsLookup),
            "render": {
                "index": renderIndex
            },
            "format": {
                "name": formatName,
                "value": formatValue
            },
            "validate": {
                "regExp": ""
            }
        })
        renderIndex = renderIndex + 1
        cellIndex = cellIndex + 1
    return cells_data
def get_layout(cells):
    """Wrap the cell list in the top-level layout document.

    The original built a one-element list and returned ``[0]``;
    returning the dict directly is equivalent.
    """
    return {
        "layout": {
            "version": "1.0",
            "name": "MULTISUBJECT1S10Q Exam Sheet Form",
            "cells": cells
        }
    }
def pp_json(json_thing, sort=True, indents=4):
    """Pretty-print JSON to stdout.

    Accepts either a JSON string (parsed first) or any JSON-serialisable
    object.  Keys are sorted by default.  Always returns None.
    """
    # isinstance is the idiomatic type test (also covers str subclasses)
    if isinstance(json_thing, str):
        payload = json.loads(json_thing)
    else:
        payload = json_thing
    print(json.dumps(payload, sort_keys=sort, indent=indents))
    return None
# Parse the VoTT export into its region list, then define the ordered
# list of cell tag groups: roll number plus questions 1-10 for each of
# the five subjects ("...IDTEN" is the tag spelling for question 10).
regions=get_annotation("MULTISUBJECT1S10Q_vottraw.json")
#regions
tagGroups = ["ROLLNUMBER", "HINDIID1","HINDIID2","HINDIID3","HINDIID4","HINDIID5","HINDIID6","HINDIID7","HINDIID8","HINDIID9","HINDIIDTEN","ENGLISHID1","ENGLISHID2","ENGLISHID3","ENGLISHID4","ENGLISHID5","ENGLISHID6","ENGLISHID7","ENGLISHID8","ENGLISHID9","ENGLISHIDTEN","MATHSID1","MATHSID2","MATHSID3","MATHSID4","MATHSID5","MATHSID6","MATHSID7","MATHSID8","MATHSID9","MATHSIDTEN","EVSID1","EVSID2","EVSID3","EVSID4","EVSID5","EVSID6","EVSID7","EVSID8","EVSID9","EVSIDTEN","SCIENCEID1","SCIENCEID2","SCIENCEID3","SCIENCEID4","SCIENCEID5","SCIENCEID6","SCIENCEID7","SCIENCEID8","SCIENCEID9","SCIENCEIDTEN"]
#rois=get_rois(regions,tagGroups[0])
# Maps each VoTT tag-group name to its human-readable format label:
# "ROLLNUMBER" plus "<SUBJECT> <question>" for questions 1-10 of each
# subject ("...IDTEN" is the tag spelling used for question 10).
formatLookup = dict(
    [("ROLLNUMBER", "ROLLNUMBER")]
    + [
        (f"{subject}ID{'TEN' if question == 10 else question}",
         f"{subject} {question}")
        for subject in ("HINDI", "ENGLISH", "MATHS", "EVS", "SCIENCE")
        for question in range(1, 11)
    ]
)
# Maps each tag-group name to its canonical format name: identity for
# every tag except the "...IDTEN" spellings, which normalise to "...ID10".
formatNameLookup = dict(
    [("ROLLNUMBER", "ROLLNUMBER")]
    + [
        (f"{subject}ID{'TEN' if question == 10 else question}",
         f"{subject}ID{question}")
        for subject in ("HINDI", "ENGLISH", "MATHS", "EVS", "SCIENCE")
        for question in range(1, 11)
    ]
)
# Maps each per-ROI annotation tag to its canonical id: three roll-number
# digit positions (ROLLNUMBER1..3 -> ROLLNUMBERID1..3) plus the 50
# per-subject question tags ("...IDTEN" normalises to "...ID10").
formatAnnotationTagsLookup = dict(
    [(f"ROLLNUMBER{digit}", f"ROLLNUMBERID{digit}") for digit in (1, 2, 3)]
    + [
        (f"{subject}ID{'TEN' if question == 10 else question}",
         f"{subject}ID{question}")
        for subject in ("HINDI", "ENGLISH", "MATHS", "EVS", "SCIENCE")
        for question in range(1, 11)
    ]
)
# Build every cell (with its ROIs) from the annotated regions, then
# pretty-print the final layout document without sorting keys.
cells=get_cells(regions,tagGroups,formatLookup,formatNameLookup)
pp_json(get_layout(cells),False)
```
| github_jupyter |

[](https://colab.research.google.com/github/JohnSnowLabs/spark-nlp-workshop/blob/master/tutorials/Certification_Trainings/Healthcare/11.2.Pretrained_NER_Profiling_Pipelines.ipynb)
```
import json, os
# Upload the John Snow Labs license file in Colab and parse it as JSON.
from google.colab import files
license_keys = files.upload()
with open(list(license_keys.keys())[0]) as f:
    license_keys = json.load(f)
# Defining license key-value pairs as local variables
locals().update(license_keys)
# Adding license key-value pairs to environment variables
os.environ.update(license_keys)
# Installing pyspark and spark-nlp
! pip install --upgrade -q pyspark==3.1.2 spark-nlp==$PUBLIC_VERSION
# Installing Spark NLP Healthcare
! pip install --upgrade -q spark-nlp-jsl==$JSL_VERSION --extra-index-url https://pypi.johnsnowlabs.com/$SECRET
import json
import os
from pyspark.ml import Pipeline,PipelineModel
from pyspark.sql import SparkSession
from sparknlp.annotator import *
from sparknlp_jsl.annotator import *
from sparknlp.base import *
import sparknlp_jsl
import sparknlp
# Spark driver settings used to start the licensed Spark NLP session.
params = {"spark.driver.memory":"16G",
"spark.kryoserializer.buffer.max":"2000M",
"spark.driver.maxResultSize":"2000M"}
spark = sparknlp_jsl.start(license_keys['SECRET'],params=params)
print ("Spark NLP Version :", sparknlp.version())
print ("Spark NLP_JSL Version :", sparknlp_jsl.version())
```
# Clinical NER Model Profiling Pretrained Pipeline
This pipeline can be used to explore all the available pretrained NER models at once. When you run this pipeline over your text, you will end up with the predictions coming out of each pretrained clinical NER model trained with `embeddings_clinical`.
|Clinical NER Model List|
|-|
|ner_ade_clinical|
|ner_posology_greedy|
|ner_risk_factors|
|jsl_ner_wip_clinical|
|ner_human_phenotype_gene_clinical|
|jsl_ner_wip_greedy_clinical|
|ner_cellular|
|ner_cancer_genetics|
|jsl_ner_wip_modifier_clinical|
|ner_drugs_greedy|
|ner_deid_sd_large|
|ner_diseases|
|nerdl_tumour_demo|
|ner_deid_subentity_augmented|
|ner_jsl_enriched|
|ner_genetic_variants|
|ner_bionlp|
|ner_measurements_clinical|
|ner_diseases_large|
|ner_radiology|
|ner_deid_augmented|
|ner_anatomy|
|ner_chemprot_clinical|
|ner_posology_experimental|
|ner_drugs|
|ner_deid_sd|
|ner_posology_large|
|ner_deid_large|
|ner_posology|
|ner_deidentify_dl|
|ner_deid_enriched|
|ner_bacterial_species|
|ner_drugs_large|
|ner_clinical_large|
|jsl_rd_ner_wip_greedy_clinical|
|ner_medmentions_coarse|
|ner_radiology_wip_clinical|
|ner_clinical|
|ner_chemicals|
|ner_deid_synthetic|
|ner_events_clinical|
|ner_posology_small|
|ner_anatomy_coarse|
|ner_human_phenotype_go_clinical|
|ner_jsl_slim|
|ner_jsl|
|ner_jsl_greedy|
|ner_events_admission_clinical|
You can check [Models Hub](https://nlp.johnsnowlabs.com/models) page for more information about all these models and more.
```
from sparknlp.pretrained import PretrainedPipeline
# Download the pretrained profiling pipeline, which runs every clinical
# NER model listed above over the input text.
clinical_profiling_pipeline = PretrainedPipeline("ner_profiling_clinical", "en", "clinical/models")
text = '''A 28-year-old female with a history of gestational diabetes mellitus diagnosed eight years prior to presentation and subsequent type two diabetes mellitus ( T2DM ), one prior episode of HTG-induced pancreatitis three years prior to presentation , associated with an acute hepatitis , and obesity with a body mass index ( BMI ) of 33.5 kg/m2 , presented with a one-week history of polyuria , polydipsia , poor appetite , and vomiting .'''
# annotate() returns a dict with one entry per model plus the
# sentence/token splits; print each entry.
clinical_result = clinical_profiling_pipeline.annotate(text)
clinical_result.keys()
print("Clinical Text: \n", text, "\n \nResults:\n")
for i in clinical_result.keys():
    print("{} : ".format(i), clinical_result[i])
```
**Lets show chunk results of NER models in a pandas dataframe.**
```
import pandas as pd
def get_chunk_results(light_result):
    """Print, per NER model, the chunk-level predictions of a
    fullAnnotate() result as one pandas dataframe per "*_chunks" entry.

    Skips the raw sentence/token entries and every key without the
    "_chunks" suffix; models with no chunks get a placeholder message.
    """
    for i in light_result.keys():
        # keys look like "<model_name>_chunks"; drop the last "_" segment
        model_name = "_".join(i.split("_")[:-1])
        if (i == 'sentence') | (i == 'token'):
            continue
        if ("_chunks" not in i):
            continue
        elif len(light_result[i]) == 0:
            print("\n", "*"*20, model_name, "Model Results", "*"*20)
            print("No Result For This Model")
        else:
            sentences = []
            begins = []
            ends = []
            chunks = []
            entities = []
            confidences = []
            # each n is a chunk annotation carrying offsets and metadata
            for n in (light_result[i]):
                sentences.append(n.metadata['sentence'])
                begins.append(n.begin)
                ends.append(n.end)
                chunks.append(n.result)
                entities.append(n.metadata['entity'])
                confidences.append(n.metadata['confidence'])
            df = pd.DataFrame({'sentence':sentences, 'begin': begins, 'end': ends, 'chunks': chunks, 'entity': entities, 'confidence': confidences})
            print("\n \n", "*"*20, model_name, "Model Results", "*"*20)
            # display() is a notebook-only helper; assumes an IPython context
            display(df)
# A second clinical note, used with fullAnnotate() to obtain full
# annotation objects (offsets + metadata) rather than plain strings.
text2 = """
At this time, chest tube placement for drainage of the fluid occurred and thoracoscopy with fluid biopsies, which were performed, which revealed epithelioid malignant mesothelioma.
The patient was then stained with a PET CT, which showed extensive uptake in the chest, bilateral pleural pericardial effusions, and lymphadenopathy.
She also had acidic fluid, pectoral and intramammary lymph nodes and uptake in L4 with SUV of 4. This was consistent with stage III disease. Her repeat echocardiogram showed an ejection fraction of 45% to 49%.
She was transferred to Oncology service and started on chemotherapy on September 1, 2007 with cisplatin 75 mg/centimeter squared equaling 109 mg IV piggyback over 2 hours on September 1, 2007
"""
# fullAnnotate returns one result per input text; take the first.
clinical_full_result = clinical_profiling_pipeline.fullAnnotate(text2)[0]
clinical_full_result['ner_jsl_chunks']
print("Clinical Text:\n", text2)
get_chunk_results(clinical_full_result)
```
**Now we will show all NER labels of tokens in the same dataframe.**
```
import pandas as pd
def get_token_results(light_result):
    """Collect every NER model's token-level labels into one DataFrame.

    The first four columns describe the tokens (sentence id, character
    offsets, token text); each remaining column holds one model's predicted
    labels, aligned row-by-row with the tokens.
    """
    token_annotations = light_result["token"]
    df = pd.DataFrame({
        'sentence': [t.metadata["sentence"] for t in token_annotations],
        'begin': [t.begin for t in token_annotations],
        'end': [t.end for t in token_annotations],
        'token': [t.result for t in token_annotations],
    })
    # Every key that is neither token/sentence metadata nor a chunk column
    # is a per-model token-label column.
    for model_name in light_result.keys():
        if model_name in ("sentence", "token") or "_chunks" in model_name:
            continue
        labels = pd.DataFrame(light_result[model_name]).iloc[:, 0].apply(lambda a: a.result)
        df = pd.concat([df, labels.rename(model_name)], axis=1)
    return df
# Render the combined token-label table for the clinical profiling result.
get_token_results(clinical_full_result)
```
# BioBert NER Model Profiling Pretrained Pipeline
This pipeline can be used to explore all the available pretrained NER models at once. When you run this pipeline over your text, you will end up with the predictions coming out of each pretrained clinical NER model trained with `biobert_pubmed_base_cased`.
|BioBert NER Model List|
|-|
|ner_cellular_biobert|
|ner_diseases_biobert|
|ner_events_biobert|
|ner_bionlp_biobert|
|ner_jsl_greedy_biobert|
|ner_jsl_biobert|
|ner_anatomy_biobert|
|ner_jsl_enriched_biobert|
|ner_human_phenotype_go_biobert|
|ner_deid_biobert|
|ner_deid_enriched_biobert|
|ner_clinical_biobert|
|ner_anatomy_coarse_biobert|
|ner_human_phenotype_gene_biobert|
|ner_posology_large_biobert|
|jsl_rd_ner_wip_greedy_biobert|
|ner_posology_biobert|
|jsl_ner_wip_greedy_biobert|
|ner_chemprot_biobert|
|ner_ade_biobert|
|ner_risk_factors_biobert|
You can check [Models Hub](https://nlp.johnsnowlabs.com/models) page for more information about all these models and more.
```
from sparknlp.pretrained import PretrainedPipeline

# Download the BioBert-based NER profiling pipeline (all *_biobert models).
biobert_profiling_pipeline = PretrainedPipeline("ner_profiling_biobert", "en", "clinical/models")
# Light annotation: plain-string outputs, one entry per annotator column.
bio_result = biobert_profiling_pipeline.annotate(text)
bio_result.keys()
print("Clinical Text: \n", text, "\n \nResults:\n")
for i in bio_result.keys():
    print("{} : ".format(i), bio_result[i])
# Full annotation keeps offsets/metadata, needed by the helper functions.
bio_full_result = biobert_profiling_pipeline.fullAnnotate(text2)[0]
bio_full_result['ner_jsl_biobert_chunks']
print("Clinical Text:\n", text2)
```
**Show NER chunks in a pandas dataframe.**
```
# Chunk-level results for every BioBert NER model.
get_chunk_results(bio_full_result)
```
**Show NER label results in a pandas dataframe.**
```
# Token-level labels for the BioBert profiling result. (This cell previously
# re-displayed `clinical_full_result`, which the clinical section already
# showed — the BioBert section should render `bio_full_result`.)
get_token_results(bio_full_result)
```
| github_jupyter |
Copyright (c) Microsoft Corporation. All rights reserved.
Licensed under the MIT License.

# Automated Machine Learning
**Github DAU Forecasting**
## Contents
1. [Introduction](#Introduction)
1. [Setup](#Setup)
1. [Data](#Data)
1. [Train](#Train)
1. [Evaluate](#Evaluate)
## Introduction
This notebook demonstrates demand forecasting for Github Daily Active Users Dataset using AutoML.
AutoML highlights here include using Deep Learning forecasts, Arima, Prophet, Remote Execution and Remote Inferencing, and working with the `forecast` function. Please also look at the additional forecasting notebooks, which document lagging, rolling windows, forecast quantiles, other ways to use the forecast function, and forecaster deployment.
Make sure you have executed the [configuration](../../../configuration.ipynb) before running this notebook.
Notebook synopsis:
1. Creating an Experiment in an existing Workspace
2. Configuration and remote run of AutoML for a time-series model exploring Regression learners, Arima, Prophet and DNNs
4. Evaluating the fitted model using a rolling test
## Setup
```
import os
import azureml.core
import pandas as pd
import numpy as np
import logging
import warnings
from pandas.tseries.frequencies import to_offset
# Squash warning messages for cleaner output in the notebook
warnings.showwarning = lambda *args, **kwargs: None
from azureml.core.workspace import Workspace
from azureml.core.experiment import Experiment
from azureml.train.automl import AutoMLConfig
from matplotlib import pyplot as plt
from sklearn.metrics import mean_absolute_error, mean_squared_error
from azureml.train.estimator import Estimator
```
This notebook is compatible with Azure ML SDK version 1.35.0 or later.
```
print("You are currently using version", azureml.core.VERSION, "of the Azure ML SDK")
```
As part of the setup you have already created a <b>Workspace</b>. To run AutoML, you also need to create an <b>Experiment</b>. An Experiment corresponds to a prediction problem you are trying to solve, while a Run corresponds to a specific approach to the problem.
```
# Load the AzureML workspace from the local config.json created by the
# configuration notebook.
ws = Workspace.from_config()

# choose a name for the run history container in the workspace
experiment_name = "github-remote-cpu"

experiment = Experiment(ws, experiment_name)

# Summarize the workspace/experiment context in a one-row table.
output = {}
output["Subscription ID"] = ws.subscription_id
output["Workspace"] = ws.name
output["Resource Group"] = ws.resource_group
output["Location"] = ws.location
output["Run History Name"] = experiment_name
# Use None (not -1) to disable column-width truncation: the -1 sentinel was
# deprecated in pandas 1.0 and raises ValueError in pandas >= 2.0.
pd.set_option("display.max_colwidth", None)
outputDf = pd.DataFrame(data=output, index=[""])
outputDf.T
```
### Using AmlCompute
You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for your AutoML run. In this tutorial, you use `AmlCompute` as your training compute resource.
> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.
```
from azureml.core.compute import ComputeTarget, AmlCompute
from azureml.core.compute_target import ComputeTargetException
# Choose a name for your CPU cluster
cpu_cluster_name = "github-cluster"

# Reuse the cluster if it already exists; otherwise provision a new one.
try:
    compute_target = ComputeTarget(workspace=ws, name=cpu_cluster_name)
    print("Found existing cluster, use it.")
except ComputeTargetException:
    # Not found: create a CPU cluster that can scale out to 4 nodes.
    compute_config = AmlCompute.provisioning_configuration(
        vm_size="STANDARD_DS12_V2", max_nodes=4
    )
    compute_target = ComputeTarget.create(ws, cpu_cluster_name, compute_config)

# Block until provisioning finishes (no-op if the cluster already existed).
compute_target.wait_for_completion(show_output=True)
```
## Data
Read Github DAU data from file, and preview data.
Let's set up what we know about the dataset.
**Target column** is what we want to forecast.
**Time column** is the time axis along which to predict.
**Time series identifier columns** are identified by values of the columns listed `time_series_id_column_names`, for example "store" and "item" if your data has multiple time series of sales, one series for each combination of store and item sold.
**Forecast frequency (freq)** This optional parameter represents the period with which the forecast is desired, for example, daily, weekly, yearly, etc. Use this parameter for the correction of time series containing irregular data points or for padding of short time series. The frequency needs to be a pandas offset alias. Please refer to [pandas documentation](https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#dateoffset-objects) for more information.
This dataset has only one time series. Please see the [orange juice notebook](https://github.com/Azure/MachineLearningNotebooks/tree/master/how-to-use-azureml/automated-machine-learning/forecasting-orange-juice-sales) for an example of a multi-time series dataset.
```
import pandas as pd
from pandas import DataFrame
from pandas import Grouper
from pandas import concat
from pandas.plotting import register_matplotlib_converters

register_matplotlib_converters()

# Two stacked views of the series: the full history, then month-wise boxplots.
plt.figure(figsize=(20, 10))
plt.tight_layout()
plt.subplot(2, 1, 1)
plt.title("Github Daily Active User By Year")
# Train/test CSVs are indexed by the parsed `date` column.
df = pd.read_csv("github_dau_2011-2018_train.csv", parse_dates=True, index_col="date")
test_df = pd.read_csv(
    "github_dau_2011-2018_test.csv", parse_dates=True, index_col="date"
)
plt.plot(df)
plt.subplot(2, 1, 2)
plt.title("Github Daily Active User By Month")
# Group observations by calendar month (1-12) and box-plot each group.
groups = df.groupby(df.index.month)
months = concat([DataFrame(x[1].values) for x in groups], axis=1)
months = DataFrame(months)
# NOTE(review): 48 column labels are assigned here, implying the concat above
# yields 48 series — with a single value column and 12 month groups this
# would raise a length mismatch; verify against the actual CSV schema.
months.columns = range(1, 49)
months.boxplot()
plt.show()

# Dataset metadata used throughout the rest of the notebook.
target_column_name = "count"
time_column_name = "date"
time_series_id_column_names = []  # single series: no id columns
freq = "D"  # Daily data
```
### Split Training data into Train and Validation set and Upload to Datastores
```
from helper import split_fraction_by_grain
from helper import split_full_for_forecasting

# Chronological split of the training data into train/validation sets.
train, valid = split_full_for_forecasting(df, time_column_name)
train.to_csv("train.csv")
valid.to_csv("valid.csv")
test_df.to_csv("test.csv")

datastore = ws.get_default_datastore()
# Upload the three splits to the same datastore folder. (Previously three
# copy-pasted upload_files calls with identical options.)
for split_file in ("train.csv", "valid.csv", "test.csv"):
    datastore.upload_files(
        files=["./" + split_file],
        target_path="github-dataset/tabular/",
        overwrite=True,
        show_progress=True,
    )

from azureml.core import Dataset

# Register each uploaded CSV as a TabularDataset for the remote runs.
train_dataset = Dataset.Tabular.from_delimited_files(
    path=[(datastore, "github-dataset/tabular/train.csv")]
)
valid_dataset = Dataset.Tabular.from_delimited_files(
    path=[(datastore, "github-dataset/tabular/valid.csv")]
)
test_dataset = Dataset.Tabular.from_delimited_files(
    path=[(datastore, "github-dataset/tabular/test.csv")]
)
```
### Setting forecaster maximum horizon
The forecast horizon is the number of periods into the future that the model should predict. Here, we set the horizon to 12 periods (i.e. 12 days, since this dataset has daily frequency). Notice that this is much shorter than the length of the test set; we will need to use a rolling test to evaluate the performance on the whole test set. For more discussion of forecast horizons and guiding principles for setting them, please see the [energy demand notebook](https://github.com/Azure/MachineLearningNotebooks/tree/master/how-to-use-azureml/automated-machine-learning/forecasting-energy-demand).
```
# Predict 12 periods (days, given freq="D") ahead at each forecast origin.
forecast_horizon = 12
```
## Train
Instantiate a AutoMLConfig object. This defines the settings and data used to run the experiment.
|Property|Description|
|-|-|
|**task**|forecasting|
|**primary_metric**|This is the metric that you want to optimize.<br> Forecasting supports the following primary metrics <br><i>spearman_correlation</i><br><i>normalized_root_mean_squared_error</i><br><i>r2_score</i><br><i>normalized_mean_absolute_error</i>
|**iteration_timeout_minutes**|Time limit in minutes for each iteration.|
|**training_data**|Input dataset, containing both features and label column.|
|**label_column_name**|The name of the label column.|
|**enable_dnn**|Enable Forecasting DNNs|
```
from azureml.automl.core.forecasting_parameters import ForecastingParameters

# Time-series settings shared by all forecasting iterations.
forecasting_parameters = ForecastingParameters(
    time_column_name=time_column_name,
    forecast_horizon=forecast_horizon,
    freq="D",  # Set the forecast frequency to be daily
)

# We will disable the enable_early_stopping flag to ensure the DNN model is recommended for demonstration purpose.
automl_config = AutoMLConfig(
    task="forecasting",
    primary_metric="normalized_root_mean_squared_error",
    experiment_timeout_hours=1,
    training_data=train_dataset,
    label_column_name=target_column_name,
    validation_data=valid_dataset,
    verbosity=logging.INFO,
    compute_target=compute_target,
    max_concurrent_iterations=4,
    max_cores_per_iteration=-1,  # use all available cores on each node
    enable_dnn=True,  # allow the TCN forecaster among the candidates
    enable_early_stopping=False,
    forecasting_parameters=forecasting_parameters,
)
```
We will now run the experiment, starting with 10 iterations of model search. The experiment can be continued for more iterations if more accurate results are required. Validation errors and current status will be shown when setting `show_output=True` and the execution will be synchronous.
```
# Submit to the remote cluster; show_output=True streams status synchronously.
remote_run = experiment.submit(automl_config, show_output=True)
# If you need to retrieve a run that already started, use the following code
# from azureml.train.automl.run import AutoMLRun
# remote_run = AutoMLRun(experiment = experiment, run_id = '<replace with your run id>')
```
Displaying the run objects gives you links to the visual tools in the Azure Portal. Go try them!
### Retrieve the Best Model for Each Algorithm
Below we select the best pipeline from our iterations. The get_output method on automl_classifier returns the best run and the fitted model for the last fit invocation. There are overloads on get_output that allow you to retrieve the best run and fitted model for any logged metric or a particular iteration.
```
from helper import get_result_df

# Best run per algorithm, indexed by algorithm name.
summary_df = get_result_df(remote_run)
summary_df

from azureml.core.run import Run
from azureml.widgets import RunDetails

# The DNN forecaster was renamed across SDK versions; fall back to the
# legacy name when the current one is absent. Membership is tested against
# the Series *index* explicitly (the original `not x in series` relied on
# pandas' index-membership semantics and read as a value-containment test).
forecast_model = "TCNForecaster"
if forecast_model not in summary_df["run_id"].index:
    forecast_model = "ForecastTCN"

best_dnn_run_id = summary_df["run_id"][forecast_model]
best_dnn_run = Run(experiment, best_dnn_run_id)
best_dnn_run.parent
RunDetails(best_dnn_run.parent).show()
best_dnn_run
RunDetails(best_dnn_run).show()
```
## Evaluate on Test Data
We now use the best fitted model from the AutoML Run to make forecasts for the test set.
We always score on the original dataset whose schema matches the training set schema.
```
from azureml.core import Dataset

# Re-create the test TabularDataset from the datastore.
test_dataset = Dataset.Tabular.from_delimited_files(
    path=[(datastore, "github-dataset/tabular/test.csv")]
)
# preview the first 5 rows of the dataset
test_dataset.take(5).to_pandas_dataframe()

compute_target = ws.compute_targets["github-cluster"]
# Separate experiment so test-time inference runs are tracked apart from training.
test_experiment = Experiment(ws, experiment_name + "_test")

import os
import shutil

# Stage the inference entry script into ./inference for remote submission.
script_folder = os.path.join(os.getcwd(), "inference")
os.makedirs(script_folder, exist_ok=True)
shutil.copy("infer.py", script_folder)

from helper import run_inference

# Score the best DNN run on the test set with a rolling forecast.
test_run = run_inference(
    test_experiment,
    compute_target,
    script_folder,
    best_dnn_run,
    test_dataset,
    valid_dataset,
    forecast_horizon,
    target_column_name,
    time_column_name,
    freq,
)
RunDetails(test_run).show()

from helper import run_multiple_inferences

# Launch one inference run per algorithm summarized in summary_df.
summary_df = run_multiple_inferences(
    summary_df,
    experiment,
    test_experiment,
    compute_target,
    script_folder,
    test_dataset,
    valid_dataset,
    forecast_horizon,
    target_column_name,
    time_column_name,
    freq,
)

# Wait for each test run and copy its primary metric into the summary table.
for run_name, run_summary in summary_df.iterrows():
    print(run_name)
    print(run_summary)
    run_id = run_summary.run_id
    test_run_id = run_summary.test_run_id
    test_run = Run(test_experiment, test_run_id)
    test_run.wait_for_completion()
    test_score = test_run.get_metrics()[run_summary.primary_metric]
    summary_df.loc[summary_df.run_id == run_id, "Test Score"] = test_score
    print("Test Score: ", test_score)

summary_df
```
| github_jupyter |
```
from network_evaluation_tools import gene_conversion_tools as gct
from network_evaluation_tools import data_import_tools as dit
import pandas as pd
import itertools
import time
```
## Load Reactome-Functional Interactions Raw Data
#### Source: http://reactomews.oicr.on.ca:8080/caBigR3WebApp2016/FIsInGene_022717_with_annotations.txt.zip
Downloaded: June 15, 2017
Last Updated: February 27, 2017
Note about processing: It looks like most of the edges are given as gene symbols but many of them seem to be invalid names, so we will use some of the gene conversion tools to filter these results as best we can.
```
# NOTE: this cell uses Python 2 print statements; it will not run on Python 3.
wd = '/cellar/users/jkhuang/Data/Projects/Network_Analysis/Data/'
Reactome_FIs_Raw = pd.read_csv(wd+'Network_Data_Raw/FIsInGene_022717_with_annotations.txt',sep='\t')
print 'Raw edges in ReactomeFI:', Reactome_FIs_Raw.shape[0]
# Extract gene list
Reactome_FIs_Raw_Genes = list(set(Reactome_FIs_Raw['Gene1']).union(set(Reactome_FIs_Raw['Gene2'])))
# Find "invalid genes" by text format (CHEBI-prefixed ids are chemicals, not genes)
query_string, valid_genes, invalid_genes = gct.query_constructor(Reactome_FIs_Raw_Genes, exclude_prefixes=['CHEBI'], print_invalid_genes=True)
# Get Edgelist of network
query_edgelist = Reactome_FIs_Raw[['Gene1','Gene2', 'Score']].values.tolist()
# Filter query edges
query_edgelist_filt = gct.filter_query_edgelist(query_edgelist,invalid_genes)
# Filter edge list
ReactomeFI_edgelist_filt = gct.filter_converted_edgelist(query_edgelist_filt, weighted=True)
# Save filtered, converted edge list to file
gct.write_edgelist(ReactomeFI_edgelist_filt, wd+'Network_SIFs_Symbol/ReactomeFI_Symbol.sif', binary=False)
# Create filtered network (keep edges with score in the top decile)
ReactomeFI90_edgelist = dit.filter_weighted_network_sif(wd+'Network_SIFs_Symbol/ReactomeFI_Symbol.sif', nodeA_col=0, nodeB_col=1, score_col=2,
                                                        q=0.9, delimiter='\t', verbose=True, save_path=wd+'Network_SIFs_Symbol/ReactomeFI90_edgelist_Symbol.sif')
# The filter function didn't work here because the max value makes up >90% of the edges.
# We need to filter but keep all max edges instead (>= the 90th-percentile score).
ReactomeFI_edgelist = pd.DataFrame(ReactomeFI_edgelist_filt, columns=['NodeA', 'NodeB', 'Score'])
q_score = ReactomeFI_edgelist['Score'].quantile(0.9)
ReactomeFI_edgelist_filt2 = ReactomeFI_edgelist[ReactomeFI_edgelist['Score']>=q_score]
print ReactomeFI_edgelist_filt2.shape[0], '/', ReactomeFI_edgelist.shape[0], 'edges kept, ', float(ReactomeFI_edgelist_filt2.shape[0])/ReactomeFI_edgelist.shape[0]
# Essentially >85% of the edges have the 'maximum score' which makes almost no sense for filtering further
```
| github_jupyter |
```
%reload_ext autoreload
%autoreload 2
%matplotlib inline
from fastai.nlp import *
from sklearn.linear_model import LogisticRegression
from sklearn.svm import LinearSVC
from torchtext import vocab, data, datasets
import pandas as pd

sl=1000            # sequence length cap passed to the learner
vocab_size=200000  # maximum bag-of-words vocabulary size
PATH='data/arxiv/brundage_bot.csv'

df = pd.read_csv(PATH)
df.head()
# Single text field per paper: category + title on one line, abstract below.
df['txt'] = df.category + ' ' + df.title + '\n' + df.summary
print(df.iloc[0].txt)
n=len(df); n
# 90/10 train/validation split; target is whether the paper was tweeted.
val_idx = get_cv_idxs(n, val_pct=0.1)
((val,trn),(val_y,trn_y)) = split_by_idx(val_idx, df.txt.values, df.tweeted.values)
```
## Ngram logistic regression
```
# Ngram (1-3) document-term matrices over the raw text.
veczr = CountVectorizer(ngram_range=(1,3), tokenizer=tokenize)
trn_term_doc = veczr.fit_transform(trn)
val_term_doc = veczr.transform(val)
trn_term_doc.shape, trn_term_doc.sum()
y=trn_y
# Binarize counts to presence/absence.
x=trn_term_doc.sign()
val_x = val_term_doc.sign()
# Naive Bayes log-count ratios r (Laplace-smoothed) and bias b.
p = x[np.argwhere(y!=0)[:,0]].sum(0)+1
q = x[np.argwhere(y==0)[:,0]].sum(0)+1
r = np.log((p/p.sum())/(q/q.sum()))
# NOTE(review): len(p) == len(q) (both are vocab-length vectors), so this bias
# is log(1) = 0; the usual NB prior is log(n_pos/n_neg) — verify intent.
b = np.log(len(p)/len(q))
pre_preds = val_term_doc @ r.T + b
preds = pre_preds.T>0
(preds==val_y).mean()
# Logistic regression on the same binarized features.
m = LogisticRegression(C=0.1, fit_intercept=False)
m.fit(x, y);
preds = m.predict(val_x)
(preds.T==val_y).mean()
probs = m.predict_proba(val_x)[:,1]
from sklearn.metrics import precision_recall_curve, average_precision_score
import matplotlib.pyplot as plt
precision, recall, _ = precision_recall_curve(val_y, probs)
average_precision = average_precision_score(val_y, probs)
plt.step(recall, precision, color='b', alpha=0.2, where='post')
plt.fill_between(recall, precision, step='post', alpha=0.2, color='b')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
plt.title('Precision-Recall curve: AUC={0:0.2f}'.format(average_precision));
# Recall attainable while holding precision at >= 60%.
recall[precision>=0.6][0]
# Error analysis: most confident false positives and false negatives.
df_val = df.iloc[sorted(val_idx)]
incorrect_yes = np.where((preds != val_y) & (val_y == 0))[0]
most_incorrect_yes = np.argsort(-probs[incorrect_yes])
txts = df_val.iloc[incorrect_yes[most_incorrect_yes[:10]]]
txts[["link", "title", "summary"]]
' '.join(txts.link.values)
incorrect_no = np.where((preds != val_y) & (val_y == 1))[0]
most_incorrect_no = np.argsort(probs[incorrect_no])
txts = df_val.iloc[incorrect_no[most_incorrect_no[:10]]]
txts[["link", "title", "summary"]]
' '.join(txts.link.values)
# NOTE(review): `preds` holds hard 0/1 labels here, so `preds > 0.8` behaves
# like `preds == 1`; `probs > 0.8` was probably intended — confirm.
to_review = np.where((preds > 0.8) & (val_y == 0))[0]
to_review_idx = np.argsort(-probs[to_review])
txts = df_val.iloc[to_review[to_review_idx]]
# Render the papers-to-review list as a standalone HTML page.
txt_html = ('<li><a href="http://' + txts.link + '">' + txts.title.str.replace('\n',' ') + '</a>: '
            + txts.summary.str.replace('\n',' ') + '</li>').values
full_html = (f"""<!DOCTYPE html>
<html>
<head><title>Brundage Bot Backfill</title></head>
<body>
<ul>
{os.linesep.join(txt_html)}
</ul>
</body>
</html>""")
```
## Learner
```
# Rebuild the vectorizer with a capped vocabulary for the fastai learner.
veczr = CountVectorizer(ngram_range=(1,3), tokenizer=tokenize, max_features=vocab_size)
trn_term_doc = veczr.fit_transform(trn)
val_term_doc = veczr.transform(val)
trn_term_doc.shape, trn_term_doc.sum()
# DotProdNB learner over the bag-of-words matrices (fastai TextClassifierData).
md = TextClassifierData.from_bow(trn_term_doc, trn_y, val_term_doc, val_y, sl)
learner = md.dotprod_nb_learner(r_adj=20)
learner.fit(0.02, 4, wds=1e-6, cycle_len=1)
from sklearn.metrics import precision_recall_curve
import matplotlib.pyplot as plt
def prec_at_6(preds, targs):
    """Recall achieved at the first point where precision reaches 0.6.

    Both `preds` and `targs` are two-column score arrays; column 1 holds
    the positive-class scores/labels.
    """
    prec, rec, _ = precision_recall_curve(targs[:, 1], preds[:, 1])
    # First point on the curve whose precision is at least 0.6.
    return rec[prec >= 0.6][0]
# Recall @ 60% precision on the validation-set predictions.
prec_at_6(*learner.predict_with_targs())
```
| github_jupyter |
# In this tutorial we will cover how to use autodp for the passive and active learning versions of PATE
## 1. The first step is to represent PATE in autodp
It is quite straightforward because it's just a sequence of Gaussian mechanisms with the same parameters.
```
from autodp.mechanism_zoo import GaussianMechanism
import numpy as np
class PATE(GaussianMechanism):
    """autodp mechanism for PATE: m Gaussian-noised teacher-vote releases.

    Releasing m labels, each via a Gaussian mechanism with noise std
    `sigma`, composes into a single Gaussian mechanism whose effective
    noise multiplier is sigma / (sensitivity * sqrt(m)).
    """

    def __init__(self, sigma, m, Binary, name='PATE'):
        # sigma is the std of the Gaussian noise added to the voting scores.
        # L2 sensitivity of one release: 1 for binary voting, sqrt(2) when
        # the multiclass vote histogram is perturbed.
        l2_sensitivity = 1 if Binary else np.sqrt(2)
        GaussianMechanism.__init__(self, sigma=sigma / l2_sensitivity / np.sqrt(m), name=name)
        self.params = {'sigma': sigma}
```
## 2. It is straightforward to do privacy loss computation / privacy accounting
```
# Computing the privacy loss after running the algorithm
# let's say after running PATE-PSQ or ASQ with Gaussian mechanism to release m labels
# the noise added to the voted histogram (in the multi-class case),
# or the noise added to the # of teachers who voted positive is sigma
#
m = 100
# NOTE(review): this `sigma` variable is shadowed by the literals (20, 10)
# passed below — confirm which value is authoritative.
sigma = 20
# let's say it is a binary classification task
pate_mech = PATE(sigma=20,m=m,Binary=True, name='PATE')
delta = 1e-6
eps = pate_mech.get_approxDP(delta)
print(eps,delta)
# Same accounting with half the noise: the epsilon guarantee degrades.
pate_mech = PATE(sigma=10,m=m,Binary=True, name='PATE')
delta = 1e-6
eps = pate_mech.get_approxDP(delta)
print(eps,delta)
```
## 3. When we need to fulfill a pre-defined privacy parameter, we do privacy calibration.
```
# Privacy calibration: given m and a target (eps, delta), solve for sigma.
from autodp.calibrator_zoo import eps_delta_calibrator

calibrate = eps_delta_calibrator()

# One-parameter wrapper (sigma only) so the calibrator can search over it;
# m=100 labels and binary voting are fixed.
class PATE_binary_m(PATE):
    def __init__(self,sigma, name='PATE_m'):
        PATE.__init__(self, sigma=sigma,m=100,Binary=True,name=name)

# Find the \sigma parameter that gives the following privacy guarantee
# (search interval for sigma is [0, 100]).
eps = 2.0
delta = 1e-6
mech1 = calibrate(PATE_binary_m,eps,delta,[0,100],name='PATE_eps=2')
print(mech1.name, mech1.params,mech1.get_approxDP(delta))

# Find the \sigma parameter that gives the following privacy guarantee
eps = 0.5
delta = 1e-6
mech2 = calibrate(PATE_binary_m,eps,delta,[0,100],name='PATE_eps=0.5')
print(mech2.name, mech2.params,mech2.get_approxDP(delta))
```
The above sigma parameters will then be used in running the PATE algorithm. For active learning-version of PATE, set $m$ to be the label-budget, rather than the number of public unlabeled data points available.
| github_jupyter |
```
%matplotlib inline
from __future__ import division
import os
import glob
try:
import ConfigParser
except ImportError:
import configparser as ConfigParser
import numpy as np
import h5py
import pylab as plt
import cv2
import astra
from ipywidgets import interact
from pprint import pprint
def log_progress(sequence, every=None, size=None):
    """Iterate over `sequence`, rendering a Jupyter progress-bar widget.

    Yields the items of `sequence` unchanged. When the sequence has no
    len() and `size` is not given, an indeterminate ("info") bar is shown
    and `every` must be supplied.

    Parameters
    ----------
    sequence : iterable
        Items to iterate over.
    every : int, optional
        Update the widget every `every` items (default: ~0.5% of `size`).
    size : int, optional
        Total item count, for sequences without len().
    """
    from ipywidgets import IntProgress, HTML, VBox
    from IPython.display import display

    is_iterator = False
    if size is None:
        try:
            size = len(sequence)
        except TypeError:
            is_iterator = True
    if size is not None:
        if every is None:
            if size <= 200:
                every = 1
            else:
                every = size / 200  # every 0.5%
    else:
        assert every is not None, 'sequence is iterator, set every'

    if is_iterator:
        # Unknown length: indeterminate bar.
        progress = IntProgress(min=0, max=1, value=1)
        progress.bar_style = 'info'
    else:
        progress = IntProgress(min=0, max=size, value=0)
    label = HTML()
    box = VBox(children=[label, progress])
    display(box)

    index = 0
    try:
        for index, record in enumerate(sequence, 1):
            if index == 1 or index % every == 0:
                if is_iterator:
                    label.value = '{index} / ?'.format(index=index)
                else:
                    progress.value = index
                    label.value = u'{index} / {size}'.format(
                        index=index,
                        size=size
                    )
            yield record
    except:
        # Turn the bar red, then propagate (also catches KeyboardInterrupt
        # on purpose so an interrupted loop is marked as failed).
        progress.bar_style = 'danger'
        raise
    else:
        progress.bar_style = 'success'
        progress.value = index
        # Was `unicode(index or '?')`, which raises NameError on Python 3;
        # str() behaves identically here on both Python 2 and 3.
        label.value = str(index or '?')
# Data directory
# data_root = '/diskmnt/a/makov/yaivan/MMC_1/'
data_root = '/media/makov/buext4/yaivan/MMC_1'
raw_root = os.path.join(data_root,'Raw')
# Packed HDF5 copy of the raw projection TIFFs (created further below).
out_file = os.path.join(data_root, 'raw.h5')
# files = !ls {raw_root}
# pprint(sorted(files))
# SkyScan config output
def print_config(config):
    """Dump a ConfigParser's sections and options, SkyScan-log style."""
    for section, options in config._sections.items():
        print('[{}]'.format(section))
        # One tab-indented "key = value" line per option.
        for option, value in options.items():
            print('\t{} = {}'.format(option, value))
# SkyScan writes acquisition/reconstruction metadata to an INI-style .log
# file; optionxform=str preserves the mixed-case option names.
config = ConfigParser.RawConfigParser()
config.optionxform = str
config.read(os.path.join(raw_root,'MMC1_2.82um_.log'))
print_config(config)
```
## Some useful parameters:
* [System]
* Camera Pixel Size (um) = 11.32
* CameraXYRatio = 1.0023
* [Acquisition]
* Filename Prefix = MMC1_2.82um_
* Number of Files = 2030
* Source Voltage (kV) = 100
* Source Current (uA) = 100
* Number of Rows = 2096
* Number of Columns = 4000
* Camera binning = 1x1
* Image Rotation = 0.6500
* Image Pixel Size (um) = 2.82
* Object to Source (mm) = 56.135
* Camera to Source (mm) = 225.082
* Vertical Object Position (mm) = 6.900
* Optical Axis (line) = 960
* Filter = Al 0.5 mm
* Image Format = TIFF
* Depth (bits) = 16
* Exposure (ms) = 1767
* Rotation Step (deg) = 0.100
* Frame Averaging = ON (15)
* Random Movement = OFF (10)
* Use 360 Rotation = NO
* Geometrical Correction = ON
* Camera Offset = OFF
* Median Filtering = ON
* Flat Field Correction = ON
* Rotation Direction = CC
* Scanning Trajectory = ROUND
* Study Date and Time = Mar 19, 2015 10:11:11
* Scan duration = 16:08:02
* [Reconstruction]
* Reconstruction Program = NRecon
* Program Version = Version: 1.6.5.8
* Reconstruction engine = NReconServer
* Dataset Origin = Skyscan1172
* Dataset Prefix = MMC1_2.82um_
* First Section = 96
* Last Section = 1981
* Reconstruction duration per slice (seconds) = 1.859491
* Total reconstruction time (1886 slices) in seconds = 3507.000000
* Postalignment = -1.00
* Section to Section Step = 1
* Sections Count = 1886
* Result File Type = PNG
* Result File Header Length (bytes) = Unknown: compressed JPG format (100%)
* Result Image Width (pixels) = 4000
* Result Image Height (pixels) = 4000
* Pixel Size (um) = 2.82473
* Reconstruction Angular Range (deg) = 202.90
* Use 180+ = OFF
* Angular Step (deg) = 0.1000
* Smoothing = 0
* Ring Artifact Correction = 16
* Filter cutoff relative to Nyquisit frequency = 100
* Filter type = 0
* Filter type meaning(1) = 0: Hamming (Ramp in case of optical scanner); 1: Hann; 2: Ramp; 3: Almost Ramp;
* Filter type meaning(2) = 11: Cosine; 12: Shepp-Logan; [100,200]: Generalized Hamming, alpha=(iFilter-100)/100
* Undersampling factor = 1
* Threshold for defect pixel mask (%) = 0
* Beam Hardening Correction (%) = 92
* CS Static Rotation (deg) = 0.0
* Minimum for CS to Image Conversion = -0.1800
* Maximum for CS to Image Conversion = 0.5200
* HU Calibration = OFF
* Cone-beam Angle Horiz.(deg) = 11.493867
* Cone-beam Angle Vert.(deg) = 6.037473
```
object_name = config._sections['Acquisition']['Filename Prefix']
print('Object name:', object_name)
# Projection images are named <prefix>NNNN.tif with a 4-digit index.
data_files = glob.glob(os.path.join(raw_root,object_name+'[0-9]'*4+'.tif'))
data_files = sorted(data_files)
print('Data files found:', len(data_files))
print('First file:', data_files[0])
print('Last file:', data_files[-1])
# Probe one frame for the dataset shape and dtype.
test_data = plt.imread(data_files[0])
print('Size of image:', test_data.shape)
print('Image data type:', test_data.dtype)
# if not packed raw HDF5 file found, create it
# TODO: add additional reference TIFF files
if not os.path.exists(out_file):
    with h5py.File(out_file,'w-') as h5f:
        h5f.create_dataset('data', (len(data_files), test_data.shape[0], test_data.shape[1]),
                           chunks=True, dtype=test_data.dtype)
        for idf, df in log_progress(enumerate(data_files)):
            # NOTE(review): the enumerated value is immediately overwritten —
            # the frame is re-read by index, so `df` from enumerate is unused.
            df = plt.imread(data_files[idf])
            h5f['data'][idf]=df
def build_reconstruction_geomety(detector_size, angles):
    """Fan-beam ASTRA projection geometry for this SkyScan 1172 scan.

    Distances come from the acquisition log (Object to Source 56.135 mm,
    Camera to Source 225.082 mm) and are converted to pixel units.
    """
    # proj_geom = astra.create_proj_geom('parallel', 1.0, detector_size, angles)
    pixel_size = 2.82473e-3  # reconstructed pixel size, mm
    src_to_object = 56.135 / pixel_size
    src_to_detector = 225.082 / pixel_size
    # fanflat arguments: detector spacing (here the magnification),
    # detector count, angles, source-origin and origin-detector distances.
    return astra.create_proj_geom(
        'fanflat',
        src_to_detector / src_to_object,
        detector_size,
        angles,
        src_to_object,
        src_to_detector - src_to_object,
    )
def astra_tomo2d_fanflat_fbp(sinogram, angles):
    """Reconstruct a 2D slice from a fan-beam sinogram with GPU FBP.

    `sinogram` is (n_angles, detector_size); `angles` are in radians.
    Returns a square image of side detector_size.
    """
    angles = angles.astype('float64')  # hack for astra stability, may be removed in future releases
    detector_size = sinogram.shape[1]
    rec_size = detector_size  # size of reconstruction region
    vol_geom = astra.create_vol_geom(rec_size, rec_size)
    proj_geom = build_reconstruction_geomety(detector_size, angles)

    sinogram_id = astra.data2d.create('-sino', proj_geom, data=sinogram)
    # Create a data object for the reconstruction
    rec_id = astra.data2d.create('-vol', vol_geom)

    # Set up the parameters for a reconstruction algorithm using the GPU
    cfg = astra.astra_dict('FBP_CUDA')
    cfg['ReconstructionDataId'] = rec_id
    cfg['ProjectionDataId'] = sinogram_id
    cfg['option'] = {}
    # ShortScan: the acquisition covers ~203 deg, not a full rotation.
    cfg['option']['ShortScan'] = True
    # cfg['option']['MinConstraint'] = 0
    # cfg['option']['MaxConstraint'] = 5

    # Available algorithms:
    # SIRT_CUDA, SART_CUDA, EM_CUDA, FBP_CUDA (see the FBP sample)
    # Create the algorithm object from the configuration structure
    alg_id = astra.algorithm.create(cfg)

    # FBP is a direct method: a single run produces the reconstruction.
    astra.algorithm.run(alg_id, 1)
    # Get the result
    rec = astra.data2d.get(rec_id)

    # Clean up. Note that GPU memory is tied up in the algorithm object,
    # and main RAM in the data objects.
    astra.algorithm.delete(alg_id)
    astra.data2d.delete(rec_id)
    astra.data2d.delete(sinogram_id)
    astra.clear()
    return rec
def astra_tomo2d_fanflat_sirt(sinogram, angles):
    """Reconstruct a 2D slice from a fan-beam sinogram with GPU SIRT.

    Same geometry as the FBP variant, but iterative: runs 2000 SIRT
    iterations. `sinogram` is (n_angles, detector_size); `angles` in radians.
    """
    angles = angles.astype('float64')  # hack for astra stability, may be removed in future releases
    detector_size = sinogram.shape[1]
    rec_size = detector_size  # size of reconstruction region
    vol_geom = astra.create_vol_geom(rec_size, rec_size)
    proj_geom = build_reconstruction_geomety(detector_size, angles)

    sinogram_id = astra.data2d.create('-sino', proj_geom, data=sinogram)
    # Create a data object for the reconstruction
    rec_id = astra.data2d.create('-vol', vol_geom)

    # Set up the parameters for a reconstruction algorithm using the GPU
    cfg = astra.astra_dict('SIRT_CUDA')
    cfg['ReconstructionDataId'] = rec_id
    cfg['ProjectionDataId'] = sinogram_id
    cfg['option'] = {}
    # cfg['option']['MinConstraint'] = 0
    # cfg['option']['MaxConstraint'] = 5

    # Available algorithms:
    # SIRT_CUDA, SART_CUDA, EM_CUDA, FBP_CUDA (see the FBP sample)
    # Create the algorithm object from the configuration structure
    alg_id = astra.algorithm.create(cfg)

    # Run 2000 iterations of the algorithm
    astra.algorithm.run(alg_id, 2000)
    # Get the result
    rec = astra.data2d.get(rec_id)

    # Clean up. Note that GPU memory is tied up in the algorithm object,
    # and main RAM in the data objects.
    astra.algorithm.delete(alg_id)
    astra.data2d.delete(rec_id)
    astra.data2d.delete(sinogram_id)
    astra.clear()
    return rec
# Define the plugin class (has to subclass astra.plugin.base)
# Note that usually, these will be defined in a separate package/module
class SIRTPlugin(astra.plugin.base):
    """Example of an ASTRA plugin class, implementing a simple 2D SIRT algorithm.

    Options:

    'rel_factor': relaxation factor (optional)
    """

    # The astra_name variable defines the name to use to
    # call the plugin from ASTRA
    astra_name = "SIRT-PLUGIN"

    def initialize(self,cfg, rel_factor = 1):
        # OpTomo wraps the projector as a linear operator W (forward
        # projection is W*v, backprojection is W.T*s).
        self.W = astra.OpTomo(cfg['ProjectorId'])
        self.vid = cfg['ReconstructionDataId']  # volume (reconstruction) data id
        self.sid = cfg['ProjectionDataId']      # sinogram data id
        self.rel = rel_factor

    def run(self, its):
        # Shared views: in-place updates write straight into ASTRA's buffers.
        v = astra.data2d.get_shared(self.vid)
        s = astra.data2d.get_shared(self.sid)
        print(s.shape)
        W = self.W
        for i in range(its):
            # Relaxed residual backprojection step:
            # v += rel * W^T (s - W v) / s.size
            v[:] += self.rel*(W.T*(s - (W*v).reshape(s.shape))).reshape(v.shape)/s.size
# from plugin import SIRTPlugin
def astra_tomo2d_fanflat_plugin(sinogram, angles):
    """Reconstruct *sinogram* using the custom SIRT-PLUGIN defined above.

    :param sinogram: 2D array, shape (n_angles, detector_size)
    :param angles: projection angles (radians); cast to float64 below
    :return: 2D reconstruction array (rec_size x rec_size)
    """
    angles = angles.astype('float64') # hack for astra stability, may be removed in future releases
    detector_size = sinogram.shape[1]
    rec_size = detector_size # size of reconstruction region
    vol_geom = astra.create_vol_geom(rec_size, rec_size)
    # NOTE: 'geomety' is the (misspelled) name of the helper defined earlier in this file.
    proj_geom = build_reconstruction_geomety(detector_size, angles)
    proj_id = astra.create_projector('cuda',proj_geom,vol_geom)
    sinogram_id = astra.data2d.create('-sino', proj_geom, data=sinogram)
    # Create a data object for the reconstruction
    rec_id = astra.data2d.create('-vol', vol_geom)
    # Register the plugin so ASTRA can resolve it by its astra_name string.
    astra.plugin.register(SIRTPlugin)
    print(astra.plugin.get_registered())
    # Set up the parameters for a reconstruction algorithm using the GPU
    cfg = astra.astra_dict('SIRT-PLUGIN')
    cfg['ProjectorId'] = proj_id
    cfg['ReconstructionDataId'] = rec_id
    cfg['ProjectionDataId'] = sinogram_id
    cfg['option'] = {}
    cfg['option']['rel_factor'] = 1.5  # passed through to SIRTPlugin.initialize
    # cfg['option']['MinConstraint'] = 0
    # cfg['option']['MaxConstraint'] = 5
    # Available algorithms:
    # SIRT_CUDA, SART_CUDA, EM_CUDA, FBP_CUDA (see the FBP sample)
    # Create the algorithm object from the configuration structure
    alg_id = astra.algorithm.create(cfg)
    # Run 10 iterations of the plugin's run() method
    astra.algorithm.run(alg_id, 10)
    # Get the result
    rec = astra.data2d.get(rec_id)
    # Clean up. Note that GPU memory is tied up in the algorithm object,
    # and main RAM in the data objects.
    astra.algorithm.delete(alg_id)
    astra.data2d.delete(rec_id)
    astra.data2d.delete(sinogram_id)
    astra.clear()
    return rec
def create_sinogram(data, angles):
    """Forward-project a 2D image into a sinogram with the ASTRA CUDA projector.

    :param data: 2D image array; its width defines detector_size
    :param angles: projection angles in radians
    :return: sinogram rotated by 270 degrees (np.rot90 with k=3), to match the
        orientation convention used elsewhere in this notebook
    """
    angles = angles.astype('float64') # hack for astra stability, may be removed in future releases
    detector_size = data.shape[1]
    rec_size = detector_size # size of reconstruction region
    vol_geom = astra.create_vol_geom(rec_size, rec_size)
    proj_geom = build_reconstruction_geomety(detector_size, angles)
    proj_id = astra.create_projector('cuda',proj_geom,vol_geom)
    # OpTomo makes the projector act as a linear operator: W * image = sinogram.
    W = astra.OpTomo(proj_id)
    P = data
    sinogram = W * P
    # OpTomo returns a flat vector; restore (n_angles, detector_size) shape.
    sinogram = sinogram.reshape([len(angles), detector_size])
    return np.rot90(sinogram,3)
# functools32 is the Python 2 backport of functools.lru_cache;
# fall back to the stdlib functools on Python 3.
try:
    from functools32 import lru_cache
except ImportError:
    from functools import lru_cache
def cv_rotate(x,angle):
    """
    Rotate square array using OpenCV2 around center of the array

    :param x: numpy array
    :param angle: angle in degrees
    :return: rotated array
    """
    height = x.shape[0]
    width = x.shape[1]
    # Pixel-centre of the array: (size/2 - 0.5) in each dimension.
    center = (width / 2.0 - 0.5, height / 2.0 - 0.5)
    transform = cv2.getRotationMatrix2D(center, angle, 1.0)
    rotated = cv2.warpAffine(x, transform, (width, height), flags=cv2.INTER_LINEAR)
    return rotated
@lru_cache(maxsize=32)  # cache recently loaded slices; keyed on sinogram_number
def load_sinogram(sinogram_number):
    """Load one raw sinogram slice from the HDF5 file and scale it to [0, 1].

    Reads the module-level global ``out_file`` (HDF5 path defined earlier in
    the notebook). Negative indexing (-sinogram_number) selects the slice
    counting from the end of the second axis.

    :param sinogram_number: slice index (counted from the end of axis 1)
    :return: 2D float array, raw counts divided by 2**16
    """
    # Without thermal correction
    with h5py.File(out_file,'r') as h5f:
        sinogram = h5f['data'][:,-sinogram_number,:]
    # With thermal correction
    # Loading geometrical correction map generated by NRecon
    # corrections_map = np.loadtxt(os.path.join(raw_root,'MMC1_2.82um__TS.crv'),skiprows=2)
    # with h5py.File(out_file,'r') as h5f:
    #     sinogram = np.zeros_like(h5f['data'][:,-sinogram_number,:])
    #     for ii in tqdm.tqdm(range(sinogram.shape[0])):
    #         sinogram[ii] = np.roll(
    #             h5f['data'][ii,-sinogram_number-int(corrections_map[ii,2]),:],
    #             -int(corrections_map[ii,1])
    #         )
    # select only central 180 deg for reconstruction
    # sinogram = sinogram[int(sinogram.shape[0]/2-900):1800-int(sinogram.shape[0]/2-900)]
    # sinogram = sinogram[:1800]
    # Detector appears to be 16-bit: scale raw counts to [0, 1] -- TODO confirm.
    sinogram = np.array(sinogram/2.**16)
    return sinogram
def get_sinogram(sinogram_number):
    """Load sinogram *sinogram_number* and convert it to normalised attenuation.

    Steps: load via :func:`load_sinogram`, rotate 90 degrees, apply the
    Beer-Lambert transform (-log of transmission) and scale so the global
    maximum equals 1.

    :param sinogram_number: slice index, passed through to load_sinogram
    :return: 2D numpy array of attenuation values normalised to max 1
    """
    sinogram = load_sinogram(sinogram_number)
    sinogram = np.rot90(sinogram)
    # Beer-Lambert: attenuation = -log(transmission). Zeros in the scaled raw
    # data would produce inf here -- assumed not to occur; TODO confirm.
    sinogram = -np.log(sinogram)
    # Normalise by the global maximum. (Fixes the misspelled local
    # 'sinoram_max' of the original; behaviour is unchanged.)
    sinogram_max = sinogram.max()
    sinogram = sinogram / sinogram_max
    return sinogram
def get_reconstruction(sinogram, reconstruction_function):
    """Build the angle array for this scan and run *reconstruction_function*.

    :param sinogram: 2D array; axis 1 length gives the number of projections
    :param reconstruction_function: one of the astra_tomo2d_fanflat_* functions
    :return: reconstruction, rotated back to the notebook's display orientation
    """
    # 0.1 degree step per projection; the -11.493867*2 offset is presumably the
    # scanner's start-angle calibration -- TODO confirm against acquisition log.
    angles = np.arange(sinogram.shape[1])*0.1-11.493867*2
    angles = angles.astype('float64')/180.*np.pi  # degrees -> radians
    # angles = angles + (angles.max()+angles.min())/2.
    # angles = angles - np.pi
    # ASTRA expects the transposed orientation; rotate in, rotate back out.
    astra_data = np.rot90(sinogram)
    astra_rec = reconstruction_function(astra_data, angles)
    astra_rec = np.rot90(astra_rec)
    return astra_rec
def get_reconstruction_fbp(sinogram):
    """Reconstruct *sinogram* with the fan-beam FBP (CUDA) algorithm."""
    return get_reconstruction(sinogram, astra_tomo2d_fanflat_fbp)

def get_reconstruction_sirt(sinogram):
    """Reconstruct *sinogram* with the fan-beam SIRT (CUDA) algorithm."""
    return get_reconstruction(sinogram, astra_tomo2d_fanflat_sirt)

def get_reconstruction_plugin(sinogram):
    """Reconstruct *sinogram* with the custom SIRT-PLUGIN algorithm."""
    return get_reconstruction(sinogram, astra_tomo2d_fanflat_plugin)
# FIX: removed a stray bare-name expression statement
# (`astra_tomo2d_fanflat_plugin`) that evaluated the function object and
# discarded it -- a no-op left over from notebook editing.
def show_sinogram(sinogram_number):
    """Load sinogram *sinogram_number* and display it in inverted grayscale."""
    sinogram = get_sinogram(sinogram_number)
    plt.figure(figsize=(7,7))
    plt.imshow(sinogram, cmap=plt.cm.gray_r)  # gray_r: high attenuation = dark
    plt.colorbar()
    plt.axis('tight')
    plt.title('Original')
    plt.show()
def show_reconstruction(rec, roi, title=None):
    """Display the *roi* region of reconstruction *rec* in grayscale.

    The plot title is 'Astra' when *title* is None, otherwise 'Astra <title>'.
    Does not create a figure or call plt.show(); the caller controls both.
    """
    plt.imshow(rec[roi], cmap=plt.cm.gray, interpolation='nearest')
    label = 'Astra' if title is None else 'Astra {}'.format(title)
    plt.title(label)
def show_original(sinogram_number, roi):
    """Display the *roi* region of the vendor (Skyscan/NRecon) reconstruction.

    Reads the module-level globals ``data_root`` and ``object_name`` defined
    earlier in the notebook to locate the PNG slice.
    """
    # plt.figure(figsize=(12,10))
    source_rec = plt.imread(os.path.join(data_root,
        'Reconstructed','{}_rec{:04d}.png'.format(object_name, sinogram_number)))[...,0]
    source_rec = np.rot90(source_rec)
    # Rescale [0,1] PNG values; 0.52/-0.18 are presumably NRecon's output
    # dynamic-range limits -- TODO confirm against the reconstruction log.
    source_rec = np.array(source_rec)*(0.52+0.18)-0.18
    plt.imshow(source_rec[roi], cmap=plt.cm.gray, interpolation='nearest')
    # plt.colorbar(orientation='vertical')
    plt.title('Skyscan')
    # plt.show()
# --- Baseline run (no corrections) for slice N ---
N=960
sinogram_orig = get_sinogram(N)
rec_fbp = get_reconstruction_fbp(sinogram_orig)
# Re-project the FBP reconstruction; note the extra +90 degree offset here
# versus get_reconstruction -- presumably to match orientation, TODO confirm.
angles = np.arange(sinogram_orig.shape[1])*0.1+90-11.493867*2
sinogram_fbp = create_sinogram(rec_fbp, angles*np.pi/180)
# # rec_plugin = get_reconstruction_plugin(sinogram)
# rec_sirt = get_reconstruction_sirt(sinogram)
# Regions of interest to display; only the full-frame ROI is active.
rois = []
# rois.append(np.ix_(np.r_[1900:2400],np.r_[2000:2500]))
# rois.append(np.ix_(np.r_[1100:1700],np.r_[2500:3200]))
# rois.append(np.ix_(np.r_[2660:2800],np.r_[2200:2400]))
# rois.append(np.ix_(np.r_[2750:2900],np.r_[2600:2800]))
# rois.append(np.ix_(np.r_[3000:3500],np.r_[2000:3000]))
# rois.append(np.ix_(np.r_[2700:3500],np.r_[1300:2100]))
# rois.append(np.ix_(np.r_[1000:1600],np.r_[1600:2200]))
rois.append(np.ix_(np.r_[0:4000],np.r_[0:4000]))
# for roi in rois:
#     plt.figure(figsize = (20,20))
#     plt.subplot(121)
#     show_original(N ,roi)
#     plt.subplot(122)
#     show_reconstruction(rec_fbp, roi, 'FBP')
#     # plt.subplot(224)
#     # show_reconstruction(rec_sirt, roi, 'SIRT')
#     # plt.show()
#     # show_reconstruction(rec_plugin, roi, 'SIRT-PLUGIN')
#     # show_reconstruction(rec_sirt, roi, 'SIRT')
for roi in rois:
    plt.figure(figsize = (10,10))
    show_reconstruction(rec_fbp, roi, 'FBP')
    # NOTE: a fixed output name, so with multiple ROIs the last one wins.
    plt.savefig('fbp_no_corrections.png')
# show_sinogram(N)
# plt.figure(figsize=(7,7))
# plt.imshow(sinogram_fbp, cmap=plt.cm.gray_r)
# plt.colorbar()
# plt.axis('tight')
# plt.title('FBP')
# plt.show()
# save_marina_txt(sinogram_orig,'sinogram_original_no_corrections')
# save_marina_txt(sinogram_fbp, 'sinogram_fbp_no_corrections')
# save_marina_txt(rec_fbp, 'rec_fbp_no_corrections')
# gamma corrections: sweep gamma over [0.5, 2) and compare reconstructions
from skimage.exposure import adjust_gamma
N=960
for gamma in np.arange(0.5,2,0.1):
    sinogram_orig = get_sinogram(N)
    sinogram_orig = adjust_gamma(sinogram_orig, gamma)
    rec_fbp = get_reconstruction_fbp(sinogram_orig)
    angles = np.arange(sinogram_orig.shape[1])*0.1+90-11.493867*2
    sinogram_fbp = create_sinogram(rec_fbp, angles*np.pi/180)
    # # rec_plugin = get_reconstruction_plugin(sinogram)
    # rec_sirt = get_reconstruction_sirt(sinogram)
    rois = []
    # rois.append(np.ix_(np.r_[1900:2400],np.r_[2000:2500]))
    # rois.append(np.ix_(np.r_[1100:1700],np.r_[2500:3200]))
    # rois.append(np.ix_(np.r_[2660:2800],np.r_[2200:2400]))
    # rois.append(np.ix_(np.r_[2750:2900],np.r_[2600:2800]))
    # rois.append(np.ix_(np.r_[3000:3500],np.r_[2000:3000]))
    # rois.append(np.ix_(np.r_[2700:3500],np.r_[1300:2100]))
    # rois.append(np.ix_(np.r_[1000:1600],np.r_[1600:2200]))
    rois.append(np.ix_(np.r_[0:4000],np.r_[0:4000]))
    # Side-by-side: vendor reconstruction vs our FBP, per ROI.
    for roi in rois:
        plt.figure(figsize = (20,20))
        plt.subplot(121)
        show_original(N ,roi)
        plt.subplot(122)
        show_reconstruction(rec_fbp, roi, 'FBP')
        # # plt.subplot(224)
        # # show_reconstruction(rec_sirt, roi, 'SIRT')
        # # plt.show()
        # # show_reconstruction(rec_plugin, roi, 'SIRT-PLUGIN')
        # # show_reconstruction(rec_sirt, roi, 'SIRT')
    show_sinogram(N)
    plt.figure(figsize=(7,7))
    plt.imshow(sinogram_fbp, cmap=plt.cm.gray_r)
    plt.colorbar()
    plt.axis('tight')
    plt.title('FBP')
    plt.show()
    # NOTE(review): save_marina_txt is defined in a later notebook cell; this
    # relies on that cell having been executed first.
    save_marina_txt(sinogram_orig,'sinogram_original_gamma_{}'.format(gamma))
    save_marina_txt(sinogram_fbp, 'sinogram_fbp_gamma_{}'.format(gamma))
    save_marina_txt(rec_fbp, 'rec_fbp_gamma_{}'.format(gamma))
def save_marina_txt(data, name):
    """Dump a 2D array to '<name>_<rows>_<cols>.txt', one value per line.

    Values are written in '%0.3e' scientific notation as a single flat column;
    the array shape is encoded in the file name so external tools can restore it.

    :param data: 2D numpy array
    :param name: base name for the output file (written to the CWD)
    """
    file_name = '{}_{}_{}.txt'.format(name, data.shape[0], data.shape[1])
    # FIX: the original used a Python 2 print statement, a SyntaxError on Python 3.
    print(name, data.shape, data.dtype, file_name)
    np.savetxt(file_name, data.reshape(np.prod(data.shape), 1), fmt='%0.3e')
# NOTE(review): rec_sirt is never computed in this chunk (the SIRT call above
# is commented out); this cell assumes an earlier interactive run produced it.
np.save('rec_sirt',rec_sirt)
np.save('rec_fbp',rec_fbp)
source_rec = plt.imread(os.path.join(data_root,
    'Reconstructed','{}_rec{:04d}.png'.format(object_name, N)))[...,0]
np.save('rec_orig',source_rec)
rec_sirt = np.load('rec_sirt.npy')
rec_fbp = np.load('rec_fbp.npy')
# Export each saved array as a flat text column (same format as save_marina_txt).
# NOTE(review): the prints below are Python 2 statements -- SyntaxError on Py3.
for name in ['rec_sirt', 'rec_fbp', 'rec_orig']:
    x = np.load(name+'.npy')
    print name, x.shape, x.dtype
    np.savetxt('{}_{}_{}.txt'.format(name, x.shape[0], x.shape[1]), x.reshape(np.prod(x.shape),1), fmt='%0.3e')
# IPython shell magics (only valid inside a notebook/IPython session):
!head rec_sirt_4000_4000.txt
# NOTE(review): gzip compresses each named file individually; 'rec.zip' here is
# treated as another file to gzip, not as an output archive -- likely a mistake.
!gzip *.txt rec.zip
save_marina_txt(sinogram_orig,'sinogram_original_normalized')
# Ring-artifact removal via tomopy (expects a 3D stack, hence the 1-slice axis).
import tomopy
data_orig = np.load('rec_sirt.npy')
print data_orig.shape
data = np.zeros(shape=(1, data_orig.shape[0], data_orig.shape[1]))
data[0] = data_orig*1e3
data_corr = tomopy.misc.corr.remove_ring(data)
plt.imshow(data_corr[0,1900:2100,1900:2100])
plt.colorbar()
plt.imshow(data[0,1900:2100,1900:2100])
plt.colorbar()
save_marina_txt(data[0],'rec_sirt_norings')
!gzip rec_sirt_norings_4000_4000.txt
# Repeat the FBP export for a second slice.
N=980
sinogram_orig = get_sinogram(N)
rec_fbp = get_reconstruction_fbp(sinogram_orig)
save_marina_txt(rec_fbp,'rec_fbp_980')
!gzip rec_fbp_980_4000_4000.txt
```
| github_jupyter |
# TRTR Dataset F
```
#import libraries
import warnings
warnings.filterwarnings("ignore")
import numpy as np
import pandas as pd
import os
print('Libraries imported!!')

#define directory of functions and actual directory
HOME_PATH = '' #home path of the project
FUNCTIONS_DIR = "EVALUATION FUNCTIONS/UTILITY"
ACTUAL_DIR = os.getcwd()

#change directory to functions directory so the local module can be imported
os.chdir(HOME_PATH + FUNCTIONS_DIR)
#import functions for data labelling analysis
from utility_evaluation import DataPreProcessor
from utility_evaluation import train_evaluate_model
#change back to the original working directory
os.chdir(ACTUAL_DIR)
print('Functions imported!!')
```
## 1. Read data
```
#read real dataset
train_data = pd.read_csv(HOME_PATH + 'REAL DATASETS/TRAIN DATASETS/F_IndianLiverPatient_Real_Train.csv')
# cast categorical columns so downstream preprocessing treats them as such
categorical_columns = ['gender','class']
for col in categorical_columns :
    train_data[col] = train_data[col].astype('category')
train_data

#read test data (same categorical casting as the training split)
test_data = pd.read_csv(HOME_PATH + 'REAL DATASETS/TEST DATASETS/F_IndianLiverPatient_Real_Test.csv')
for col in categorical_columns :
    test_data[col] = test_data[col].astype('category')
test_data

target = 'class'
#quick look at the breakdown of class values
print('Train data')
print(train_data.shape)
print(train_data.groupby(target).size())
print('#####################################')
print('Test data')
print(test_data.shape)
print(test_data.groupby(target).size())
```
## 2. Pre-process training data
```
target = 'class'
# 'class' is the label, so only 'gender' remains categorical among the features
categorical_columns = ['gender']
numerical_columns = train_data.select_dtypes(include=['int64','float64']).columns.tolist()
# presumably the category levels used by the encoder -- confirm in DataPreProcessor
categories = [np.array([0, 1])]
data_preprocessor = DataPreProcessor(categorical_columns, numerical_columns, categories)
x_train = data_preprocessor.preprocess_train_data(train_data.loc[:, train_data.columns != target])
y_train = train_data.loc[:, target]
x_train.shape, y_train.shape
```
## 3. Preprocess test data
```
# reuse the preprocessor fitted on the training data for the test split
x_test = data_preprocessor.preprocess_test_data(test_data.loc[:, test_data.columns != target])
y_test = test_data.loc[:, target]
x_test.shape, y_test.shape
```
## 4. Create a dataset to save the results
```
# accumulator: one row of metrics per trained model
results = pd.DataFrame(columns = ['model','accuracy','precision','recall','f1'])
results
```
## 4. Train and evaluate Random Forest Classifier
```
rf_results = train_evaluate_model('RF', x_train, y_train, x_test, y_test)
# NOTE(review): DataFrame.append is deprecated and removed in pandas >= 2.0;
# these cells will need pd.concat when pandas is upgraded. Left unchanged here.
results = results.append(rf_results, ignore_index=True)
rf_results
```

## 5. Train and Evaluate KNeighbors Classifier

```
knn_results = train_evaluate_model('KNN', x_train, y_train, x_test, y_test)
results = results.append(knn_results, ignore_index=True)
knn_results
```

## 6. Train and evaluate Decision Tree Classifier

```
dt_results = train_evaluate_model('DT', x_train, y_train, x_test, y_test)
results = results.append(dt_results, ignore_index=True)
dt_results
```

## 7. Train and evaluate Support Vector Machines Classifier

```
svm_results = train_evaluate_model('SVM', x_train, y_train, x_test, y_test)
results = results.append(svm_results, ignore_index=True)
svm_results
```

## 8. Train and evaluate Multilayer Perceptron Classifier

```
mlp_results = train_evaluate_model('MLP', x_train, y_train, x_test, y_test)
results = results.append(mlp_results, ignore_index=True)
mlp_results
```

## 9. Save results file

```
# write the accumulated metrics table for the "real data" baseline
results.to_csv('RESULTS/models_results_real.csv', index=False)
results
```
| github_jupyter |
### PRMT-2039 ["HYPOTHESIS"] Table showing distribution number of messages per pathway and status
We have so far assumed that a particular transfer status is associated with a particular number of messages:
E.g. Pending would typically be expected to have 3, while Integrated would usually be expected to have 4.
However, we have seen fewer than 3 for Pending on inspection of some Vision-to-Vision transfers.
By generating a full table:
- for 6 months of transfers,
- broken down by status and supplier pathway,
- showing what % of transfers had what number of messages,
we can:
- check the degree to which this assumption holds.
- Identify areas (eg by pathway and/or status) for further investigation
- This may allow us to redefine our statuses from their current 4
```
import pandas as pd
import numpy as np

# Using data generated from branch PRMT-1742-duplicates-analysis.
# This is needed to correctly handle duplicates.
# Once the upstream pipeline has a fix for duplicate EHRs, then we can go back to using the main output.
transfer_file_location = "s3://prm-gp2gp-data-sandbox-dev/transfers-duplicates-hypothesis/"
transfer_files = [
    "9-2020-transfers.parquet",
    "10-2020-transfers.parquet",
    "11-2020-transfers.parquet",
    "12-2020-transfers.parquet",
    "1-2021-transfers.parquet",
    "2-2021-transfers.parquet"
]
transfer_input_files = [transfer_file_location + f for f in transfer_files]
# concatenate the six monthly parquet extracts into one frame
transfers_raw = pd.concat((
    pd.read_parquet(f)
    for f in transfer_input_files
))

# In the data from the PRMT-1742-duplicates-analysis branch, these columns have been added , but contain only empty values.
transfers_raw = transfers_raw.drop(["sending_supplier", "requesting_supplier"], axis=1)

# Given the findings in PRMT-1742 - many duplicate EHR errors are misclassified, the below reclassifies the relevant data
# A transfer counts as integrated if any completion-ack code is NaN (no error) or 15.
has_at_least_one_successful_integration_code = lambda errors: any((np.isnan(error) or error==15 for error in errors))
successful_transfers_bool = transfers_raw['request_completed_ack_codes'].apply(has_at_least_one_successful_integration_code)
transfers = transfers_raw.copy()
transfers.loc[successful_transfers_bool, "status"] = "INTEGRATED"

# Correctly interpret certain sender errors as failed.
# This is explained in PRMT-1974. Eventually this will be fixed upstream in the pipeline.
pending_sender_error_codes=[6,7,10,24,30,23,14,99]
transfers_with_pending_sender_code_bool=transfers['sender_error_code'].isin(pending_sender_error_codes)
transfers_with_pending_with_error_bool=transfers['status']=='PENDING_WITH_ERROR'
transfers_which_need_pending_to_failure_change_bool=transfers_with_pending_sender_code_bool & transfers_with_pending_with_error_bool
transfers.loc[transfers_which_need_pending_to_failure_change_bool,'status']='FAILED'

# Add integrated Late status: integrated, but after the 8-day SLA
eight_days_in_seconds=8*24*60*60
transfers_after_sla_bool=transfers['sla_duration']>eight_days_in_seconds
transfers_with_integrated_bool=transfers['status']=='INTEGRATED'
transfers_integrated_late_bool=transfers_after_sla_bool & transfers_with_integrated_bool
transfers.loc[transfers_integrated_late_bool,'status']='INTEGRATED LATE'
# If the record integrated after 28 days, change the status back to pending or pending with error.
# This is to handle each month consistently and to always reflect a transfer's status 28 days after it was made.
# TBD how this is handled upstream in the pipeline
twenty_eight_days_in_seconds = 28 * 24 * 60 * 60
transfers_after_month_bool = transfers['sla_duration'] > twenty_eight_days_in_seconds
transfers_pending_at_month_bool = transfers_after_month_bool & transfers_integrated_late_bool
transfers.loc[transfers_pending_at_month_bool, 'status'] = 'PENDING'
# A transfer had an early error if the sender reported an error code OR at
# least one intermediate error code was recorded.
# BUG FIX: the original second term was `(~transfers.loc[:, ...].apply(len) > 0)`.
# `~` binds tighter than `>`, so it evaluated `(-len - 1) > 0`, which is always
# False for non-negative lengths -- the intermediate-error condition never fired.
transfers_with_early_error_bool = (~transfers.loc[:, 'sender_error_code'].isna()) | (transfers.loc[:, 'intermediate_error_codes'].apply(len) > 0)
transfers.loc[transfers_with_early_error_bool & transfers_pending_at_month_bool, 'status'] = 'PENDING_WITH_ERROR'
# Supplier name mapping: raw registry names -> short supplier labels
supplier_renaming = {
    "EGTON MEDICAL INFORMATION SYSTEMS LTD (EMIS)":"EMIS",
    "IN PRACTICE SYSTEMS LTD":"Vision",
    "MICROTEST LTD":"Microtest",
    "THE PHOENIX PARTNERSHIP":"TPP",
    None: "Unknown"
}

asid_lookup_file = "s3://prm-gp2gp-data-sandbox-dev/asid-lookup/asidLookup-Mar-2021.csv.gz"
asid_lookup = pd.read_csv(asid_lookup_file)
lookup = asid_lookup[["ASID", "MName", "NACS","OrgName"]]

# Attach supplier/ODS/practice-name columns for both ends of each transfer via ASID.
transfers = transfers.merge(lookup, left_on='requesting_practice_asid',right_on='ASID',how='left')
transfers = transfers.rename({'MName': 'requesting_supplier', 'ASID': 'requesting_supplier_asid', 'NACS': 'requesting_ods_code','OrgName':'requesting_practice_name'}, axis=1)
transfers = transfers.merge(lookup, left_on='sending_practice_asid',right_on='ASID',how='left')
transfers = transfers.rename({'MName': 'sending_supplier', 'ASID': 'sending_supplier_asid', 'NACS': 'sending_ods_code','OrgName':'sending_practice_name'}, axis=1)
transfers["sending_supplier"] = transfers["sending_supplier"].replace(supplier_renaming.keys(), supplier_renaming.values())
transfers["requesting_supplier"] = transfers["requesting_supplier"].replace(supplier_renaming.keys(), supplier_renaming.values())

# Import the message data - output of PRMT-2038 (see 33-PRMT-2038-generate-new-fields-raw-splunk-data notebook)
interaction_file_name='s3://prm-gp2gp-data-sandbox-dev/extra-fields-data-from-splunk/Sept_20_Feb_21_conversations_interaction_messages.parquet'
message_lists=pd.read_parquet(interaction_file_name)

# Merge the datasets (message lists are indexed by conversation id)
combined_transfers=transfers.merge(message_lists,left_on='conversation_id',right_index=True ,how='left')
# Add in the number of messages as the main field
combined_transfers['interaction length']=combined_transfers['interaction name'].apply(len)

# Generate a table to count the transfers of each message length broken down by pathway/status
message_count_table=combined_transfers.pivot_table(index=['requesting_supplier','sending_supplier','status'],columns='interaction length',values='conversation_id',aggfunc='count').fillna(0).astype(int)
message_count_table
# For each pathway/status, what is the percentage in each message length?
message_count_table_percentages=message_count_table.div(message_count_table.sum(axis=1),axis=0).multiply(100).round(2)
message_count_table_percentages.reset_index()
# Uncomment to output CSV to S3 location
# message_count_table_percentages.reset_index().to_csv('s3://prm-gp2gp-data-sandbox-dev/notebook-outputs/34--PRMT-2039-message-length-counts-by-pathway-and-status.csv')
```
| github_jupyter |
```
%load_ext autoreload
%autoreload 2
from functools import partial
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import sklearn.utils
from sklearn.datasets import make_classification, load_diabetes
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, balanced_accuracy_score, r2_score
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
np.set_printoptions(threshold=5) # to limit printing
from vflow import Vset, init_args, dict_to_df, perturbation_stats
from vflow.pipeline import build_graph
```
# Basic Pipeline
`vflow` allows us to easily construct a pipeline with several perturbations (e.g. different data subsamples, models, and metrics) by wrapping the set of functions at each stage in a `Vset`. We can then perform aggregate operations on our `Vset` (e.g. to fit all perturbations) and easily access downstream results.
```
# initialize data: small synthetic binary-classification problem
np.random.seed(13)
X, y = make_classification(n_samples=50, n_features=5)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
X_train, X_test, y_train, y_test = init_args(
    (X_train, X_test, y_train, y_test),
    names=['X_train', 'X_test', 'y_train', 'y_test']  # optionally name the args
)

# subsample data: three bootstrap resamples of 20 points each
subsampling_funcs = [
    partial(sklearn.utils.resample, n_samples=20, random_state=i)
    for i in range(3)
]
subsampling_set = Vset(name='subsampling',
                       modules=subsampling_funcs,
                       output_matching=True)
X_trains, y_trains = subsampling_set(X_train, y_train)

# fit models: every (subsample, model) combination is fit
models = [LogisticRegression(max_iter=1000, tol=0.1), DecisionTreeClassifier()]
modeling_set = Vset(name='modeling',
                    modules=models,
                    module_keys=["LR", "DT"])
modeling_set.fit(X_trains, y_trains)
preds_test = modeling_set.predict(X_test)

# get metrics for every prediction set
binary_metrics_set = Vset(name='binary_metrics',
                          modules=[accuracy_score, balanced_accuracy_score],
                          module_keys=["Acc", "Bal_Acc"])
binary_metrics = binary_metrics_set.evaluate(preds_test, y_test)
```
Our pipeline can be visualized from any stage using `build_graph(vset, draw=True)`:
```
# visualise the pipeline DAG from the metrics stage backwards
G = build_graph(binary_metrics, draw=True)
plt.show()
```

`Vset` outputs can be easily converted to pandas dataframes using `dict_to_df(out)`:

```
# one row per (subsample, model, metric) combination
df = dict_to_df(binary_metrics)
df
```

We can then compute aggregate statistics on specified pipeline stages using `perturbations_stats(data, *group_by)`:

```
perturbation_stats(df, 'subsampling')
```
# Feature Engineering Pipeline
This `vflow` pipeline predicts disease progression using the diabetes dataset (regression).
```
# get data as df (diabetes regression dataset)
np.random.seed(13)
data = load_diabetes()
df = pd.DataFrame.from_dict(data['data'])
df.columns = data['feature_names']
y = data['target']
X_train, X_test, y_train, y_test = init_args(train_test_split(df, y, random_state=123),
                                             names=['X_train', 'X_test', 'y_train', 'y_test'])

# feature extraction - extracts two different sets of features from the same data
def extract_feats(df: pd.DataFrame, feat_names=['age', 'sex', 'bmi', 'bp']) -> pd.DataFrame:
    '''extract specific columns from dataframe

    NOTE: the mutable list default is never mutated here, so it is safe.
    '''
    return df[feat_names]

feat_extraction_funcs = [partial(extract_feats, feat_names=['age', 'sex', 'bmi', 'bp']),
                         partial(extract_feats, feat_names=['age', 'sex', 'bmi', 'bp', 's1', 's2', 's3', 's4', 's5', 's6']),
                         ]
feat_extraction = Vset(name='feat_extraction', modules=feat_extraction_funcs, output_matching=True)
X_feats_train = feat_extraction(X_train)
X_feats_test = feat_extraction(X_test)

modeling_set = Vset(name='modeling',
                    modules=[DecisionTreeRegressor(), RandomForestRegressor()],
                    module_keys=["DT", "RF"])
modeling_set.fit(X_feats_train, y_train)

# get predictions
# NOTE(review): predictions and the r2 metric below are computed on the
# *training* split, so this reports training fit, not held-out performance.
preds_all = modeling_set.predict(X_feats_train)

# get metrics
hard_metrics_set = Vset(name='hard_metrics',
                        modules=[r2_score],
                        module_keys=["r2"])
hard_metrics = hard_metrics_set.evaluate(preds_all, y_train)

# inspect the pipeline
G = build_graph(hard_metrics, draw=True)
plt.show()
df = dict_to_df(hard_metrics)
df
```
| github_jupyter |
<h1>Table of Contents<span class="tocSkip"></span></h1>
<div class="toc"><ul class="toc-item"><li><span><a href="#Goal" data-toc-modified-id="Goal-1"><span class="toc-item-num">1 </span>Goal</a></span></li><li><span><a href="#Var" data-toc-modified-id="Var-2"><span class="toc-item-num">2 </span>Var</a></span></li><li><span><a href="#Init" data-toc-modified-id="Init-3"><span class="toc-item-num">3 </span>Init</a></span></li><li><span><a href="#Combining-all-antismash-GBKs" data-toc-modified-id="Combining-all-antismash-GBKs-4"><span class="toc-item-num">4 </span>Combining all antismash GBKs</a></span><ul class="toc-item"><li><span><a href="#Writing-table-of-BGC-IDs" data-toc-modified-id="Writing-table-of-BGC-IDs-4.1"><span class="toc-item-num">4.1 </span>Writing table of BGC IDs</a></span></li></ul></li><li><span><a href="#BiGSCAPE" data-toc-modified-id="BiGSCAPE-5"><span class="toc-item-num">5 </span>BiGSCAPE</a></span></li><li><span><a href="#Summary" data-toc-modified-id="Summary-6"><span class="toc-item-num">6 </span>Summary</a></span><ul class="toc-item"><li><span><a href="#Load" data-toc-modified-id="Load-6.1"><span class="toc-item-num">6.1 </span>Load</a></span></li><li><span><a href="#Summarize" data-toc-modified-id="Summarize-6.2"><span class="toc-item-num">6.2 </span>Summarize</a></span></li><li><span><a href="#Writing-fam-clan-table" data-toc-modified-id="Writing-fam-clan-table-6.3"><span class="toc-item-num">6.3 </span>Writing fam-clan table</a></span></li></ul></li><li><span><a href="#sessionInfo" data-toc-modified-id="sessionInfo-7"><span class="toc-item-num">7 </span>sessionInfo</a></span></li></ul></div>
# Goal
* Run `BiGSCAPE` on all BGCs identified by `antismash`
# Var
```
# Dereplicated MAGs
MAG_dir = file.path('/ebio', 'abt3_projects', 'Georg_animal_feces', 'data',
'metagenome', 'multi-study', 'BioProjects', 'summary',
'LLMGA', 'wGeorgAnimal', 'drep-0.95')
# de-rep'd MAG metadata
MAG_meta_file = file.path(MAG_dir, 'drep-MAG_metadata.tsv')
# antismash dir
antismash_dir = file.path(MAG_dir, 'BGCs', 'antismash_v5')
# working dir
work_dir = file.path(MAG_dir, 'BGCs', 'bigscape')
# pfam dir (hmms)
pfam_dir = '/ebio/abt3_projects/databases_no-backup/pfam/v32.0/'
# table of BGC UUIDs
BGC_uuid_file = file.path(antismash_dir, 'BGC_uuids.tsv')
# BGC metadata
BGC_meta_file = '/ebio/abt3_projects/Georg_animal_feces/data/metagenome/multi-study/BioProjects/summary/LLMGA/wGeorgAnimal/drep-0.95/BGCs/antismash_v5/BGC_product_summary.tsv'
# params
conda_env = 'bigscape'
threads = 24
```
# Init
```
library(dplyr)
library(tidyr)
library(ggplot2)
library(data.table)
library(doParallel)
library(LeyLabRMisc)
library(future)
library(future.batchtools)
library(future.apply)
options(future.wait.interval = 2.0)
df.dims()
# fixed seed so any UUID generation below is reproducible
set.seed(7834)
make_dir(work_dir)
```
# Combining all antismash GBKs
```
antismash_gbk_dir = file.path(work_dir, 'antismash_gbks')
make_dir(antismash_gbk_dir)

# listing all antismash gbk files
## WARNING: a bit slow
gbk_files = list.files(antismash_dir, '*.gbk', full.names=TRUE, recursive=TRUE)
# keep only the per-region BGC genbank files
gbk_files = gbk_files[grepl('.+\\.\\.\\.region[0-9]+\\.gbk$', gbk_files)]
gbk_files %>% length %>% print

# creating an index of file names (MAG = parent directory of each gbk)
gbk_files = data.frame(MAG = basename(dirname(gbk_files)),
                       gbk_file = gbk_files)
gbk_files

# joining with existing UUIDs
gbk_files = gbk_files %>%
    inner_join(read.delim(BGC_uuid_file, sep='\t'),
               by=c('MAG', 'gbk_file'))
gbk_files

# # creating unique BGC IDs
# ## WARNING: this is dependent on set.seed
# gbk_files$BGC_ID = sapply(1:nrow(gbk_files), uuid) %>%
#     gsub('-', '', .) %>%
#     gsub('^', 'BGC-', .)
# gbk_files

# creating output file names: <antismash_gbk_dir>/<BGC_ID>.gbk
gbk_files = gbk_files %>%
    mutate(X = antismash_gbk_dir,
           Y = gsub('$', '.gbk', BGC_ID)) %>%
    unite(out_file, X, Y, sep='/')
gbk_files

# copying files into the flat per-BGC-ID directory
cp_file = function(in_file, out_file){
    # guard against clobbering the source file
    if(out_file == in_file){
        stop('output == input')
    }
    file.copy(in_file, out_file, overwrite = TRUE)
}
ret = mapply(cp_file,
             in_file=gbk_files$gbk_file %>% as.character,
             out_file=gbk_files$out_file %>% as.character)
ret %>% length

# checking number of files
list.files(antismash_gbk_dir, '*.gbk') %>% length %>% print
```
## Writing table of BGC IDs
```
# WARNING: not needed unless changing
# Persist the BGC_ID <-> (MAG, gbk_file) mapping next to the antismash output.
BGC_id_file = file.path(antismash_dir, 'BGC_uuids.tsv')
gbk_files %>%
    dplyr::select(BGC_ID, MAG, gbk_file) %>%
    write.table(file=BGC_id_file, sep='\t', quote=FALSE, row.names=FALSE)
cat('File written:', BGC_id_file, '\n')
```
# BiGSCAPE
```
exe = '/ebio/abt3_projects/Georg_animal_feces/bin/BiG-SCAPE/bigscape.py'
# --mibig: include MIBiG reference BGCs; --include_gbk_str "*": take every gbk
cmd = '{exe} -i {in_dir} -o {out_dir} --pfam_dir {pfam_dir} --cores {threads} --mibig --include_gbk_str \"*\"'
cmd = glue::glue(cmd, exe=exe,
                 in_dir=antismash_gbk_dir,
                 out_dir=work_dir, pfam_dir=pfam_dir,
                 threads=threads)
cmd
# run the command inside the bigscape conda env
bash_job(cmd, conda_env=conda_env, stderr=TRUE)
```
# Summary
```
# quick sanity check that BiG-SCAPE produced its html output
cat('Output:', work_dir, '\n')
list.files(file.path(work_dir, 'html_content'))
```
## Load
```
# Loading all clan files (one per BGC class, at the 0.30/0.70 cutoffs)
clan_files = list.files(file.path(work_dir, 'network_files'),
                        pattern='*_clans_0.30_0.70.tsv',
                        recursive=TRUE, full.names=TRUE)
clan_files %>% length

#' read one clan file; the BGC class is the name of its parent directory
merge_clans = function(F){
    df = read.delim(F, sep='\t')
    df$BGC_type = F %>% dirname %>% basename
    return(df)
}
clans = clan_files %>%
    as.list %>%
    lapply(merge_clans) %>%
    do.call(rbind, .) %>%
    # MAG-derived BGCs carry the 'BGC-' UUID prefix; others are MIBiG refs
    mutate(MAG_BGC = grepl('^BGC-', X.BGC.Name))
# status
clans$X.BGC.Name %>% length
clans$X.BGC.Name %>% unique %>% length
clans$BGC_type %>% table
clans$MAG_BGC %>% table
clans

# numbers of clans & families (MAG-derived BGCs only)
cat('No. of clans:', clans %>%
    filter(MAG_BGC == TRUE) %>%
    .$Clan.Number %>% unique %>% length, '\n')
cat('No. of families:', clans %>%
    filter(MAG_BGC == TRUE) %>%
    .$Family.Number %>% unique %>% length, '\n')

# attach MAG / gbk provenance via the BGC UUID
clans = clans %>%
    left_join(gbk_files %>% dplyr::select(-out_file),
              by=c('X.BGC.Name'='BGC_ID'))
clans

# adding BGC metadata & writing table
BGC_summary_file = file.path(work_dir, 'BGC-BiGSCAPE_summary.tsv')
BGC_meta = read.delim(BGC_meta_file, sep='\t')
BGC_meta %>%
    left_join(clans, c('gbk_file')) %>%
    distinct(product, gbk_file, X.BGC.Name, .keep_all=TRUE) %>%
    dplyr::select(genome_ID, product, Clan.Number, Family.Number, BGC_type) %>%
    write_table(BGC_summary_file)
```
## Summarize
```
# BGC distribution per class, split by MAG-derived vs reference
dt.dims(40)
clans %>%
    group_by(BGC_type, MAG_BGC) %>%
    summarize(n = n()) %>%
    ungroup() %>%
    arrange(MAG_BGC)

# Families that contain at least one reference (non-MAG) BGC
fam_wRef = clans %>%
    filter(MAG_BGC == FALSE) %>%
    .$Family.Number
cat('Families with a ref BGC:', fam_wRef %>% unique %>% length, '\n')
# which MAG BGCs fall into those families?
clans %>%
    filter(Family.Number %in% fam_wRef,
           MAG_BGC == TRUE) %>%
    group_by(BGC_type) %>%
    summarize(n = n()) %>%
    ungroup()

# Clans that contain at least one reference BGC
clan_wRef = clans %>%
    filter(MAG_BGC == FALSE) %>%
    .$Clan.Number
cat('Clans with a ref BGC:', clan_wRef %>% unique %>% length, '\n')

# adding ref info
clans = clans %>%
    mutate(BGC_fam_has_ref = Family.Number %in% fam_wRef)
clans

# fraction of families (per BGC class) with a reference member
clans_s = clans %>%
    group_by(BGC_type, BGC_fam_has_ref) %>%
    summarize(n = n_distinct(Family.Number)) %>%
    ungroup()
p = clans_s %>%
    ggplot(aes(BGC_type, n, fill=BGC_fam_has_ref)) +
    geom_bar(stat='identity', position='dodge') +
    geom_text(aes(label=n),
              position = position_dodge(width = 1),
              vjust=-0.1, size=2.7) +
    scale_y_log10(limits=c(1,700)) +
    scale_fill_discrete('BGC family\nhas BiGSCAPE\nRef.') +
    labs(x='BGC', y='BGC families') +
    theme_bw() +
    theme(
        axis.text.x = element_text(angle=45, hjust=1)
    )
dims(5,3,res=250)
Plot(p)

# tabular version of the same summary, with percent-with-reference
dt.dims(40)
clans_s %>%
    mutate(BGC_fam_has_ref = ifelse(BGC_fam_has_ref == TRUE, 'Ref', 'NoRef')) %>%
    spread(BGC_fam_has_ref, n) %>%
    mutate(perc_fams_w_ref = round(Ref / (Ref + NoRef) * 100,1)) %>%
    rename('BGC' = BGC_type,
           'No BiGSCAPE ref.' = NoRef,
           'BiGSCAPE ref.' = Ref,
           'Perc. with ref.' = perc_fams_w_ref) %>%
    arrange(-`Perc. with ref.`)
```
## Writing fam-clan table
```
# persist the full family/clan table for downstream notebooks
outfile = file.path(work_dir, 'bigscape_fam-clan.tsv')
write_table(clans, outfile)
```
# sessionInfo
```
# record R and conda environment versions for reproducibility
sessionInfo()
condaInfo(conda_env)
```
| github_jupyter |
```
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# Vertex AI: Vertex AI Migration: AutoML Video Object Tracking
<table align="left">
<td>
<a href="https://colab.research.google.com/github/GoogleCloudPlatform/ai-platform-samples/blob/master/vertex-ai-samples/tree/master/notebooks/official/migration/UJ15%20Vertex%20SDK%20AutoML%20Object%20Tracking.ipynb">
<img src="https://cloud.google.com/ml-engine/images/colab-logo-32px.png" alt="Colab logo"> Run in Colab
</a>
</td>
<td>
<a href="https://github.com/GoogleCloudPlatform/ai-platform-samples/blob/master/vertex-ai-samples/tree/master/notebooks/official/migration/UJ15%20Vertex%20SDK%20AutoML%20Object%20Tracking.ipynb">
<img src="https://cloud.google.com/ml-engine/images/github-logo-32px.png" alt="GitHub logo">
View on GitHub
</a>
</td>
</table>
<br/><br/><br/>
### Dataset
The dataset used for this tutorial is the [Traffic](https://todo) from [??](todo). The version of the dataset you will use in this tutorial is stored in a public Cloud Storage bucket.
### Costs
This tutorial uses billable components of Google Cloud:
* Vertex AI
* Cloud Storage
Learn about [Vertex AI
pricing](https://cloud.google.com/vertex-ai/pricing) and [Cloud Storage
pricing](https://cloud.google.com/storage/pricing), and use the [Pricing
Calculator](https://cloud.google.com/products/calculator/)
to generate a cost estimate based on your projected usage.
### Set up your local development environment
If you are using Colab or Google Cloud Notebooks, your environment already meets all the requirements to run this notebook. You can skip this step.
Otherwise, make sure your environment meets this notebook's requirements. You need the following:
- The Cloud Storage SDK
- Git
- Python 3
- virtualenv
- Jupyter notebook running in a virtual environment with Python 3
The Cloud Storage guide to [Setting up a Python development environment](https://cloud.google.com/python/setup) and the [Jupyter installation guide](https://jupyter.org/install) provide detailed instructions for meeting these requirements. The following steps provide a condensed set of instructions:
1. [Install and initialize the SDK](https://cloud.google.com/sdk/docs/).
2. [Install Python 3](https://cloud.google.com/python/setup#installing_python).
3. [Install virtualenv](https://cloud.google.com/python/setup#installing_and_using_virtualenv) and create a virtual environment that uses Python 3. Activate the virtual environment.
4. To install Jupyter, run `pip3 install jupyter` on the command-line in a terminal shell.
5. To launch Jupyter, run `jupyter notebook` on the command-line in a terminal shell.
6. Open this notebook in the Jupyter Notebook Dashboard.
## Installation
Install the latest version of Vertex SDK for Python.
```
import os

# Google Cloud Notebooks are a managed environment where packages must be
# installed into the user site with --user; elsewhere (Colab, local) no
# extra flag is needed.  The metadata file below only exists on Google
# Cloud Notebook instances.
if os.path.exists("/opt/deeplearning/metadata/env_version"):
    USER_FLAG = "--user"
else:
    USER_FLAG = ""

! pip3 install --upgrade google-cloud-aiplatform $USER_FLAG
```
Install the latest GA version of *google-cloud-storage* library as well.
```
! pip3 install -U google-cloud-storage $USER_FLAG

# Under automated testing, also pull the latest TensorFlow so the
# prediction-parsing cells below run against a current version.
if os.getenv("IS_TESTING"):
    ! pip3 install --upgrade tensorflow $USER_FLAG
```
### Restart the kernel
Once you've installed the additional packages, you need to restart the notebook kernel so it can find the packages.
```
import os

# Skip the restart under automated testing so CI runs are not killed
# mid-notebook.
if not os.getenv("IS_TESTING"):
    # Restart the kernel so the packages installed above become importable.
    import IPython

    IPython.Application.instance().kernel.do_shutdown(True)
```
## Before you begin
### GPU runtime
This tutorial does not require a GPU runtime.
### Set up your Google Cloud project
**The following steps are required, regardless of your notebook environment.**
1. [Select or create a Google Cloud project](https://console.cloud.google.com/cloud-resource-manager). When you first create an account, you get a $300 free credit towards your compute/storage costs.
2. [Make sure that billing is enabled for your project.](https://cloud.google.com/billing/docs/how-to/modify-project)
3. [Enable the following APIs: Vertex AI APIs, Compute Engine APIs, and Cloud Storage.](https://console.cloud.google.com/flows/enableapi?apiid=ml.googleapis.com,compute_component,storage-component.googleapis.com)
4. If you are running this notebook locally, you will need to install the [Cloud SDK](https://cloud.google.com/sdk).
5. Enter your project ID in the cell below. Then run the cell to make sure the
Cloud SDK uses the right project for all the commands in this notebook.
**Note**: Jupyter runs lines prefixed with `!` as shell commands, and it interpolates Python variables prefixed with `$`.
```
PROJECT_ID = "[your-project-id]"  # @param {type:"string"}

# If the placeholder was left untouched, fall back to the project that
# gcloud is currently configured with.
if PROJECT_ID == "" or PROJECT_ID is None or PROJECT_ID == "[your-project-id]":
    # Get your GCP project id from gcloud
    shell_output = ! gcloud config list --format 'value(core.project)' 2>/dev/null
    PROJECT_ID = shell_output[0]

print("Project ID:", PROJECT_ID)

# Make every subsequent gcloud/gsutil command target this project.
! gcloud config set project $PROJECT_ID
```
#### Region
You can also change the `REGION` variable, which is used for operations
throughout the rest of this notebook. Below are regions supported for Vertex AI. We recommend that you choose the region closest to you.
- Americas: `us-central1`
- Europe: `europe-west4`
- Asia Pacific: `asia-east1`
You may not use a multi-regional bucket for training with Vertex AI. Not all regions provide support for all Vertex AI services.
Learn more about [Vertex AI regions](https://cloud.google.com/vertex-ai/docs/general/locations)
```
REGION = "us-central1" # @param {type: "string"}
```
#### Timestamp
If you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append the timestamp onto the name of resources you create in this tutorial.
```
from datetime import datetime

# Second-resolution timestamp (YYYYMMDDHHMMSS) appended to resource names so
# concurrent sessions in a shared project do not collide.
TIMESTAMP = f"{datetime.now():%Y%m%d%H%M%S}"
```
### Authenticate your Google Cloud account
**If you are using Google Cloud Notebooks**, your environment is already authenticated. Skip this step.
**If you are using Colab**, run the cell below and follow the instructions when prompted to authenticate your account via oAuth.
**Otherwise**, follow these steps:
In the Cloud Console, go to the [Create service account key](https://console.cloud.google.com/apis/credentials/serviceaccountkey) page.
**Click Create service account**.
In the **Service account name** field, enter a name, and click **Create**.
In the **Grant this service account access to project** section, click the Role drop-down list. Type "Vertex" into the filter box, and select **Vertex Administrator**. Type "Storage Object Admin" into the filter box, and select **Storage Object Admin**.
Click Create. A JSON file that contains your key downloads to your local environment.
Enter the path to your service account key as the GOOGLE_APPLICATION_CREDENTIALS variable in the cell below and run the cell.
```
# If you are running this notebook in Colab, run this cell and follow the
# instructions to authenticate your GCP account. This provides access to your
# Cloud Storage bucket and lets you submit training jobs and prediction
# requests.
import os
import sys
# If on Google Cloud Notebook, then don't execute this code
if not os.path.exists("/opt/deeplearning/metadata/env_version"):
if "google.colab" in sys.modules:
from google.colab import auth as google_auth
google_auth.authenticate_user()
# If you are running this notebook locally, replace the string below with the
# path to your service account key and run this cell to authenticate your GCP
# account.
elif not os.getenv("IS_TESTING"):
%env GOOGLE_APPLICATION_CREDENTIALS ''
```
### Create a Cloud Storage bucket
**The following steps are required, regardless of your notebook environment.**
When you initialize the Vertex SDK for Python, you specify a Cloud Storage staging bucket. The staging bucket is where all the data associated with your dataset and model resources are retained across sessions.
Set the name of your Cloud Storage bucket below. Bucket names must be globally unique across all Google Cloud projects, including those outside of your organization.
```
BUCKET_NAME = "gs://[your-bucket-name]"  # @param {type:"string"}

# If the placeholder was not overridden, derive a globally unique bucket name
# from the project ID and session timestamp.  A "-" separator between the
# project ID and the "aip" marker keeps the generated name readable (the
# original concatenated them directly, producing names like
# "gs://myprojectaip-...").
if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "gs://[your-bucket-name]":
    BUCKET_NAME = "gs://" + PROJECT_ID + "-aip-" + TIMESTAMP
```
**Only if your bucket doesn't already exist**: Run the following cell to create your Cloud Storage bucket.
```
! gsutil mb -l $REGION $BUCKET_NAME
```
Finally, validate access to your Cloud Storage bucket by examining its contents:
```
! gsutil ls -al $BUCKET_NAME
```
### Set up variables
Next, set up some variables used throughout the tutorial.
### Import libraries and define constants
```
import google.cloud.aiplatform as aip
```
## Initialize Vertex SDK for Python
Initialize the Vertex SDK for Python for your project and corresponding bucket.
```
# Initialize the Vertex SDK once; all subsequent aip.* calls use this project
# and staging bucket by default.
aip.init(project=PROJECT_ID, staging_bucket=BUCKET_NAME)
```
#### Location of Cloud Storage training data.
Now set the variable `IMPORT_FILE` to the location of the CSV index file in Cloud Storage.
```
# CSV index file in a public bucket listing the training videos and their
# object-tracking labels.
IMPORT_FILE = "gs://automl-video-demo-data/traffic_videos/traffic_videos_labels.csv"
```
#### Quick peek at your data
This tutorial uses a version of the Traffic dataset that is stored in a public Cloud Storage bucket, using a CSV index file.
Start by doing a quick peek at the data. You count the number of examples by counting the number of rows in the CSV index file (`wc -l`) and then peek at the first few rows.
```
# Prefer IMPORT_FILES (a list) when a previous cell defined one; otherwise
# use the single IMPORT_FILE set above.
if "IMPORT_FILES" in globals():
    FILE = IMPORT_FILES[0]
else:
    FILE = IMPORT_FILE

# Each row of the CSV index file is one example, so the line count is the
# number of examples in the dataset.
count = ! gsutil cat $FILE | wc -l
print("Number of Examples", int(count[0]))

print("First 10 rows")
! gsutil cat $FILE | head
```
## Create a dataset
### [datasets.create-dataset-api](https://cloud.google.com/vertex-ai/docs/datasets/create-dataset-api)
### Create the Dataset
Next, create the `Dataset` resource using the `create` method for the `VideoDataset` class, which takes the following parameters:
- `display_name`: The human readable name for the `Dataset` resource.
- `gcs_source`: A list of one or more dataset index files to import the data items into the `Dataset` resource.
This operation may take several minutes.
```
# Create a managed VideoDataset and import the CSV index file.  The
# object-tracking import schema tells Vertex AI how to interpret the labels
# in the index file.  This is a long-running operation (several minutes).
dataset = aip.VideoDataset.create(
    display_name="Traffic" + "_" + TIMESTAMP,
    gcs_source=[IMPORT_FILE],
    import_schema_uri=aip.schema.dataset.ioformat.video.object_tracking,
)

print(dataset.resource_name)
```
*Example Output:*
INFO:google.cloud.aiplatform.datasets.dataset:Creating VideoDataset
INFO:google.cloud.aiplatform.datasets.dataset:Create VideoDataset backing LRO: projects/759209241365/locations/us-central1/datasets/5948525032035581952/operations/6913187331100901376
INFO:google.cloud.aiplatform.datasets.dataset:VideoDataset created. Resource name: projects/759209241365/locations/us-central1/datasets/5948525032035581952
INFO:google.cloud.aiplatform.datasets.dataset:To use this VideoDataset in another session:
INFO:google.cloud.aiplatform.datasets.dataset:ds = aiplatform.VideoDataset('projects/759209241365/locations/us-central1/datasets/5948525032035581952')
INFO:google.cloud.aiplatform.datasets.dataset:Importing VideoDataset data: projects/759209241365/locations/us-central1/datasets/5948525032035581952
INFO:google.cloud.aiplatform.datasets.dataset:Import VideoDataset data backing LRO: projects/759209241365/locations/us-central1/datasets/5948525032035581952/operations/6800597340416638976
## Train a model
### [training.automl-api](https://cloud.google.com/vertex-ai/docs/training/automl-api)
### Create and run training pipeline
To train an AutoML model, you perform two steps: 1) create a training pipeline, and 2) run the pipeline.
#### Create training pipeline
An AutoML training pipeline is created with the `AutoMLVideoTrainingJob` class, with the following parameters:
- `display_name`: The human readable name for the `TrainingJob` resource.
- `prediction_type`: The type task to train the model for.
- `classification`: A video classification model.
- `object_tracking`: A video object tracking model.
- `action_recognition`: A video action recognition model.
The instantiated object is the DAG (directed acyclic graph) for the training pipeline.
```
# Define (but do not yet run) the AutoML training pipeline.
# "object_tracking" selects a video object-tracking model, matching the
# dataset's import schema above.
dag = aip.AutoMLVideoTrainingJob(
    display_name="traffic_" + TIMESTAMP,
    prediction_type="object_tracking",
)

print(dag)
```
*Example output:*
<google.cloud.aiplatform.training_jobs.AutoMLVideoTrainingJob object at 0x7fc3b6c90f10>
#### Run the training pipeline
Next, you run the DAG to start the training job by invoking the method `run`, with the following parameters:
- `dataset`: The `Dataset` resource to train the model.
- `model_display_name`: The human readable name for the trained model.
- `training_fraction_split`: The percentage of the dataset to use for training.
- `test_fraction_split`: The percentage of the dataset to use for test (holdout data).
The `run` method when completed returns the `Model` resource.
The execution of the training pipeline will take up to 20 minutes.
```
# Run the training pipeline.  This call blocks until training finishes and
# returns the trained Model resource.  80% of the data is used for training
# and the remaining 20% is held out for the final test evaluation.
model = dag.run(
    dataset=dataset,
    model_display_name="traffic_" + TIMESTAMP,
    training_fraction_split=0.8,
    test_fraction_split=0.2,
)
```
*Example output:*
INFO:google.cloud.aiplatform.training_jobs:View Training:
https://console.cloud.google.com/ai/platform/locations/us-central1/training/6090621516762841088?project=759209241365
INFO:google.cloud.aiplatform.training_jobs:AutoMLVideoTrainingJob projects/759209241365/locations/us-central1/trainingPipelines/6090621516762841088 current state:
PipelineState.PIPELINE_STATE_RUNNING
INFO:google.cloud.aiplatform.training_jobs:AutoMLVideoTrainingJob projects/759209241365/locations/us-central1/trainingPipelines/6090621516762841088 current state:
PipelineState.PIPELINE_STATE_RUNNING
INFO:google.cloud.aiplatform.training_jobs:AutoMLVideoTrainingJob projects/759209241365/locations/us-central1/trainingPipelines/6090621516762841088 current state:
PipelineState.PIPELINE_STATE_RUNNING
INFO:google.cloud.aiplatform.training_jobs:AutoMLVideoTrainingJob projects/759209241365/locations/us-central1/trainingPipelines/6090621516762841088 current state:
PipelineState.PIPELINE_STATE_RUNNING
INFO:google.cloud.aiplatform.training_jobs:AutoMLVideoTrainingJob projects/759209241365/locations/us-central1/trainingPipelines/6090621516762841088 current state:
PipelineState.PIPELINE_STATE_RUNNING
INFO:google.cloud.aiplatform.training_jobs:AutoMLVideoTrainingJob projects/759209241365/locations/us-central1/trainingPipelines/6090621516762841088 current state:
PipelineState.PIPELINE_STATE_RUNNING
INFO:google.cloud.aiplatform.training_jobs:AutoMLVideoTrainingJob projects/759209241365/locations/us-central1/trainingPipelines/6090621516762841088 current state:
PipelineState.PIPELINE_STATE_RUNNING
INFO:google.cloud.aiplatform.training_jobs:AutoMLVideoTrainingJob projects/759209241365/locations/us-central1/trainingPipelines/6090621516762841088 current state:
...
INFO:google.cloud.aiplatform.training_jobs:AutoMLVideoTrainingJob run completed. Resource name: projects/759209241365/locations/us-central1/trainingPipelines/6090621516762841088
INFO:google.cloud.aiplatform.training_jobs:Model available at projects/759209241365/locations/us-central1/models/1899701006099283968
## Evaluate the model
### [projects.locations.models.evaluations.list](https://cloud.devsite.corp.google.com/ai-platform-unified/docs/reference/rest/v1beta1/projects.locations.models.evaluations/list)
## Review model evaluation scores
After your model has finished training, you can review the evaluation scores for it.
First, you need to get a reference to the new model. As with datasets, you can either use the reference to the model variable you created when you deployed the model or you can list all of the models in your project.
```
# Get model resource ID
models = aip.Model.list(filter="display_name=traffic_" + TIMESTAMP)
# Get a reference to the Model Service client
client_options = {"api_endpoint": f"{REGION}-aiplatform.googleapis.com"}
model_service_client = aip.gapic.ModelServiceClient(client_options=client_options)
model_evaluations = model_service_client.list_model_evaluations(
parent=models[0].resource_name
)
model_evaluation = list(model_evaluations)[0]
print(model_evaluation)
```
*Example output:*
name: "projects/759209241365/locations/us-central1/models/623915674158235648/evaluations/4280507618583117824"
metrics_schema_uri: "gs://google-cloud-aiplatform/schema/modelevaluation/classification_metrics_1.0.0.yaml"
metrics {
struct_value {
fields {
key: "auPrc"
value {
number_value: 0.9891107
}
}
fields {
key: "confidenceMetrics"
value {
list_value {
values {
struct_value {
fields {
key: "precision"
value {
number_value: 0.2
}
}
fields {
key: "recall"
value {
number_value: 1.0
}
}
}
}
## Make batch predictions
### [predictions.batch-prediction](https://cloud.google.com/vertex-ai/docs/predictions/batch-predictions)
### Get test item(s)
Now do a batch prediction to your Vertex model. You will use arbitrary examples out of the dataset as a test items. Don't be concerned that the examples were likely used in training the model -- we just want to demonstrate how to make a prediction.
```
# Grab the first two rows of the CSV index file to use as test items.
test_items = ! gsutil cat $IMPORT_FILE | head -n2

cols_1 = test_items[0].split(",")
cols_2 = test_items[1].split(",")

# NOTE(review): rows with more than 12 columns appear to carry an extra
# leading column, shifting the video URI and label one position to the
# right — confirm against the index-file schema.
if len(cols_1) > 12:
    test_item_1 = str(cols_1[1])
    test_item_2 = str(cols_2[1])
    test_label_1 = str(cols_1[2])
    test_label_2 = str(cols_2[2])
else:
    test_item_1 = str(cols_1[0])
    test_item_2 = str(cols_2[0])
    test_label_1 = str(cols_1[1])
    test_label_2 = str(cols_2[1])

print(test_item_1, test_label_1)
print(test_item_2, test_label_2)
```
### Make a batch input file
Now make a batch input file, which you store in your local Cloud Storage bucket. The batch input file can be either CSV or JSONL. You will use JSONL in this tutorial. For JSONL file, you make one dictionary entry per line for each video. The dictionary contains the key/value pairs:
- `content`: The Cloud Storage path to the video.
- `mimeType`: The content type. In our example, it is a `avi` file.
- `timeSegmentStart`: The start timestamp in the video to do prediction on. *Note*, the timestamp must be specified as a string and followed by s (second), m (minute) or h (hour).
- `timeSegmentEnd`: The end timestamp in the video to do prediction on.
```
import json

import tensorflow as tf

# Write a JSONL batch-prediction input file to the staging bucket: one JSON
# object per video, each requesting predictions over the first 5 seconds.
gcs_input_uri = BUCKET_NAME + "/test.jsonl"
with tf.io.gfile.GFile(gcs_input_uri, "w") as f:
    data = {
        "content": test_item_1,
        "mimeType": "video/avi",
        "timeSegmentStart": "0.0s",
        "timeSegmentEnd": "5.0s",
    }
    f.write(json.dumps(data) + "\n")
    data = {
        "content": test_item_2,
        "mimeType": "video/avi",
        "timeSegmentStart": "0.0s",
        "timeSegmentEnd": "5.0s",
    }
    f.write(json.dumps(data) + "\n")

# Show the file that was just written.
print(gcs_input_uri)
! gsutil cat $gcs_input_uri
```
### Make the batch prediction request
Now that your Model resource is trained, you can make a batch prediction by invoking the batch_predict() method, with the following parameters:
- `job_display_name`: The human readable name for the batch prediction job.
- `gcs_source`: A list of one or more batch request input files.
- `gcs_destination_prefix`: The Cloud Storage location for storing the batch prediction results.
- `sync`: If set to True, the call will block while waiting for the asynchronous batch job to complete.
```
# Submit the batch prediction job.  sync=False returns immediately; the job
# runs asynchronously and is awaited later with batch_predict_job.wait().
batch_predict_job = model.batch_predict(
    job_display_name="traffic_" + TIMESTAMP,
    gcs_source=gcs_input_uri,
    gcs_destination_prefix=BUCKET_NAME,
    sync=False,
)

print(batch_predict_job)
```
*Example output:*
INFO:google.cloud.aiplatform.jobs:Creating BatchPredictionJob
<google.cloud.aiplatform.jobs.BatchPredictionJob object at 0x7f806a6112d0> is waiting for upstream dependencies to complete.
INFO:google.cloud.aiplatform.jobs:BatchPredictionJob created. Resource name: projects/759209241365/locations/us-central1/batchPredictionJobs/5110965452507447296
INFO:google.cloud.aiplatform.jobs:To use this BatchPredictionJob in another session:
INFO:google.cloud.aiplatform.jobs:bpj = aiplatform.BatchPredictionJob('projects/759209241365/locations/us-central1/batchPredictionJobs/5110965452507447296')
INFO:google.cloud.aiplatform.jobs:View Batch Prediction Job:
https://console.cloud.google.com/ai/platform/locations/us-central1/batch-predictions/5110965452507447296?project=759209241365
INFO:google.cloud.aiplatform.jobs:BatchPredictionJob projects/759209241365/locations/us-central1/batchPredictionJobs/5110965452507447296 current state:
JobState.JOB_STATE_RUNNING
### Wait for completion of batch prediction job
Next, wait for the batch job to complete. Alternatively, one can set the parameter `sync` to `True` in the `batch_predict()` method to block until the batch prediction job is completed.
```
# Block until the asynchronous batch prediction job completes.
batch_predict_job.wait()
```
*Example Output:*
INFO:google.cloud.aiplatform.jobs:BatchPredictionJob created. Resource name: projects/759209241365/locations/us-central1/batchPredictionJobs/181835033978339328
INFO:google.cloud.aiplatform.jobs:To use this BatchPredictionJob in another session:
INFO:google.cloud.aiplatform.jobs:bpj = aiplatform.BatchPredictionJob('projects/759209241365/locations/us-central1/batchPredictionJobs/181835033978339328')
INFO:google.cloud.aiplatform.jobs:View Batch Prediction Job:
https://console.cloud.google.com/ai/platform/locations/us-central1/batch-predictions/181835033978339328?project=759209241365
INFO:google.cloud.aiplatform.jobs:BatchPredictionJob projects/759209241365/locations/us-central1/batchPredictionJobs/181835033978339328 current state:
JobState.JOB_STATE_RUNNING
INFO:google.cloud.aiplatform.jobs:BatchPredictionJob projects/759209241365/locations/us-central1/batchPredictionJobs/181835033978339328 current state:
JobState.JOB_STATE_RUNNING
INFO:google.cloud.aiplatform.jobs:BatchPredictionJob projects/759209241365/locations/us-central1/batchPredictionJobs/181835033978339328 current state:
JobState.JOB_STATE_RUNNING
INFO:google.cloud.aiplatform.jobs:BatchPredictionJob projects/759209241365/locations/us-central1/batchPredictionJobs/181835033978339328 current state:
JobState.JOB_STATE_RUNNING
INFO:google.cloud.aiplatform.jobs:BatchPredictionJob projects/759209241365/locations/us-central1/batchPredictionJobs/181835033978339328 current state:
JobState.JOB_STATE_RUNNING
INFO:google.cloud.aiplatform.jobs:BatchPredictionJob projects/759209241365/locations/us-central1/batchPredictionJobs/181835033978339328 current state:
JobState.JOB_STATE_RUNNING
INFO:google.cloud.aiplatform.jobs:BatchPredictionJob projects/759209241365/locations/us-central1/batchPredictionJobs/181835033978339328 current state:
JobState.JOB_STATE_RUNNING
INFO:google.cloud.aiplatform.jobs:BatchPredictionJob projects/759209241365/locations/us-central1/batchPredictionJobs/181835033978339328 current state:
JobState.JOB_STATE_RUNNING
INFO:google.cloud.aiplatform.jobs:BatchPredictionJob projects/759209241365/locations/us-central1/batchPredictionJobs/181835033978339328 current state:
JobState.JOB_STATE_SUCCEEDED
INFO:google.cloud.aiplatform.jobs:BatchPredictionJob run completed. Resource name: projects/759209241365/locations/us-central1/batchPredictionJobs/181835033978339328
### Get the predictions
Next, get the results from the completed batch prediction job.
The results are written to the Cloud Storage output bucket you specified in the batch prediction request. You call the method iter_outputs() to get a list of each Cloud Storage file generated with the results. Each file contains one or more prediction requests in a JSON format:
- `content`: The prediction request.
- `prediction`: The prediction response.
- `id`: The internal assigned unique identifiers for each prediction request.
- `displayName`: The class names for the predicted label.
- `confidences`: The predicted confidence, between 0 and 1, per class label.
- `timeSegmentStart`: The time offset in the video to the start of the video sequence.
- `timeSegmentEnd`: The time offset in the video to the end of the video sequence.
- `frames`: Location with frames of the tracked object.
```
import json

import tensorflow as tf

# Collect the Cloud Storage object names of the prediction result files
# produced by the batch prediction job.
bp_iter_outputs = batch_predict_job.iter_outputs()

prediction_results = [
    blob.name
    for blob in bp_iter_outputs
    if blob.name.split("/")[-1].startswith("prediction")
]
# (The original also built an unused `tags` list here; removed as dead code.)

# Each line of a result file is one JSON-encoded prediction.  Print only the
# first prediction of each file (the `break` exits the per-line loop).
for prediction_result in prediction_results:
    gfile_name = f"gs://{bp_iter_outputs.bucket.name}/{prediction_result}"
    with tf.io.gfile.GFile(name=gfile_name, mode="r") as gfile:
        for line in gfile.readlines():
            line = json.loads(line)
            print(line)
            break
```
*Example Output:*
{'instance': {'content': 'gs://automl-video-demo-data/traffic_videos/highway_005.mp4', 'mimeType': 'video/avi', 'timeSegmentStart': '0.0s', 'timeSegmentEnd': '5.0s'}, 'prediction': [{'id': '3767468895993069568', 'displayName': 'sedan', 'timeSegmentStart': '5s', 'timeSegmentEnd': '5s', 'confidence': 0.61968166, 'frames': [{'timeOffset': '5s', 'xMin': 0.62725407, 'xMax': 0.8197686, 'yMin': 0.5445565, 'yMax': 0.6752002}]}, {'id': '3767468895993069568', 'displayName': 'sedan', 'timeSegmentStart': '0.100s', 'timeSegmentEnd': '0.600s', 'confidence': 0.5820198, 'frames': [{'timeOffset': '0.100s', 'xMin': 0.82725096, 'xMax': 0.99906015, 'yMin': 0.6930067, 'yMax': 0.87328714}, {'timeOffset': '0.200s', 'xMin': 0.7275455, 'xMax': 0.96733177, 'yMin': 0.56486815, 'yMax': 0.7271576}, {'timeOffset': '0.300s', 'xMin': 0.6559273, 'xMax': 0.922594, 'yMin': 0.67381793, 'yMax': 0.87236154}, {'timeOffset': '0.400s', 'xMin': 0.52804255, 'xMax': 0.8188649, 'yMin': 0.7136257, 'yMax': 0.9117379}, {'timeOffset': '0.500s', 'xMin': 0.38841337, 'xMax': 0.68499833, 'yMin': 0.7241354, 'yMax': 0.9293221}, {'timeOffset': '0.600s', 'xMin': 0.22706872, 'xMax': 0.5414151, 'yMin': 0.71608, 'yMax': 0.93481314}]}, {'id': '3767468895993069568', 'displayName': 'sedan', 'timeSegmentStart': '3s', 'timeSegmentEnd': '4s', 'confidence': 0.49189684, 'frames': [{'timeOffset': '3s', 'xMin': 0.7998861, 'xMax': 0.9865881, 'yMin': 0.6979363, 'yMax': 0.8698516}, {'timeOffset': '3.100s', 'xMin': 0.7835889, 'xMax': 0.9862553, 'yMin': 0.7052906, 'yMax': 0.8742318}, {'timeOffset': '3.200s', 'xMin': 0.7530042, 'xMax': 0.9377618, 'yMin': 0.5567795, 'yMax': 0.6924586}, {'timeOffset': '3.300s', 'xMin': 0.6876858, 'xMax': 0.8742672, 'yMin': 0.5218122, 'yMax': 0.6595952}, {'timeOffset': '3.400s', 'xMin': 0.62348735, 'xMax': 0.8095002, 'yMin': 0.5097083, 'yMax': 0.6480995}, {'timeOffset': ...
# Cleaning up
To clean up all Google Cloud resources used in this project, you can [delete the Google Cloud
project](https://cloud.google.com/resource-manager/docs/creating-managing-projects#shutting_down_projects) you used for the tutorial.
Otherwise, you can delete the individual resources you created in this tutorial:
- Dataset
- Pipeline
- Model
- Endpoint
- AutoML Training Job
- Batch Job
- Custom Job
- Hyperparameter Tuning Job
- Cloud Storage Bucket
```
# Best-effort cleanup: each resource may or may not exist depending on which
# cells were run, so every deletion is guarded by a globals() check and any
# failure is only printed, never raised.
delete_all = True

if delete_all:
    # Delete the dataset using the Vertex dataset object
    try:
        if "dataset" in globals():
            dataset.delete()
    except Exception as e:
        print(e)

    # Delete the model using the Vertex model object
    try:
        if "model" in globals():
            model.delete()
    except Exception as e:
        print(e)

    # Delete the endpoint using the Vertex endpoint object
    try:
        if "endpoint" in globals():
            endpoint.delete()
    except Exception as e:
        print(e)

    # Delete the AutoML or Pipeline training job
    try:
        if "dag" in globals():
            dag.delete()
    except Exception as e:
        print(e)

    # Delete the custom training job
    try:
        if "job" in globals():
            job.delete()
    except Exception as e:
        print(e)

    # Delete the batch prediction job using the Vertex batch prediction object
    try:
        if "batch_predict_job" in globals():
            batch_predict_job.delete()
    except Exception as e:
        print(e)

    # Delete the hyperparameter tuning job using the Vertex hyperparameter tuning object
    try:
        if "hpt_job" in globals():
            hpt_job.delete()
    except Exception as e:
        print(e)

    # Finally remove the staging bucket and everything in it.
    if "BUCKET_NAME" in globals():
        ! gsutil rm -r $BUCKET_NAME
```
| github_jupyter |
```
# Clone the entire repo.
!git clone https://github.com/tensorflow/tcav.git tcav
%cd tcav
!ls
%cd /content/tcav/tcav/tcav_examples/image_models/imagenet
%run download_and_make_datasets.py --source_dir=YOUR_FOLDER --number_of_images_per_folder=10 --number_of_random_folders=10
%cd /content/tcav
```
# Running TCAV
This notebook walks you through things you need to run TCAV.
Before running this notebook, run the following to download all the data.
```
cd tcav/tcav_examples/image_models/imagenet
python download_and_make_datasets.py --source_dir=YOUR_PATH --number_of_images_per_folder=50 --number_of_random_folders=3
```
In high level, you need:
1. **example images in each folder** (you have this if you ran the above)
* images for each concept
* images for the class/labels of interest
* random images that will be negative examples when learning CAVs (images that probably don't belong to any concepts)
2. **model wrapper** (below uses example from tcav/model.py)
* an instance of ModelWrapper abstract class (in model.py). This tells TCAV class (tcav.py) how to communicate with your model (e.g., getting internal tensors)
3. **act_generator** (below uses example from tcav/activation_generator.py)
* an instance of ActivationGeneratorInterface that tells TCAV class how to load example data and how to get activations from the model
## Requirements
pip install the tcav and tensorflow packages (or tensorflow-gpu if using GPU)
```
%load_ext autoreload
%autoreload 2
import tcav.activation_generator as act_gen
import tcav.cav as cav
import tcav.model as model
import tcav.tcav as tcav
import tcav.utils as utils
import tcav.utils_plot as utils_plot # utils_plot requires matplotlib
import os
import tensorflow as tf
```
## Step 1. Store concept and target class images to local folders
and tell TCAV where they are.
**source_dir**: where images of concepts, target class and random images (negative samples when learning CAVs) live. Each should be a sub-folder within this directory.
Note that random image directories can be in any name. In this example, we are using `random500_0`, `random500_1`,.. for an arbitrary reason.
You need roughly 50-200 images per concept and target class (10-20 pictures also tend to work, but 200 is pretty safe).
**cav_dir**: directory to store CAVs (`None` if you don't want to store)
**target, concept**: names of the target class (that you want to investigate) and concepts (strings) - these are folder names in source_dir
**bottlenecks**: list of bottleneck names (intermediate layers in your model) that you want to use for TCAV. These names are defined in the model wrapper below.
```
# Model wrapper to use (InceptionV3 and GoogleNet are provided in model.py).
model_to_run = 'GoogleNet'

# Parent directory under which results are cached (only if caching is on).
project_name = 'tcav_class_test'

working_dir = '/content/tcav/tcav'

# Where activations are stored (only if your act_gen_wrapper does so).
activation_dir = f"{working_dir}/activations/"

# Where learned CAVs are stored; use None to disable CAV caching.
cav_dir = f"{working_dir}/cavs/"

# Folder containing the concept, target-class and random image sub-folders.
source_dir = '/content/tcav/tcav/tcav_examples/image_models/imagenet/YOUR_FOLDER'

# Intermediate layers of the model to run TCAV against.
bottlenecks = ['mixed4c']  # @param

for directory in (activation_dir, working_dir, cav_dir):
    utils.make_dir_if_not_exists(directory)

# Regularizer penalty for the linear classifier that learns CAVs.
alphas = [0.1]

target = 'zebra'
concepts = ["dotted", "striped", "zigzagged"]
```
## Step 2. Write your model wrapper
Next step is to tell TCAV how to communicate with your model. See `model.GoogleNetWrapper_public ` for details.
You can define a subclass of ModelWrapper abstract class to do this. Let me walk you thru what each function does (tho they are pretty self-explanatory). This wrapper includes a lot of the functions that you already have, for example, `get_prediction`.
### 1. Tensors from the graph: bottleneck tensors and ends
First, store your bottleneck tensors in `self.bottlenecks_tensors` as a dictionary. You only need bottlenecks that you are interested in running TCAV with. Similarly, fill in `self.ends` dictionary with `input`, `logit` and `prediction` tensors.
### 2. Define loss
Get your loss tensor, and assigned it to `self.loss`. This is what TCAV uses to take directional derivatives.
While doing so, you would also want to set
```python
self.y_input
```
this simply is a tensorflow place holder for the target index in the logit layer (e.g., 0 index for a dog, 1 for a cat).
For multi-class classification, typically something like this works:
```python
self.y_input = tf.placeholder(tf.int64, shape=[None])
```
For example, for a multiclass classifier, something like below would work.
```python
# Construct gradient ops.
with g.as_default():
self.y_input = tf.placeholder(tf.int64, shape=[None])
self.pred = tf.expand_dims(self.ends['prediction'][0], 0)
self.loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(
labels=tf.one_hot(self.y_input, len(self.labels)),
logits=self.pred))
self._make_gradient_tensors()
```
### 3. Call _make_gradient_tensors in __init__() of your wrapper
```python
_make_gradient_tensors()
```
does what you expect - given the loss and bottleneck tensors defined above, it adds gradient tensors.
### 4. Fill in labels, image shapes and a model name.
Get the mapping from labels (strings) to indices in the logit layer (int) in a dictionary format.
```python
def id_to_label(self, idx)
def label_to_id(self, label)
```
Set your input image shape at `self.image_shape`
Set your model name to `self.model_name`
You are done with writing the model wrapper! I wrote two model wrappers, InceptionV3 and GoogleNet.
**sess**: a tensorflow session.
```
# Move the downloaded model directories out of YOUR_FOLDER to the paths this
# notebook expects, then remove the originals.
%cp -av '/content/tcav/tcav/tcav_examples/image_models/imagenet/YOUR_FOLDER/mobilenet_v2_1.0_224' '/content/tcav/tcav/mobilenet_v2_1.0_224'
%rm '/content/tcav/tcav/tcav_examples/image_models/imagenet/YOUR_FOLDER/mobilenet_v2_1.0_224'
%cp -av '/content/tcav/tcav/tcav_examples/image_models/imagenet/YOUR_FOLDER/inception5h' '/content/tcav/tcav/inception5h'
%rm '/content/tcav/tcav/tcav_examples/image_models/imagenet/YOUR_FOLDER/inception5h'

sess = utils.create_session()

# GRAPH_PATH is where the trained model (frozen GraphDef) is stored.
GRAPH_PATH = "/content/tcav/tcav/inception5h/tensorflow_inception_graph.pb"

# LABEL_PATH is where the labels are stored. Each line contains one class,
# ordered with respect to its index in the logit layer (the id_to_label
# function in the model wrapper reads from this file).
# For example, imagenet_comp_graph_label_strings.txt looks like:
#   dummy
#   kit fox
#   English setter
#   Siberian husky ...
LABEL_PATH = "/content/tcav/tcav/inception5h/imagenet_comp_graph_label_strings.txt"

# Wrap the frozen graph so TCAV can query bottlenecks, logits and gradients.
mymodel = model.GoogleNetWrapper_public(sess,
                                        GRAPH_PATH,
                                        LABEL_PATH)
```
## Step 3. Implement a class that returns activations (maybe with caching!)
Lastly, you will implement a class of the ActivationGenerationInterface which TCAV uses to load example data for a given concept or target, call into your model wrapper and return activations. I pulled out this logic outside of mymodel because this step often takes the longest. By making it modular, you can cache your activations and/or parallelize your computations, as I have done in `ActivationGeneratorBase.process_and_load_activations` in `activation_generator.py`.
The `process_and_load_activations` method of the activation generator must return a dictionary of activations that has concept or target name as a first key, and the bottleneck name as a second key. So something like:
```python
{concept1: {bottleneck1: [[0.2, 0.1, ....]]},
concept2: {bottleneck1: [[0.1, 0.02, ....]]},
target1: {bottleneck1: [[0.02, 0.99, ....]]}
```
```
# Activation generator: loads example images for each concept/target, runs
# them through the model wrapper, and caches activations in activation_dir
# (at most 100 examples per concept/target).
act_generator = act_gen.ImageActivationGenerator(mymodel, source_dir, activation_dir, max_examples=100)
```
## You are ready to run TCAV!
Let's do it.
**num_random_exp**: number of experiments to confirm meaningful concept direction. TCAV will search for this many folders named `random500_0`, `random500_1`, etc. You can alternatively set the `random_concepts` keyword to be a list of folders of random concepts. Run at least 10-20 for meaningful tests.
**random_counterpart**: as well as the above, you can optionally supply a single folder with random images as the "positive set" for statistical testing. Reduces computation time at the cost of less reliable random TCAV scores.
```
import absl
# Silence absl INFO/DEBUG logging so the run output stays readable.
absl.logging.set_verbosity(0)
num_random_exp=10
## Only running num_random_exp = 10 to save some time; the paper's numbers
## are reported for 500 random runs.
mytcav = tcav.TCAV(sess,
                   target,
                   concepts,
                   bottlenecks,
                   act_generator,
                   alphas,
                   cav_dir=cav_dir,
                   num_random_exp=num_random_exp)#10)
print ('This may take a while... Go get coffee!')
# run_parallel=False keeps everything in one process; slower but simpler to debug.
results = mytcav.run(run_parallel=False)
print ('done!')
utils_plot.plot_results(results, num_random_exp=num_random_exp)
```
| github_jupyter |
# Arena Scraper
## Import Deps
```
%config IPCompleter.greedy=True
# https://docs.python.org/3/library/datetime.html
import datetime as dt
# https://docs.python.org/3/howto/regex.html
import re
# https://beautiful-soup-4.readthedocs.io/en/latest/
from bs4 import BeautifulSoup as bs
# https://splinter.readthedocs.io/en/latest/#
from splinter import Browser
```
## Using Splinter to Download HTML
But first, check when the Splinter scraping code was last run, so the page isn't re-downloaded unnecessarily.
```
import pathlib
from os import scandir
def convert_date(timestamp):
    """Render a POSIX timestamp as a UTC date string like '01 Jan 1970'."""
    return dt.datetime.utcfromtimestamp(timestamp).strftime("%d %b %Y")
# Only hit Wikipedia if we don't already have a cached copy of the page.
file = pathlib.Path("arena.html")
if file.exists():
    info = file.stat()
    # st_mtime is the file's last-modification time as a POSIX timestamp.
    print(f"File already exists and last updated on {convert_date(info.st_mtime)}")
else:
    # Using Splinter to automate the browser actions.
    # Initiate headless driver for deployment; the context manager closes it.
    with Browser("chrome", executable_path="chromedriver", headless=True) as browser:
        # Visit URL
        url = "https://en.wikipedia.org/wiki/List_of_National_Basketball_Association_arenas"
        browser.visit(url)
        list_of_arena_a_tags = browser.find_by_css("a[class='image']")
        print(list_of_arena_a_tags.first.value)
        # I know the current URL I've passed to visit
        # is the site that holds all the logos I am looking for
        # so I just save the site and start working with bs4
        # Scrape page into soup
        html = browser.html
        # Save HTML to local machine so we don't continue to make requests
        with open("arena.html", "w", encoding="utf8") as f:
            f.write(html)
# browser = Browser("chrome", executable_path="chromedriver", headless=True)
# current HTMl file
# opened_html = ""
# Read in local HTML page
# Load the locally cached copy of the Wikipedia arenas page.
with open("arena.html", "r", encoding="UTF-8", errors="strict") as f:
    opened_html = f.read()
# Parse the raw HTML into a BeautifulSoup tree.
soup = bs(opened_html, "html.parser")
# Uncomment to inspect the document structure when planning scraping logic:
# print(soup.prettify())
# The first <table> on the page holds the arena data; pull the logo
# anchor tags (class "image") out of it and keep just their hrefs.
list_of_table_tags = soup.find_all("table")
wanted_table = list_of_table_tags[0]
list_of_arena_images = wanted_table.find_all("a", {"class": "image"})
clean_url_list_bs4 = [anchor.get("href") for anchor in list_of_arena_images]
```
## Using Pandas
```
# https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_html.html
import pandas as pd
# read_html returns a LIST of DataFrames, one per <table> whose text
# matches "Arena" — not a single DataFrame despite the _df name.
areans_df = pd.read_html(
    "https://en.wikipedia.org/wiki/List_of_National_Basketball_Association_arenas",
    match="Arena",
)
print(len(areans_df))
print(areans_df)
import time
# Initiate headless driver for deployment.
browser = Browser("chrome", executable_path="chromedriver", headless=True)
# Visit URL
url = "https://en.wikipedia.org/wiki/List_of_National_Basketball_Association_arenas"
browser.visit(url)
# Collect the File: page links behind each logo thumbnail.
list_of_arena_a_tags = browser.find_by_css("a[class='image']")
original_url_list_splinter = []
for i in list_of_arena_a_tags:
    original_url_list_splinter.append(i["href"])
clean_url_list_splinter = []
for x in original_url_list_splinter:
    # Visit each File: page and grab the "Original file" direct-image link.
    browser.visit(x)
    # Wait up to 5s for the page to render before querying it.
    browser.is_element_present_by_text("Original file", wait_time=5)
    more_info_elem = browser.find_by_css("a[class='internal']")
    clean_url_list_splinter.append(more_info_elem["href"])
# Stop webdriver and return data.
browser.quit()
print(clean_url_list_splinter)
```
## Download Images with Request
```
## Importing Necessary Modules
# https://requests.readthedocs.io/en/master/
# https://docs.python.org/3/library/shutil.html#module-shutil
import shutil # to save it locally
import requests # to get image from the web
# Download every image URL, saving files as 1.jpg, 2.jpg, ... in list order.
# Fixes the original off-by-one: counter was incremented before the first
# download, so files started at "2.jpg" (and failed downloads left gaps).
for counter, img_url in enumerate(clean_url_list_splinter, start=1):
    response = requests.get(img_url, stream=True)
    if response.status_code == 200:
        # Ensure gzip/deflate-encoded bodies are decoded when reading raw bytes.
        response.raw.decode_content = True
        # Stream the response straight to disk without loading it into memory.
        with open(f"{str(counter)}.jpg", "wb") as f:
            shutil.copyfileobj(response.raw, f)
        print(f"Image successfully Downloaded: {str(counter)}")
    else:
        print("Image Couldn't be retrieved")
```
| github_jupyter |
# Fine Mapping
I want to take the significant eQTL variants and functionally annotate them
to come up with a list of variants that might be causal. There is a lot of
functional data for the H1-hESC line that I can use along with data from
other places.
```
import copy
import cPickle
import glob
import os
import random
import re
import subprocess
import urllib2
import uuid
import cdpybio as cpb
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
pd.options.mode.chained_assignment = None
import pybedtools as pbt
import scipy
import scipy.stats as stats
import seaborn as sns
import tabix
import vcf as pyvcf
import cardipspy as cpy
import ciepy
%matplotlib inline
# Name of this analysis; used to build all output directory paths below.
dy_name = 'fine_mapping'
import socket
# On the Frazer-lab head nodes, stage pybedtools temp files in a sandbox dir
# instead of the default temp location.
if socket.gethostname() == 'fl-hn1' or socket.gethostname() == 'fl-hn2':
    dy = os.path.join(ciepy.root, 'sandbox', 'tmp', dy_name)
    cpy.makedir(dy)
    pbt.set_tempdir(dy)
# Shareable outputs vs. outputs containing private genotype-level data.
outdir = os.path.join(ciepy.root, 'output', dy_name)
cpy.makedir(outdir)
private_outdir = os.path.join(ciepy.root, 'private_output', dy_name)
cpy.makedir(private_outdir)
# Gencode lookup tables: transcript -> gene mappings and per-gene metadata.
tg = pd.read_table(cpy.gencode_transcript_gene, index_col=0,
                   header=None, squeeze=True)
gene_info = pd.read_table(cpy.gencode_gene_info, index_col=0)
transcript_to_gene = pd.read_table(cpy.gencode_transcript_gene, header=None,
                                   squeeze=True, index_col=0)
# PEER-corrected, standardized expression matrix used for the eQTL scan.
fn = os.path.join(ciepy.root, 'output', 'eqtl_input',
                  'tpm_log_filtered_phe_std_norm_peer_resid.tsv')
exp = pd.read_table(fn, index_col=0)
# Primary (eqtls01) and secondary (eqtls02) eQTL significance results.
fn = os.path.join(ciepy.root, 'output', 'eqtl_processing', 'eqtls01', 'qvalues.tsv')
qvalues = pd.read_table(fn, index_col=0)
fn = os.path.join(ciepy.root, 'output', 'eqtl_processing', 'eqtls02', 'qvalues.tsv')
secondary_qvalues = pd.read_table(fn, index_col=0)
fn = os.path.join(ciepy.root, 'output', 'eqtl_processing', 'eqtls01', 'lead_variants_single.tsv')
lead_variants_single = pd.read_table(fn, index_col=0)
# Keep only lead variants that pass the permutation-based significance test.
lead_variants_single_f = lead_variants_single[lead_variants_single.perm_sig]
genes = pbt.BedTool(cpy.gencode_gene_bed)
# Roadmap EID -> human-readable cell line name legend.
fn = os.path.join(os.path.split(cpy.roadmap_15_state_annotation)[0], 'EIDlegend.txt')
roadmap_ids = pd.read_table(fn, squeeze=True, index_col=0, header=None)
# All significant gene/variant pairs — the main table annotated in this notebook.
fn = os.path.join(ciepy.root, 'output', 'eqtl_processing', 'eqtls01', 'gene_variant_pairs.tsv')
gene_variants = pd.read_table(fn, index_col=0)
fn = os.path.join(ciepy.root, 'output', 'functional_annotation_analysis',
                  'encode_stem_cell_chip_seq.tsv')
encode_chip_seq = pd.read_table(fn, index_col=0)
# Map un-versioned Ensembl gene IDs back to the versioned gencode IDs.
ensg = pd.Series(gene_info.index, index=[x.split('.')[0] for x in gene_info.index])
fn = os.path.join(ciepy.root, 'output', 'cnv_analysis', 'cnv_gene_variants.pickle')
cnv_gv = pd.read_pickle(fn)
# Sample metadata for subjects, RNA-seq libraries, and WGS samples.
subject_meta = pd.read_table(os.path.join(ciepy.root, 'output', 'input_data', 'subject_metadata.tsv'),
                             index_col=0)
rna_meta = pd.read_table(os.path.join(ciepy.root, 'output', 'input_data', 'rnaseq_metadata.tsv'),
                         index_col=0)
wgs_meta = pd.read_table(os.path.join(ciepy.root, 'output', 'input_data', 'wgs_metadata.tsv'),
                         index_col=0)
# ASE results.
fn = os.path.join(ciepy.root, 'output', 'input_data',
                  'mbased_major_allele_freq.tsv')
maj_af = pd.read_table(fn, index_col=0)
fn = os.path.join(ciepy.root, 'output', 'input_data',
                  'mbased_p_val_ase.tsv')
ase_pval = pd.read_table(fn, index_col=0)
# Stack the two ASE matrices into a 3-D Panel (pandas <0.25 API).
locus_p = pd.Panel({'major_allele_freq':maj_af, 'p_val_ase':ase_pval})
locus_p = locus_p.swapaxes(0, 2)
# Per-sample MBASED SNV results and allele counts (filenames start with sample ID).
snv_fns = glob.glob(os.path.join(ciepy.root, 'private_output', 'input_data', 'mbased_snv',
                                 '*_snv.tsv'))
count_fns = glob.glob(os.path.join(ciepy.root, 'private_output', 'input_data', 'allele_counts',
                                   '*mbased_input.tsv'))
snv_res = {}
for fn in snv_fns:
    snv_res[os.path.split(fn)[1].split('_')[0]] = pd.read_table(fn, index_col=0)
count_res = {}
for fn in count_fns:
    count_res[os.path.split(fn)[1].split('_')[0]] = pd.read_table(fn, index_col=0)
snv_p = pd.Panel(snv_res)
# Position-weight matrices for motif scanning (Python 2 cPickle).
fn = os.path.join(ciepy.root, 'output', 'motif_search', 'matrices.pickle')
with open(fn) as f:
    matrices = cPickle.load(f)
fn = os.path.join(ciepy.root, 'output', 'motif_search', 'motif_info_full.tsv')
motif_info_full = pd.read_table(fn, index_col=0)
# Motif names look like TARGET_suffix; the prefix is the TF "target" group.
motif_info_full['target'] = [x.split('_')[0] for x in motif_info_full.index]
# NOTE(review): motif_info is read from motif_info_full.tsv even though
# motif_info_rep.tsv is also loaded below — confirm this is intentional.
fn = os.path.join(ciepy.root, 'output', 'motif_search', 'motif_info_full.tsv')
motif_info = pd.read_table(fn, index_col=0)
fn = os.path.join(ciepy.root, 'output', 'motif_search', 'motif_info_rep.tsv')
motif_info_rep = pd.read_table(fn, index_col=0)
motif_info['target'] = [x.split('_')[0] for x in motif_info.index]
```
I'll drop CNVs since they likely act through different mechanisms than I'm considering here.
```
# Summarize the association table, then drop CNVs — they likely act through
# different mechanisms (e.g. dosage) than the SNV/indel effects studied here.
n = gene_variants.shape[0]
print('{:,} total associations.'.format(n))
n = len(set(gene_variants.location))
print('{:,} total distinct SNV, indels, and CNVs with significant associations.'.format(n))
gene_variants = gene_variants[gene_variants.variant_type != 'cnv']
```
## Predicted NMD
I'll use the snpEff annotation of the WGS to look for predicted NMD. I'd like to
see if any of the significant variants are predicted to cause NMD for the eQTL
gene.
If snpEff predicts NMD for a gene, the annotation looks like
PITRM1|ENSG00000107959|11|0.27
This means that PITRM1 has 11 transcripts and 27% of them are predicted to have NMD.
```
# Find the per-chromosome snpEff-annotated VCFs (autosomes only).
fns = glob.glob('/projects/CARDIPS/pipeline/WGS/mergedVCF/annotation/vcf/*snpeff.vcf.gz')
fns = [x for x in fns if len(re.findall('chr\d{1,2}', x)) > 0]
fns = pd.Series(fns, index=[re.findall('chr\d{1,2}', x)[0] for x in fns])
# Use SnpSift (via IPython shell magic) to keep only records with an NMD annotation.
for vcf in fns.values:
    out = os.path.join(private_outdir, os.path.split(vcf)[1].replace('.vcf.gz', '_nmd.vcf'))
    if not os.path.exists(out):
        !zcat {vcf} | java -jar /software/snpEff_v4_1l_core/SnpSift.jar filter "NMD[*]" > {out}
fns = glob.glob(os.path.join(private_outdir, '*_nmd.vcf'))
variants = []
nmd = []
# Collect each NMD record's zero-based position string and its NMD INFO entries.
for fn in fns:
    vcf_reader = pyvcf.Reader(open(fn))
    for r in vcf_reader:
        variants.append('chr{}:{}-{}'.format(r.CHROM, r.POS - 1, r.POS))
        nmd.append(r.INFO['NMD'])
ind = []
vals = []
# Each NMD entry looks like GENE|ENSG|num_transcripts|fraction_nmd (see text above).
for i, v in enumerate(variants):
    r = nmd[i]
    for n in r:
        gene_name, gene_id, num_t, per_t = n.strip(')').split('|')
        ind.append(v + ':' + ensg[gene_id])
        vals.append([int(num_t), float(per_t)])
nmd = pd.DataFrame(vals, index=ind, columns=['nmd_num_transcripts', 'nmd_percent_transcripts_nmd'])
# Join NMD info onto variant:gene pairs; flag pairs and genes with any NMD variant.
tdf = nmd.ix[set(nmd.index) & set(gene_variants.index)]
gene_variants = gene_variants.join(tdf)
gene_variants['nmd'] = False
gene_variants.ix[gene_variants.dropna(subset=['nmd_num_transcripts']).index, 'nmd'] = True
g = set(gene_variants.ix[gene_variants.nmd, 'gene_id'])
df = pd.DataFrame(True, index=g, columns=['gene_has_nmd_variant'])
gene_variants = gene_variants.merge(df, left_on='gene_id', right_index=True, how='left')
gene_variants.ix[gene_variants.gene_has_nmd_variant.isnull(), 'gene_has_nmd_variant'] = False
```
## Exons, UTRs, and Promoters
I'll annotate whether variants fall in exons, UTRs or promoters for their significant
genes.
```
# Build a BED of all distinct variant positions once; reused for every intersection below.
gene_variants_bt = pbt.BedTool('\n'.join(set(gene_variants.apply(lambda x: '{}\t{}\t{}'.format(
    x['chrom'], x['start'], x['end']), axis=1))), from_string=True)
gene_variants_bt = gene_variants_bt.sort()
exons = pbt.BedTool(cpy.gencode_exon_bed)
utrs = pbt.BedTool(cpy.gencode_utr_bed)
promoters = pbt.BedTool(cpy.gencode_promoter_bed)
# --- Exons: flag (a) variants exonic anywhere and (b) variants exonic in their own eQTL gene.
res = gene_variants_bt.intersect(exons, wo=True, sorted=True)
df = res.to_dataframe()
df.index = df.chrom + ':' + df.start.astype(str) + '-' + df.end.astype(str)
# bedtools' 4th overlap column (transcript ID) lands in 'thickStart' after to_dataframe.
df['gene'] = transcript_to_gene[df.thickStart].values
df['val'] = pd.Series(df.index, index=df.index) + ':' + df.gene
df = df.drop_duplicates(subset='val')
gene_variants['exonic_same_gene'] = False
gene_variants.ix[set(df.val) & set(gene_variants.index), 'exonic_same_gene'] = True
df['exonic'] = True
df['variant'] = df.index
df = df.drop_duplicates(subset='variant')
gene_variants = gene_variants.merge(df[['exonic']], left_on='location', right_index=True, how='left')
gene_variants.ix[gene_variants.exonic.isnull(), 'exonic'] = False
# Per-gene flags: does the gene have any exonic variant / any exonic-in-same-gene variant?
g = set(gene_variants.ix[gene_variants.exonic, 'gene_id'])
df = pd.DataFrame(True, index=g, columns=['gene_has_exonic_variant'])
gene_variants = gene_variants.merge(df, left_on='gene_id', right_index=True, how='left')
gene_variants.ix[gene_variants.gene_has_exonic_variant.isnull(), 'gene_has_exonic_variant'] = False
g = set(gene_variants.ix[gene_variants.exonic_same_gene, 'gene_id'])
df = pd.DataFrame(True, index=g, columns=['gene_has_exonic_same_gene_variant'])
gene_variants = gene_variants.merge(df, left_on='gene_id', right_index=True, how='left')
gene_variants.ix[gene_variants.gene_has_exonic_same_gene_variant.isnull(), 'gene_has_exonic_same_gene_variant'] = False
# --- UTRs: same pattern as exons.
res = gene_variants_bt.intersect(utrs, wo=True, sorted=True)
df = res.to_dataframe()
df.index = df.chrom + ':' + df.start.astype(str) + '-' + df.end.astype(str)
df['gene'] = transcript_to_gene[df.thickStart].values
df['val'] = pd.Series(df.index, index=df.index) + ':' + df.gene
df = df.drop_duplicates(subset='val')
gene_variants['utr_same_gene'] = False
gene_variants.ix[set(df.val) & set(gene_variants.index), 'utr_same_gene'] = True
df['utr'] = True
df['variant'] = df.index
df = df.drop_duplicates(subset='variant')
gene_variants = gene_variants.merge(df[['utr']], left_on='location', right_index=True, how='left')
gene_variants.ix[gene_variants.utr.isnull(), 'utr'] = False
# --- Promoters: promoter BED names carry a suffix, so strip it before the gene lookup.
res = gene_variants_bt.intersect(promoters, wo=True, sorted=True)
df = res.to_dataframe()
df.index = df.chrom + ':' + df.start.astype(str) + '-' + df.end.astype(str)
df['gene'] = transcript_to_gene[df.thickStart.apply(lambda x: x.split('_')[0])].values
df['val'] = pd.Series(df.index, index=df.index) + ':' + df.gene
df = df.drop_duplicates(subset='val')
gene_variants['promoter_same_gene'] = False
gene_variants.ix[set(df.val) & set(gene_variants.index), 'promoter_same_gene'] = True
df['promoter'] = True
df['variant'] = df.index
df = df.drop_duplicates(subset='variant')
gene_variants = gene_variants.merge(df[['promoter']], left_on='location', right_index=True, how='left')
gene_variants.ix[gene_variants.promoter.isnull(), 'promoter'] = False
# Per-gene flags for UTR and promoter hits, mirroring the exon flags above.
g = set(gene_variants.ix[gene_variants.utr, 'gene_id'])
df = pd.DataFrame(True, index=g, columns=['gene_has_utr_variant'])
gene_variants = gene_variants.merge(df, left_on='gene_id', right_index=True, how='left')
gene_variants.ix[gene_variants.gene_has_utr_variant.isnull(), 'gene_has_utr_variant'] = False
g = set(gene_variants.ix[gene_variants.utr_same_gene, 'gene_id'])
df = pd.DataFrame(True, index=g, columns=['gene_has_utr_same_gene_variant'])
gene_variants = gene_variants.merge(df, left_on='gene_id', right_index=True, how='left')
gene_variants.ix[gene_variants.gene_has_utr_same_gene_variant.isnull(),
                 'gene_has_utr_same_gene_variant'] = False
g = set(gene_variants.ix[gene_variants.promoter_same_gene, 'gene_id'])
df = pd.DataFrame(True, index=g, columns=['gene_has_promoter_same_gene_variant'])
gene_variants = gene_variants.merge(df, left_on='gene_id', right_index=True, how='left')
gene_variants.ix[gene_variants.gene_has_promoter_same_gene_variant.isnull(),
                 'gene_has_promoter_same_gene_variant'] = False
```
## Maurano et al. 2015 Data
I'm going to grab some data from [Maurano *et al.* 2015](http://www.nature.com/ng/journal/v47/n12/full/ng.3432.html).
The zip file seems to be corrupted. I was able to manually download it on my Mac
and decompress it using Archive Utility, but I couldn't get it to work from the
command line, so this file has to be downloaded by hand.
```
# This has all of the variants tested for imbalance and their significance.
# This has all of the variants tested for imbalance and their significance.
maurano_res = pd.read_table('http://www.nature.com/ng/journal/v47/n12/extref/ng.3432-S5.txt')
# This has SNVs from dbSNP 138 predicted to affect TF binding.
# The zip can't be fetched programmatically (corrupt download), so it must
# already be decompressed into private_outdir by hand.
fn = os.path.join(private_outdir, 'ng.3432-S7')
if not os.path.exists(fn):
    print('Download ng.3432-S7, decompress, and add to private_outdir.')
    print('http://www.nature.com/ng/journal/v47/n12/extref/ng.3432-S7.zip')
else:
    maurano_tf_disrupt = pd.read_table(fn)
# Interval lengths: how many records span more than 1 bp (i.e. are not SNVs)?
se = maurano_res.chromEnd - maurano_res.chromStart
se[se > 1].shape
```
The Maurano results are all SNVs.
```
# Confirm every record is a distinct position, then index by "chrom:start-end".
print(len(set(maurano_res.chrom + ':' + maurano_res.chromStart.astype(str) + '-' +
              maurano_res.chromEnd.astype(str))))
print(maurano_res.shape[0])
maurano_res.index = (maurano_res.chrom + ':' + maurano_res.chromStart.astype(str) +
                     '-' + maurano_res.chromEnd.astype(str))
```
The Maurano results are all unique variants.
```
# Peek at one row to check the coordinate convention (appears zero-based).
maurano_res.head(1)
```
It seems that the Maurano data is in zero-based coordinates.
```
# Suffix columns to mark provenance, left-join by position, drop redundant coordinates.
maurano_res.columns = ['{}_maurano'.format(c) for c in maurano_res.columns]
gene_variants = gene_variants.merge(maurano_res, left_on='location', right_index=True, how='left')
gene_variants = gene_variants.drop(['chrom_maurano', 'chromStart_maurano', 'chromEnd_maurano'], axis=1)
```
I'll also add in the predicted TF disruptions.
```
# Same pattern as above for the predicted TF-binding disruptions table.
maurano_tf_disrupt.columns = ['{}_maurano_tf'.format(x) for x in maurano_tf_disrupt.columns]
maurano_tf_disrupt.index = (maurano_tf_disrupt.chrom_maurano_tf + ':' +
                            maurano_tf_disrupt.snpChromStart_maurano_tf.astype(str) +
                            '-' + maurano_tf_disrupt.snpChromEnd_maurano_tf.astype(str))
gene_variants = gene_variants.merge(maurano_tf_disrupt, left_on='location',
                                    right_index=True, how='left')
gene_variants = gene_variants.drop(['chrom_maurano_tf', 'snpChromStart_maurano_tf',
                                    'snpChromEnd_maurano_tf'], axis=1)
```
## Roadmap DNase
```
# Intersect variants with Roadmap DNase peaks for stem-cell/iPSC lines;
# results are cached in outdir so the downloads only happen once.
out = os.path.join(outdir, 'roadmap_dnase.tsv')
if not os.path.exists(out):
    # Scrape the narrowPeak file listing from the Roadmap web directory.
    url = ('http://egg2.wustl.edu/roadmap/data/byFileType'
           '/peaks/consolidated/narrowPeak/')
    website = urllib2.urlopen(url)
    html = website.read()
    files = re.findall('href="(E\d\d\d-DNase.macs2.narrowPeak.gz)"', html)
    roadmap_dnase_res = pd.DataFrame(
        -1, index=[x.split('-')[0] for x in files],
        columns=['odds_ratio', 'pvalue'])
    urls = ['http://egg2.wustl.edu/roadmap/data/byFileType/peaks/consolidated/narrowPeak/{}'.format(n)
            for n in files]
    # Restrict to the stem cell / iPSC lines of interest.
    lines = ['iPS-15b Cell Line', 'iPS-18 Cell Line', 'iPS-20b Cell Line',
             'iPS DF 6.9 Cell Line', 'iPS DF 19.11 Cell Line', 'H1 Cell Line',
             'H9 Cell Line']
    urls = [x for x in urls if roadmap_ids[os.path.split(x.split('-')[0])[1]] in lines]
    # One boolean column per line: does the variant fall in a DNase peak?
    df = pd.DataFrame(False, index=set(gene_variants.location), columns=lines)
    for url in urls:
        line = roadmap_ids[os.path.split(url)[1].split('-')[0]]
        bt = pbt.BedTool(cpb.general.read_gzipped_text_url(url), from_string=True).sort()
        res = gene_variants_bt.intersect(bt, wa=True)
        tdf = res.to_dataframe()
        df.ix[(tdf.chrom + ':' + tdf.start.astype(str) + '-' + tdf.end.astype(str)).values, line] = True
    # Drop lines with no overlaps at all, then normalize column names.
    df = df.ix[:, df.sum() > 0]
    df.columns = [c.replace(' Cell Line', '').replace(' ', '_') + '_roadmap_dnase' for c in df.columns]
    df.to_csv(out, sep='\t')
else:
    df = pd.read_table(out, index_col=0)
gene_variants = gene_variants.merge(df, left_on='location', right_index=True, how='left')
```
## ENCODE DNase
```
# Intersect variants with ENCODE DNase peaks (stem cell / iPSC biosamples); cached.
out = os.path.join(outdir, 'encode_dnase.tsv')
if not os.path.exists(out):
    encode_dnase = pd.read_table(os.path.join(ciepy.root, 'output',
                                              'functional_annotation_analysis',
                                              'encode_dnase.tsv'), index_col=0)
    bs_types = ['stem cell', 'induced pluripotent stem cell line']
    encode_dnase = encode_dnase[encode_dnase.biosample_type.apply(lambda x: x in bs_types)]
    # Column names: "<cell type>_<experiment ID>_dnase".
    cols = (encode_dnase.cell_type.apply(lambda x: x.replace('induced pluripotent stem cell', 'iPSC')) +
            '_' + encode_dnase.index + '_dnase')
    df = pd.DataFrame(False, index=set(gene_variants.location), columns=cols)
    encode_dnase['col'] = cols
    for i in encode_dnase.index:
        bt = pbt.BedTool(cpb.general.read_gzipped_text_url(encode_dnase.ix[i, 'narrowPeak_url']),
                         from_string=True).sort()
        res = gene_variants_bt.intersect(bt, wa=True)
        tdf = res.to_dataframe()
        df.ix[(tdf.chrom + ':' + tdf.start.astype(str) + '-' + tdf.end.astype(str)).values,
              encode_dnase.ix[i, 'col']] = True
    df.to_csv(out, sep='\t')
else:
    df = pd.read_table(out, index_col=0)
gene_variants = gene_variants.merge(df, left_on='location', right_index=True, how='left')
```
## ENCODE Transcription Factor ChIP-Seq
```
# Intersect variants with ENCODE TF ChIP-seq peaks, keeping only TFs that also
# have motifs available so overlaps can later be tested for motif disruption.
encode_tf_chip_seq = pd.read_table(os.path.join(ciepy.root, 'output',
                                                'functional_annotation_analysis',
                                                'encode_stem_cell_chip_seq.tsv'), index_col=0)
encode_tf_chip_seq = encode_tf_chip_seq.drop_duplicates(subset='target')
s = set(motif_info.tf) & set(encode_tf_chip_seq.target)
encode_tf_chip_seq = encode_tf_chip_seq[encode_tf_chip_seq.target.apply(lambda x: x in s)]
out = os.path.join(outdir, 'encode_tf_chip_seq.tsv')
if not os.path.exists(out):
    # Boolean matrix: variant location x TF target, True when inside a peak.
    df = pd.DataFrame(False, index=set(gene_variants.location), columns=encode_tf_chip_seq.target.values)
    for i in encode_tf_chip_seq.index:
        bt = pbt.BedTool(cpb.general.read_gzipped_text_url(encode_tf_chip_seq.ix[i, 'narrowPeak_url']),
                         from_string=True).sort()
        res = gene_variants_bt.intersect(bt, wa=True)
        tdf = res.to_dataframe()
        df.ix[(tdf.chrom + ':' + tdf.start.astype(str) + '-' + tdf.end.astype(str)).values,
              encode_tf_chip_seq.ix[i, 'target']] = True
    peak_overlap = df
    peak_overlap.to_csv(out, sep='\t')
else:
    peak_overlap = pd.read_table(out, index_col=0)
#gene_variants = gene_variants.merge(df, left_on='location', right_index=True, how='left')
n = sum(peak_overlap.sum(axis=1) > 0)
print('{:,} of {:,} variants overlap at least one peak.'.format(n, peak_overlap.shape[0]))
```
## TF Motif Disruption
I'd like to identify variants that overlap a binding site for a transcription
factor and also disrupt a motif for that TF.
Things get a bit confusing here for a number of reasons. ChIP-seq experiments
are performed on specific proteins. Sometimes these proteins have the same name as
the transcription factor like SRF for instance. But sometimes the protein that
is pulled down is part of a complex. For instance, ChIP-seq experiments for
JUND, JUN, FOSL1, and others all target the transcription factor AP1 since these
proteins are subunits of AP1.
Another complication is that the motifs discovered by [Kheradpour et al.](http://nar.oxfordjournals.org/content/42/5/2976.full.pdf)
do not always correspond to the particular transcription factor the motif is named after.
For instance, 20 different
"factor groups" have motifs that match the TPA DNA response element (TRE) motif.
For instance, AP1_disc3 matches the TRE motif. This could happen for a number of reasons
(outlined in the paper) such as the two TF's cobind or actually both use the motif etc.
For my purposes here, I need to make a few decisions. Let's use JUND/AP1 as an example.
There is an ENCODE experiment for JUND. There are also motifs discovered using JUND
ChIP-seq data. However, the motifs that are found under JUND peaks are assigned to
AP1 since that's the TF. So let's say we
have a variant that overlaps a JUND peak from ENCODE. Should I test all AP1 motifs for
disruption, even if they were discovered in a different ChIP-seq data set (like from FOSL1)?
Testing all AP1 motifs also means that I'm testing motifs for other TFs (like AP1_disc3 which
is actually the motif for TRE as they mention in the paper). Looking at the data on
http://compbio.mit.edu/encode-motifs/ for AP1, this seems to be unavoidable. Few motifs
are unique to AP1, and those that are unique have much weaker enrichment in the ChIP-seq
peaks.
I think for my purposes here, I'll be inclusive about testing motifs. A disrupted motif
is evidence of some kind of effect.
```
# Keep only motifs whose TF has ChIP-seq data, and all motifs sharing those targets.
t = motif_info[motif_info.tf.apply(lambda x: x in encode_chip_seq.target.values)]
motif_info = motif_info[motif_info.target.apply(lambda x: x in t.target.values)]
# I'll convert the peak_overlaps into TF overlaps: collapse per-experiment
# columns down to one boolean column per TF target.
tdf = peak_overlap.copy(deep=True)
se = pd.Series(dict(zip(motif_info.tf, motif_info.target)))
tdf.columns = [se[x] for x in tdf.columns]
d = {}
for c in tdf.columns:
    t = tdf[c]
    # Duplicate column labels select a 2-D frame; OR those columns together.
    if len(t.shape) == 2:
        d[c] = t[c].sum(axis=1) > 0
    else:
        d[c] = t
tf_overlap = pd.DataFrame(d)
# Score motif disruptions for every variant inside a TF peak; cached to disk.
out = os.path.join(outdir, 'motif_diffs.tsv')
if not os.path.exists(out):
    tdf = gene_variants[['location', 'ref', 'alt']]# + tf_cols]
    tdf = tdf.drop_duplicates()
    tdf.index = tdf.location
    tdf = tdf.drop('location', axis=1)
    tf_overlap_yes = tf_overlap[tf_overlap.sum(axis=1) > 0]
    tdf = tdf.ix[tf_overlap_yes.index]
    # For each variant, list the TF targets whose peaks it overlaps...
    d = {}
    for i in tf_overlap_yes.index:
        d[i] = list(tf_overlap_yes.ix[i][tf_overlap_yes.ix[i]].index)
    se = pd.Series(d)
    se = se[tdf.index]
    # ...then expand targets to the motif IDs to test for disruption.
    target_to_motif = pd.Series(motif_info.index, index=motif_info.target)
    tdf['motifs'] = se.apply(lambda x: list(target_to_motif[x]))
    # Fan the per-variant scoring out over an ipyparallel cluster.
    from ipyparallel import Client
    parallel_client = Client(profile='parallel')
    dview = parallel_client[:]
    print('Cluster has {} engines.'.format(len(parallel_client.ids)))
    with dview.sync_imports():
        import cdpybio
        import cardipspy
    %px cpb = cdpybio
    %px cpy = cardipspy
    dview.push(dict(tdf=tdf));
    dview.push(dict(matrices=matrices));
    # matrices_f = {k:matrices[k] for k in motif_info.index}
    # dview.push(dict(matrices_f=matrices_f));
    # res = dview.map_sync(lambda i: cpb.moodsext.find_motif_disruptions(
    #     i, tdf.ix[i, 'ref'], tdf.ix[i, 'alt'], cpy.hg19,
    #     matrices_f), tdf.index)
    res = dview.map_sync(lambda i: cpb.moodsext.find_motif_disruptions(
        i, tdf.ix[i, 'ref'], tdf.ix[i, 'alt'], cpy.hg19,
        {k:matrices[k] for k in tdf.ix[i, 'motifs']}), tdf.index)
    # Collect non-empty results into a variant x motif matrix of score differences.
    motif_d = pd.DataFrame(index=tdf.index, columns=motif_info.index)
    a = []
    b = []
    for i,p in enumerate(tdf.index):
        if res[i].shape[0] > 0:
            a.append(p)
            b.append(res[i])
    d = dict(zip(a,b))
    p = pd.Panel(d)
    motif_d = p.ix[:, :, 'score_diff'].T
    motif_d.to_csv(out, sep='\t')
else:
    motif_d = pd.read_table(out, index_col=0)
```
For `motif_d`, the value is reference score minus alternate score. So a positive
value means the reference matched the motif better.
I'm not sure what score difference constitutes a disruption. Let's
take a look at the distribution of differences.
```
# Distribution of (ref score - alt score) over all motif/variant pairs;
# the vertical lines at +/-2.5 mark the disruption threshold chosen below.
pd.Series(motif_d.values.flatten()).dropna().hist(bins=100)
ymin, ymax = plt.ylim()
plt.vlines(-2.5, ymin, ymax)
plt.vlines(2.5, ymin, ymax)
plt.ylabel('Number of differences')
plt.xlabel('Score difference');
```
It seems that a score difference greater than 2.5 in magnitude probably
represents a pretty big effect. I'll say these are disruptions.
```
# Call a motif "disrupted" when |score difference| >= 2.5 and keep only
# variants that disrupt at least one motif.
motif_disrupt = motif_d.abs() >= 2.5
motif_disrupt = motif_disrupt[motif_disrupt.sum(axis=1) > 0]
motif_disrupt.to_csv(os.path.join(outdir, 'motif_disruption.tsv'), sep='\t')
#motif_disrupt.columns = ['{}_motif_disrupted'.format(x) for x in motif_disrupt.columns]
#gene_variants = gene_variants.merge(motif_disrupt, left_on='location', right_index=True, how='left')
print('{:,} variants disrupt a motif.'.format(motif_disrupt.shape[0]))
# I'll convert the peak_overlaps into TF overlaps: collapse motif columns
# to one boolean column per TF target (OR across duplicate labels).
tdf = motif_disrupt.copy(deep=True)
tdf.columns = [motif_info_full.ix[x, 'target'] for x in tdf.columns]
d = {}
for c in tdf.columns:
    t = tdf[c]
    if len(t.shape) == 2:
        d[c] = t[c].sum(axis=1) > 0
    else:
        d[c] = t
tf_disrupt = pd.DataFrame(d)
tf_disrupt.to_csv(os.path.join(outdir, 'tf_disruption.tsv'), sep='\t')
# Flag variants in gene_variants that disrupt a TF motif / overlap a TF peak.
t = pd.DataFrame([tf_disrupt.index], index=['location']).T
t['tf_disrupt'] = True
gene_variants = gene_variants.merge(t, how='outer')
gene_variants.ix[gene_variants['tf_disrupt'].isnull(), 'tf_disrupt'] = False
t = pd.DataFrame([tf_overlap[tf_overlap.sum(axis=1) > 0].index], index=['location']).T
t['tf_overlap'] = True
gene_variants = gene_variants.merge(t, how='outer')
gene_variants.ix[gene_variants['tf_overlap'].isnull(), 'tf_overlap'] = False
# Rebuild the variant -> candidate-motifs table (same logic as the cached block above).
tdf = gene_variants[['location', 'ref', 'alt']]# + tf_cols]
tdf = tdf.drop_duplicates()
tdf.index = tdf.location
tdf = tdf.drop('location', axis=1)
tf_overlap_yes = tf_overlap[tf_overlap.sum(axis=1) > 0]
tdf = tdf.ix[tf_overlap_yes.index]
d = {}
for i in tf_overlap_yes.index:
    d[i] = list(tf_overlap_yes.ix[i][tf_overlap_yes.ix[i]].index)
se = pd.Series(d)
se = se[tdf.index]
target_to_motif = pd.Series(motif_info.index, index=motif_info.target)
tdf['motifs'] = se.apply(lambda x: list(target_to_motif[x]))
# Summary table of which peak-overlapping variants actually disrupt a motif.
motif_disrupt_info = tdf.copy(deep=True)
motif_disrupt_info['disrupt'] = False
motif_disrupt_info.ix[motif_disrupt.index, 'disrupt'] = True
motif_disrupt_info['overlap_disrupt'] = False
motif_disrupt_info['not_overlap_disrupt'] = False
t = motif_disrupt_info[motif_disrupt_info.disrupt == True]
# How many TFs does each variant disrupt?
tf_disrupt.sum(axis=1).hist(bins=range(0, 25))
```
## CNV eQTLS
I'll mark which genes have an overlapping significant CNV.
```
# Flag genes whose body is overlapped by a significant CNV, then persist the
# fully annotated association table.
t = pd.DataFrame(index=list(set(cnv_gv[cnv_gv.cnv_overlaps_gene].gene_id)))
t['cnv_eqtl'] = True
gene_variants = gene_variants.merge(t, how='outer', left_on='gene_id', right_index=True)
gene_variants.ix[gene_variants.cnv_eqtl.isnull(), 'cnv_eqtl'] = False
fn = os.path.join(outdir, 'gene_variants_annotated.pickle')
gene_variants.to_pickle(fn)
```
## Feature Exploration
```
# High-level counts of associations, variants, and NMD/CNV gene groups.
n = gene_variants.shape[0]
print('{:,} total SNV and indel associations.'.format(n))
n = len(set(gene_variants.location))
print('{:,} total distinct SNV and indels with significant associations.'.format(n))
n = len(set(gene_variants[gene_variants.nmd == True].gene_id))
print('{:,} genes with a significant NMD variant.'.format(n))
n = len(set(gene_variants[gene_variants.cnv_eqtl == True].gene_id))
print('{:,} genes with a significant overlapping CNV.'.format(n))
n = len(set(gene_variants[gene_variants.cnv_eqtl == True].gene_id) |
        set(gene_variants[gene_variants.nmd == True].gene_id))
print('{:,} total in these groups.'.format(n))
gene_variants.variant_type.value_counts()
# Genes with exactly one significant variant.
n = (gene_variants.gene_id.value_counts() > 1).value_counts()[False]
print('{:,} of {:,} genes have only one significant variant.'.format(n, len(set(gene_variants.gene_id))))
gene_variants.gene_id.value_counts().hist(bins=100, log=True)
plt.ylabel('Number of genes')
plt.xlabel('Number of variants');
# Per-gene counts of exonic / UTR variants (overall and within the gene itself).
gene_gb = gene_variants.groupby('gene_id')
num_genes = len(set(gene_variants.gene_id))
n = (gene_gb.exonic.sum() > 0).value_counts()[True]
print('{:,} of {:,} genes have at least one significant exonic variant.'.format(
    n, num_genes))
m = (gene_gb.exonic_same_gene.sum() > 0).value_counts()[True]
print('{:,} of these {:,} genes have at least one significant variant in their own exon.'.format(
    m, n))
fig, axs = plt.subplots(1, 2, figsize=[12, 4])
ax = gene_gb.exonic.sum().hist(log=True, bins=range(0, int(gene_gb.exonic.sum().max())), ax=axs[0])
ax.set_ylabel('Number of exonic variants')
ax.set_xlabel('Number of genes')
ax = gene_gb.exonic_same_gene.sum().hist(log=True, bins=range(0, int(gene_gb.exonic.sum().max())), ax=axs[1])
ax.set_ylabel('Number of exonic variants in same gene')
ax.set_xlabel('Number of genes');
n = (gene_gb.utr.sum() > 0).value_counts()[True]
print('{:,} of {:,} genes have at least one significant UTR variant.'.format(
    n, len(set(gene_variants.gene_id))))
m = (gene_gb.utr_same_gene.sum() > 0).value_counts()[True]
print('{:,} of these {:,} genes have at least one significant variant in their own UTR.'.format(
    m, n))
fig, axs = plt.subplots(1, 2, figsize=[12, 4])
ax = gene_gb.utr.sum().hist(log=True, bins=range(0, int(gene_gb.utr.sum().max())), ax=axs[0])
ax.set_ylabel('Number of UTR variants')
ax.set_xlabel('Number of genes')
ax = gene_gb.utr_same_gene.sum().hist(log=True, bins=range(0, int(gene_gb.utr.sum().max())), ax=axs[1])
ax.set_ylabel('Number of UTR variants in same gene')
ax.set_xlabel('Number of genes');
# Fraction of associations overlapping each DNase annotation column.
for c in [x for x in gene_variants.columns if 'dnase' in x]:
    vc = gene_variants[c].value_counts()
    print('{}\t{:.2f}%'.format(c, float(vc[True]) / vc.sum()))
vc = gene_variants.ix[gene_variants['H1-hESC_ENCSR000EMU_dnase'] == True, 'gene_id'].value_counts()
vc.hist(bins=range(0, vc.max() + 1))
plt.ylabel('Number of genes')
plt.xlabel('Number of variants overlapping DNase peak');
# Maurano et al. significance annotations for our variants vs. all assayed.
gene_variants['significance.level_maurano'].value_counts()
maurano_res['significance.level_maurano'].value_counts()
gene_variants['q.value_maurano'].hist(bins=100)
plt.ylabel('Number of variants')
plt.xlabel('Maurano $q$-value');
# Add the smallest positive q-value so zeros survive the log transform.
se = gene_variants['q.value_maurano'].dropna()
se += se[se > 0].min()
(-np.log10(se)).hist(bins=100)
plt.ylabel('Number of variants')
plt.xlabel('Maurano $-\log_{10}q$-value');
```
### Single variant genes
I'm going to look at genes for which there is only one significant
variant. In theory, these should often be the causal variants.
```
# Restrict to genes with exactly one significant variant (likely causal).
se = gene_variants.gene_id.value_counts() == 1
genes = set(se[se].index)
tdf = gene_variants[gene_variants.gene_id.apply(lambda x: x in genes)]
n = tdf.exonic.value_counts()[True]
print('{} of {} genes have an exonic variant.'.format(n, tdf.shape[0]))
n = tdf.utr.value_counts()[True]
print('{} of {} genes have a UTR variant.'.format(n, tdf.shape[0]))
n = tdf['H1-hESC_ENCSR000EMU_dnase'].value_counts()[True]
print('{} of {} genes have a variant in H1-hESC_ENCSR000EMU DNase peaks.'.format(n, tdf.shape[0]))
# Compare Maurano significance-level fractions across variant sets.
maurano_res['significance.level_maurano'].value_counts() / maurano_res.shape[0]
(gene_variants['significance.level_maurano'].value_counts() /
 gene_variants['significance.level_maurano'].value_counts().sum())
tdf['significance.level_maurano'].value_counts() / tdf['significance.level_maurano'].value_counts().sum()
tdf['significance.level_maurano'].value_counts()
# Same comparison using the most significant variant per gene.
t = gene_variants.sort_values(by=['gene_id', 'pvalue'])
t = t.drop_duplicates(subset='gene_id')
t['significance.level_maurano'].value_counts() / t['significance.level_maurano'].value_counts().sum()
n = len(set(tf_disrupt.index) & set(tdf.location))
print('The eQTL variant for {} genes disrupts a TF.'.format(n))
n = len(set(tf_disrupt.index) & set(tdf.location) &
        set(tdf.ix[tdf['q.value_maurano'] < 0.05, 'location']))
print('{} of these are significant in Maurano et al.'.format(n))
```
## Fine Mapping
I'll start by removing genes that are predicted to undergo NMD. I may also want to remove
genes where a CNV overlaps the gene.
```
# Filter out genes with NMD variants or overlapping CNV eQTLs, then
# characterize the remaining candidate (putative eQTN, "peQTN") variants.
n = qvalues.perm_sig.sum() - len(set(gene_variants.gene_id))
print('{} genes only had CNV associations.'.format(n))
gene_variants_f = gene_variants[(gene_variants.gene_has_nmd_variant == False)]
a = len(set(gene_variants_f.gene_id))
b = len(set(gene_variants.gene_id))
print('{:,} of {:,} genes don\'t have a significant NMD variant.'.format(a, b))
print('{:,} genes have a significant NMD variant.'.format(b - a))
a = len(set(gene_variants_f.gene_id))
gene_variants_f = gene_variants_f[gene_variants_f.cnv_eqtl == False]
b = len(set(gene_variants_f.gene_id))
print('{:,} of {:,} genes don\'t have a significant overlapping CNV'.format(b, a))
print('{:,} genes have a significant overlapping CNV'.format(a - b))
n = qvalues.perm_sig.sum() - len(set(gene_variants_f.gene_id))
print('Removed {} genes due to CNV or NMD eQTLs.'.format(n))
print('{:,} remaining variants.'.format(len(set(gene_variants_f.location))))
print('{:,} remaining genes.'.format(len(set(gene_variants_f.gene_id))))
no_cnv_nmd_vars = gene_variants_f.location.drop_duplicates()
gene_variants_f.to_csv(os.path.join(outdir, 'no_cnv_nmd_vars_gv.tsv'), sep='\t')
a = peak_overlap.shape[1]
b = tf_overlap.shape[1]
print('Overlapped variants with {} ENCODE experiments for {} TFs.'.format(a, b))
n = sum(peak_overlap.ix[gene_variants_f.location.drop_duplicates()].sum(axis=1) > 0)
print(('{:,} of {:,} variants for {:,} eGenes overlapped at least one peak'.format(
    n, gene_variants_f.location.drop_duplicates().shape[0],
    len(set(gene_variants_f[gene_variants_f.tf_overlap].gene_id)))))
n = sum(tf_disrupt.ix[gene_variants_f.location.drop_duplicates()].sum(axis=1) > 0)
print('{:,} of {:,} variants disrupted at least one TF.'.format(
    n, gene_variants_f.location.drop_duplicates().shape[0]))
# Keep only variants predicted to disrupt a TF motif: the peQTNs.
gene_variants_f = gene_variants_f[gene_variants_f.tf_disrupt]
len(set(gene_variants_f.gene_id))
tf_overlap.sum().plot.bar()
plt.ylabel('Number of variants that overlap peak');
tf_disrupt.sum().plot.bar()
plt.ylabel('Number of variants that disrupt motif');
(tf_disrupt.sum() / tf_overlap.sum()).plot.bar()
plt.ylabel('Percent peaks with disrupted motif');
tf_disrupt['NANOG'].sum()
gene_variants_f['significance.level_maurano'].value_counts() / gene_variants_f['significance.level_maurano'].value_counts().sum()
gene_variants['significance.level_maurano'].value_counts() / gene_variants['significance.level_maurano'].value_counts().sum()
# a: peQTNs assayed by Maurano; b: filtered eQTL variants assayed by Maurano.
# NOTE(review): b is built from no_cnv_nmd_vars, which contains the peQTNs,
# so b appears to include a even though later prints treat b as "not putative
# eQTNs" — confirm whether b should be b - a.
a = set(gene_variants_f.location) & set(maurano_res.index)
b = set(no_cnv_nmd_vars) & set(maurano_res.index)
print('Maurano assayed {:,} of the eQTL variants.'.format(len(a) + len(b)))
print ('{:.2f}% of all variants tested in Maurano paper were significant (q < 0.05).'.format(
    sum(maurano_res['q.value_maurano'] < 0.05) / float(maurano_res.shape[0]) * 100))
print('{:,} of {:,} ({:.2f}%) putative eQTNs were significant in Maurano (q < 0.05).'.format(
    sum(maurano_res.ix[a, 'q.value_maurano'] < 0.05), len(a),
    sum(maurano_res.ix[a, 'q.value_maurano'] < 0.05) / float(len(a)) * 100))
print('{:,} of {:,} ({:.2f}%) sig. variants that were not putative eQTNs were significant in Maurano (q < 0.05).'.format(
    sum(maurano_res.ix[b, 'q.value_maurano'] < 0.05), len(b),
    sum(maurano_res.ix[b, 'q.value_maurano'] < 0.05) / float(len(b)) * 100))
# Fisher test: peQTNs vs. all Maurano variants.
pe_sig = sum(maurano_res.ix[a, 'q.value_maurano'] < 0.05)
pe_not_sig = len(a) - pe_sig
not_pe_sig = sum(maurano_res['q.value_maurano'] < 0.05)
not_pe_not_sig = maurano_res.shape[0] - not_pe_sig
odds, p = stats.fisher_exact([[pe_sig, pe_not_sig], [not_pe_sig, not_pe_not_sig]])
print('peQTNs enriched for altering TF binding relative to all Maurano variants '
      '(odds={:.4f}, p={:.4e}, Fisher).'.format(odds, p))
# Fisher test: peQTNs vs. non-peQTN eQTL variants.
pe_sig = sum(maurano_res.ix[a, 'q.value_maurano'] < 0.05)
pe_not_sig = len(a) - pe_sig
not_pe_sig = sum(maurano_res.ix[b, 'q.value_maurano'] < 0.05)
not_pe_not_sig = len(b) - not_pe_sig
odds, p = stats.fisher_exact([[pe_sig, pe_not_sig], [not_pe_sig, not_pe_not_sig]])
print('peQTNs enriched for altering TF binding relative to non-peQTN eQTL variants '
      '(odds={:.4f}, p={:.4e}, Fisher).'.format(odds, p))
gene_variants['q.value_maurano'].hist()
gene_variants_f['q.value_maurano'].hist()
# peQTNs per eGene.
n = len(set(gene_variants_f.gene_id))
a = sum(gene_variants_f.gene_id.value_counts() == 1)
print('{:,} of {:,} ({:.2f}%) eGenes have one putative eQTN.'.format(a, n, a / float(n) * 100))
a = sum(gene_variants_f.gene_id.value_counts() <= 5)
print('{:,} of {:,} ({:.2f}%) eGenes have five or less putative eQTNs.'.format(a, n, a / float(n) * 100))
vc = gene_variants_f.gene_id.value_counts().value_counts().sort_index()
vc.plot(kind='bar')
plt.ylabel('Number of eGenes')
plt.xlabel('Number of putative eQTNs per gene');
# Number of Roadmap DNase annotations each peQTN overlaps.
gene_variants_f['roadmap_dnase_num'] = \
    gene_variants_f[[x for x in gene_variants_f.columns
                     if '_roadmap_dnase' in x]].sum(axis=1)
se = gene_variants_f.roadmap_dnase_num.value_counts()
se = se[range(se.shape[0])]
se.plot(kind='bar')
plt.ylabel('Number of variants')
plt.xlabel('Number of DNase peaks overlapping');
a = gene_variants_f[gene_variants_f.roadmap_dnase_num > 0].drop_duplicates(subset='location').shape[0]
b = gene_variants_f.drop_duplicates(subset='location').shape[0]
print('{:,} of {:,} ({:.2f}%) putative eQTNs overlap a DHS.'.format(a, b, a / float(b) * 100))
a = gene_variants_f[gene_variants_f.roadmap_dnase_num == 4].drop_duplicates(subset='location').shape[0]
b = gene_variants_f.drop_duplicates(subset='location').shape[0]
print('{:,} of {:,} ({:.2f}%) putative eQTNs overlap DHS present in all four lines.'.format(a, b, a / float(b) * 100))
```
[Wen et al. 2015](http://journals.plos.org/plosgenetics/article?id=10.1371/journal.pgen.1005176) says 50% of cis-eQTLs are concentrated within 20kb of the TSS.
```
# Distance-to-TSS summary and comparison to the per-gene lead variants.
n = sum(gene_variants_f.tss_dist_abs < 20000)
p = float(n) / gene_variants_f.shape[0]
print('{:,} of {:,} ({:.1f}%) putative causal variants are within 20kb of the nearest TSS.'.format(
    n, gene_variants_f.shape[0], p * 100))
lead_variants_single_f.head()
n = len(set(gene_variants_f.location + ':' + gene_variants_f.gene_id) & set(lead_variants_single_f.index))
print('{:,} of my {:,} putative causal variants ({:.2f}%) are also the most significant variants'.format(
    n, gene_variants_f.shape[0], float(n) / gene_variants_f.shape[0] * 100))
a = len(set(gene_variants_f.gene_id))
print('The lead variant is a peQTN for {:,} of {:,} ({:.2f}%) genes with peQTNs.'.format(n, a, n / float(a) * 100))
# Index peQTNs by location:gene and write the final table.
gene_variants_f.index = gene_variants_f.location + ':' + gene_variants_f.gene_id
gene_variants_f.to_csv(os.path.join(outdir, 'peqtns.tsv'), sep='\t')
# Lazily populated by get_snpsnap() below the first time it's needed.
snpsnap = None
def get_independent_snvs(df):
    """LD-prune the given variant table to a set of independent SNVs.

    Parameters
    ----------
    df : pandas.DataFrame
        Must contain 'location', 'chrom', 'start', 'end', and 'pvalue'
        columns. Duplicate locations are dropped before pruning.

    Returns
    -------
    pandas.DataFrame
        Independent variants chosen by cpb.analysis.ld_prune (the 'pvalue'
        column is removed). Pruning is restricted to SNVs present in the
        module-level `snpsnap` table, which must be populated first.
    """
    # Per-chromosome 1KGP EUR LD bed files, keyed by chromosome name.
    ld_beds = glob.glob('/publicdata/1KGP_20151103/LD_20151110/tabix/*EUR*.bed.gz')
    ld_beds = dict(zip([os.path.split(x)[1].split('_')[0] for x in ld_beds], ld_beds))
    df = df.drop_duplicates(subset=['location'])
    tdf = df[['chrom', 'start', 'end', 'pvalue']]
    # NOTE(review): 'start' is assigned from `end`, not `start` — presumably
    # intentional for single-base positions keyed by end coordinate; confirm
    # against what cpb.analysis.ld_prune expects.
    tdf['start'] = tdf.end.astype(int)
    tdf['end'] = tdf.end.astype(int)
    tdf.index = tdf.chrom + ':' + tdf.end.astype(int).astype(str)
    indep = cpb.analysis.ld_prune(tdf, ld_beds, snvs=list(snpsnap.index)).drop('pvalue', axis=1)
    return indep
def get_snpsnap():
    """Load SNPsnap EUR SNP annotations and bin them for matching.

    Reads every parsed SNPsnap table, keeps MAF, distance to the nearest
    protein-coding gene, and number of LD (r^2 >= 0.8) friends, then bins
    each annotation so variants can later be stratified on these columns.

    Returns
    -------
    pandas.DataFrame
        Indexed by 'chr'-prefixed SNP location with categorical columns
        'maf_bin', 'ld_bin', and 'dist_bin'.
    """
    keep_cols = ['snp_maf', 'dist_nearest_gene_snpsnap_protein_coding',
                 'friends_ld08']
    pieces = []
    for tab_fn in glob.glob('/publicdata/SNPsnap_20151104/EUR_parse/*.tab'):
        part = pd.read_table(tab_fn, index_col=0, low_memory=False)[keep_cols]
        # SNPsnap locations lack the 'chr' prefix used elsewhere.
        part.index = 'chr' + part.index
        pieces.append(part)
    snps = pd.concat(pieces)
    snps['maf_bin'] = pd.cut(snps.snp_maf, np.arange(0, 0.55, 0.05))
    # Log-scale the heavy-tailed LD-friend and gene-distance counts before binning.
    snps['ld_bin'] = pd.cut(np.log10(snps.friends_ld08.replace(np.nan, 0) + 1), 10)
    snps['dist_bin'] = pd.cut(
        np.log10(snps.dist_nearest_gene_snpsnap_protein_coding + 1), 10)
    return snps[['maf_bin', 'ld_bin', 'dist_bin']]
# Write LD-independent peQTNs as BED/TSV (computed once, cached on disk).
fn = os.path.join(outdir, 'independent_fine_mapped_variants.bed')
if not os.path.exists(fn):
    if snpsnap is None:
        snpsnap = get_snpsnap()
    indep = get_independent_snvs(gene_variants_f)
    indep.to_csv(os.path.join(outdir, 'independent_fine_mapped_variants.tsv'), sep='\t')
    s = '\n'.join(indep[['chrom', 'start', 'end']].apply(
        lambda x: '\t'.join([str(y) for y in x.values]), axis=1)) + '\n'
    bt = pbt.BedTool(s, from_string=True)
    bt = bt.sort()
    bt.saveas(fn)
# Same, but excluding genes whose names contain 'HLA'.
fn = os.path.join(outdir, 'independent_fine_mapped_variants_no_hla.bed')
if not os.path.exists(fn):
    if snpsnap is None:
        snpsnap = get_snpsnap()
    indep = get_independent_snvs(gene_variants_f[gene_variants_f.gene_name.apply(lambda x: 'HLA' not in x)])
    indep.to_csv(os.path.join(outdir, 'independent_fine_mapped_variants_no_hla.tsv'), sep='\t')
    s = '\n'.join(indep[['chrom', 'start', 'end']].apply(
        lambda x: '\t'.join([str(y) for y in x.values]), axis=1)) + '\n'
    bt = pbt.BedTool(s, from_string=True)
    bt = bt.sort()
    bt.saveas(fn)
```
### Random motif disruption
I want to look into how many motif disruptions I would get by chance when searching
this many motifs against this number of variants. There may be a few ways to do this,
though it is tricky. One idea is to randomly assign motifs instead of searching the motifs
for the ChIP-seq peak that is overlapped. For instance, if a variant overlaps a JUN peak,
choose motifs for a random different TF. The problem here is that the motifs are correlated
(e.g. a different TF could still be enriched for a JUN motif) so sometimes you may
still expect a disruption. This motif similarity (which I explore some at the end of this
notebook) creates a lot of issues like this.
Another idea is to just look for motif disruptions among the variants that did not overlap
TF peaks. I used 191,871 variants for the fine mapping. These were intersected
with ChIP-seq peaks and 7,840 overlapped a peak for at least one TF. Note that these numbers
are less than reported above (where I actually did the overlapping) because for the fine mapping
I removed variants whose eGene overlapped a CNV eQTL or that had an eQTL variant predicted to
cause NMD. I looked for motif disruptions for these 7,840 and found 3,225
disruptions. So I want to take the 191,871 - 7,840 and randomly choose sets of 7,840. Then
I'll randomly assign the same motifs I used for the real 7,840 and see how many motif disruptions
I get by chance. This will be a slightly biased estimate because some of the variants that did
not overlap peaks probably overlap a peak that was not assayed and may disrupt a motif that I'm
searching, given the motif similarity. I think I should still see a difference between the real and
randomly chosen variants though.
```
# Null model: randomly sample non-peak variants, assign them the same motif
# sets used for the real peak-overlapping variants, and count disruptions.
# Cached to disk because the parallel scan is expensive.
fn = os.path.join(outdir, 'num_disrupt_random.tsv')
if not os.path.exists(fn):
    random.seed('20160725')
    # Per-location ref/alt table for the CNV/NMD-filtered variants.
    tdf = gene_variants[['location', 'ref', 'alt']]# + tf_cols]
    tdf = tdf.drop_duplicates()
    tdf.index = tdf.location
    tdf = tdf.drop('location', axis=1)
    tdf = tdf.ix[no_cnv_nmd_vars.values]
    tf_overlap_yes = tf_overlap[tf_overlap.sum(axis=1) > 0]
    # TFs overlapped by each real peak-overlapping variant.
    d = {}
    for i in tf_overlap_yes.index:
        d[i] = list(tf_overlap_yes.ix[i][tf_overlap_yes.ix[i]].index)
    se = pd.Series(d)
    se = se[se.index & tdf.index]
    target_to_motif = pd.Series(motif_info.index, index=motif_info.target)
    # Spin up an ipyparallel cluster and push the motif matrices to engines.
    from ipyparallel import Client
    parallel_client = Client(profile='parallel')
    dview = parallel_client[:]
    print('Cluster has {} engines.'.format(len(parallel_client.ids)))
    with dview.sync_imports():
        import cdpybio
        import cardipspy
    %px cpb = cdpybio
    %px cpy = cardipspy
    #dview.push(dict(tdf=tdf));
    dview.push(dict(matrices=matrices));
    random_mds = []
    # 30 random draws of the same size as the real peak-overlapping set.
    for i in range(30):
        # NOTE: `i` is rebound here from loop counter to the sampled index list.
        i = random.sample(tdf.index, (peak_overlap.ix[no_cnv_nmd_vars.values].sum(axis=1) > 0).sum())
        tdf_r = tdf.ix[i]
        # Reuse the real variants' motif lists for the random variants.
        tdf_r['motifs'] = se.apply(lambda x: list(target_to_motif[x])).values
        dview.push(dict(tdf_r=tdf_r));
        res = dview.map_sync(lambda i: cpb.moodsext.find_motif_disruptions(
            i, tdf_r.ix[i, 'ref'], tdf_r.ix[i, 'alt'], cpy.hg19,
            {k:matrices[k] for k in tdf_r.ix[i, 'motifs']}), tdf_r.index)
        # Collect non-empty results and call a disruption at |score_diff| >= 2.5,
        # matching the threshold used for the real variants.
        a = []
        b = []
        for i,p in enumerate(tdf_r.index):
            if res[i].shape[0] > 0:
                a.append(p)
                b.append(res[i])
        d = dict(zip(a,b))
        p = pd.Panel(d)
        motif_d_r = p.ix[:, :, 'score_diff'].T
        motif_disrupt_r = motif_d_r.abs() >= 2.5
        motif_disrupt_r = motif_disrupt_r[motif_disrupt_r.sum(axis=1) > 0]
        random_mds.append(motif_disrupt_r)
    num_disrupt_random = pd.Series([x.shape[0] for x in random_mds])
    num_disrupt_random.to_csv(fn, sep='\t')
else:
    num_disrupt_random = pd.read_table(fn, index_col=0, squeeze=True, header=None)
print('There were {} variants that disrupted motifs on average (s.d. {}, {} samples).'.format(
    num_disrupt_random.mean(), num_disrupt_random.std(), num_disrupt_random.shape[0]))
```
### $p$-value comparison
```
# Compare each gene's peQTN p-value to its lead-variant p-value.
leads_p = lead_variants_single_f[lead_variants_single_f.gene_id.apply(lambda x: x in set(gene_variants_f.gene_id))]
# Best (smallest p) peQTN per gene.
gvf_p = gene_variants_f.sort_values(by='pvalue').drop_duplicates(subset='gene_id')
# Sort both by gene_id so .values align row-for-row below.
leads_p.sort_values(by='gene_id', inplace=True)
gvf_p.sort_values(by='gene_id', inplace=True)
pvals = pd.DataFrame({'qtn':gvf_p.pvalue.values, 'lead':leads_p.pvalue.values},
                     index=gvf_p.gene_id)
pvals_log = -np.log10(pvals)
# Mark genes with a significant secondary eQTL.
pvals_log['secondary'] = False
pvals_log.ix[set(secondary_qvalues[secondary_qvalues.perm_sig].index) & set(pvals_log.index),
             'secondary'] = True
sns.jointplot(pvals_log.lead, pvals_log.qtn, kind='scatter', alpha=0.15);
sns.jointplot(pvals_log[pvals_log.secondary == False].lead,
              pvals_log[pvals_log.secondary == False].qtn, kind='scatter', alpha=0.15);
sns.jointplot(pvals_log[pvals_log.secondary].lead,
              pvals_log[pvals_log.secondary].qtn, kind='scatter', alpha=0.15);
# pdiff: orders of magnitude between lead and peQTN p-values.
pvals_log['pdiff'] = pvals_log.lead - pvals_log.qtn
diff_sec = pvals_log[pvals_log.secondary & (pvals_log.pdiff > 1)].shape[0]
no_diff_sec = pvals_log[pvals_log.secondary & (pvals_log.pdiff <= 1)].shape[0]
diff_no_sec = pvals_log[(pvals_log.secondary == False) & (pvals_log.pdiff > 1)].shape[0]
no_diff_no_sec = pvals_log[(pvals_log.secondary == False) & (pvals_log.pdiff <= 1)].shape[0]
s,p = stats.fisher_exact([[diff_sec, no_diff_sec], [diff_no_sec, no_diff_no_sec]])
print('eGenes with secondary eQTLs are enriched for having their peQTN differ in significance '
      'from the lead (p={:.2e})'.format(p))
a = sum(pvals_log.pdiff < 1)
b = pvals_log.shape[0]
print('For {:,} of {:,} ({:.2f}%) genes with peQTNs, the peQTN p-value is within one '
      'order of magnitude of the lead.'.format(a, b, a / float(b) * 100))
a = sum(pvals_log.pdiff < 2)
b = pvals_log.shape[0]
print('For {:,} of {:,} ({:.2f}%) genes with peQTNs, the peQTN p-value is within two '
      'orders of magnitude of the lead.'.format(a, b, a / float(b) * 100))
```
### Validation
#### Proof of concept
I'd like to find some examples to validate as a proof of concept. Some things to consider are:
* known or novel motif
* which TF (relevant to stem cells?)
* association $p$-value
* overlap DHS?
```
# Successively filter peQTNs down to strong validation candidates.
tdf = gene_variants_f.copy(deep=True)
# Disrupt known motif
t = motif_disrupt.ix[set(tdf.location)]
t = t[[x for x in t.columns if 'known' in x]]
t = t.sum(axis=1)
t = t[t > 0]
tdf = tdf[tdf.location.apply(lambda x: x in set(t.index))]
print(tdf.shape[0])
# Within one order of magnitude of the lead
tdf = tdf.merge(pvals_log, left_on='gene_id', right_index=True)
tdf = tdf[tdf.pdiff < 1]
print(tdf.shape[0])
# DHS present in at least 11 lines
tdf['dhs_count'] = tdf[[x for x in tdf.columns if 'dnase' in x]].sum(axis=1)
tdf = tdf[tdf.dhs_count >= 11]
print(tdf.shape[0])
# Only SNVs
tdf = tdf[tdf.ref.apply(lambda x: len(x)) == 1]
tdf = tdf[tdf.alt.apply(lambda x: len(x)) == 1]
print(tdf.shape[0])
# Significant in Maurano
tdf = tdf[tdf.location.apply(lambda x: x in set(tdf.location) &
                             set(maurano_res[maurano_res['q.value_maurano'] < 0.05].index))]
print(tdf.shape[0])
# Not exonic
tdf = tdf[tdf.exonic == False]
print(tdf.shape[0])
# Drop location duplicates
tdf = tdf.drop_duplicates(subset='location')
print(tdf.shape[0])
# How many candidates fall in each H1 annotation column.
tdf[[x for x in tdf.columns if 'H1' in x]].sum()
```
I'll define the regions as the DHS from the H1 line from ENCODE.
```
# Define validation regions as H1-hESC DHS (ENCODE ENCSR000EJN) that
# contain a candidate variant; export BED tracks and attach DHS coords.
encode_dnase = pd.read_table(os.path.join(ciepy.root, 'output',
                                          'functional_annotation_analysis',
                                          'encode_dnase.tsv'), index_col=0)
bs_types = ['stem cell', 'induced pluripotent stem cell line']
encode_dnase = encode_dnase[encode_dnase.biosample_type.apply(lambda x: x in bs_types)]
vbt = cpb.bedtools.intervals_to_bed(tdf.location)
vbt = vbt.sort()
vbt.saveas(os.path.join(outdir, 'validation_variants.bed'), trackline='track type=bed name="variants"');
dhs_bt = pbt.BedTool(cpb.general.read_gzipped_text_url(encode_dnase.ix['ENCSR000EJN', 'narrowPeak_url']),
                     from_string=True).sort()
# Keep only DHS that contain a candidate variant.
dhs_bt = dhs_bt.intersect(vbt, wa=True)
df = dhs_bt.to_dataframe()
dhs_bt = cpb.bedtools.intervals_to_bed(df.chrom + ':' + df.start.astype(str) + '-' + df.end.astype(str))
dhs_bt.saveas(os.path.join(outdir, 'validation_dhs.bed'), trackline='track type=bed name="H1-hESC DHS"');
se = tf_disrupt.ix[set(tdf.location)].sum()
se[se > 0]
# Pull the (lower-cased) genomic sequence of each DHS; FASTA alternates
# header/sequence lines.
seqs = dhs_bt.sequence(fi=cpy.hg19)
seqs = [x.strip() for x in open(seqs.seqfn).readlines()]
seqs = pd.Series(seqs[1::2], index=[x[1:] for x in seqs[0::2]])
seqs = seqs.apply(lambda x: x.lower())
seqs = seqs.drop_duplicates()
# Re-intersect with -wo to recover the DHS coordinates for each variant.
dhs_bt = pbt.BedTool(cpb.general.read_gzipped_text_url(encode_dnase.ix['ENCSR000EJN', 'narrowPeak_url']),
                     from_string=True).sort()
dhs_bt = dhs_bt.intersect(vbt, wo=True)
df = dhs_bt.to_dataframe(names=range(14))
# Index by the variant interval (columns 10-12); keep the DHS interval.
df.index = df[10] + ':' + df[11].astype(str) + '-' + df[12].astype(str)
df['dhs'] = df[0] + ':' + df[1].astype(str) + '-' + df[2].astype(str)
df = df.drop([0] + range(3, 14), axis=1)
df.columns = ['dhs_start', 'dhs_end', 'dhs']
# NOTE(review): the merge is performed twice; the second call re-merges the
# already-merged frame (producing suffixed duplicate columns) — confirm
# whether only the second line (with drop_duplicates) was intended.
tdf = tdf.merge(df, left_on='location', right_index=True)
tdf = tdf.merge(df, left_on='location', right_index=True).drop_duplicates()
```
I lose 4 variants here I think because they don't overlap a DHS in this line.
```
# Start the alternate-allele sequences as a copy of the reference DHS sequences.
alt_seqs = seqs.copy(deep=True)
```
#### ChIP-seq
We want to test our peQTN predictions by doing ChIP-seq and seeing whether the
peQTNs have evidence of allelic TF binding.
I want to look at the heterozygosity of peQTNs that disrupt RAD21 and CTCF
binding sites. We can do ChIP-seq for these two factors.
```
# Restrict to SNV peQTNs for the ChIP-seq heterozygosity analysis.
tdf = gene_variants_f[gene_variants_f.variant_type == 'snv']
```
```
# Heterozygosity of peQTNs predicted to disrupt CTCF/RAD21 binding, using
# genotypes from the filtered eQTL VCF.
fn = os.path.join(ciepy.root, 'private_output', 'eqtl_input',
                  'filtered_all', '0000.vcf.gz')
vcf_reader = pyvcf.Reader(open(fn), compressed=True)

def _disruption_het_stats(tf_name):
    """For SNVs in `tdf` predicted to disrupt `tf_name`, fetch genotypes for
    the eQTL samples from `vcf_reader` and return a tuple of

    * pandas.Series: fraction of called samples that are heterozygous,
      indexed by variant;
    * pandas.DataFrame: concatenated allele strings, one row per variant,
      one column per sample.
    """
    ind = []
    percent_het = []
    gts = []
    for i in tdf.index:
        if tf_disrupt.ix[tdf.ix[i, 'location'], tf_name] == True:
            # The VCF uses chromosome names without the 'chr' prefix.
            t = vcf_reader.fetch(tdf.ix[i, 'chrom'][3:],
                                 tdf.ix[i, 'start'],
                                 tdf.ix[i, 'end'])
            r = t.next()
            s = [x.sample for x in r.samples if x.called]
            gt = [x.gt_alleles for x in r.samples if x.called]
            gt = pd.DataFrame(gt, index=s, columns=['allele_a', 'allele_b'])
            # Restrict to samples used in the eQTL analysis.
            gt = gt.ix[rna_meta.ix[rna_meta.in_eqtl, 'wgs_id'].values].dropna()
            num_called = float(gt.shape[0])
            ind.append(i)
            # Heterozygotes have allele sum equal to one.
            percent_het.append(gt.sum(axis=1).value_counts()[1] / num_called)
            gts.append(gt['allele_a'] + gt['allele_b'])
    return pd.Series(percent_het, index=ind), pd.DataFrame(gts, index=ind)

# The CTCF and RAD21 computations were previously duplicated verbatim;
# they now share the helper above.
ctcf_het, ctcf_genotypes = _disruption_het_stats('CTCF')
rad21_het, rad21_genotypes = _disruption_het_stats('RAD21')
print('{} variants that disrupt CTCF.'.format(ctcf_het.shape[0]))
print('{} variants that disrupt RAD21.'.format(rad21_het.shape[0]))
# Distribution of per-variant heterozygosity.
fig,axs = plt.subplots(1, 2)
ctcf_het.hist(ax=axs[0])
axs[0].set_xlabel('Percent samples het.')
axs[0].set_ylabel('Number of variants')
axs[0].set_title('CTCF')
rad21_het.hist(ax=axs[1])
axs[1].set_xlabel('Percent samples het.')
axs[1].set_title('RAD21');
# Distribution of the number of het samples per variant ('01' genotype).
fig,axs = plt.subplots(1, 2)
(ctcf_genotypes == '01').sum(axis=1).hist(ax=axs[0])
axs[0].set_xlabel('Number of hets.')
axs[0].set_ylabel('Number of samples')
axs[0].set_title('CTCF')
(rad21_genotypes == '01').sum(axis=1).hist(ax=axs[1])
axs[1].set_xlabel('Number of hets.')
axs[1].set_title('RAD21');
# Per-sample counts of het peQTNs (used to pick samples for ChIP-seq).
fn = os.path.join(outdir, 'num_ctcf_het.tsv')
if not os.path.exists(fn):
    se = (ctcf_genotypes == '01').sum().sort_values(ascending=False)
    se.to_csv(fn, sep='\t')
fn = os.path.join(outdir, 'num_rad21_het.tsv')
if not os.path.exists(fn):
    se = (rad21_genotypes == '01').sum().sort_values(ascending=False)
    se.to_csv(fn, sep='\t')
```
##### ChIP-seq analysis
We did ChIP-seq for CTCF. Let's see if the peQTNs have evidence of allelic imbalance.
```
# Make ChIP-seq metadata table
# Hard-coded UUIDs pairing each ChIP-seq experiment with its subject.
exp = ['2433edfc-9dc1-432b-8267-2319f218cf18', 'fdd0575a-4d53-4d02-968c-9c87adc198aa',
       '32764cbc-41a2-4b30-9936-329672caf807', '7e43459d-f07f-49d3-a157-c527dda007e7',
       '2baa401f-b182-43bb-8669-4a4fe69eacce',]
subj = ['5d86bbdd-4854-449f-b99d-0d04d0ce4ee8', '59ba8d64-a92b-4ae2-bcb4-847de2fdf1dd',
        'eb79f47b-d933-4144-8d61-83104493418e', '56f0f073-725f-4cec-b8cb-db6ad0dc2ac0',
        '9603be9a-f146-4fc5-bf36-c84fac189b0a']
chip_seq_meta = pd.DataFrame([subj], columns=exp, index=['subject_id']).T
chip_seq_meta['target'] = 'CTCF'
chip_seq_meta['wgs_id'] = ''
# Look up the WGS sample ID for each ChIP-seq subject.
for i in chip_seq_meta.index:
    t = wgs_meta[wgs_meta.subject_id == chip_seq_meta.ix[i, 'subject_id']]
    chip_seq_meta.ix[i, 'wgs_id'] = t.index[0]
# Make CTCF het files
# Subset the merged WGS VCF to CTCF-disrupting peQTN positions with bcftools,
# then extract per-sample het-only VCFs. Commands run via shell=True on
# internal trusted paths.
het_vcf = os.path.join(private_outdir, 'ctcf_peqtns.vcf.gz')
vcf_path = '/frazer01/projects/CARDIPS/pipeline/WGS/mergedVCF/CARDIPS_201512.PASS.vcf.gz'
# Strip the 'chr' prefix to match the VCF's chromosome naming.
het_bed = cpb.bedtools.intervals_to_bed([':'.join(x.split(':')[0:2])[3:] for x in ctcf_genotypes.index]).sort()
c = 'bcftools view -Oz -R {} {} > {}'.format(het_bed.fn, vcf_path, het_vcf)
subprocess.check_call(c, shell=True)
for sample in set(chip_seq_meta.wgs_id):
    out_vcf = os.path.join(private_outdir, sample + '_ctcf_peqtns_hets.vcf')
    if not os.path.exists(out_vcf):
        # Keep only this sample's het sites and rename chroms to hg19 style.
        c = ('bcftools view -Ou -s {} {} | '
             'bcftools view -Ou -g het | '
             'bcftools annotate --rename-chrs /repos/cardips-pipelines/RNA/chrom_conv.tsv -Ov > {}_temp.vcf'.format(
                 sample, het_vcf, sample))
        subprocess.check_call(c, shell=True)
        # Sort contigs to match the reference .fai, then de-duplicate records
        # while keeping a single header.
        c = ('perl ~/repos/cdeboever3/cdpipelines/cdpipelines/scripts/sortByRef.pl '
             '{0}_temp.vcf /publicdata/gatk_bundle_2.8/hg19/ucsc.hg19.fasta.fai > {0}_temp_sorted.vcf'.format(sample))
        subprocess.check_call(c, shell=True)
        c = 'grep -v ^\\# {0}_temp_sorted.vcf | uniq > {0}_temp_body.vcf'.format(sample)
        subprocess.check_call(c, shell=True)
        c = 'grep ^\\# {0}_temp_sorted.vcf | uniq > {0}_temp_header.vcf'.format(sample)
        subprocess.check_call(c, shell=True)
        c = 'cat {0}_temp_header.vcf {0}_temp_body.vcf > {1}'.format(sample, out_vcf)
        subprocess.check_call(c, shell=True)
        c = 'rm {0}_temp.vcf {0}_temp_sorted.vcf {0}_temp_body.vcf {0}_temp_header.vcf'.format(sample)
        subprocess.check_call(c, shell=True)
# Process bam files
# Add read groups and reorder each ChIP-seq bam to the GATK reference, then
# count allele-specific reads at het peQTN sites with ASEReadCounter.
for chip_sample in chip_seq_meta.index:
    sample = chip_seq_meta.ix[chip_sample, 'wgs_id']
    bam = '/projects/CARDIPS/pipeline/ChIPseq/sample/{0}/alignment/{0}.filtered.cordSorted.bam'.format(chip_sample)
    out_bam = os.path.join(private_outdir, chip_sample + '_reordered.bam')
    if not os.path.exists(out_bam):
        # Placeholder read-group fields; ASEReadCounter only needs a RG present.
        # NOTE(review): '-jar' appears twice in these java command lines
        # (once before the GC flag, once before $picard) — works if the shell
        # tolerates it, but looks like a copy/paste artifact; confirm.
        c = ('java -Xmx4g -jar '
             '-XX:ParallelGCThreads=1 '
             '-Djava.io.tmpdir={} '
             '-jar $picard AddOrReplaceReadGroups '
             'VALIDATION_STRINGENCY=SILENT '
             'I={} '
             'O={}_temp.bam '
             'RGID=4 '
             'RGLB=lib1 '
             'RGPL=illumina '
             'RGPU=unit1 '
             'RGSM=20'.format(
                 os.path.join(ciepy.root, 'sandbox', 'tmp', dy_name),
                 bam,
                 sample))
        subprocess.check_call(c, shell=True)
        # Reorder contigs to match the GATK hg19 reference.
        c = ('java -Xmx4g -jar '
             '-XX:ParallelGCThreads=1 '
             '-Djava.io.tmpdir={} '
             '-jar $picard ReorderSam '
             'VALIDATION_STRINGENCY=SILENT '
             'I={}_temp.bam '
             'O={} '
             'REFERENCE=/publicdata/gatk_bundle_2.8/hg19/ucsc.hg19.fasta '
             .format(
                 os.path.join(ciepy.root, 'sandbox', 'tmp', dy_name),
                 sample,
                 out_bam,
             )
             )
        subprocess.check_call(c, shell=True)
        subprocess.check_call('rm {}_temp.bam'.format(sample), shell=True)
        subprocess.check_call('samtools index {}'.format(out_bam), shell=True)
# Allele-specific read counts at this sample's het peQTN sites.
for sample in chip_seq_meta.index:
    bam = os.path.join(private_outdir, sample + '_reordered.bam')
    if not os.path.exists(os.path.join(private_outdir, sample + '_ctcf_peqtns_het_counts.tsv')):
        c = ('java -Xmx3g -jar '
             '-XX:ParallelGCThreads=1 '
             '-Djava.io.tmpdir={} '
             '-jar $GATK '
             '-R /publicdata/gatk_bundle_2.8/hg19/ucsc.hg19.fasta '
             '-T ASEReadCounter '
             '-o {} '
             '-I {} '
             '-sites {} '
             '-overlap COUNT_FRAGMENTS_REQUIRE_SAME_BASE '
             '-U ALLOW_N_CIGAR_READS').format(
                 os.path.join(ciepy.root, 'sandbox', 'tmp', dy_name),
                 os.path.join(private_outdir, sample + '_ctcf_peqtns_het_counts.tsv'),
                 bam,
                 os.path.join(private_outdir, chip_seq_meta.ix[sample, 'wgs_id'] + '_ctcf_peqtns_hets.vcf')
             )
        subprocess.check_call(c, shell=True)
# Test each het peQTN for allelic imbalance in the ChIP-seq reads
# (binomial test against a 0.5 reference-allele fraction); cached as pickle.
fn = os.path.join(outdir, 'chip_seq_counts.pickle')
if not os.path.exists(fn):
    all_counts = dict()
    for sample in chip_seq_meta.index:
        counts = pd.read_table(os.path.join(private_outdir, '{}_ctcf_peqtns_het_counts.tsv'.format(sample)))
        counts['expectedRefFreq'] = 0.5
        # Require at least 8 reads to test a site.
        counts = counts[counts.totalCount >= 8]
        pvals = []
        for i in counts.index:
            p = stats.binom_test(counts.ix[i, 'refCount'], counts.ix[i, 'totalCount'],
                                 counts.ix[i, 'expectedRefFreq'])
            pvals.append(p)
        counts['binomialPValue'] = pvals
        all_counts[sample] = counts
    with open(fn, 'w') as f:
        cPickle.dump(all_counts, f)
else:
    all_counts = cPickle.load(open(fn))
# Tally significant (p < 0.005) vs. non-significant sites per sample.
sig = []
not_sig = []
total = []
sig_vars = set()
for i in all_counts.keys():
    tdf = all_counts[i]
    vc = (tdf.binomialPValue < 0.005).value_counts()
    if True in vc.index:
        sig.append(vc[True])
    else:
        sig.append(0)
    # NOTE(review): vc[False] raises KeyError if every tested site is
    # significant for a sample — guarded only for the True case; confirm.
    not_sig.append(vc[False])
    total.append(tdf.shape[0])
    # Tested variants as 0-based chrom:start-end interval strings.
    sig_vars |= set(tdf.contig + ':' + (tdf.position - 1).astype(str) + '-' + tdf.position.astype(str))
chip_sig = pd.DataFrame([sig, not_sig], columns=all_counts.keys(), index=['sig', 'not_sig']).T
chip_sig['total'] = chip_sig.sig + chip_sig.not_sig
chip_sig['percent'] = chip_sig.sig / chip_sig.total.astype(float)
chip_seq_meta = chip_seq_meta.join(chip_sig)
n = chip_sig.total.mean()
print('Tested {} heterozygous peQTNs predicted to disrupt CTCF on average per sample.'.format(n))
n = chip_sig.percent.mean()
print('{:.1f}% heterozygous peQTNs predicted to disrupt CTCF had significant allelic bias per sample.'.format(n * 100))
a = chip_sig.sig.sum()
b = chip_sig.total.sum()
print('{} of {} peQTNs tested were significant.'.format(a, b))
# Number of reads sequenced per sample
chip_seq_depth = []
for chip_sample in chip_seq_meta.index:
    sample = chip_seq_meta.ix[chip_sample, 'wgs_id']
    fn = '/projects/CARDIPS/pipeline/ChIPseq/sample/{0}/alignment/alignment.QCmatrix'.format(chip_sample)
    with open(fn) as f:
        # First line of the QC matrix: "<label> <read count>".
        chip_seq_depth.append(int(f.readline().split()[1]))
print('{:,} reads on average per sample.'.format(np.mean(chip_seq_depth)))
```
#### Peaks
```
# Build a combined CTCF peak set across all ChIP-seq samples and count
# reads in peaks with featureCounts.
if not os.path.exists(os.path.join(outdir, 'ctcf_peaks.bed')):
fns = []
for i in chip_seq_meta.index:
fns.append('/projects/CARDIPS/pipeline/ChIPseq/sample/{0}/peakCalling/macs2_callPeak_narrow_withCtrl'
'/macs2_callPeak_peaks.q001.collapse.narrowPeak'.format(i))
peaks = cpb.bedtools.combine(fns)
peaks.saveas(os.path.join(outdir, 'ctcf_peaks.bed'));
# featureCounts needs SAF-format annotation, so convert the bed file once.
if not os.path.exists(os.path.join(outdir, 'ctcf_peaks.saf')):
c = ('python /frazer01/home/cdeboever/repos/cdeboever3/cdpipelines/cdpipelines/convert_bed_to_saf.py '
'{} {}'.format(os.path.join(outdir, 'ctcf_peaks.bed'), os.path.join(outdir, 'ctcf_peaks.saf')))
subprocess.check_call(c, shell=True)
# Count reads per peak for each sample, skipping samples already counted.
for i in chip_seq_meta.index:
bam = '/projects/CARDIPS/pipeline/ChIPseq/sample/{0}/alignment/{0}.filtered.querySorted.bam'.format(i)
saf = os.path.join(outdir, 'ctcf_peaks.saf')
out = os.path.join(outdir, '{}_featureCounts.tsv'.format(i))
if not os.path.exists(out):
c = 'featureCounts -p -T 8 --donotsort -F SAF -a {} -o {} {}'.format(saf, out, bam)
subprocess.check_call(c, shell=True)
if not os.path.exists(os.path.join(outdir, 'ctcf_peak_counts.tsv')):
# NOTE(review): `peaks` is only defined in the previous cell when
# ctcf_peaks.bed did not already exist; re-running from cache would
# raise NameError here — confirm intended notebook execution order.
ctcf_vars = cpb.bedtools.intervals_to_bed([':'.join(x.split(':')[0:2]) for x in ctcf_genotypes.index]).sort()
res = peaks.intersect(ctcf_vars, wo=True, sorted=True)
res = res.to_dataframe()
fns = glob.glob(os.path.join(outdir, '*_featureCounts.tsv'))
peak_counts = cpb.featureCounts.combine_counts(
fns, define_sample_name=lambda x: os.path.split(x)[1].split('.')[0].split('_')[0])
# Normalize counts by DESeq2 size factors so samples are comparable.
size_factors = cpb.analysis.deseq2_size_factors(peak_counts, chip_seq_meta, '~subject_id')
peak_counts = peak_counts / size_factors
peak_counts.to_csv(os.path.join(outdir, 'ctcf_peak_counts_all.tsv'), sep='\t')
# Keep only peaks that overlap a CTCF variant.
peak_counts = peak_counts.ix[set(res.chrom + ':' + res.start.astype(str) + '-' + res.end.astype(str))]
# NOTE(review): this overwrites ctcf_peak_counts_all.tsv with the subset;
# the file checked above and read in the else-branch (ctcf_peak_counts.tsv)
# is never written, so the else-branch can never be taken — confirm.
peak_counts.to_csv(os.path.join(outdir, 'ctcf_peak_counts_all.tsv'), sep='\t')
else:
peak_counts = pd.read_table(os.path.join(outdir, 'ctcf_peak_counts.tsv'), index_col=0)
# Recode genotype strings to numeric 0/1/2 (presumably hom-ref / het /
# hom-alt allele dosage — TODO confirm encoding) and align the genotype
# matrix columns to the ChIP-seq sample names.
fn = os.path.join(private_outdir, 'ctcf_genotypes_tri.tsv')
if not os.path.exists(fn):
ctcf_genotypes_tri = ctcf_genotypes.copy(deep=True)
ctcf_genotypes_tri = ctcf_genotypes_tri.replace('00', 0)
ctcf_genotypes_tri = ctcf_genotypes_tri.replace('01', 1)
ctcf_genotypes_tri = ctcf_genotypes_tri.replace('11', 2)
# Re-index by chrom:pos only, then drop duplicated positions.
ctcf_genotypes_tri.index = [':'.join(x.split(':')[0:2]) for x in ctcf_genotypes.index]
ctcf_genotypes_tri['index'] = ctcf_genotypes_tri.index
ctcf_genotypes_tri = ctcf_genotypes_tri.drop_duplicates(subset=['index'])
ctcf_genotypes_tri = ctcf_genotypes_tri.drop('index', axis=1)
# Rename WGS sample columns to the matching ChIP-seq sample names.
ctcf_genotypes_tri = ctcf_genotypes_tri[chip_seq_meta.wgs_id.drop_duplicates()]
cols = []
for i in ctcf_genotypes_tri.columns:
cols.append(chip_seq_meta[chip_seq_meta.wgs_id == i].index[0])
ctcf_genotypes_tri.columns = cols
ctcf_genotypes_tri = ctcf_genotypes_tri[peak_counts.columns]
ctcf_genotypes_tri.to_csv(fn, sep='\t')
else:
ctcf_genotypes_tri = pd.read_table(fn, index_col=0)
# Map each variant (keyed by the intersect output's name/score/strand
# fields) to the coordinates of its containing peak.
var_to_peak = pd.Series((res.chrom + ':' + res.start.astype(str) + '-' + res.end.astype(str)).values,
index=(res.name + ':' + res.score.astype(str) + '-' + res.strand.astype(str)).values).drop_duplicates()
disrupt_sign = []
for i in var_to_peak.index:
t = motif_d.ix[i].dropna()
# Restrict to CTCF motif columns.
t = t[[x for x in t.index if 'CTCF' in x]]
# NOTE(review): a value is appended only when all CTCF motif scores share
# one sign, so disrupt_sign can be shorter than var_to_peak.index; the
# zip below would then pair signs with the wrong variants — confirm.
if (t > 0).value_counts().shape[0] == 1:
disrupt_sign.append(sum(t > 0) / float(t.shape[0]) > 0.5)
disrupt_sign = pd.Series(dict(zip(var_to_peak.index, disrupt_sign)))
# Aggregate normalized peak counts by genotype across all usable sites to
# check the direction of effect; results are cached to a TSV.
fn = os.path.join(outdir, 'ctcf_aggregate_counts.tsv')
if not os.path.exists(fn):
genotypes = []
counts = []
for i in ctcf_genotypes_tri.index:
# Use only variants with a containing peak, significant allelic bias,
# and an entry in disrupt_sign.
if i in var_to_peak.index and i in sig_vars and i in disrupt_sign.index:
if var_to_peak[i] in peak_counts.index:
g = ctcf_genotypes_tri.ix[i]
# log-transform and z-score the peak's counts across samples.
c = np.log10(peak_counts.ix[var_to_peak[i]] + 1)
c = c - c.mean()
c = c / c.std()
c = list(c)
# Reversing the count order appears intended to flip the
# genotype-count association when disrupt_sign is False —
# TODO confirm this matches sample ordering assumptions.
if disrupt_sign[i] == False:
c.reverse()
genotypes += list(g)
counts += c
agg_counts = pd.DataFrame({'genotypes':genotypes, 'counts':counts})
agg_counts.to_csv(fn, '\t')
else:
agg_counts = pd.read_table(fn, index_col=0)
agg_counts.genotypes.value_counts()
n = len(var_to_peak.index & set(sig_vars) & disrupt_sign.index)
print('{} sites can be used to check direction of effect.'.format(n))
sns.jointplot(x='genotypes', y='counts', data=agg_counts, kind='reg');
plt.figure()
ax = sns.violinplot(x='genotypes', y='counts', data=agg_counts, color='grey',
order=[0, 1, 2], scale='count')
# NOTE(review): `tdf` is a leftover variable from the per-sample loop far
# above, not agg_counts — this regression line is likely plotted on the
# wrong data; confirm whether data=agg_counts was intended.
sns.regplot(x='genotypes', y='counts', data=tdf, scatter=False, color='red');
# Concatenate per-sample allelic count tables with a sample column.
for k in all_counts.keys():
all_counts[k]['sample'] = k
ac = pd.concat(all_counts.values())
# Fraction of all tested het sites with nominal (p < 0.05) allelic bias.
ac[ac.binomialPValue < 0.05].shape[0] / float(ac.shape[0])
ac_sig = ac[ac.binomialPValue < 0.05]
```
### 3D interactions
```
# Load ChIA-PET interaction data (Ji et al. 2015 processing output) and
# annotate variants with the ChIA-PET peaks they fall inside.
fn = os.path.join(ciepy.root, 'output', 'ji_et_al_2015_processing', 'interactions.tsv')
interactions = pd.read_table(fn)
fn = os.path.join(ciepy.root, 'output', 'ji_et_al_2015_processing',
'gene_to_containing_interactions.pickle')
gene_to_containing_interactions = cPickle.load(open(fn))
fn = os.path.join(ciepy.root, 'output', 'ji_et_al_2015_processing',
'chia_to_exon_gene.pickle')
chia_to_exon_gene = cPickle.load(open(fn))
fn = os.path.join(ciepy.root, 'output', 'ji_et_al_2015_processing',
'chia_to_promoter_gene.pickle')
chia_to_promoter_gene = cPickle.load(open(fn))
fn = os.path.join(ciepy.root, 'output', 'ji_et_al_2015_processing',
'chia_peaks.bed')
chia_peaks = pbt.BedTool(fn)
# Intersect putative eQTN locations with ChIA-PET peaks.
s = '\n'.join(gene_variants_f.location.apply(
lambda x: '\t'.join(cpb.general.parse_region(x)))) + '\n'
var_bt = pbt.BedTool(s, from_string=True)
var_bt = var_bt.sort()
res = var_bt.intersect(chia_peaks, wo=True, sorted=True)
d = {}
for r in res:
ind = '{}:{}-{}'.format(r.chrom, r.start, r.end)
# Collect the set of overlapping peak coordinates for each variant.
d[ind] = d.get(ind, set()) | set(['{}:{}-{}'.format(*r.fields[-4:-1])])
se = pd.Series(d)
gene_variants_f = gene_variants_f.merge(pd.DataFrame({'chia_peaks': se}),
left_on='location', right_index=True, how='left')
# Repeat the same annotation for the lead variants.
s = '\n'.join(lead_variants_single_f.location.apply(
lambda x: '\t'.join(cpb.general.parse_region(x)))) + '\n'
var_bt = pbt.BedTool(s, from_string=True)
var_bt = var_bt.sort()
res = var_bt.intersect(chia_peaks, wo=True, sorted=True)
d = {}
for r in res:
ind = '{}:{}-{}'.format(r.chrom, r.start, r.end)
d[ind] = d.get(ind, set()) | set(['{}:{}-{}'.format(*r.fields[-4:-1])])
se = pd.Series(d)
lead_variants_single_f = lead_variants_single_f.merge(pd.DataFrame({'chia_peaks': se}),
left_on='location', right_index=True, how='left')
def get_other_end(peaks):
    """Return the set of peaks that interact with any peak in *peaks*.

    Parameters
    ----------
    peaks : set or NaN
        ChIA-PET peak coordinate strings. Rows whose variant overlapped
        no peak carry NaN instead of a set.

    Returns
    -------
    set or NaN
        Peaks on the other end of any interaction involving a peak in
        *peaks*, or NaN when *peaks* is not a set.
    """
    # isinstance is the idiomatic type test (type(x) is set misses subclasses);
    # the guard clause flattens the original if/else nesting.
    if not isinstance(peaks, set):
        return np.nan
    other_ends = []
    for p in peaks:
        # An interaction's partner may be recorded in either column.
        other_ends += list(interactions.ix[interactions.peak1 == p, 'peak2'])
        other_ends += list(interactions.ix[interactions.peak2 == p, 'peak1'])
    return set(other_ends)
def get_promoter_gene(peaks):
    """Map ChIA-PET peaks to genes whose promoters those peaks contain.

    Parameters
    ----------
    peaks : set or NaN
        ChIA-PET peak coordinate strings (or NaN for rows with no peaks).

    Returns
    -------
    set or NaN
        Gene IDs from ``chia_to_promoter_gene`` for the given peaks, or
        NaN when *peaks* is not a set or no genes are found.
    """
    # isinstance is the idiomatic type test; guard clause replaces nesting.
    if not isinstance(peaks, set):
        return np.nan
    # Only peaks present in the promoter-gene mapping can contribute.
    peaks = peaks & set(chia_to_promoter_gene.index)
    genes = []
    for p in peaks:
        genes += list(chia_to_promoter_gene[p])
    out = set(genes)
    if len(out) == 0:
        return np.nan
    return out
# Annotate each variant with the partner peaks its ChIA-PET peaks loop to,
# and with the genes whose promoters those partner peaks contain.
gene_variants_f['chia_interaction_peak'] = gene_variants_f.chia_peaks.apply(lambda x: get_other_end(x))
lead_variants_single_f['chia_interaction_peak'] = lead_variants_single_f.chia_peaks.apply(lambda x: get_other_end(x))
gene_variants_f['chia_interaction_promoter_gene'] = \
gene_variants_f.chia_interaction_peak.apply(lambda x: get_promoter_gene(x))
lead_variants_single_f['chia_interaction_promoter_gene'] = \
lead_variants_single_f.chia_interaction_peak.apply(lambda x: get_promoter_gene(x))
# A variant "interacts with promoter" when its own gene_id is among the
# promoter genes reached through a ChIA-PET loop.
gene_variants_f['interacts_with_promoter'] = np.nan
t = gene_variants_f[gene_variants_f.chia_interaction_promoter_gene.isnull() == False]
se = t.apply(lambda x: x['gene_id'] in x['chia_interaction_promoter_gene'], axis=1)
gene_variants_f.ix[se.index, 'interacts_with_promoter'] = se
lead_variants_single_f['interacts_with_promoter'] = np.nan
t = lead_variants_single_f[lead_variants_single_f.chia_interaction_promoter_gene.isnull() == False]
se = t.apply(lambda x: x['gene_id'] in x['chia_interaction_promoter_gene'], axis=1)
lead_variants_single_f.ix[se.index, 'interacts_with_promoter'] = se
# Compare promoter-interaction rates between lead variants and peQTNs.
shared = set(lead_variants_single_f.index) & set(gene_variants_f.index)
a = lead_variants_single_f.interacts_with_promoter.sum()
b = gene_variants_f.interacts_with_promoter.sum()
c = len(set(lead_variants_single_f[lead_variants_single_f.interacts_with_promoter == True].index)
& set(gene_variants_f[gene_variants_f.interacts_with_promoter == True].index))
print('{} lead variants and {} putative eQTNs ({} shared) '
'interact with promoters.'.format(a, b, c))
# Fisher's exact test restricted to variants not shared between the sets.
lead_interact = lead_variants_single_f.drop(shared).interacts_with_promoter.sum()
lead_no_interact = lead_variants_single_f.drop(shared).shape[0] - lead_interact
put_interact = gene_variants_f.drop(shared).interacts_with_promoter.sum()
put_no_interact = gene_variants_f.drop(shared).shape[0] - put_interact
stats.fisher_exact([[put_interact, put_no_interact], [lead_interact, lead_no_interact]])
# How many promoter-interacting variants are more than 20 kb from the TSS?
sum(gene_variants_f[gene_variants_f.interacts_with_promoter == True].tss_dist_abs > 20000)
sum(lead_variants_single_f[lead_variants_single_f.interacts_with_promoter == True].tss_dist_abs > 20000)
sum(gene_variants_f.tss_dist_abs > 20000)
gene_variants_f.tss_dist_abs.median()
lead_variants_single_f.tss_dist_abs.median()
# Spot-check one promoter-interacting lead variant.
ttt = lead_variants_single_f[lead_variants_single_f.interacts_with_promoter == True]
ttt.ix['chr17:2296013-2296014:ENSG00000070444.10']
# NOTE(review): this computes the difference of the two anchors'
# half-widths, not the distance between anchor midpoints — confirm the
# formula is intended.
interactions['distance'] = (interactions.end1 - interactions.start1) / 2. - (interactions.end2 - interactions.start2) / 2.
interactions['distance'] = interactions.distance.abs()
np.log10(interactions.distance + 1).hist()
np.log10(lead_variants_single_f[lead_variants_single_f.interacts_with_promoter == True].tss_dist_abs + 1).hist()
np.log10(gene_variants_f[gene_variants_f.interacts_with_promoter == True].tss_dist_abs + 1).hist()
# Loop span: leftmost anchor start to rightmost anchor end.
mi = interactions.apply(
lambda x: min(x['start1'], x['start2']), axis=1)
ma = interactions.apply(
lambda x: max(x['end1'], x['end2']), axis=1)
se = ma - mi
# np.log10(se.abs()).hist(bins=50)
# plt.ylabel('Number of loops')
# plt.xlabel('$log_{10}$ size in bp')
# plt.title('Median: {}'.format(se.median()));
# Overlay normalized histograms of loop spans vs. variant-TSS distances.
np.log10(se.abs()).hist(normed=True, alpha=0.5, label='ChIA-PET', bins=np.arange(0, 9, 0.25), histtype='stepfilled')
np.log10(lead_variants_single_f.tss_dist_abs + 1).hist(normed=True, alpha=0.5, label='Lead variants', bins=np.arange(0, 9, 0.25), histtype='stepfilled')
np.log10(gene_variants_f.tss_dist_abs + 1).hist(normed=True, alpha=0.5, label='peQTNs', bins=np.arange(0, 9, 0.25), histtype='stepfilled')
plt.legend()
plt.xlabel('$\log_{10}$ distance in base pairs')
plt.ylabel('Density')
# Cache kernel density estimates of the log10 distance distributions,
# evaluated on a fixed grid from 0 to 9 in steps of 0.1.
fn = os.path.join(outdir, 'tss_distance_kde.tsv')
if not os.path.exists(fn):
pdfs = pd.DataFrame(index=np.arange(0, 9 + 0.1, 0.1))
density = scipy.stats.gaussian_kde(np.log10(se.abs()))
pdfs['ChIA-PET interactions'] = density(pdfs.index)
density = scipy.stats.gaussian_kde(np.log10(lead_variants_single_f.tss_dist_abs + 1))
pdfs['Lead variants'] = density(pdfs.index)
density = scipy.stats.gaussian_kde(np.log10(gene_variants_f.tss_dist_abs + 1))
pdfs['peQTNs'] = density(pdfs.index)
pdfs.to_csv(fn, sep='\t')
else:
pdfs = pd.read_table(fn, index_col=0)
pdfs.plot()
plt.xlabel('Distance to TSS')
plt.ylabel('Density');
```
### Motif Similarity
```
# Download the ENCODE motif similarity matrix and parse it into a square
# float DataFrame indexed by motif name.
s = cpb.general.read_gzipped_text_url('http://compbio.mit.edu/encode-motifs/motifs-sim.txt.gz')
lines = s.strip().split('\n')
lines = [x.split('\t') for x in lines]
# First row is the header; first column of each row is the motif name.
vals = [x[1:] for x in lines[1:]]
index = [x[0] for x in lines][1:]
header = lines[0][1:]
motif_sim = pd.DataFrame(vals, index=index, columns=header)
motif_sim = motif_sim.astype(float)
t = motif_sim.ix[motif_info.index, motif_info.index]
sns.clustermap(t, xticklabels=[], yticklabels=[]);
sns.heatmap(motif_sim, xticklabels=[], yticklabels=[]);
# Distributions of TF-peak overlaps and disrupted motifs per peQTN.
tf_overlap.ix[gene_variants_f.location.drop_duplicates()].sum(axis=1).value_counts().sort_index().plot(kind='bar')
motif_disrupt.ix[gene_variants_f.location.drop_duplicates()].sum(axis=1).value_counts().sort_index().plot(kind='bar')
num_tf_motifs = []
for i in gene_variants_f.location.drop_duplicates():
se = motif_disrupt.ix[i]
# Count distinct TFs with a disrupted motif (motif names look like TF_suffix).
num_tf_motifs.append(len(set([x.split('_')[0] for x in se[se].index])))
pd.Series(num_tf_motifs).value_counts().sort_index().plot(kind='bar')
# Spot-check a few variants: which disrupted motifs resemble (similarity
# > 0.75) known motifs of other, unexpected TFs?
for i in gene_variants_f.location.drop_duplicates()[0:5]:
se = motif_disrupt.ix[i]
df = motif_sim.ix[se[se].index]
df = df > 0.75
print('Disrupts motifs: {}'.format(', '.join(list(se[se].index))))
print('These motifs are similar to known motifs for: {}'.format(
', '.join(list(set([x.split('_')[0] for x in df.sum()[df.sum() > 0].index if 'disc' not in x])))))
print('These TFs are not expected: {}'.format(
', '.join(set([x.split('_')[0] for x in df.sum()[df.sum() > 0].index if 'disc' not in x]) -
set([x.split('_')[0] for x in se[se].index]))))
print('----------------------------------------------------------------------------------------------------')
# Count, per peQTN, the TFs implicated only through motif similarity.
num_sim_motifs = []
for i in gene_variants_f.location.drop_duplicates():
se = motif_disrupt.ix[i]
df = motif_sim.ix[se[se].index]
df = df > 0.75
num_sim_motifs.append(len(set([x.split('_')[0] for x in df.sum()[df.sum() > 0].index if 'disc' not in x]) -
set([x.split('_')[0] for x in se[se].index])))
pd.Series(num_sim_motifs).value_counts().sort_index().plot(kind='bar')
# For each peQTN, check whether it disrupts a known motif of a TF whose
# ChIP-seq peak it also overlaps.
t = []
for a in gene_variants_f.index:
i = gene_variants_f.ix[a, 'location']
se = motif_disrupt.ix[i]
se = se[se]
# Restrict to "known" (non-discovered) motifs.
se = se[[x for x in se.index if 'known' in x]]
s = set(motif_info.ix[se.index, 'target'])
if sum(tf_overlap.ix[i, s] > 0):
t.append(a)
n = len(set(gene_variants_f.ix[t, 'gene_id'])) / float(len(set(gene_variants_f['gene_id'])))
print('{:.2f}% of eGenes with a peQTN have at least one peQTN that disrupts'
' a known motif for the TF that it overlaps.'.format(n * 100))
a = sum(pd.Series(num_sim_motifs) > 0)
b = len(num_sim_motifs)
print('{:,} of {:,} ({:.2f}%) peQTNs disrupt a motif that is '
'similar to a known motif for a different TF (e.g. the variant '
'does not overlap a peak for the other TF)'.format(a, b, a / float(b) * 100))
motif_sim.ix['ETV6_1'].hist()
```
| github_jupyter |
# Python basics 3: Matplotlib
This tutorial introduces matplotlib, a Python library for plotting numpy arrays as images. We will learn how to display arrays as images, and how to use array indexing to select and modify parts of an image.
Follow the instructions below to download the tutorial and open it in the Sandbox.
## Download the tutorial notebook
[Download the Python basics 3 tutorial notebook](../_static/python_basics/03_download-matplotlib.ipynb)
[Download the exercise image file](../_static/python_basics/Guinea_Bissau.JPG)
To view this notebook on the Sandbox, you will need to first download the notebook and the image to your computer, then upload both of them to the Sandbox. Ensure you have followed the set-up prerequisites listed in [Python basics 1: Jupyter](./01_jupyter.ipynb), and then follow these instructions:
1. Download the notebook by clicking the first link above. Download the image by clicking the second link above.
2. On the Sandbox, open the **Training** folder.
3. Click the **Upload Files** button as shown below.
<img align="middle" src="../_static/session_2/05_solution_uploadbutton.png" alt="Upload button." width=400>
4. Select the downloaded notebook using the file browser. Click **OK**.
5. Repeat to upload the image file to the **Training** folder. It may take a while for the upload to complete.
6. Both files will appear in the **Training** folder. Double-click the tutorial notebook to open it and begin the tutorial.
You can now use the tutorial notebook as an interactive version of this webpage.
## Introduction to matplotlib's pyplot
We are going to use part of matplotlib called `pyplot`. We can import pyplot by specifying it comes from matplotlib. We will abbreviate `pyplot` to `plt`.
```
%matplotlib inline
# Generates plots in the same page instead of opening a new window
import numpy as np
from matplotlib import pyplot as plt
```
Images are 2-dimensional arrays containing pixels. Therefore, we can use 2-dimensional arrays to represent image data and visualise with matplotlib.
In the example below, we will use the numpy `arange` function to generate a 1-dimensional array filled with elements from `0` to `99`, and then reshape it into a 2-dimensional array using `reshape`.
```
# Build a 10x10 array holding the values 0..99 and display it as an image.
arr = np.arange(100).reshape(10,10)
print(arr)
plt.imshow(arr)
```
If you remember from the [last tutorial](./02_numpy.ipynb), we were able to address regions of a numpy array using the square bracket `[ ]` index notation. For multi-dimensional arrays we can use a comma `,` to distinguish between axes.
```python
[ first dimension, second dimension, third dimension, etc. ]
```
As before, we use colons `:` to denote `[ start : end : stride ]`. We can do this for each dimension.
For example, we can update the values on the left part of this array to be equal to `1`.
```
arr = np.arange(100).reshape(10,10)
# Set the left half (all rows, first 5 columns) to 1, then display.
arr[:, :5] = 1
plt.imshow(arr)
```
The indexes in the square brackets of `arr[:, :5]` can be broken down like this:
```python
[ 1st dimension start : 1st dimension end, 2nd dimension start : 2nd dimension end ]
```
Dimensions are separated by the comma `,`. Our first dimension is the vertical axis, and the second dimension is the horizontal axis. Their spans are marked by the colon `:`. Therefore:
```python
[ Vertical start : Vertical end, Horizontal start : Horizontal end ]
```
If there are no indexes entered, then the array will take all values. This means `[:, :5]` gives:
```python
[ Vertical start : Vertical end, Horizontal start : Horizontal start + 5 ]
```
Therefore the array index selected the first 5 pixels along the width, at all vertical values.
Now let's see what that looks like on an actual image.
> **Tip**: Ensure you uploaded the file `Guinea_Bissau.JPG` to your **Training** folder along with the tutorial notebook. We will be using this file in the next few steps and exercises.
We can use the pyplot library to load an image using the matplotlib function `imread`. `imread` reads in an image file as a 3-dimensional numpy array. This makes it easy to manipulate the array.
By convention, the first dimension corresponds to the vertical axis, the second to the horizontal axis and the third are the Red, Green and Blue channels of the image. Red-green-blue channels conventionally take on values from 0 to 255.
```
# Read the image into a numpy array; np.copy presumably ensures a writable
# array, since imread results can be read-only — TODO confirm.
im = np.copy(plt.imread('Guinea_Bissau.JPG'))
# This file path (red text) indicates 'Guinea_Bissau.JPG' is in the
# same folder as the tutorial notebook. If you have moved or
# renamed the file, the file path must be edited to match.
im.shape
```
`Guinea_Bissau.JPG` is an image of Rio Baboque in Guinea-Bissau in 2018. It has been generated from Landsat 8 satellite data.
The results of the above cell show that the image is 590 pixels tall, 602 pixels wide, and has 3 channels. The three channels are red, green, and blue (in that order).
Let's display this image using the pyplot `imshow` function.
```
# Display the loaded satellite image.
plt.imshow(im)
```
## Exercises
### 3.1 Let's use the indexing functionality of numpy to select a portion of this image. Select the top-right corner of this image with shape `(200,200)`.
> **Hint:** Remember there are three dimensions in this image. Colons separate spans, and commas separate dimensions.
```
# We already defined im above, but if you have not,
# you can un-comment and run the next line
# im = np.copy(plt.imread('Guinea_Bissau.JPG'))
# Fill in the question marks with the correct indexes
# (the '?' placeholders are intentional exercise blanks — this cell will
# not run until they are replaced with slices for height, width, channels).
topright = im[?,?,?]
# Plot your result using imshow
plt.imshow(topright)
```
If you have selected the correct corner, there should not be much water in it!
### 3.2 Let's have a look at one of the pixels in this image. We choose the top-left corner with position `(0,0)` and show the values of its RGB channels.
```
# Run this cell to see the colour channel values
# of the top-left pixel: index [row 0, column 0] returns the RGB triple.
im[0,0]
```
The first value corresponds to the red component, the second to the green and the third to the blue. `uint8` can contain values in the range `[0-255]` so the pixel has a lot of red, some green, and not much blue. This pixel is an orange-yellow sandy colour.
Now let's modify the image.
### What happens if we set all the values representing the blue channel to the maximum value?
```
# Run this cell to set all blue channel values to 255
# We first make a copy to avoid modifying the original image
im2 = np.copy(im)
# 255 is the maximum value an 8-bit (uint8) channel can hold.
im2[:,:,2] = 255
plt.imshow(im2)
```
> The index notation `[:,:,2]` is selecting pixels at all heights and all widths, but only the 3rd colour channel.
### Can you modify the above code cell to set all red values to the maximum value of `255`?
## Conclusion
We have successfully practised indexing numpy arrays and plotting those arrays using matplotlib. We can now also read a file into Python using `pyplot.imread`. The next lesson covers data cleaning and masking.
| github_jupyter |
<a href="https://colab.research.google.com/github/intel-analytics/analytics-zoo/blob/master/docs/docs/colab-notebook/orca/quickstart/tf_lenet_mnist.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

---
##### Copyright 2018 Analytics Zoo Authors.
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
```
## **Environment Preparation**
**Install Java 8**
Run the cell on the **Google Colab** to install jdk 1.8.
**Note:** if you run this notebook on your computer, root permission is required when running the cell to install Java 8. (You may ignore this cell if Java 8 has already been set up in your computer).
```
# Install jdk8
!apt-get install openjdk-8-jdk-headless -qq > /dev/null
import os
# Set environment variable JAVA_HOME.
os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64"
!update-alternatives --set java /usr/lib/jvm/java-8-openjdk-amd64/jre/bin/java
!java -version
```
**Install Analytics Zoo**
You can install the latest pre-release version using `pip install --pre --upgrade analytics-zoo`.
```
# Install latest pre-release version of Analytics Zoo
# Installing Analytics Zoo from pip will automatically install pyspark, bigdl, and their dependencies.
!pip install --pre --upgrade analytics-zoo
# Install required dependencies
!pip install tensorflow==1.15.0 tensorflow-datasets==2.1.0
```
## **Distributed TensorFlow (v1.15) using Orca APIs**
In this guide we will describe how to scale out TensorFlow (v1.15) programs using Orca in 4 simple steps.
```
# import necessary libraries and modules
import argparse
from zoo.orca.learn.tf.estimator import Estimator
from zoo.orca import init_orca_context, stop_orca_context
from zoo.orca import OrcaContext
```
### **Step 1: Init Orca Context**
```
# Mirror the driver's stdout/stderr into the notebook output.
OrcaContext.log_output = True # recommended to set it to True when running Analytics Zoo in Jupyter notebook (this will display terminal's stdout and stderr in the Jupyter notebook).
# Choose where to run: "local", "k8s", or "yarn".
cluster_mode = "local"
if cluster_mode == "local":
init_orca_context(cluster_mode="local", cores=4) # run in local mode
elif cluster_mode == "k8s":
init_orca_context(cluster_mode="k8s", num_nodes=2, cores=2) # run on K8s cluster
elif cluster_mode == "yarn":
init_orca_context(cluster_mode="yarn-client", num_nodes=2, cores=2) # run on Hadoop YARN cluster
```
This is the only place where you need to specify local or distributed mode. View [Orca Context](https://analytics-zoo.readthedocs.io/en/latest/doc/Orca/Overview/orca-context.html) for more details.
**Note**: You should export HADOOP_CONF_DIR=/path/to/hadoop/conf/dir when you run on Hadoop YARN cluster.
### **Step 2: Define the Model**
You may define your model, loss and metrics in the same way as in any standard (single node) TensorFlow program.
```
import tensorflow as tf
def accuracy(logits, labels):
    """Mean classification accuracy: fraction of rows where argmax(logits) equals the label."""
    predicted_classes = tf.argmax(logits, axis=1, output_type=labels.dtype)
    correct = tf.cast(tf.equal(predicted_classes, labels), dtype=tf.float32)
    return tf.reduce_mean(correct)
# LeNet-style CNN: two conv+max-pool stages, one dense layer, 10-way logits.
def lenet(images):
with tf.variable_scope('LeNet', [images]):
net = tf.layers.conv2d(images, 32, (5, 5), activation=tf.nn.relu, name='conv1')
net = tf.layers.max_pooling2d(net, (2, 2), 2, name='pool1')
net = tf.layers.conv2d(net, 64, (5, 5), activation=tf.nn.relu, name='conv2')
net = tf.layers.max_pooling2d(net, (2, 2), 2, name='pool2')
net = tf.layers.flatten(net)
net = tf.layers.dense(net, 1024, activation=tf.nn.relu, name='fc3')
# No activation on the final layer: raw logits for the 10 classes.
logits = tf.layers.dense(net, 10)
return logits
# tensorflow inputs
# Placeholder for batches of 28x28 single-channel (grayscale) images.
images = tf.placeholder(dtype=tf.float32, shape=(None, 28, 28, 1))
# tensorflow labels
labels = tf.placeholder(dtype=tf.int32, shape=(None,))
logits = lenet(images)
# sparse_softmax_cross_entropy takes integer class labels (not one-hot).
loss = tf.reduce_mean(tf.losses.sparse_softmax_cross_entropy(logits=logits, labels=labels))
acc = accuracy(logits, labels)
```
### **Step 3: Define Train Dataset**
```
import tensorflow_datasets as tfds
# Scale pixel values to [0, 1] and return (image, label) pairs.
def preprocess(data):
data['image'] = tf.cast(data["image"], tf.float32) / 255.
return data['image'], data['label']
# get DataSet
dataset_dir = "~/tensorflow_datasets"
mnist_train = tfds.load(name="mnist", split="train", data_dir=dataset_dir)
mnist_test = tfds.load(name="mnist", split="test", data_dir=dataset_dir)
mnist_train = mnist_train.map(preprocess)
mnist_test = mnist_test.map(preprocess)
```
### **Step 4: Fit with Orca Estimator**
First, create an Estimator.
```
# Wrap the TF1 graph tensors in an Orca Estimator for distributed training.
est = Estimator.from_graph(inputs=images,
outputs=logits,
labels=labels,
loss=loss,
optimizer=tf.train.AdamOptimizer(),
metrics={"acc": acc})
```
Next, fit the Estimator.
```
# Train for one epoch, validating against the test set.
max_epoch = 1
est.fit(data=mnist_train,
batch_size=320,
epochs=max_epoch,
validation_data=mnist_test)
```
Finally, evaluate using the Estimator.
```
# Evaluate on the test set and save a TensorFlow checkpoint of the model.
result = est.evaluate(mnist_test)
print(result)
est.save_tf_checkpoint("/tmp/lenet/model")
```
Now, the accuracy of this model has reached 98%.
```
# Stop orca context when your program finishes
stop_orca_context()
```
| github_jupyter |
# Build a machine learning workflow using Step Functions and SageMaker
1. [Introduction](#Introduction)
1. [Setup](#Setup)
1. [Build a machine learning workflow](#Build-a-machine-learning-workflow)
## Introduction
This notebook describes using the AWS Step Functions Data Science SDK to create and manage workflows. The Step Functions SDK is an open source library that allows data scientists to easily create and execute machine learning workflows using AWS Step Functions and Amazon SageMaker. For more information, see the following.
* [AWS Step Functions](https://aws.amazon.com/step-functions/)
* [AWS Step Functions Developer Guide](https://docs.aws.amazon.com/step-functions/latest/dg/welcome.html)
* [AWS Step Functions Data Science SDK](https://aws-step-functions-data-science-sdk.readthedocs.io)
In this notebook we will use the SDK to create steps, link them together to create a workflow, and execute the workflow in AWS Step Functions. The first tutorial shows how to create an ML pipeline workflow, and the second shows how to run multiple experiments in parallel.
```
import sys
!{sys.executable} -m pip install --upgrade stepfunctions
```
## Setup
### Add a policy to your SageMaker role in IAM
**If you are running this notebook on an Amazon SageMaker notebook instance**, the IAM role assumed by your notebook instance needs permission to create and run workflows in AWS Step Functions. To provide this permission to the role, do the following.
1. Open the Amazon [SageMaker console](https://console.aws.amazon.com/sagemaker/).
2. Select **Notebook instances** and choose the name of your notebook instance
3. Under **Permissions and encryption** select the role ARN to view the role on the IAM console
4. Choose **Attach policies** and search for `AWSStepFunctionsFullAccess`.
5. Select the check box next to `AWSStepFunctionsFullAccess` and choose **Attach policy**
If you are running this notebook in a local environment, the SDK will use your configured AWS CLI configuration. For more information, see [Configuring the AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-configure.html).
Next, create an execution role in IAM for Step Functions.
### Create an execution role for Step Functions
You need an execution role so that you can create and execute workflows in Step Functions.
1. Go to the [IAM console](https://console.aws.amazon.com/iam/)
2. Select **Roles** and then **Create role**.
3. Under **Choose the service that will use this role** select **Step Functions**
4. Choose **Next** until you can enter a **Role name**
5. Enter a name such as `StepFunctionsWorkflowExecutionRole` and then select **Create role**
Attach a policy to the role you created. The following steps attach a policy that provides full access to Step Functions, however as a good practice you should only provide access to the resources you need.
1. Under the **Permissions** tab, click **Add inline policy**
2. Enter the following in the **JSON** tab
```json
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"sagemaker:CreateTransformJob",
"sagemaker:DescribeTransformJob",
"sagemaker:StopTransformJob",
"sagemaker:CreateTrainingJob",
"sagemaker:DescribeTrainingJob",
"sagemaker:StopTrainingJob",
"sagemaker:CreateHyperParameterTuningJob",
"sagemaker:DescribeHyperParameterTuningJob",
"sagemaker:StopHyperParameterTuningJob",
"sagemaker:CreateModel",
"sagemaker:CreateEndpointConfig",
"sagemaker:CreateEndpoint",
"sagemaker:DeleteEndpointConfig",
"sagemaker:DeleteEndpoint",
"sagemaker:UpdateEndpoint",
"sagemaker:ListTags",
"lambda:InvokeFunction",
"sqs:SendMessage",
"sns:Publish",
"ecs:RunTask",
"ecs:StopTask",
"ecs:DescribeTasks",
"dynamodb:GetItem",
"dynamodb:PutItem",
"dynamodb:UpdateItem",
"dynamodb:DeleteItem",
"batch:SubmitJob",
"batch:DescribeJobs",
"batch:TerminateJob",
"glue:StartJobRun",
"glue:GetJobRun",
"glue:GetJobRuns",
"glue:BatchStopJobRun"
],
"Resource": "*"
},
{
"Effect": "Allow",
"Action": [
"iam:PassRole"
],
"Resource": "*",
"Condition": {
"StringEquals": {
"iam:PassedToService": "sagemaker.amazonaws.com"
}
}
},
{
"Effect": "Allow",
"Action": [
"events:PutTargets",
"events:PutRule",
"events:DescribeRule"
],
"Resource": [
"arn:aws:events:*:*:rule/StepFunctionsGetEventsForSageMakerTrainingJobsRule",
"arn:aws:events:*:*:rule/StepFunctionsGetEventsForSageMakerTransformJobsRule",
"arn:aws:events:*:*:rule/StepFunctionsGetEventsForSageMakerTuningJobsRule",
"arn:aws:events:*:*:rule/StepFunctionsGetEventsForECSTaskRule",
"arn:aws:events:*:*:rule/StepFunctionsGetEventsForBatchJobsRule"
]
}
]
}
```
3. Choose **Review policy** and give the policy a name such as `StepFunctionsWorkflowExecutionPolicy`
4. Choose **Create policy**. You will be redirected to the details page for the role.
5. Copy the **Role ARN** at the top of the **Summary**
### Configure execution roles
```
import sagemaker
# SageMaker Execution Role
# You can use sagemaker.get_execution_role() if running inside sagemaker's notebook instance
sagemaker_execution_role = sagemaker.get_execution_role() #Replace with ARN if not in an AWS SageMaker notebook
# paste the StepFunctionsWorkflowExecutionRole ARN from above
# (workflow execution will fail until this placeholder is replaced).
workflow_execution_role = "<execution-role-arn>"
```
### Import the required modules
```
import boto3
import sagemaker
import time
import random
import uuid
import logging
import stepfunctions
import io
import random
from sagemaker.amazon.amazon_estimator import get_image_uri
from stepfunctions import steps
from stepfunctions.steps import TrainingStep, ModelStep, TransformStep
from stepfunctions.inputs import ExecutionInput
from stepfunctions.workflow import Workflow
from stepfunctions.template import TrainingPipeline
from stepfunctions.template.utils import replace_parameters_with_jsonpath
session = sagemaker.Session()
stepfunctions.set_stream_logger(level=logging.INFO)
region = boto3.Session().region_name
bucket = session.default_bucket()
prefix = 'sagemaker/DEMO-xgboost-regression'
bucket_path = 'https://s3-{}.amazonaws.com/{}'.format(region, bucket)
```
### Prepare the dataset
The following cell defines utility methods to split a dataset into train, validation, and test datasets. It then defines methods to upload them to an Amazon S3 bucket.
```
def data_split(FILE_DATA, FILE_TRAIN, FILE_VALIDATION, FILE_TEST, PERCENT_TRAIN, PERCENT_VALIDATION, PERCENT_TEST):
    """Randomly split the lines of FILE_DATA into train/validation/test files.

    Each PERCENT_* argument is a percentage of the total line count; split
    sizes are truncated with int(), so a few lines may be dropped when the
    percentages do not divide the total evenly.
    """
    # read all lines up front; 'with' closes the file even on error
    with open(FILE_DATA, 'r') as data_file:
        data = data_file.readlines()

    num_of_data = len(data)
    # number of lines for each split (truncated, not rounded)
    data_fractions = [
        int((PERCENT_TRAIN / 100.0) * num_of_data),
        int((PERCENT_VALIDATION / 100.0) * num_of_data),
        int((PERCENT_TEST / 100.0) * num_of_data),
    ]

    # sample without replacement: pop a random remaining line for each slot
    split_data = [[], [], []]
    for split_ind, fraction in enumerate(data_fractions):
        for _ in range(fraction):
            rand_data_ind = random.randint(0, len(data) - 1)
            split_data[split_ind].append(data.pop(rand_data_ind))

    # write each split; 'with' guarantees the handles are closed (the
    # original left them open if a write raised)
    output_files = (FILE_TRAIN, FILE_VALIDATION, FILE_TEST)
    for filename, lines in zip(output_files, split_data):
        with open(filename, 'w') as out_file:
            out_file.writelines(lines)
def write_to_s3(fobj, bucket, key):
    """Upload the open binary file object to s3://<bucket>/<key>."""
    s3 = boto3.Session(region_name=region).resource('s3')
    target = s3.Bucket(bucket).Object(key)
    return target.upload_fileobj(fobj)
def upload_to_s3(bucket, channel, filename):
    """Upload a local file to the S3 bucket under key <prefix>/<channel>.

    NOTE: the object key is exactly '<prefix>/<channel>' (it does not include
    the filename), which matches the channel URIs built below.
    """
    key = prefix + '/' + channel
    # report the key that is actually written (the original printed a URL
    # with the filename appended, which did not match the real object key)
    url = 's3://{}/{}'.format(bucket, key)
    print('Writing to {}'.format(url))
    # 'with' ensures the file handle is closed after the upload
    # (the original never closed it)
    with open(filename, 'rb') as fobj:
        write_to_s3(fobj, bucket, key)
```
This notebook uses the XGBoost algorithm to train and host a regression model. We use the [Abalone data](https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/regression.html) originally from the [UCI data repository](https://archive.ics.uci.edu/ml/datasets/abalone). More details about the original dataset can be found [here](https://archive.ics.uci.edu/ml/machine-learning-databases/abalone/abalone.names). In the libsvm converted [version](https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/regression.html), the nominal feature (Male/Female/Infant) has been converted into a real valued feature. Age of abalone is to be predicted from eight physical measurements.
```
# Import urlretrieve in a version-agnostic way: the module moved between
# Python 2 (urllib) and Python 3 (urllib.request).
try: #python3
    from urllib.request import urlretrieve
except: #python2
    from urllib import urlretrieve
# Load the dataset: download the libsvm-format Abalone data to a local file.
FILE_DATA = 'abalone'
urlretrieve("https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/regression/abalone", FILE_DATA)
#split the downloaded data into train/test/validation files (70/15/15)
FILE_TRAIN = 'abalone.train'
FILE_VALIDATION = 'abalone.validation'
FILE_TEST = 'abalone.test'
PERCENT_TRAIN = 70
PERCENT_VALIDATION = 15
PERCENT_TEST = 15
data_split(FILE_DATA, FILE_TRAIN, FILE_VALIDATION, FILE_TEST, PERCENT_TRAIN, PERCENT_VALIDATION, PERCENT_TEST)
#upload the files to the S3 bucket, one object per channel
upload_to_s3(bucket, 'train', FILE_TRAIN)
upload_to_s3(bucket, 'validation', FILE_VALIDATION)
upload_to_s3(bucket, 'test', FILE_TEST)
# S3 URIs handed to the SageMaker training/transform channels below
train_s3_file = bucket_path + "/" + prefix + '/train'
validation_s3_file = bucket_path + "/" + prefix + '/validation'
test_s3_file = bucket_path + "/" + prefix + '/test'
```
### Configure the AWS Sagemaker estimator
```
# Configure the XGBoost estimator: one ml.m4.4xlarge training instance, 5 GB
# training volume, model artifacts written under <bucket_path>/<prefix>/single-xgboost.
xgb = sagemaker.estimator.Estimator(
    get_image_uri(region, 'xgboost'),
    sagemaker_execution_role,
    train_instance_count = 1,
    train_instance_type = 'ml.m4.4xlarge',
    train_volume_size = 5,
    output_path = bucket_path + "/" + prefix + "/single-xgboost",
    sagemaker_session = session
)
# XGBoost hyperparameters for the regression objective.
xgb.set_hyperparameters(
    objective = 'reg:linear',
    num_round = 50,
    max_depth = 5,
    eta = 0.2,
    gamma = 4,  # BUG FIX: was misspelled 'gamme', so the intended min-split-loss was never applied
    min_child_weight = 6,
    subsample = 0.7,
    silent = 0
)
```
## Build a machine learning workflow

You can use a workflow to create a machine learning pipeline. The AWS Data Science Workflows SDK provides several AWS SageMaker workflow steps that you can use to construct an ML pipeline. In this tutorial you will use the Train and Transform steps.
* [**TrainingStep**](https://aws-step-functions-data-science-sdk.readthedocs.io/en/latest/sagemaker.html#stepfunctions.steps.sagemaker.TrainingStep) - Starts a Sagemaker training job and outputs the model artifacts to S3.
* [**ModelStep**](https://aws-step-functions-data-science-sdk.readthedocs.io/en/latest/sagemaker.html#stepfunctions.steps.sagemaker.ModelStep) - Creates a model on SageMaker using the model artifacts from S3.
* [**TransformStep**](https://aws-step-functions-data-science-sdk.readthedocs.io/en/latest/sagemaker.html#stepfunctions.steps.sagemaker.TransformStep) - Starts a SageMaker transform job
* [**EndpointConfigStep**](https://aws-step-functions-data-science-sdk.readthedocs.io/en/latest/sagemaker.html#stepfunctions.steps.sagemaker.EndpointConfigStep) - Defines an endpoint configuration on SageMaker.
* [**EndpointStep**](https://aws-step-functions-data-science-sdk.readthedocs.io/en/latest/sagemaker.html#stepfunctions.steps.sagemaker.EndpointStep) - Deploys the trained model to the configured endpoint.
### Define the input schema for a workflow execution
The [**ExecutionInput**](https://aws-step-functions-data-science-sdk.readthedocs.io/en/latest/placeholders.html#stepfunctions.inputs.ExecutionInput) API defines the options to dynamically pass information to a workflow at runtime.
The following cell defines the fields that must be passed to your workflow when starting an execution.
While the workflow is usually static after it is defined, you may want to pass values dynamically that are used by steps in your workflow. To help with this, the SDK provides a way to create placeholders when you define your workflow. These placeholders can be dynamically assigned values when you execute your workflow.
ExecutionInput values are accessible to each step of your workflow. You have the ability to define a schema for this placeholder collection, as shown in the cell below. When you execute your workflow the SDK will verify if the dynamic input conforms to the schema you defined.
```
# SageMaker expects unique names for each job, model and endpoint.
# If these names are not unique the execution will fail. Pass these
# dynamically for each execution using placeholders.
# The schema below is validated against the 'inputs' dict supplied to
# workflow.execute() at run time.
execution_input = ExecutionInput(schema={
    'JobName': str,
    'ModelName': str,
    'EndpointName': str
})
```
### Create the training step
In the following cell we create the training step and pass the estimator we defined above. See [TrainingStep](https://aws-step-functions-data-science-sdk.readthedocs.io/en/latest/sagemaker.html#stepfunctions.steps.sagemaker.TrainingStep) in the AWS Step Functions Data Science SDK documentation.
```
# Training step: launches a SageMaker training job for the XGBoost estimator
# using the libsvm train/validation channels uploaded to S3 above. The job
# name comes from the 'JobName' execution-input placeholder.
training_step = steps.TrainingStep(
    'Train Step',
    estimator=xgb,
    data={
        'train': sagemaker.s3_input(train_s3_file, content_type='libsvm'),
        'validation': sagemaker.s3_input(validation_s3_file, content_type='libsvm')
    },
    job_name=execution_input['JobName']
)
```
### Create the model step
In the following cell we define a model step that will create a model in SageMaker using the artifacts created during the TrainingStep. See [ModelStep](https://aws-step-functions-data-science-sdk.readthedocs.io/en/latest/sagemaker.html#stepfunctions.steps.sagemaker.ModelStep) in the AWS Step Functions Data Science SDK documentation.
The model creation step typically follows the training step. The Step Functions SDK provides the [get_expected_model](https://aws-step-functions-data-science-sdk.readthedocs.io/en/latest/sagemaker.html#stepfunctions.steps.sagemaker.TrainingStep.get_expected_model) method in the TrainingStep class to provide a reference for the trained model artifacts. Please note that this method is only useful when the ModelStep directly follows the TrainingStep.
```
# Model step: registers a SageMaker model from the artifacts produced by the
# training step. get_expected_model() is only valid because this step
# directly follows the TrainingStep in the chain.
model_step = steps.ModelStep(
    'Save model',
    model=training_step.get_expected_model(),
    model_name=execution_input['ModelName']
)
```
### Create the transform step
In the following cell we create the transform step. See [TransformStep](https://aws-step-functions-data-science-sdk.readthedocs.io/en/latest/sagemaker.html#stepfunctions.steps.sagemaker.TransformStep) in the AWS Step Functions Data Science SDK documentation.
```
# Transform step: runs a batch transform job over the held-out test split,
# reusing the job/model names passed in as execution-input placeholders.
transform_step = steps.TransformStep(
    'Transform Input Dataset',
    transformer=xgb.transformer(
        instance_count=1,
        instance_type='ml.m5.large'
    ),
    job_name=execution_input['JobName'],
    model_name=execution_input['ModelName'],
    data=test_s3_file,
    content_type='text/libsvm'
)
```
### Create an endpoint configuration step
In the following cell we create an endpoint configuration step. See [EndpointConfigStep](https://aws-step-functions-data-science-sdk.readthedocs.io/en/latest/sagemaker.html#stepfunctions.steps.sagemaker.EndpointConfigStep) in the AWS Step Functions Data Science SDK documentation.
```
# Endpoint-config step: defines the serving configuration (one ml.m5.large
# instance). Note the endpoint config is named after the model placeholder.
endpoint_config_step = steps.EndpointConfigStep(
    "Create Endpoint Config",
    endpoint_config_name=execution_input['ModelName'],
    model_name=execution_input['ModelName'],
    initial_instance_count=1,
    instance_type='ml.m5.large'
)
```
### Create an endpoint
In the following cell we create a step to deploy the trained model to an endpoint in AWS SageMaker. See [EndpointStep](https://aws-step-functions-data-science-sdk.readthedocs.io/en/latest/sagemaker.html#stepfunctions.steps.sagemaker.EndpointStep) in the AWS Step Functions Data Science SDK documentation.
```
# Endpoint step: deploys the trained model to a real-time endpoint using the
# endpoint configuration created in the previous step.
endpoint_step = steps.EndpointStep(
    "Create Endpoint",
    endpoint_name=execution_input['EndpointName'],
    endpoint_config_name=execution_input['ModelName']
)
```
### Chain together steps for your workflow
Create your workflow definition by chaining the steps together. See [Chain](https://aws-step-functions-data-science-sdk.readthedocs.io/en/latest/sagemaker.html#stepfunctions.steps.states.Chain) in the AWS Step Functions Data Science SDK documentation.
```
# Chain the five steps into a linear state machine:
# train -> save model -> batch transform -> endpoint config -> deploy.
workflow_definition = steps.Chain([
    training_step,
    model_step,
    transform_step,
    endpoint_config_step,
    endpoint_step
])
```
Create your workflow using the workflow definition above, and render the graph with [render_graph](https://aws-step-functions-data-science-sdk.readthedocs.io/en/latest/workflow.html#stepfunctions.workflow.Workflow.render_graph).
```
# Build the Workflow object. 'role' is the Step Functions execution role
# created earlier (not the SageMaker role); render_graph() draws the state
# machine inline in the notebook.
workflow = Workflow(
    name='MyTrainTransformDeploy_v1',
    definition=workflow_definition,
    role=workflow_execution_role,
    execution_input=execution_input
)
workflow.render_graph()
```
Create the workflow in AWS Step Functions with [create](https://aws-step-functions-data-science-sdk.readthedocs.io/en/latest/workflow.html#stepfunctions.workflow.Workflow.create).
```
workflow.create()
```
Run the workflow with [execute](https://aws-step-functions-data-science-sdk.readthedocs.io/en/latest/workflow.html#stepfunctions.workflow.Workflow.execute).
```
# Start one execution, generating fresh uuid-suffixed names so repeated runs
# never collide on SageMaker's unique-name requirements.
execution = workflow.execute(
    inputs={
        'JobName': 'regression-{}'.format(uuid.uuid1().hex), # Each SageMaker job requires a unique name
        'ModelName': 'regression-{}'.format(uuid.uuid1().hex), # Each model requires a unique name
        'EndpointName': 'regression-{}'.format(uuid.uuid1().hex) # Each endpoint requires a unique name
    }
)
```
Render workflow progress with the [render_progress](https://aws-step-functions-data-science-sdk.readthedocs.io/en/latest/workflow.html#stepfunctions.workflow.Execution.render_progress).
This generates a snapshot of the current state of your workflow as it executes. This is a static image. Run the cell again to check progress.
```
execution.render_progress()
```
Use [list_events](https://aws-step-functions-data-science-sdk.readthedocs.io/en/latest/workflow.html#stepfunctions.workflow.Execution.list_events) to list all events in the workflow execution.
```
execution.list_events(html=True)
```
Use [list_executions](https://aws-step-functions-data-science-sdk.readthedocs.io/en/latest/workflow.html#stepfunctions.workflow.Workflow.list_executions) to list all executions for a specific workflow.
```
workflow.list_executions(html=True)
```
Use [list_workflows](https://aws-step-functions-data-science-sdk.readthedocs.io/en/latest/workflow.html#stepfunctions.workflow.Workflow.list_workflows) to list all workflows in your AWS account.
```
Workflow.list_workflows(html=True)
```
---
| github_jupyter |
# The Nurse Assignment Problem
This tutorial includes everything you need to set up IBM Decision Optimization CPLEX Modeling for Python (DOcplex), build a Mathematical Programming model, and get its solution by solving the model on the cloud with IBM ILOG CPLEX Optimizer.
When you finish this tutorial, you'll have a foundational knowledge of _Prescriptive Analytics_.
>This notebook is part of **[Prescriptive Analytics for Python](http://ibmdecisionoptimization.github.io/docplex-doc/)**
>
>It requires either an [installation of CPLEX Optimizers](http://ibmdecisionoptimization.github.io/docplex-doc/getting_started.html) or it can be run on [IBM Watson Studio Cloud](https://www.ibm.com/cloud/watson-studio/) (Sign up for a [free IBM Cloud account](https://dataplatform.cloud.ibm.com/registration/stepone?context=wdp&apps=all)
and you can start using Watson Studio Cloud right away).
Table of contents:
- [Describe the business problem](#Describe-the-business-problem)
* [How decision optimization (prescriptive analytics) can help](#How--decision-optimization-can-help)
* [Use decision optimization](#Use-decision-optimization)
* [Step 1: Import the library](#Step-1:-Import-the-library)
- [Step 2: Model the data](#Step-2:-Model-the-data)
* [Step 3: Prepare the data](#Step-3:-Prepare-the-data)
- [Step 4: Set up the prescriptive model](#Step-4:-Set-up-the-prescriptive-model)
* [Define the decision variables](#Define-the-decision-variables)
* [Express the business constraints](#Express-the-business-constraints)
* [Express the objective](#Express-the-objective)
* [Solve with Decision Optimization](#Solve-with-Decision-Optimization)
* [Step 5: Investigate the solution and run an example analysis](#Step-5:-Investigate-the-solution-and-then-run-an-example-analysis)
* [Summary](#Summary)
****
## Describe the business problem
This notebook describes how to use CPLEX Modeling for Python together with *pandas* to
manage the assignment of nurses to shifts in a hospital.
Nurses must be assigned to hospital shifts in accordance with various skill and staffing constraints.
The goal of the model is to find an efficient balance between the different objectives:
* minimize the overall cost of the plan and
* assign shifts as fairly as possible.
## How decision optimization can help
* Prescriptive analytics (decision optimization) technology recommends actions that are based on desired outcomes. It takes into account specific scenarios, resources, and knowledge of past and current events. With this insight, your organization can make better decisions and have greater control of business outcomes.
* Prescriptive analytics is the next step on the path to insight-based actions. It creates value through synergy with predictive analytics, which analyzes data to predict future outcomes.
* Prescriptive analytics takes that insight to the next level by suggesting the optimal way to handle that future situation. Organizations that can act fast in dynamic conditions and make superior decisions in uncertain environments gain a strong competitive advantage.
<br/>
<u>With prescriptive analytics, you can:</u>
* Automate the complex decisions and trade-offs to better manage your limited resources.
* Take advantage of a future opportunity or mitigate a future risk.
* Proactively update recommendations based on changing events.
* Meet operational goals, increase customer loyalty, prevent threats and fraud, and optimize business processes.
## Checking minimum requirements
This notebook uses some features of pandas that are available in version 0.17.1 or above.
```
import pip
REQUIRED_MINIMUM_PANDAS_VERSION = '0.17.1'
try:
import pandas as pd
assert pd.__version__ >= REQUIRED_MINIMUM_PANDAS_VERSION
except:
raise Exception("Version %s or above of Pandas is required to run this notebook" % REQUIRED_MINIMUM_PANDAS_VERSION)
```
## Use decision optimization
### Step 1: Import the library
Run the following code to import the Decision Optimization CPLEX Modeling library. The *DOcplex* library contains the two modeling packages, Mathematical Programming (docplex.mp) and Constraint Programming (docplex.cp).
```
import sys
# Fail fast with an actionable message if DOcplex is missing, instead of a
# bare ImportError deep inside the modeling cells below.
try:
    import docplex.mp
except:
    raise Exception('Please install docplex. See https://pypi.org/project/docplex/')
```
### Step 2: Model the data
The input data consists of several tables:
* The Departments table lists all departments in the scope of the assignment.
* The Skills table list all skills.
* The Shifts table lists all shifts to be staffed. A shift contains a department, a day in the week, plus the start and end times.
* The Nurses table lists all nurses, identified by their names.
* The NurseSkills table gives the skills of each nurse.
* The SkillRequirements table lists the minimum number of persons required for a given department and skill.
* The NurseVacations table lists days off for each nurse.
* The NurseAssociations table lists pairs of nurses who wish to work together.
* The NurseIncompatibilities table lists pairs of nurses who do not want to work together.
#### Loading data from Excel with pandas
We load the data from an Excel file using *pandas*.
Each sheet is read into a separate *pandas* DataFrame.
```
CSS = """
body {
margin: 0;
font-family: Helvetica;
}
table.dataframe {
border-collapse: collapse;
border: none;
}
table.dataframe tr {
border: none;
}
table.dataframe td, table.dataframe th {
margin: 0;
border: 1px solid white;
padding-left: 0.25em;
padding-right: 0.25em;
}
table.dataframe th:not(:empty) {
background-color: #fec;
text-align: left;
font-weight: normal;
}
table.dataframe tr:nth-child(2) th:empty {
border-left: none;
border-right: 1px dashed #888;
}
table.dataframe td {
border: 2px solid #ccf;
background-color: #f4f4ff;
}
table.dataframe thead th:first-child {
display: none;
}
table.dataframe tbody th {
display: none;
}
"""
from IPython.core.display import HTML
HTML('<style>{}</style>'.format(CSS))
from IPython.display import display
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
try:
from urllib2 import urlopen
except ImportError:
from urllib.request import urlopen
# This notebook requires pandas to work
import pandas as pd
from pandas import DataFrame
# Make sure that xlrd package, which is a pandas optional dependency, is installed
# This package is required for Excel I/O
try:
import xlrd
except:
if hasattr(sys, 'real_prefix'):
#we are in a virtual env.
!pip install xlrd
else:
!pip install --user xlrd
# Use pandas to read the file, one tab for each table.
data_url = "https://github.com/IBMDecisionOptimization/docplex-examples/blob/master/examples/mp/jupyter/nurses_data.xls?raw=true"
nurse_xls_file = pd.ExcelFile(urlopen(data_url))
df_skills = nurse_xls_file.parse('Skills')
df_depts = nurse_xls_file.parse('Departments')
df_shifts = nurse_xls_file.parse('Shifts')
# Rename df_shifts index
df_shifts.index.name = 'shiftId'
# Index is column 0: name
df_nurses = nurse_xls_file.parse('Nurses', header=0, index_col=0)
df_nurse_skilles = nurse_xls_file.parse('NurseSkills')
df_vacations = nurse_xls_file.parse('NurseVacations')
df_associations = nurse_xls_file.parse('NurseAssociations')
df_incompatibilities = nurse_xls_file.parse('NurseIncompatibilities')
# Display the nurses dataframe
print("#nurses = {}".format(len(df_nurses)))
print("#shifts = {}".format(len(df_shifts)))
print("#vacations = {}".format(len(df_vacations)))
```
In addition, we introduce some extra global data:
* The maximum work time for each nurse.
* The maximum and minimum number of shifts worked by a nurse in a week.
```
# maximum work time (in hours)
max_work_time = 40
# maximum number of shifts worked in a week.
max_nb_shifts = 5
```
Shifts are stored in a separate DataFrame.
```
df_shifts
```
### Step 3: Prepare the data
We need to precompute additional data for shifts.
For each shift, we need the start time and end time expressed in hours, counting from the beginning of the week: Monday 8am is converted to 8, Tuesday 8am is converted to 24+8 = 32, and so on.
#### Sub-step #1
We start by adding an extra column `dow` (day of week) which converts the string "day" into an integer in 0..6 (Monday is 0, Sunday is 6).
```
# Map normalized day names ("monday" .. "sunday") to weekday indices 0..6.
days = ["monday", "tuesday", "wednesday", "thursday", "friday", "saturday", "sunday"]
day_of_weeks = {name: index for index, name in enumerate(days)}

def day_to_day_of_week(day):
    """Convert a day name such as " Monday" to its 0-based weekday index."""
    normalized = day.strip().lower()
    return day_of_weeks[normalized]
# for each day name, we normalize it by stripping whitespace and converting it to lowercase
# " Monday" -> "monday"
df_shifts["dow"] = df_shifts.day.apply(day_to_day_of_week)
df_shifts
```
#### Sub-step #2 : Compute the absolute start time of each shift.
Computing the start time in the week is easy: just add `24*dow` to column `start_time`. The result is stored in a new column `wstart`.
```
df_shifts["wstart"] = df_shifts.start_time + 24 * df_shifts.dow
```
#### Sub-Step #3 : Compute the absolute end time of each shift.
Computing the absolute end time is a little more complicated as certain shifts span across midnight. For example, Shift #3 starts on Monday at 18:00 and ends Tuesday at 2:00 AM. The absolute end time of Shift #3 is 26, not 2.
The general rule for computing absolute end time is:
`abs_end_time = end_time + 24 * dow + (start_time>= end_time ? 24 : 0)`
Again, we use *pandas* to add a new calculated column `wend`. This is done by using the *pandas* `apply` method with an anonymous `lambda` function over rows. The `raw=True` parameter prevents the creation of a *pandas* Series for each row, which improves the performance significantly on large data sets.
```
# Absolute end time of a shift, measured in hours from Monday 00:00.
def calculate_absolute_endtime(start, end, dow):
    """Return the shift end offset by its day of week.

    A shift whose start time is not before its end time wraps past
    midnight, so its end is pushed 24 hours into the next day.
    """
    wraps_midnight = start >= end
    midnight_offset = 24 if wraps_midnight else 0
    return end + 24 * dow + midnight_offset
# store the results in a new column
df_shifts["wend"] = df_shifts.apply(lambda row: calculate_absolute_endtime(
row.start_time, row.end_time, row.dow), axis=1, raw=True)
```
#### Sub-step #4 : Compute the duration of each shift.
Computing the duration of each shift is now a straightforward difference of columns. The result is stored in column `duration`.
```
df_shifts["duration"] = df_shifts.wend - df_shifts.wstart
```
#### Sub-step #5 : Compute the minimum demand for each shift.
Minimum demand is the product of the duration (in hours) and the minimum required number of nurses. This demand, expressed in nurse-hours, is stored in another new column `min_demand`.
Finally, we display the updated shifts DataFrame with all calculated columns.
```
# also compute minimum demand in nurse-hours
df_shifts["min_demand"] = df_shifts.min_req * df_shifts.duration
# finally check the modified shifts dataframe
df_shifts
```
### Step 4: Set up the prescriptive model
```
from docplex.mp.environment import Environment
env = Environment()
env.print_information()
```
#### Create the DOcplex model
The model contains all the business constraints and defines the objective.
We now use CPLEX Modeling for Python to build a Mixed Integer Programming (MIP) model for this problem.
```
from docplex.mp.model import Model
mdl = Model(name="nurses")
```
#### Define the decision variables
For each (nurse, shift) pair, we create one binary variable that is equal to 1 when the nurse is assigned to the shift.
We use the `binary_var_matrix` method of class `Model`, as each binary variable is indexed by _two_ objects: one nurse and one shift.
```
# first global collections to iterate upon
all_nurses = df_nurses.index.values
all_shifts = df_shifts.index.values
# the assignment variables.
assigned = mdl.binary_var_matrix(keys1=all_nurses, keys2=all_shifts, name="assign_%s_%s")
```
#### Express the business constraints
##### Overlapping shifts
Some shifts overlap in time, and thus cannot be assigned to the same nurse.
To check whether two shifts overlap in time, we start by ordering all shifts with respect to their *wstart* and *duration* properties. Then, for each shift, we iterate over the subsequent shifts in this ordered list to easily compute the subset of overlapping shifts.
We use *pandas* operations to implement this algorithm. But first, we organize all decision variables in a DataFrame.
For convenience, we also organize the decision variables in a pivot table with *nurses* as row index and *shifts* as columns. The *pandas* *unstack* operation does this.
```
# Organize decision variables in a DataFrame
df_assigned = DataFrame({'assigned': assigned})
df_assigned.index.names=['all_nurses', 'all_shifts']
# Re-organize the Data Frame as a pivot table with nurses as row index and shifts as columns:
df_assigned_pivot = df_assigned.unstack(level='all_shifts')
# Create a pivot using nurses and shifts index as dimensions
#df_assigned_pivot = df_assigned.reset_index().pivot(index='all_nurses', columns='all_shifts', values='assigned')
# Display first rows of the pivot table
df_assigned_pivot.head()
```
We create a DataFrame representing a list of shifts sorted by *"wstart"* and *"duration"*.
This sorted list will be used to easily detect overlapping shifts.
Note that indices are reset after sorting so that the DataFrame can be indexed with respect to
the index in the sorted list and not the original unsorted list. This is the purpose of the *reset_index()*
operation which also adds a new column named *"shiftId"* with the original index.
```
# Create a Data Frame representing a list of shifts sorted by wstart and duration.
# One keeps only the three relevant columns: 'shiftId', 'wstart' and 'wend' in the resulting Data Frame
df_sorted_shifts = df_shifts.sort_values(['wstart','duration']).reset_index()[['shiftId', 'wstart', 'wend']]
# Display the first rows of the newly created Data Frame
df_sorted_shifts.head()
```
Next, we state that for any pair of shifts that overlap in time, a nurse can be assigned to only one of the two.
```
# For every pair of time-overlapping shifts, forbid assigning the same nurse
# to both. Shifts are pre-sorted by wstart, so once a following shift starts
# at or after the current shift's end, no later shift can overlap either.
number_of_incompatible_shift_constraints = 0
for shift in df_sorted_shifts.itertuples():
    # Iterate over following shifts only (earlier pairs were already handled).
    # 'shift[0]' contains the index of the current shift in the df_sorted_shifts Data Frame
    for shift_2 in df_sorted_shifts.iloc[shift[0] + 1:].itertuples():
        if (shift_2.wstart < shift.wend):
            # Overlap: for every nurse, take the pair of assignment variables
            # for these two shifts and force at most one of them to be 1.
            for nurse_assignments in df_assigned_pivot.iloc[:, [shift.shiftId, shift_2.shiftId]].itertuples():
                # x1 + x2 <= 1 is a logical OR over the two binary variables
                mdl.add_constraint(nurse_assignments[1] + nurse_assignments[2] <= 1)
                number_of_incompatible_shift_constraints += 1
        else:
            # Sorted order guarantees no later shift overlaps; stop scanning.
            break
print("#incompatible shift constraints: {}".format(number_of_incompatible_shift_constraints))
```
##### Vacations
When the nurse is on vacation, he cannot be assigned to any shift starting that day.
We use the *pandas* *merge* operation to create a join between the *"df_vacations"*, *"df_shifts"*, and *"df_assigned"* DataFrames. Each row of the resulting DataFrame contains the assignment decision variable corresponding to the matching (nurse, shift) pair.
```
# Add a 'day of week' column to the vacations Data Frame so it can be joined
# against shifts on their start day.
df_vacations['dow'] = df_vacations.day.apply(day_to_day_of_week)
# Join 'df_vacations', 'df_shifts' and 'df_assigned' Data Frames to create the list of 'forbidden' assignments.
# The 'reset_index()' function is invoked to move the 'shiftId' index into a column of 'df_shifts', and
# to move the index pair ('all_nurses', 'all_shifts') into columns of 'df_assigned',
# so that the merges can match on column names.
df_assigned_reindexed = df_assigned.reset_index()
df_vacation_forbidden_assignments = df_vacations.merge(df_shifts.reset_index()[['dow', 'shiftId']]).merge(
    df_assigned_reindexed, left_on=['nurse', 'shiftId'], right_on=['all_nurses', 'all_shifts'])
# Here are the first few rows of the resulting Data Frames joins
df_vacation_forbidden_assignments.head()
# Each joined row is a (vacationing nurse, shift starting that day) pair;
# forcing the binary variable to 0 forbids the assignment.
for forbidden_assignment in df_vacation_forbidden_assignments.itertuples():
    # to forbid an assignment just set the variable to zero.
    mdl.add_constraint(forbidden_assignment.assigned == 0)
print("# vacation forbids: {} assignments".format(len(df_vacation_forbidden_assignments)))
```
##### Associations
Some pairs of nurses get along particularly well, so we wish to assign them together as a team. In other words, for every such couple and for each shift, both assignment variables should always be equal.
Either both nurses work the shift, or both do not.
In the same way we modeled *vacations*, we use the *pandas* merge operation to create a DataFrame for which each row contains the pair of nurse-shift assignment decision variables matching each association.
```
# Join 'df_assignment' Data Frame twice, based on associations to get corresponding decision variables pairs for all shifts
# The 'suffixes' parameter in the second merge indicates our preference for updating the name of columns that occur both
# in the first and second argument Data Frames (in our case, these columns are 'all_nurses' and 'assigned').
df_preferred_assign = df_associations.merge(
df_assigned_reindexed, left_on='nurse1', right_on='all_nurses').merge(
df_assigned_reindexed, left_on=['nurse2', 'all_shifts'], right_on=['all_nurses', 'all_shifts'], suffixes=('_1','_2'))
# Here are the first few rows of the resulting Data Frames joins
df_preferred_assign.head()
```
The associations constraint can now easily be formulated by iterating on the rows of the *"df_preferred_assign"* DataFrame.
```
for preferred_assign in df_preferred_assign.itertuples():
mdl.add_constraint(preferred_assign.assigned_1 == preferred_assign.assigned_2)
```
##### Incompatibilities
Similarly, certain pairs of nurses do not get along well, and we want to avoid having them together on a shift.
In other terms, for each shift, both nurses of an incompatible pair cannot be assigned together to the shift. Again, we state a logical OR between the two assignments: at most one nurse from the pair can work the shift.
We first create a DataFrame whose rows contain pairs of invalid assignment decision variables, using the same *pandas* `merge` operations as in the previous step.
```
# Join assignment Data Frame twice, based on incompatibilities Data Frame to get corresponding decision variables pairs
# for all shifts
df_incompatible_assign = df_incompatibilities.merge(
df_assigned_reindexed, left_on='nurse1', right_on='all_nurses').merge(
df_assigned_reindexed, left_on=['nurse2', 'all_shifts'], right_on=['all_nurses', 'all_shifts'], suffixes=('_1','_2'))
# Here are the first few rows of the resulting Data Frames joins
df_incompatible_assign.head()
```
The incompatibilities constraint can now easily be formulated, by iterating on the rows of the *"df_incompatible_assign"* DataFrame.
```
for incompatible_assign in df_incompatible_assign.itertuples():
mdl.add_constraint(incompatible_assign.assigned_1 + incompatible_assign.assigned_2 <= 1)
```
##### Constraints on work time
Regulations force constraints on the total work time over a week;
and we compute this total work time in a new variable. We store the variable in an extra column in the nurse DataFrame.
The variable is declared as _continuous_ though it contains only integer values. This is done to avoid adding unnecessary integer variables for the _branch and bound_ algorithm.
These variables are not true decision variables; they are used to express work constraints.
From a *pandas* perspective, we apply a function over the rows of the nurse DataFrame to create this variable and store it into a new column of the DataFrame.
```
# auxiliary function to create worktime variable from a row
def make_var(row, varname_fmt):
    """Create a non-negative continuous variable whose name embeds the row's index label."""
    var_name = varname_fmt % row.name
    return mdl.continuous_var(name=var_name, lb=0)
# apply the function over nurse rows and store result in a new column
df_nurses["worktime"] = df_nurses.apply(lambda r: make_var(r, "worktime_%s"), axis=1)
# display nurse dataframe
df_nurses
```
###### Define total work time
Work time variables must be constrained to be equal to the sum of hours actually worked.
We use the *pandas* *groupby* operation to collect all assignment decision variables for each nurse in a separate series. Then, we iterate over nurses to post a constraint calculating the actual worktime for each nurse as the dot product of the series of nurse-shift assignments with the series of shift durations.
```
# Use pandas' groupby operation to enforce constraint calculating worktime for each nurse as the sum of all assigned
# shifts times the duration of each shift
for nurse, nurse_assignments in df_assigned.groupby(level='all_nurses'):
mdl.add_constraint(df_nurses.worktime[nurse] == mdl.dot(nurse_assignments.assigned, df_shifts.duration))
# print model information and check we now have 32 extra continuous variables
mdl.print_information()
```
###### Maximum work time
For each nurse, we add a constraint to enforce the maximum work time for a week.
Again we use the `apply` method, this time with an anonymous lambda function.
```
# we use pandas' apply() method to set an upper bound on all worktime variables.
def set_max_work_time(v):
    """Cap variable *v* at the global ``max_work_time`` upper bound.

    Returns a human-readable "<var> <= <bound>" string so the notebook
    Output cell displays the bound that was applied.
    """
    v.ub = max_work_time
    return "%s <= %s" % (v, v.ub)
df_nurses["worktime"].apply(convert_dtype=False, func=set_max_work_time)
```
##### Minimum requirement for shifts
Each shift requires a minimum number of nurses.
For each shift, the sum over all nurses of assignments to this shift
must be greater than the minimum requirement.
The *pandas* *groupby* operation is invoked to collect all assignment decision variables for each shift in a separate series. Then, we iterate over shifts to post the constraint enforcing the minimum number of nurse assignments for each shift.
```
# Use pandas' groupby operation to enforce minimum requirement constraint for each shift
for shift, shift_nurses in df_assigned.groupby(level='all_shifts'):
mdl.add_constraint(mdl.sum(shift_nurses.assigned) >= df_shifts.min_req[shift])
```
#### Express the objective
The objective mixes different (and contradictory) KPIs.
The first KPI is the total salary cost, computed as the sum of work times over all nurses, weighted by pay rate.
We compute this KPI as an expression from the variables we previously defined by using the panda summation over the DOcplex objects.
```
# again leverage pandas to create a series of expressions: costs of each nurse
total_salary_series = df_nurses.worktime * df_nurses.pay_rate
# compute global salary cost using pandas sum()
# Note that the result is a DOcplex expression: DOcplex is fully compatible with pandas
total_salary_cost = total_salary_series.sum()
mdl.add_kpi(total_salary_cost, "Total salary cost")
```
##### Minimizing salary cost
In a preliminary version of the model, we minimize the total salary cost. This is accomplished
using the `Model.minimize()` method.
```
mdl.minimize(total_salary_cost)
mdl.print_information()
```
#### Solve with Decision Optimization
Now we have everything we need to solve the model, using `Model.solve()`. The following cell solves using your local CPLEX (if any, and provided you have added it to your `PYTHONPATH` variable).
```
# Set Cplex mipgap to 1e-5 to enforce precision to be of the order of a unit (objective value magnitude is ~1e+5).
mdl.parameters.mip.tolerances.mipgap = 1e-5
s = mdl.solve(log_output=True)
assert s, "solve failed"
mdl.report()
```
### Step 6: Investigate the solution and then run an example analysis
We take advantage of *pandas* to analyze the results. First we store the solution values of the assignment variables into a new *pandas* Series.
Calling `solution_value` on a DOcplex variable returns its value in the solution (provided the model has been successfully solved).
```
# Create a pandas Series containing actual shift assignment decision variables value
s_assigned = df_assigned.assigned.apply(lambda v: v.solution_value)
# Create a pivot table by (nurses, shifts), using pandas' "unstack" method to transform the 'all_shifts' row index
# into columns
df_res = s_assigned.unstack(level='all_shifts')
# Display the first few rows of the resulting pivot table
df_res.head()
```
#### Analyzing how worktime is distributed
Let's analyze how worktime is distributed among nurses.
First, we compute the global average work time as the total minimum requirement in hours, divided by number of nurses.
```
s_demand = df_shifts.min_req * df_shifts.duration
total_demand = s_demand.sum()
avg_worktime = total_demand / float(len(all_nurses))
print("* theoretical average work time is {0:g} h".format(avg_worktime))
```
Let's analyze the series of deviations to the average, stored in a *pandas* Series.
```
# a pandas series of worktimes solution values
s_worktime = df_nurses.worktime.apply(lambda v: v.solution_value)
# returns a new series computed as deviation from average
s_to_mean = s_worktime - avg_worktime
# take the absolute value
s_abs_to_mean = s_to_mean.apply(abs)
total_to_mean = s_abs_to_mean.sum()
print("* the sum of absolute deviations from mean is {}".format(total_to_mean))
```
To see how work time is distributed among nurses, print a histogram of work time values.
Note that, as all time data are integers, work times in the solution can take only integer values.
```
import matplotlib.pyplot as plt
%matplotlib inline
# we can also plot as a histogram the distribution of worktimes
s_worktime.plot.hist(color='LightBlue')
plt.xlabel("worktime")
```
#### How shifts are distributed
Let's now analyze the solution from the _number of shifts_ perspective.
How many shifts does each nurse work? Are these shifts fairly distributed amongst nurses?
We compute a new column in our result DataFrame for the number of shifts worked,
by summing rows (the *"axis=1"* argument in the *sum()* call indicates to *pandas* that each sum is performed by row instead of column):
```
# a pandas series of #shifts worked
df_worked = df_res[all_shifts].sum(axis=1)
df_res["worked"] = df_worked
df_worked.plot.hist(color="gold", xlim=(0,10))
plt.ylabel("#shifts worked")
```
We see that one nurse works significantly fewer shifts than others do. What is the average number of shifts worked by a nurse? This is equal to the total demand divided by the number of nurses.
Of course, this yields a fractional number of shifts that is not practical, but nonetheless will help us quantify
the _fairness_ in shift distribution.
```
avg_worked = df_shifts["min_req"].sum() / float(len(all_nurses))
print("-- expected avg #shifts worked is {}".format(avg_worked))
worked_to_avg = df_res["worked"] - avg_worked
total_to_mean = worked_to_avg.apply(abs).sum()
print("-- total absolute deviation to mean #shifts is {}".format(total_to_mean))
```
### Introducing a fairness goal
As the above diagram suggests, the distribution of shifts could be improved.
We implement this by adding one extra objective, _fairness_, which balances
the shifts assigned over nurses.
Note that we can edit the model, that is add (or remove) constraints, even after it has been solved.
### Step #1 : Introduce three new variables per nurse to model the number of shifts worked and the positive and negative deviations to the average.
```
# add two extra variables per nurse: deviations above and below average
df_nurses["worked"] = df_nurses.apply(lambda r: make_var(r, "worked%s"), axis=1)
df_nurses["overworked"] = df_nurses.apply(lambda r: make_var(r, "overw_%s"), axis=1)
df_nurses["underworked"] = df_nurses.apply(lambda r: make_var(r, "underw_%s"), axis=1)
```
### Step #2 : Post the constraint that links these variables together.
```
# Use the pandas groupby operation to enforce the constraint calculating number of worked shifts for each nurse
for nurse, nurse_assignments in df_assigned.groupby(level='all_nurses'):
# nb of worked shifts is sum of assigned shifts
mdl.add_constraint(df_nurses.worked[nurse] == mdl.sum(nurse_assignments.assigned))
for nurse in df_nurses.itertuples():
# nb worked is average + over - under
mdl.add_constraint(nurse.worked == avg_worked + nurse.overworked - nurse.underworked)
```
### Step #3 : Define KPIs to measure the result after solve.
```
# finally, define kpis for over and under average quantities
total_overw = mdl.sum(df_nurses["overworked"])
mdl.add_kpi(total_overw, "Total over-worked")
total_underw = mdl.sum(df_nurses["underworked"])
mdl.add_kpi(total_underw, "Total under-worked")
```
Finally, let's modify the objective by adding the sum of `over_worked and under_worked` to the previous objective.
**Note:** The definitions of `over_worked` and `under_worked` as described above are not sufficient to give them an unambiguous value. However, as all these variables are minimized, CPLEX ensures that these variables take the minimum possible values in the solution.
```
mdl.minimize(total_salary_cost + total_overw + total_underw) # incorporate over_worked and under_worked in objective
```
Our modified model is ready to solve.
The `log_output=True` parameter tells CPLEX to print the log on the standard output.
```
sol2 = mdl.solve(log_output=True) # solve again and get a new solution
assert sol2, "Solve failed"
mdl.report()
```
### Step #4 : Alternative solve: multi-objective
Instead of aggregating all objectives into a single goal, one may prioritize goals.
For instance, one may first minimize cost, and then optimize fairness.
This is a 2 steps optimization which may be configured by exploiting the multi-objective API of docplex as follows:
First, redefine the optimization problem as a multi-objective minimization.
Objectives are grouped into sub-problems with respect to the list of priorities that is provided. The number of items in the priorities list must be the same as the number of objectives.
Sub-problems are solved one after the other in the order of decreasing priorities.
Objectives with same priorities are blended into an aggregated goal, where each objective is weighted according to the values provided in the _weights_ list. If no list of weights is provided, all objectives have unitary weight.
```
from docplex.mp.constants import ObjectiveSense
mdl.set_multi_objective(ObjectiveSense.Minimize, [total_salary_cost, total_overw, total_underw], priorities=[1, 0, 0])
```
For multi-objective problems, solve is performed in the same way:
```
sol3 = mdl.solve(log_output=True) # solve the multi-objective problem
assert sol3, "Solve failed"
mdl.report()
```
In this example, one gets the same solution when blending all the objectives together (Step #3) or when decomposing the problems into two sub-problems, setting higher priority for cost minimization.
However, if one wants to optimize fairness first, and then cost, multi-objective solve returns a different solution:
```
mdl.set_multi_objective(ObjectiveSense.Minimize, [total_overw, total_underw, total_salary_cost], priorities=[1, 1, 0])
sol4 = mdl.solve(log_output=True) # solve the multi-objective problem
assert sol4, "Solve failed"
mdl.report()
```
## Summary
You learned how to set up and use IBM Decision Optimization CPLEX Modeling for Python to formulate a Mathematical Programming model and solve it with IBM Decision Optimization on Cloud.
You also learned how to set up your problems if different objectives are considered for evaluating solutions.
## References
* [CPLEX Modeling for Python documentation](http://ibmdecisionoptimization.github.io/docplex-doc/)
* [Decision Optimization on Cloud](https://developer.ibm.com/docloud/)
* Need help with DOcplex or to report a bug? Please go [here](https://stackoverflow.com/questions/tagged/docplex).
* Contact us at dofeedback@wwpdl.vnet.ibm.com.
Copyright © 2017-2019 IBM. IPLA licensed Sample Materials.
| github_jupyter |
# 100 pandas puzzles
Inspired by [100 Numpy exercises](https://github.com/rougier/numpy-100), here are 100* short puzzles for testing your knowledge of [pandas'](http://pandas.pydata.org/) power.
Since pandas is a large library with many different specialist features and functions, these exercises focus mainly on the fundamentals of manipulating data (indexing, grouping, aggregating, cleaning), making use of the core DataFrame and Series objects.
Many of the exercises here are straightforward in that the solutions require no more than a few lines of code (in pandas or NumPy... don't go using pure Python or Cython!). Choosing the right methods and following best practices is the underlying goal.
The exercises are loosely divided in sections. Each section has a difficulty rating; these ratings are subjective, of course, but should be seen as a rough guide as to how inventive the required solution is.
If you're just starting out with pandas and you are looking for some other resources, the official documentation is very extensive. In particular, some good places to get a broader overview of pandas are...
- [10 minutes to pandas](http://pandas.pydata.org/pandas-docs/stable/10min.html)
- [pandas basics](http://pandas.pydata.org/pandas-docs/stable/basics.html)
- [tutorials](http://pandas.pydata.org/pandas-docs/stable/tutorials.html)
- [cookbook and idioms](http://pandas.pydata.org/pandas-docs/stable/cookbook.html#cookbook)
Enjoy the puzzles!
\* *the list of exercises is not yet complete! Pull requests or suggestions for additional exercises, corrections and improvements are welcomed.*
## Importing pandas
### Getting started and checking your pandas setup
Difficulty: *easy*
**1.** Import pandas under the alias `pd`.
```
import pandas as pd
```
**2.** Print the version of pandas that has been imported.
```
pd.__version__
```
**3.** Print out all the version information of the libraries that are required by the pandas library.
```
pd.show_versions()
```
## DataFrame basics
### A few of the fundamental routines for selecting, sorting, adding and aggregating data in DataFrames
Difficulty: *easy*
Note: remember to import numpy using:
```python
import numpy as np
```
Consider the following Python dictionary `data` and Python list `labels`:
``` python
data = {'animal': ['cat', 'cat', 'snake', 'dog', 'dog', 'cat', 'snake', 'cat', 'dog', 'dog'],
'age': [2.5, 3, 0.5, np.nan, 5, 2, 4.5, np.nan, 7, 3],
'visits': [1, 3, 2, 3, 2, 3, 1, 1, 2, 1],
'priority': ['yes', 'yes', 'no', 'yes', 'no', 'no', 'no', 'yes', 'no', 'no']}
labels = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j']
```
(This is just some meaningless data I made up with the theme of animals and trips to a vet.)
**4.** Create a DataFrame `df` from this dictionary `data` which has the index `labels`.
```
import numpy as np
data = {'animal': ['cat', 'cat', 'snake', 'dog', 'dog', 'cat', 'snake', 'cat', 'dog', 'dog'],
'age': [2.5, 3, 0.5, np.nan, 5, 2, 4.5, np.nan, 7, 3],
'visits': [1, 3, 2, 3, 2, 3, 1, 1, 2, 1],
'priority': ['yes', 'yes', 'no', 'yes', 'no', 'no', 'no', 'yes', 'no', 'no']}
labels = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j']
df = pd.DataFrame(data, index=labels)
```
**5.** Display a summary of the basic information about this DataFrame and its data (*hint: there is a single method that can be called on the DataFrame*).
```
df.info()
# ...or...
df.describe()
```
**6.** Return the first 3 rows of the DataFrame `df`.
```
df.iloc[:3]
# or equivalently
df.head(3)
```
**7.** Select just the 'animal' and 'age' columns from the DataFrame `df`.
```
df.loc[:, ['animal', 'age']]
# or
df[['animal', 'age']]
```
**8.** Select the data in rows `[3, 4, 8]` *and* in columns `['animal', 'age']`.
```
df.loc[df.index[[3, 4, 8]], ['animal', 'age']]
```
**9.** Select only the rows where the number of visits is greater than 3.
```
df[df['visits'] > 3]
```
**10.** Select the rows where the age is missing, i.e. it is `NaN`.
```
df[df['age'].isnull()]
```
**11.** Select the rows where the animal is a cat *and* the age is less than 3.
```
df[(df['animal'] == 'cat') & (df['age'] < 3)]
```
**12.** Select the rows where the age is between 2 and 4 (inclusive).
```
df[df['age'].between(2, 4)]
```
**13.** Change the age in row 'f' to 1.5.
```
df.loc['f', 'age'] = 1.5
```
**14.** Calculate the sum of all visits in `df` (i.e. the total number of visits).
```
df['visits'].sum()
```
**15.** Calculate the mean age for each different animal in `df`.
```
df.groupby('animal')['age'].mean()
```
**16.** Append a new row 'k' to `df` with your choice of values for each column. Then delete that row to return the original DataFrame.
```
df.loc['k'] = [5.5, 'dog', 'no', 2]
# and then deleting the new row...
df = df.drop('k')
```
**17.** Count the number of each type of animal in `df`.
```
df['animal'].value_counts()
```
**18.** Sort `df` first by the values in the 'age' column in *descending* order, then by the value in the 'visits' column in *ascending* order (so row `i` should be first, and row `d` should be last).
```
df.sort_values(by=['age', 'visits'], ascending=[False, True])
```
**19.** The 'priority' column contains the values 'yes' and 'no'. Replace this column with a column of boolean values: 'yes' should be `True` and 'no' should be `False`.
```
df['priority'] = df['priority'].map({'yes': True, 'no': False})
```
**20.** In the 'animal' column, change the 'snake' entries to 'python'.
```
df['animal'] = df['animal'].replace('snake', 'python')
```
**21.** For each animal type and each number of visits, find the mean age. In other words, each row is an animal, each column is a number of visits and the values are the mean ages (*hint: use a pivot table*).
```
df.pivot_table(index='animal', columns='visits', values='age', aggfunc='mean')
```
## DataFrames: beyond the basics
### Slightly trickier: you may need to combine two or more methods to get the right answer
Difficulty: *medium*
The previous section was a tour through some basic but essential DataFrame operations. Below are some ways that you might need to cut your data, but for which there is no single "out of the box" method.
**22.** You have a DataFrame `df` with a column 'A' of integers. For example:
```python
df = pd.DataFrame({'A': [1, 2, 2, 3, 4, 5, 5, 5, 6, 7, 7]})
```
How do you filter out rows which contain the same integer as the row immediately above?
You should be left with a column containing the following values:
```python
1, 2, 3, 4, 5, 6, 7
```
```
df = pd.DataFrame({'A': [1, 2, 2, 3, 4, 5, 5, 5, 6, 7, 7]})
df.loc[df['A'].shift() != df['A']]
# Alternatively, we could use drop_duplicates() here. Note
# that this removes *all* duplicates though, so it won't
# work as desired if A is [1, 1, 2, 2, 1, 1] for example.
df.drop_duplicates(subset='A')
```
**23.** Given a DataFrame of random numeric values:
```python
df = pd.DataFrame(np.random.random(size=(5, 3))) # this is a 5x3 DataFrame of float values
```
how do you subtract the row mean from each element in the row?
```
df = pd.DataFrame(np.random.random(size=(5, 3)))
df.sub(df.mean(axis=1), axis=0)
```
**24.** Suppose you have DataFrame with 10 columns of real numbers, for example:
```python
df = pd.DataFrame(np.random.random(size=(5, 10)), columns=list('abcdefghij'))
```
Which column of numbers has the smallest sum? Return that column's label.
```
df = pd.DataFrame(np.random.random(size=(5, 10)), columns=list('abcdefghij'))
df.sum().idxmin()
```
**25.** How do you count how many unique rows a DataFrame has (i.e. ignore all rows that are duplicates)?
```
df = pd.DataFrame(np.random.randint(0, 2, size=(10, 3)))
len(df) - df.duplicated(keep=False).sum()
# or perhaps more simply...
len(df.drop_duplicates(keep=False))
```
The next three puzzles are slightly harder.
**26.** In the cell below, you have a DataFrame `df` that consists of 10 columns of floating-point numbers. Exactly 5 entries in each row are NaN values.
For each row of the DataFrame, find the *column* which contains the *third* NaN value.
You should return a Series of column labels: `e, c, d, h, d`
```
nan = np.nan
data = [[0.04, nan, nan, 0.25, nan, 0.43, 0.71, 0.51, nan, nan],
[ nan, nan, nan, 0.04, 0.76, nan, nan, 0.67, 0.76, 0.16],
[ nan, nan, 0.5 , nan, 0.31, 0.4 , nan, nan, 0.24, 0.01],
[0.49, nan, nan, 0.62, 0.73, 0.26, 0.85, nan, nan, nan],
[ nan, nan, 0.41, nan, 0.05, nan, 0.61, nan, 0.48, 0.68]]
columns = list('abcdefghij')
df = pd.DataFrame(data, columns=columns)
(df.isnull().cumsum(axis=1) == 3).idxmax(axis=1)
```
**27.** A DataFrame has a column of groups 'grps' and a column of integer values 'vals':
```python
df = pd.DataFrame({'grps': list('aaabbcaabcccbbc'),
'vals': [12,345,3,1,45,14,4,52,54,23,235,21,57,3,87]})
```
For each *group*, find the sum of the three greatest values. You should end up with the answer as follows:
```
grps
a 409
b 156
c 345
```
```
df = pd.DataFrame({'grps': list('aaabbcaabcccbbc'),
'vals': [12,345,3,1,45,14,4,52,54,23,235,21,57,3,87]})
df.groupby('grps')['vals'].nlargest(3).sum(level=0)
```
**28.** The DataFrame `df` constructed below has two integer columns 'A' and 'B'. The values in 'A' are between 1 and 100 (inclusive).
For each group of 10 consecutive integers in 'A' (i.e. `(0, 10]`, `(10, 20]`, ...), calculate the sum of the corresponding values in column 'B'.
The answer should be a Series as follows:
```
A
(0, 10] 635
(10, 20] 360
(20, 30] 315
(30, 40] 306
(40, 50] 750
(50, 60] 284
(60, 70] 424
(70, 80] 526
(80, 90] 835
(90, 100] 852
```
```
df = pd.DataFrame(np.random.RandomState(8765).randint(1, 101, size=(100, 2)), columns = ["A", "B"])
df.groupby(pd.cut(df['A'], np.arange(0, 101, 10)))['B'].sum()
```
## DataFrames: harder problems
### These might require a bit of thinking outside the box...
...but all are solvable using just the usual pandas/NumPy methods (and so avoid using explicit `for` loops).
Difficulty: *hard*
**29.** Consider a DataFrame `df` where there is an integer column 'X':
```python
df = pd.DataFrame({'X': [7, 2, 0, 3, 4, 2, 5, 0, 3, 4]})
```
For each value, count the difference back to the previous zero (or the start of the Series, whichever is closer). These values should therefore be
```
[1, 2, 0, 1, 2, 3, 4, 0, 1, 2]
```
Make this a new column 'Y'.
```
df = pd.DataFrame({'X': [7, 2, 0, 3, 4, 2, 5, 0, 3, 4]})
izero = np.r_[-1, (df == 0).values.nonzero()[0]] # indices of zeros
idx = np.arange(len(df))
y = df['X'] != 0
df['Y'] = idx - izero[np.searchsorted(izero - 1, idx) - 1]
# http://stackoverflow.com/questions/30730981/how-to-count-distance-to-the-previous-zero-in-pandas-series/
# credit: Behzad Nouri
```
Here's an alternative approach based on a [cookbook recipe](http://pandas.pydata.org/pandas-docs/stable/cookbook.html#grouping):
```
df = pd.DataFrame({'X': [7, 2, 0, 3, 4, 2, 5, 0, 3, 4]})
x = (df['X'] != 0).cumsum()
y = x != x.shift()
df['Y'] = y.groupby((y != y.shift()).cumsum()).cumsum()
```
And another approach using a groupby operation:
```
df = pd.DataFrame({'X': [7, 2, 0, 3, 4, 2, 5, 0, 3, 4]})
df['Y'] = df.groupby((df['X'] == 0).cumsum()).cumcount()
# We're off by one before we reach the first zero.
first_zero_idx = (df['X'] == 0).idxmax()
df['Y'].iloc[0:first_zero_idx] += 1
```
**30.** Consider the DataFrame constructed below which contains rows and columns of numerical data.
Create a list of the column-row index locations of the 3 largest values in this DataFrame. In this case, the answer should be:
```
[(5, 7), (6, 4), (2, 5)]
```
```
df = pd.DataFrame(np.random.RandomState(30).randint(1, 101, size=(8, 8)))
df.unstack().sort_values()[-3:].index.tolist()
# http://stackoverflow.com/questions/14941261/index-and-column-for-the-max-value-in-pandas-dataframe/
# credit: DSM
```
**31.** You are given the DataFrame below with a column of group IDs, 'grps', and a column of corresponding integer values, 'vals'.
```python
df = pd.DataFrame({"vals": np.random.RandomState(31).randint(-30, 30, size=15),
"grps": np.random.RandomState(31).choice(["A", "B"], 15)})
```
Create a new column 'patched_vals' which contains the same values as 'vals', but with any negative values replaced by the group mean:
```
vals grps patched_vals
0 -12 A 13.6
1 -7 B 28.0
2 -14 A 13.6
3 4 A 4.0
4 -7 A 13.6
5 28 B 28.0
6 -2 A 13.6
7 -1 A 13.6
8 8 A 8.0
9 -2 B 28.0
10 28 A 28.0
11 12 A 12.0
12 16 A 16.0
13 -24 A 13.6
14 -12 A 13.6
```
```
df = pd.DataFrame({"vals": np.random.RandomState(31).randint(-30, 30, size=15),
"grps": np.random.RandomState(31).choice(["A", "B"], 15)})
def replace(group):
    """Replace negative entries in *group* with the mean of its non-negative entries (in place)."""
    negatives = group < 0
    group[negatives] = group[~negatives].mean()
    return group
df.groupby(['grps'])['vals'].transform(replace)
# http://stackoverflow.com/questions/14760757/replacing-values-with-groupby-means/
# credit: unutbu
```
**32.** Implement a rolling mean over groups with window size 3, which ignores NaN value. For example consider the following DataFrame:
```python
>>> df = pd.DataFrame({'group': list('aabbabbbabab'),
'value': [1, 2, 3, np.nan, 2, 3, np.nan, 1, 7, 3, np.nan, 8]})
>>> df
group value
0 a 1.0
1 a 2.0
2 b 3.0
3 b NaN
4 a 2.0
5 b 3.0
6 b NaN
7 b 1.0
8 a 7.0
9 b 3.0
10 a NaN
11 b 8.0
```
The goal is to compute the Series:
```
0 1.000000
1 1.500000
2 3.000000
3 3.000000
4 1.666667
5 3.000000
6 3.000000
7 2.000000
8 3.666667
9 2.000000
10 4.500000
11 4.000000
```
E.g. the first window of size three for group 'b' has values 3.0, NaN and 3.0 and occurs at row index 5. Instead of being NaN the value in the new column at this row index should be 3.0 (just the two non-NaN values are used to compute the mean (3+3)/2)
```
df = pd.DataFrame({'group': list('aabbabbbabab'),
'value': [1, 2, 3, np.nan, 2, 3, np.nan, 1, 7, 3, np.nan, 8]})
g1 = df.groupby(['group'])['value'] # group values
g2 = df.fillna(0).groupby(['group'])['value'] # fillna, then group values
s = g2.rolling(3, min_periods=1).sum() / g1.rolling(3, min_periods=1).count() # compute means
s.reset_index(level=0, drop=True).sort_index() # drop/sort index
# http://stackoverflow.com/questions/36988123/pandas-groupby-and-rolling-apply-ignoring-nans/
```
## Series and DatetimeIndex
### Exercises for creating and manipulating Series with datetime data
Difficulty: *easy/medium*
pandas is fantastic for working with dates and times. These puzzles explore some of this functionality.
**33.** Create a DatetimeIndex that contains each business day of 2015 and use it to index a Series of random numbers. Let's call this Series `s`.
```
dti = pd.date_range(start='2015-01-01', end='2015-12-31', freq='B')
s = pd.Series(np.random.rand(len(dti)), index=dti)
s
```
**34.** Find the sum of the values in `s` for every Wednesday.
```
s[s.index.weekday == 2].sum()
```
**35.** For each calendar month in `s`, find the mean of values.
```
s.resample('M').mean()
```
**36.** For each group of four consecutive calendar months in `s`, find the date on which the highest value occurred.
```
s.groupby(pd.Grouper(freq='4M')).idxmax()
```
**37.** Create a DateTimeIndex consisting of the third Thursday in each month for the years 2015 and 2016.
```
pd.date_range('2015-01-01', '2016-12-31', freq='WOM-3THU')
```
## Cleaning Data
### Making a DataFrame easier to work with
Difficulty: *easy/medium*
It happens all the time: someone gives you data containing malformed strings, Python lists and missing data. How do you tidy it up so you can get on with the analysis?
Take this monstrosity as the DataFrame to use in the following puzzles:
```python
df = pd.DataFrame({'From_To': ['LoNDon_paris', 'MAdrid_miLAN', 'londON_StockhOlm',
'Budapest_PaRis', 'Brussels_londOn'],
'FlightNumber': [10045, np.nan, 10065, np.nan, 10085],
'RecentDelays': [[23, 47], [], [24, 43, 87], [13], [67, 32]],
'Airline': ['KLM(!)', '<Air France> (12)', '(British Airways. )',
'12. Air France', '"Swiss Air"']})
```
Formatted, it looks like this:
```
From_To FlightNumber RecentDelays Airline
0 LoNDon_paris 10045.0 [23, 47] KLM(!)
1 MAdrid_miLAN NaN [] <Air France> (12)
2 londON_StockhOlm 10065.0 [24, 43, 87] (British Airways. )
3 Budapest_PaRis NaN [13] 12. Air France
4 Brussels_londOn 10085.0 [67, 32] "Swiss Air"
```
(It's some flight data I made up; it's not meant to be accurate in any way.)
**38.** Some values in the **FlightNumber** column are missing (they are `NaN`). These numbers are meant to increase by 10 with each row so 10055 and 10075 need to be put in place. Modify `df` to fill in these missing numbers and make the column an integer column (instead of a float column).
```
df = pd.DataFrame({'From_To': ['LoNDon_paris', 'MAdrid_miLAN', 'londON_StockhOlm',
'Budapest_PaRis', 'Brussels_londOn'],
'FlightNumber': [10045, np.nan, 10065, np.nan, 10085],
'RecentDelays': [[23, 47], [], [24, 43, 87], [13], [67, 32]],
'Airline': ['KLM(!)', '<Air France> (12)', '(British Airways. )',
'12. Air France', '"Swiss Air"']})
df['FlightNumber'] = df['FlightNumber'].interpolate().astype(int)
df
```
**39.** The **From\_To** column would be better as two separate columns! Split each string on the underscore delimiter `_` to give a new temporary DataFrame called 'temp' with the correct values. Assign the correct column names 'From' and 'To' to this temporary DataFrame.
```
temp = df.From_To.str.split('_', expand=True)
temp.columns = ['From', 'To']
temp
```
**40.** Notice how the capitalisation of the city names is all mixed up in this temporary DataFrame 'temp'. Standardise the strings so that only the first letter is uppercase (e.g. "londON" should become "London".)
```
temp['From'] = temp['From'].str.capitalize()
temp['To'] = temp['To'].str.capitalize()
temp
```
**41.** Delete the **From_To** column from `df` and attach the temporary DataFrame 'temp' from the previous questions.
```
df = df.drop('From_To', axis=1)
df = df.join(temp)
df
```
**42**. In the **Airline** column, you can see some extra punctuation and symbols have appeared around the airline names. Pull out just the airline name. E.g. `'(British Airways. )'` should become `'British Airways'`.
```
df['Airline'] = df['Airline'].str.extract('([a-zA-Z\s]+)', expand=False).str.strip()
# note: using .strip() gets rid of any leading/trailing spaces
df
```
**43**. In the **RecentDelays** column, the values have been entered into the DataFrame as a list. We would like each first value in its own column, each second value in its own column, and so on. If there isn't an Nth value, the value should be NaN.
Expand the Series of lists into a new DataFrame named 'delays', rename the columns 'delay_1', 'delay_2', etc. and replace the unwanted RecentDelays column in `df` with 'delays'.
```
# there are several ways to do this, but the following approach is possibly the simplest
delays = df['RecentDelays'].apply(pd.Series)
delays.columns = ['delay_{}'.format(n) for n in range(1, len(delays.columns)+1)]
df = df.drop('RecentDelays', axis=1).join(delays)
df
```
The DataFrame should look much better now:
```
FlightNumber Airline From To delay_1 delay_2 delay_3
0 10045 KLM London Paris 23.0 47.0 NaN
1 10055 Air France Madrid Milan NaN NaN NaN
2 10065 British Airways London Stockholm 24.0 43.0 87.0
3 10075 Air France Budapest Paris 13.0 NaN NaN
4 10085 Swiss Air Brussels London 67.0 32.0 NaN
```
## Using MultiIndexes
### Go beyond flat DataFrames with additional index levels
Difficulty: *medium*
Previous exercises have seen us analysing data from DataFrames equipped with a single index level. However, pandas also gives you the possibility of indexing your data using *multiple* levels. This is very much like adding new dimensions to a Series or a DataFrame. For example, a Series is 1D, but by using a MultiIndex with 2 levels we gain much of the same functionality as a 2D DataFrame.
The set of puzzles below explores how you might use multiple index levels to enhance data analysis.
To warm up, we'll make a Series with two index levels.
**44**. Given the lists `letters = ['A', 'B', 'C']` and `numbers = list(range(10))`, construct a MultiIndex object from the product of the two lists. Use it to index a Series of random numbers. Call this Series `s`.
```
letters = ['A', 'B', 'C']
numbers = list(range(10))
mi = pd.MultiIndex.from_product([letters, numbers])
s = pd.Series(np.random.rand(30), index=mi)
s
```
**45.** Check the index of `s` is lexicographically sorted (this is a necessary property for indexing to work correctly with a MultiIndex).
```
s.index.is_lexsorted()
# or more verbosely...
s.index.lexsort_depth == s.index.nlevels
```
**46**. Select the labels `1`, `3` and `6` from the second level of the MultiIndexed Series.
```
s.loc[:, [1, 3, 6]]
```
**47**. Slice the Series `s`; slice up to label 'B' for the first level and from label 5 onwards for the second level.
```
s.loc[pd.IndexSlice[:'B', 5:]]
# or equivalently without IndexSlice...
s.loc[slice(None, 'B'), slice(5, None)]
```
**48**. Sum the values in `s` for each label in the first level (you should have Series giving you a total for labels A, B and C).
```
s.sum(level=0)
```
**49**. Suppose that `sum()` (and other methods) did not accept a `level` keyword argument. How else could you perform the equivalent of `s.sum(level=1)`?
```
# One way is to use .unstack()...
# This method should convince you that s is essentially just a regular DataFrame in disguise!
s.unstack().sum(axis=0)
```
**50**. Exchange the levels of the MultiIndex so we have an index of the form (letters, numbers). Is this new Series properly lexsorted? If not, sort it.
```
new_s = s.swaplevel(0, 1)
if not new_s.index.is_lexsorted():
new_s = new_s.sort_index()
new_s
```
## Minesweeper
### Generate the numbers for safe squares in a Minesweeper grid
Difficulty: *medium* to *hard*
If you've ever used an older version of Windows, there's a good chance you've played with Minesweeper:
- https://en.wikipedia.org/wiki/Minesweeper_(video_game)
If you're not familiar with the game, imagine a grid of squares: some of these squares conceal a mine. If you click on a mine, you lose instantly. If you click on a safe square, you reveal a number telling you how many mines are found in the squares that are immediately adjacent. The aim of the game is to uncover all squares in the grid that do not contain a mine.
In this section, we'll make a DataFrame that contains the necessary data for a game of Minesweeper: coordinates of the squares, whether the square contains a mine and the number of mines found on adjacent squares.
**51**. Let's suppose we're playing Minesweeper on a 5 by 4 grid, i.e.
```
X = 5
Y = 4
```
To begin, generate a DataFrame `df` with two columns, `'x'` and `'y'` containing every coordinate for this grid. That is, the DataFrame should start:
```
x y
0 0 0
1 0 1
2 0 2
...
```
```
X = 5
Y = 4
p = pd.core.reshape.util.cartesian_product([np.arange(X), np.arange(Y)])
df = pd.DataFrame(np.asarray(p).T, columns=['x', 'y'])
df
```
**52**. For this DataFrame `df`, create a new column of zeros (safe) and ones (mine). The probability of a mine occurring at each location should be 0.4.
```
# One way is to draw samples from a binomial distribution.
df['mine'] = np.random.binomial(1, 0.4, X*Y)
df
```
**53**. Now create a new column for this DataFrame called `'adjacent'`. This column should contain the number of mines found on adjacent squares in the grid.
(E.g. for the first row, which is the entry for the coordinate `(0, 0)`, count how many mines are found on the coordinates `(0, 1)`, `(1, 0)` and `(1, 1)`.)
```
# Here is one way to solve using merges.
# It's not necessary the optimal way, just
# the solution I thought of first...
df['adjacent'] = \
df.merge(df + [ 1, 1, 0], on=['x', 'y'], how='left')\
.merge(df + [ 1, -1, 0], on=['x', 'y'], how='left')\
.merge(df + [-1, 1, 0], on=['x', 'y'], how='left')\
.merge(df + [-1, -1, 0], on=['x', 'y'], how='left')\
.merge(df + [ 1, 0, 0], on=['x', 'y'], how='left')\
.merge(df + [-1, 0, 0], on=['x', 'y'], how='left')\
.merge(df + [ 0, 1, 0], on=['x', 'y'], how='left')\
.merge(df + [ 0, -1, 0], on=['x', 'y'], how='left')\
.iloc[:, 3:]\
.sum(axis=1)
# An alternative solution is to pivot the DataFrame
# to form the "actual" grid of mines and use convolution.
# See https://github.com/jakevdp/matplotlib_pydata2013/blob/master/examples/minesweeper.py
from scipy.signal import convolve2d
mine_grid = df.pivot_table(columns='x', index='y', values='mine')
counts = convolve2d(mine_grid.astype(complex), np.ones((3, 3)), mode='same').real.astype(int)
df['adjacent'] = (counts - mine_grid).ravel('F')
```
**54**. For rows of the DataFrame that contain a mine, set the value in the `'adjacent'` column to NaN.
```
df.loc[df['mine'] == 1, 'adjacent'] = np.nan
```
**55**. Finally, convert the DataFrame to grid of the adjacent mine counts: columns are the `x` coordinate, rows are the `y` coordinate.
```
df.drop('mine', axis=1).set_index(['y', 'x']).unstack()
```
## Plotting
### Visualize trends and patterns in data
Difficulty: *medium*
To really get a good understanding of the data contained in your DataFrame, it is often essential to create plots: if you're lucky, trends and anomalies will jump right out at you. This functionality is baked into pandas and the puzzles below explore some of what's possible with the library.
**56.** Pandas is highly integrated with the plotting library matplotlib, and makes plotting DataFrames very user-friendly! Plotting in a notebook environment usually makes use of the following boilerplate:
```python
import matplotlib.pyplot as plt
%matplotlib inline
plt.style.use('ggplot')
```
matplotlib is the plotting library which pandas' plotting functionality is built upon, and it is usually aliased to ```plt```.
```%matplotlib inline``` tells the notebook to show plots inline, instead of creating them in a separate window.
```plt.style.use('ggplot')``` is a style theme that most people find agreeable, based upon the styling of R's ggplot package.
For starters, make a scatter plot of this random data, but use black X's instead of the default markers.
```df = pd.DataFrame({"xs":[1,5,2,8,1], "ys":[4,2,1,9,6]})```
Consult the [documentation](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.plot.html) if you get stuck!
```
import matplotlib.pyplot as plt
%matplotlib inline
plt.style.use('ggplot')
df = pd.DataFrame({"xs":[1,5,2,8,1], "ys":[4,2,1,9,6]})
df.plot.scatter("xs", "ys", color = "black", marker = "x")
```
**57.** Columns in your DataFrame can also be used to modify colors and sizes. Bill has been keeping track of his performance at work over time, as well as how good he was feeling that day, and whether he had a cup of coffee in the morning. Make a plot which incorporates all four features of this DataFrame.
(Hint: If you're having trouble seeing the plot, try multiplying the Series which you choose to represent size by 10 or more)
*The chart doesn't have to be pretty: this isn't a course in data viz!*
```
df = pd.DataFrame({"productivity":[5,2,3,1,4,5,6,7,8,3,4,8,9],
"hours_in" :[1,9,6,5,3,9,2,9,1,7,4,2,2],
"happiness" :[2,1,3,2,3,1,2,3,1,2,2,1,3],
"caffienated" :[0,0,1,1,0,0,0,0,1,1,0,1,0]})
```
```
df = pd.DataFrame({"productivity":[5,2,3,1,4,5,6,7,8,3,4,8,9],
"hours_in" :[1,9,6,5,3,9,2,9,1,7,4,2,2],
"happiness" :[2,1,3,2,3,1,2,3,1,2,2,1,3],
"caffienated" :[0,0,1,1,0,0,0,0,1,1,0,1,0]})
df.plot.scatter("hours_in", "productivity", s = df.happiness * 30, c = df.caffienated)
```
**58.** What if we want to plot multiple things? Pandas allows you to pass in a matplotlib *Axis* object for plots, and plots will also return an Axis object.
Make a bar plot of monthly revenue with a line plot of monthly advertising spending (numbers in millions)
```
df = pd.DataFrame({"revenue":[57,68,63,71,72,90,80,62,59,51,47,52],
"advertising":[2.1,1.9,2.7,3.0,3.6,3.2,2.7,2.4,1.8,1.6,1.3,1.9],
"month":range(12)
})
```
```
df = pd.DataFrame({"revenue":[57,68,63,71,72,90,80,62,59,51,47,52],
"advertising":[2.1,1.9,2.7,3.0,3.6,3.2,2.7,2.4,1.8,1.6,1.3,1.9],
"month":range(12)
})
ax = df.plot.bar("month", "revenue", color = "green")
df.plot.line("month", "advertising", secondary_y = True, ax = ax)
ax.set_xlim((-1,12))
```
Now we're finally ready to create a candlestick chart, which is a very common tool used to analyze stock price data. A candlestick chart shows the opening, closing, highest, and lowest price for a stock during a time window. The color of the "candle" (the thick part of the bar) is green if the stock closed above its opening price, or red if below.

This was initially designed to be a pandas plotting challenge, but it just so happens that this type of plot is just not feasible using pandas' methods. If you are unfamiliar with matplotlib, we have provided a function that will plot the chart for you so long as you can use pandas to get the data into the correct format.
Your first step should be to get the data in the correct format using pandas' time-series grouping function. We would like each candle to represent an hour's worth of data. You can write your own aggregation function which returns the open/high/low/close, but pandas has a built-in which also does this.
The below cell contains helper functions. Call ```day_stock_data()``` to generate a DataFrame containing the prices a hypothetical stock sold for, and the time the sale occurred. Call ```plot_candlestick(df)``` on your properly aggregated and formatted stock data to print the candlestick chart.
```
#This function is designed to create semi-interesting random stock price data
import numpy as np
def float_to_time(x):
    """Convert a fractional hour count (e.g. 9.5) into an 'H:MM:SS' string."""
    hours = int(x)
    minutes = int(x % 1 * 60)           # fractional hours -> whole minutes
    seconds = int(x * 60 % 1 * 60)      # fractional minutes -> whole seconds
    return "{}:{:02d}:{:02d}".format(hours, minutes, seconds)
def day_stock_data():
    """Simulate one trading day of random stock sales.

    Returns a DataFrame with a 'time' column (parsed to datetime) and a
    'price' column, one row per simulated sale between 09:30 and 16:00.
    """
    # NYSE trading hours: 09:30 -- 16:00
    clock = 9.5
    last_price = 100
    ticks = [(float_to_time(clock), last_price)]
    while clock < 16:
        # waiting time until the next sale (exponentially distributed)
        clock += np.random.exponential(.001)
        if clock > 16:
            break
        # random walk: scale the price by a factor very close to 1
        last_price = last_price * np.random.uniform(.999, 1.001)
        ticks.append((float_to_time(clock), last_price))
    frame = pd.DataFrame(ticks, columns=['time', 'price'])
    frame['time'] = pd.to_datetime(frame['time'])
    return frame
def plot_candlestick(agg):
    """Draw a candlestick (OHLC) chart from hourly-aggregated stock data.

    `agg` is expected to be indexed by timestamps and to provide the
    columns 'open', 'high', 'low', 'close' and 'color' -- the shape
    produced in exercise 59 via resample('H').ohlc() plus a color column.
    """
    fig, ax = plt.subplots()
    for time in agg.index:
        # thin black "wick": the full high-to-low price range for this hour
        ax.plot([time.hour] * 2, agg.loc[time, ["high","low"]].values, color = "black")
        # thick candle body: open-to-close range, colored by the 'color' column
        ax.plot([time.hour] * 2, agg.loc[time, ["open","close"]].values, color = agg.loc[time, "color"], linewidth = 10)
    ax.set_xlim((8,16))
    ax.set_ylabel("Price")
    ax.set_xlabel("Hour")
    ax.set_title("OHLC of Stock Value During Trading Day")
    plt.show()
```
**59.** Generate a day's worth of random stock data, and aggregate / reformat it so that it has hourly summaries of the opening, highest, lowest, and closing prices
```
df = day_stock_data()
df.head()
df.set_index("time", inplace = True)
agg = df.resample("H").ohlc()
agg.columns = agg.columns.droplevel()
agg["color"] = (agg.close > agg.open).map({True:"green",False:"red"})
agg.head()
```
**60.** Now that you have your properly-formatted data, try to plot it yourself as a candlestick chart. Use the ```plot_candlestick(df)``` function above, or matplotlib's [```plot``` documentation](https://matplotlib.org/api/_as_gen/matplotlib.axes.Axes.plot.html) if you get stuck.
```
plot_candlestick(agg)
```
*More exercises to follow soon...*
| github_jupyter |
# 循环
- 循环是一种控制语句块重复执行的结构
- while 适用于广度遍历
- for 开发中经常使用
## while 循环
- 当一个条件保持真的时候while循环重复执行语句
- while 循环一定要有结束条件,否则很容易进入死循环
- while 循环的语法是:
while loop-continuation-condition:
Statement
## 示例:
sum = 0
i = 1
while i <10:
sum = sum + i
i = i + 1
```
# Demo: print a greeting ten times with a while loop.
i = 0
while i<10:
    print('天真好')
    i+=1
# Prank snippet, deliberately left broken:
import os
i = 0
# BUG: 'whiel' is a typo for 'while' -- this line is a SyntaxError, which
# conveniently keeps the endless calc-spawning loop below from ever running.
whiel 1:
    os.system('calc')# do not run this
```
## 错误示例:
sum = 0
i = 1
while i <10:
sum = sum + i
i = i + 1
- 一旦进入死循环可按 Ctrl + c 停止
## EP:


```
#max = 5,number = 0
```
# 验证码
- 随机产生四个字母的验证码,如果正确,输出验证码正确。如果错误,产生新的验证码,用户重新输入。
- 验证码只能输入三次,如果三次都错,返回“别爬了,我们小网站没什么好爬的”
- 密码登录,如果三次错误,账号被锁定
```
while 1:
import random
abcd = random.randint(1000,9999)
print(abcd)
y = eval(input('请随机输入四个数字验证码'))
if y == abcd:
print('验证码输入正确')
break
print('~~~~')#如果break了,则break之后的代码都不运行。
else:
print('验证码输入错误')
while True:
import random
n1 = chr(random.randint(65,90))
n2 = chr(random.randint(97,122))
n3 = chr(random.randint(65,90))
n4 = chr(random.randint(97,122))
N = n1 + n2 + n3 + n4
print('验证码是:',N)
me = input('请输入验证码')
if me == N:
print('验证码正确')
break
else:
print('验证码不正确')
while True:
import random
n1 = chr(random.randint(65,90))
n2 = chr(random.randint(97,122))
n3 = chr(random.randint(65,90))
n4 = chr(random.randint(97,122))
shuffle_n = [n1,n2,n3,n4]
random.shuffle(shuffle_n)
N = "".join(shuffle_n)
print('验证码是:',N)
me = input('请输入验证码')
if me.lower() == N.lower():
print('验证码正确')
break
else:
print('验证码不正确')
while True:
import random
n1 = chr(random.randint(65,90))
n2 = chr(random.randint(97,122))
n3 = chr(random.randint(65,90))
n4 = chr(random.randint(97,122))
choose = random.randint(0,1)
list_ = [[65,90],[97,122]]
range_ = list_[choose]
for i in range(4):
print(i)#可以不要
N += chr(random.randint((range_)[0],range_[1]))
shuffle_n = [n1,n2,n3,n4]
random.shuffle(shuffle_n)
N = "".join(shuffle_n)
print('验证码是:',N)
me = input('请输入验证码')
if me.lower() == N.lower():
print('验证码正确')
break
else:
print('验证码不正确')
N = ""
N +='a'
print(N)
a = "j"
a.upper()
a = "j"
a.lower()
a = "j12"
a.upper()
a = {1,2,3,4,5,6}
random.shuffle(a)
a
```
## 尝试死循环
## 实例研究:猜数字
- 你将要编写一个能够随机生成一个0到10之间的且包括两者的数字程序,这个程序
- 提示用户连续地输入数字直到正确,且提示用户输入的数字是过高还是过低
```
while 1:
import random
N1 = random.randint(0,10)
N2 = random.randint(0,10)
input_ = eval(input('sum'))
if N1 + N2 == input_:
print('correct!!')
break
```
## 使用哨兵值来控制循环
- 哨兵值来表明输入的结束
- 
## 警告

## for 循环
- Python的for 循环通过一个序列中的每个值来进行迭代
- range(a,b,k), a,b,k 必须为整数
- a: start
- b: end
- k: step
- 注意for 是循环一切可迭代对象,而不是只能使用range
```
# Demos of range(): looping with different start/stop/step combinations.
for i in range (10):# step demo: range(start, stop[, step]) -> range object
    print(i)
for i in range (1,10,2):
    print(i)
# BUG: '_iter_' is not defined (NameError). Presumably this was meant to be
# range(10).__iter__() -- note the double underscores.
range(10),_iter_()
for i in range (0,10,2):# step demo: range(start, stop[, step]) -> range object
    print(i)
for i in range (10,0,-1):# counting down with a negative step
    print(i)
b = 'ab'
# BUG: '__iter__' on its own is not defined (NameError); presumably meant
# b.__iter__(). 'a,' also builds a tuple with whatever 'a' was bound to earlier.
a,__iter__()
# range controls the number of loop iterations
```
# 在Python里面一切皆对象
## EP:
- 
```
# Sum 1+2+3+... with a while loop until the running total exceeds 10000.
i = 1
sum_ = 0
while sum_ < 10000:
    sum_ = sum_ + i
    i += 1
print(i)
print(sum_)
# Same idea with a for loop and an explicit break.
sum_ = 0
for i in range(10000):
    sum_ += i
    if sum_ > 10000:
        break
print(sum_)
print(i)
i = 0# 'sum' is a built-in name (the original comment calls it reserved), hence sum_
sum_ = 0
while sum_<1001:
    sum_ += 1
print(sum_)
sum_ = 0
for i in range(1001):
    sum_ = sum_ +  # BUG: incomplete statement -- SyntaxError; the term to add was never written
```
## 嵌套循环
- 一个循环可以嵌套另一个循环
- 每次循环外层时,内层循环都会被刷新重新完成循环
- 也就是说,大循环执行一次,小循环会全部执行一次
- 注意:
> - 多层循环非常耗时
- 最多使用3层循环
## EP:
- 使用多层循环完成9X9乘法表
- 显示50以内所有的素数
```
# 9x9 multiplication table: j*i printed left-aligned in a 2-char field;
# the bare print() at the end of each row emits the newline.
for i in range(1,10):
    for j in range(1,i+1):
        print('%s*%s=%-2s'%(j,i,i*j),end = ' ')
    print()# print a newline
# Prime-number exercise, left unfinished:
import random
# NOTE: eval(input(...)) executes whatever the user types -- unsafe.
number = eval(input('请输入一个数'))
# BUG: 'num' is undefined (should be 'number') -> NameError at runtime.
if num > 1:
    for i in range(2,50):
        if i == 2:
            print(i,'是素数')
        else:
# BUG: this 'else:' has no body, so the whole cell is a SyntaxError.
```
## 关键字 break 和 continue
- break 跳出循环,终止循环
- continue 跳出此次循环,继续执行
```
for i in range(10):
for j in range(10):
for k in range(10):
print(i,j,)
for i in range(10):
if i == 5:
break
print(i)
for i in range(5):
for j in range(5):
if j == 3:
print(i,j)
break
for i in range(3):
for j in range(3):
print(i,j)
if j == 2:
continue
```
## 注意


# Homework
- 1

```
integer_sum = 0
positive_numbers = 0
minus_numbers = 0
temp = 1
while temp == 1:
integer = int(input())
if integer > 0 :
positive_numbers += 1
elif integer < 0:
minus_numbers += 1
elif integer == 0:
break
integer_sum += integer
print('正数个数:',positive_numbers)
print('负数个数',minus_numbers)
print('总和',integer_sum)
print('平均数',float(integer_sum / (positive_numbers + minus_numbers)))
import random
N = eval(input('请输入一个整数'))
for i in range()
```
- 2

```
a = eval(input('输入今年的学费:'))
b = 1 + 0.05
c = a * b
d = c * b
e = d * b
f = e * b
g = f * b
h = g * b
i = h * b
j = i * b
k = j * b
l = k * b
print('第十年是',l,'美元')
sum_ = c+d+e+f+g+h+i+j+k+l
print('十年的总学费是',sum_,'美元')
i= 1
b = 10000
sum_ = 0
for i in range(10):
sum_ = (b * 0.05) + b
b = sum_
i += 1
print(sum_)
```
- 3

- 4

```
# Print every multiple of 30 between 99 and 1000 (inclusive), space-separated.
for i in range(99, 1001):
    if not i % 30:
        print(i, end=" ")
```
- 5

```
import math  # not used below; retained from the original cell

# Smallest integer whose square reaches 12000 ...
min_n = 0
while min_n ** 2 < 12000:
    min_n = min_n + 1

# ... and the largest integer whose cube stays below 12000.
max_n = 0
while max_n ** 3 < 12000:
    max_n = max_n + 1
max_n = max_n - 1

print(min_n, max_n)
```
- 6

- 7

```
import math  # not used below; retained from the original cell

# Harmonic sum 1/1 + 1/2 + ... + 1/500.  Terms are added from the smallest
# upward (i runs 500 -> 1), which slightly reduces floating-point error.
s = 0
for i in range(500, 0, -1):
    s = s + 1 / i
print(s)
```
- 8

```
import math  # not used below; retained from the original cell

# Sum of (i-2)/i for odd i from 99 down to 3: 97/99 + 95/97 + ... + 1/3.
s = 0
for i in range(99, 2, -2):
    s = s + (i - 2) / i
print(s)

# The same series accumulated in the opposite order (i from 3 up to 99).
# NOTE: 'sum' shadows the built-in sum() for the rest of the session.
sum = 0
for i in range(3, 100, 2):
    sum = sum + (i - 2) / i
print(sum)
```
- 9

```
import math  # not used below; retained from the original cell

# Leibniz series for pi: pi/4 = 1 - 1/3 + 1/5 - 1/7 + ...
# Print the approximation 4*p after 10k, 20k, ..., 100k terms.  Each partial
# sum is recomputed from scratch; terms are accumulated from the smallest
# magnitude upward (j = i .. 1).
for i in range(10000, 100001, 10000):
    p = 0
    for j in range(i, 0, -1):
        term = 1 / (2 * j - 1)
        if j % 2 == 0:      # even j carries a negative sign: (-1)**(j+1) == -1
            term = -term
        p = p + term
    print(4 * p)
```
- 10

```
# Perfect-number search: print each n < 10000 that equals the sum of its
# proper divisors, together with that divisor list.
for n in range (1,10000):
    # BUG FIX: the divisor list must be rebuilt for every n.  In the
    # original it was created once *outside* the loop and kept growing,
    # so sum(l) compared n against stale divisors of earlier numbers.
    l = [ ]
    for a in range (1,n):
        if n%a ==0:
            l.append(a)
    if sum(l)==n:
        print (l)
        print (n)
l = []  # leftover from the original cell; not used in this version
# Perfect numbers below 1000: print every i that equals the sum of its
# proper divisors (expected output: 6, 28, 496).
for i in range(1, 1000):
    # accumulate the sum of the proper divisors of i
    sum_ = 0
    for a in range(1, i):
        if i % a == 0:
            sum_ = sum_ + a
    if sum_ == i:
        print(i)
```
- 11

```
# Enumerate all 36 ordered outcomes of rolling two six-sided dice,
# printing each pair and counting them as we go.
a = 0
for i in range(1, 7):
    for j in range(1, 7):
        a = a + 1
        print(i, j)
print('组合总个数:', a)
```
- 12

```
a = eval(input('输入1个数:'))
b = eval(input('输入1个数:'))
c = eval(input('输入1个数:'))
d = eval(input('输入1个数:'))
e = eval(input('输入1个数:'))
f = eval(input('输入1个数:'))
g = eval(input('输入1个数:'))
h = eval(input('输入1个数:'))
i = eval(input('输入1个数:'))
j = eval(input('输入1个数:'))
m = ((a+b+c+d+e+f+g+h+i+j)/10)
d = pow((((a-m)**2+(b-m)**2+(c-m)**2+(d-m)**2+(e-m)**2+(f-m)**2+(g-m)**2+(h-m)**2+(i-m)**2+(j-m)**2)/9),1/2)
print('The mean is',m,'The standard deviation is',d)
import math
lis = []
for i in range(10):
lis.append(float(input()))
s = 0
avg = sum(lis)/10
for i in lis:
s+=(i-avg)**2
print(avg)
print(math.sqrt(s/10))
```
| github_jupyter |
# dask-sql
### A SQL Query Layer for Dask
## Introduction
`dask-sql` adds a SQL query layer on top of the Dask distributed Python library, which allows you to query your big and small data with SQL and still use the great power of the Dask ecosystem.
It helps you combine the best of both worlds.
See the [documentation](https://dask-sql.readthedocs.io/) for more information.
## Starting dask-sql
There are two possibilities how you can send your SQL queries to `dask-sql`:
* you use a Python notebook/script, such as the one you have currently opened
* you run the [dask-sql Server](https://dask-sql.readthedocs.io/en/latest/pages/server.html) as a standalone application and connect to it via e.g. your BI tool
We will stick with the first possibility in this notebook, but all SQL commands shown here can also be run via the SQL server.
Before we start, we need do import `dask-sql` and create a `Context`, which collects all the information on the currently registered data tables.
We will also create a small local Dask cluster (this step is not needed, but gives us a bit more debugging options).
If you have a large computation cluster, you can connect to it in this step (have a look [here](https://docs.dask.org/en/latest/setup.html)).
```
from dask_sql import Context
from dask.distributed import Client
client = Client()
c = Context()
client
```
You are now ready to query with SQL!
```
c.sql("""
SELECT 42 AS "the answer"
""", return_futures=False)
```
Some shortcut for the following:
```
c.ipython_magic(auto_include=True)
```
This line allows us to write (instead of the line above)
```
%%sql
SELECT 42 AS "the answer"
```
## Data Input
### 1. From a Dask Dataframe via Python
```
import dask.dataframe as dd
df = dd.read_csv("./iris.csv")
df.head(10)
c.create_table("iris", df)
```
### 2. From an external data source via SQL
```
%%sql
CREATE OR REPLACE TABLE iris
WITH (
location = 'file://./iris.csv',
format = 'csv',
persist = True
)
```
* s3, azure, dbfs (new!), gs, hdfs, ...
* hive (experimental), databricks (experimental), intake
* already loaded data persisted in your Dask cluster
More [information](https://dask-sql.readthedocs.io/en/latest/pages/data_input.html)
### 3. As materialized Queries
```
%%sql
CREATE OR REPLACE TABLE second_iris
AS SELECT * FROM iris
```
### 4. From the notebook
As we have created an ipython magic with `c.ipython_magic(auto_include=True)` we can even just reference any dataframe created in the notebook in our queries.
```
my_data_frame = dd.read_csv("./iris.csv")
%%sql
SELECT * FROM my_data_frame LIMIT 10
```
Please note that using this setting will automatically override any predefined tables with the same name.
## Metadata Information
```
%%sql
SHOW TABLES FROM "schema"
%%sql
SHOW COLUMNS FROM "iris"
```
## Data Query
You can call "normal" SQL `SELECT` statements in `dask-sql`, with all typical components from the standard SQL language.
More information in the [SQL reference](https://dask-sql.readthedocs.io/en/latest/pages/sql.html).
`dask-sql` roughly follows the prestoSQL conventions (e.g. quoting).
<div class="alert alert-info">
#### Note
Not all SQL operators are implemented in `dask-sql` already.
</div>
```
%%sql
SELECT *
FROM iris
LIMIT 10
%%sql
SELECT
sepal_length + sepal_width AS "sum",
SIN(petal_length) AS "sin"
FROM iris
LIMIT 10
%%sql
SELECT
species,
AVG(sepal_length) AS sepal_length,
AVG(sepal_width) AS sepal_width
FROM iris
GROUP BY species
LIMIT 10
%%sql
WITH maximal_values AS (
SELECT
species,
MAX(sepal_length) AS sepal_length
FROM iris
GROUP BY species
)
SELECT lhs.*
FROM iris AS lhs
JOIN maximal_values AS rhs ON lhs.species = rhs.species AND lhs.sepal_length = rhs.sepal_length
print(c.explain("""
WITH maximal_values AS (
SELECT
species,
MAX(sepal_length) AS sepal_length
FROM iris
GROUP BY species
)
SELECT
lhs.*
FROM iris AS lhs
JOIN maximal_values AS rhs
ON lhs.species = rhs.species
AND lhs.sepal_length = rhs.sepal_length
"""))
```
## Custom Functions
```
import numpy as np
def volume(length, width):
    """Approximate an iris part as a cylinder: pi * r^2 * h with r = width / 2."""
    radius = width / 2
    return radius ** 2 * np.pi * length
# As SQL is a typed language, we need to specify all types
c.register_function(volume, "IRIS_VOLUME",
parameters=[("length", np.float64), ("width", np.float64)],
return_type=np.float64)
%%sql
SELECT
sepal_length, sepal_width, IRIS_VOLUME(sepal_length, sepal_width) AS volume
FROM iris
LIMIT 10
```
## Machine Learning
```
df.species.head(100)
%%sql
CREATE OR REPLACE TABLE enriched_iris AS (
SELECT
sepal_length, sepal_width, petal_length, petal_width,
CASE
WHEN species = 'setosa' THEN 0 ELSE CASE
WHEN species = 'versicolor' THEN 1
ELSE 2
END END AS "species",
IRIS_VOLUME(sepal_length, sepal_width) AS volume
FROM iris
)
%%sql
CREATE OR REPLACE TABLE training_data AS (
SELECT
*
FROM enriched_iris
TABLESAMPLE BERNOULLI (50)
)
%%sql
SELECT * FROM training_data
%%sql
CREATE OR REPLACE MODEL my_model WITH (
model_class = 'dask_ml.xgboost.XGBClassifier',
target_column = 'species',
num_class = 3
) AS (
SELECT * FROM training_data
)
%%sql
SELECT
*
FROM PREDICT(
MODEL my_model,
SELECT * FROM enriched_iris
)
%%sql
CREATE OR REPLACE TABLE results AS
SELECT
*
FROM PREDICT(
MODEL my_model,
TABLE enriched_iris
)
%%sql
SELECT
target,
species,
COUNT(*)
FROM
results
GROUP BY target, species
t = c.sql("""
SELECT
target,
species,
COUNT(*) AS "number"
FROM
results
GROUP BY target, species
""").compute()
t.set_index(["target", "species"]).unstack("species").number.plot.bar()
```
| github_jupyter |
# Understanding Tree SHAP for Simple Models
The SHAP value for a feature is the average change in model output by conditioning on that feature when introducing features one at a time over all feature orderings. While this is easy to state, it is challenging to compute. So this notebook is meant to give a few simple examples where we can see how this plays out for very small trees. For arbitrary large trees it is very hard to intuitively guess these values by looking at the tree.
```
import sklearn
import shap
import numpy as np
import graphviz
```
## Single split example
```
# build data
N = 100
M = 4
X = np.zeros((N,M))
X.shape
y = np.zeros(N)
X[:N//2, 0] = 1
y[:N//2] = 1
# fit model
single_split_model = sklearn.tree.DecisionTreeRegressor(max_depth=1)
single_split_model.fit(X, y)
# draw model
dot_data = sklearn.tree.export_graphviz(single_split_model, out_file=None, filled=True, rounded=True, special_characters=True)
graph = graphviz.Source(dot_data)
graph
```
#### Explain the model
Note that the bias term is the expected output of the model over the training dataset (0.5). The SHAP value for features not used in the model is always 0, while for $x_0$ it is just the difference between the expected value and the output of the model.
```
xs = [np.ones(M), np.zeros(M)]
for x in xs:
print()
print(" x =", x)
print("shap_values =", shap.TreeExplainer(single_split_model).shap_values(x))
```
## Two feature AND example
```
# build data
N = 100
M = 4
X = np.zeros((N,M))
X.shape
y = np.zeros(N)
X[:1 * N//4, 1] = 1
X[:N//2, 0] = 1
X[N//2:3 * N//4, 1] = 1
y[:1 * N//4] = 1
# fit model
and_model = sklearn.tree.DecisionTreeRegressor(max_depth=2)
and_model.fit(X, y)
# draw model
dot_data = sklearn.tree.export_graphviz(and_model, out_file=None, filled=True, rounded=True, special_characters=True)
graph = graphviz.Source(dot_data)
graph
```
#### Explain the model
Note that the bias term is the expected output of the model over the training dataset (0.25). The SHAP value for features not used in the model is always 0, while for $x_0$ and $x_1$ it is just the difference between the expected value and the output of the model split equally between them (since they equally contribute to the AND function).
```
xs = [np.ones(M), np.zeros(M)]
for x in xs:
print()
print(" x =", x)
print("shap_values =", shap.TreeExplainer(and_model).shap_values(x))
```
## Two feature OR example
```
# build data
N = 100
M = 4
X = np.zeros((N,M))
X.shape
y = np.zeros(N)
X[:N//2, 0] = 1
X[:1 * N//4, 1] = 1
X[N//2:3 * N//4, 1] = 1
y[:N//2] = 1
y[N//2:3 * N//4] = 1
# fit model
or_model = sklearn.tree.DecisionTreeRegressor(max_depth=2)
or_model.fit(X, y)
# draw model
dot_data = sklearn.tree.export_graphviz(or_model, out_file=None, filled=True, rounded=True, special_characters=True)
graph = graphviz.Source(dot_data)
graph
```
#### Explain the model
Note that the bias term is the expected output of the model over the training dataset (0.75). The SHAP value for features not used in the model is always 0, while for $x_0$ and $x_1$ it is just the difference between the expected value and the output of the model split equally between them (since they equally contribute to the OR function).
```
xs = [np.ones(M), np.zeros(M)]
for x in xs:
print()
print(" x =", x)
print("shap_values =", shap.TreeExplainer(or_model).shap_values(x))
```
## Two feature XOR example
```
# build data
N = 100
M = 4
X = np.zeros((N,M))
X.shape
y = np.zeros(N)
X[:N//2, 0] = 1
X[:1 * N//4, 1] = 1
X[N//2:3 * N//4, 1] = 1
y[1 * N//4:N//2] = 1
y[N//2:3 * N//4] = 1
# fit model
xor_model = sklearn.tree.DecisionTreeRegressor(max_depth=2)
xor_model.fit(X, y)
# draw model
dot_data = sklearn.tree.export_graphviz(xor_model, out_file=None, filled=True, rounded=True, special_characters=True)
graph = graphviz.Source(dot_data)
graph
```
#### Explain the model
Note that the bias term is the expected output of the model over the training dataset (0.5). The SHAP value for features not used in the model is always 0, while for $x_0$ and $x_1$ it is just the difference between the expected value and the output of the model split equally between them (since they equally contribute to the XOR function).
```
xs = [np.ones(M), np.zeros(M)]
for x in xs:
print()
print(" x =", x)
print("shap_values =", shap.TreeExplainer(xor_model).shap_values(x))
```
## Two feature AND + feature boost example
```
# build data
N = 100
M = 4
X = np.zeros((N,M))
X.shape
y = np.zeros(N)
X[:N//2, 0] = 1
X[:1 * N//4, 1] = 1
X[N//2:3 * N//4, 1] = 1
y[:1 * N//4] = 1
y[:N//2] += 1
# fit model
and_fb_model = sklearn.tree.DecisionTreeRegressor(max_depth=2)
and_fb_model.fit(X, y)
# draw model
dot_data = sklearn.tree.export_graphviz(and_fb_model, out_file=None, filled=True, rounded=True, special_characters=True)
graph = graphviz.Source(dot_data)
graph
```
#### Explain the model
Note that the bias term is the expected output of the model over the training dataset (0.75). The SHAP value for features not used in the model is always 0, while for $x_0$ and $x_1$ it is just the difference between the expected value and the output of the model split equally between them (since they equally contribute to the AND function), plus an extra 0.5 impact for $x_0$ since it has an effect of $1.0$ all by itself (+0.5 if it is on and -0.5 if it is off).
```
xs = [np.ones(M), np.zeros(M)]
for x in xs:
print()
print(" x =", x)
print("shap_values =", shap.TreeExplainer(and_fb_model).shap_values(x))
```
| github_jupyter |
# The PICO model
## based on Reese et al (2018): "Antarctic sub-shelf melt rates via PICO"
In part (a) we test a few idealized geometries, in part (b) realistic geometries are presented.
There are a few differences to the original implementation w.r.t to real geometries.
- underlying datasets: we use the BedMachine2 data
- model resolution: we use the BedMachine native grid at 500 m grid spacing, whereas PICO uses 5 km
## Favier's implementation
compare the PICO Model Box Model (BM) to simple parametrization (M), and Plume Model (PME)
- use two constant depths for "ambient" temperatures: 500 m or 700 m
- use 2, 5, or 10 boxes
- avoid pressure dependence of melting becuase it introduces an energetic inconsistency -> uniform melting in boxes
```
import sys
import numpy as np
import xarray as xr
import pandas as pd
import warnings
import geopandas
import matplotlib
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
sys.path.append("..")
# matplotlib.rc_file('../rc_file')
%matplotlib inline
%config InlineBackend.print_figure_kwargs={'bbox_inches':None}
%load_ext autoreload
%autoreload 2
warnings.filterwarnings("ignore", category=matplotlib.MatplotlibDeprecationWarning)
from real_geometry import RealGeometry, glaciers
from PICO import PicoModel, table2
from compare_models import compare_PICO
```
## (a) idealized geometries
```
f, ax = plt.subplots(5,3, figsize=(12,12), sharey='row', constrained_layout=True)
for i, testcase in enumerate(['test1', 'test2', 'test3']):
geo, ds = PicoModel(name=testcase).compute_pico()
geo.draft.plot(ax=ax[0,i])
ax[0,i].set_title(testcase)
ds.melt.plot(ax=ax[1,i])
ds.mk.plot(ax=ax[2,i])
ds.Tk.plot(ax=ax[3,i])
ds.Sk.plot(ax=ax[4,i])
```
These are test geometries, the `test1` is a quasi-1D iceshelf of 100 km length with a grounding line depth of 1000 m and an ice shelf front depth of 500 m. `test2` is simply a rotated version of `test1`. `test3` has a sinusoidal grounding line profile and a flat ice shelf front profile. The geometries (arbitrarily) have 3 boxes. `boxnr=0` represents either the average (for melt) or the ambient conditions (temperature and salinity).
The melt is highest near the grounding line in part because in-situ temperatures are highest there. Both temperature and salinity decrease as the plume ascends towards the ice shelf front.
## (b) real geometries
At first execution, the code creates the real geometries from the BedMachine data and IceVelocity data (these files are too big for version control on Github, but see lines 26f in `real_geometries.py` for their location).
### example: Thwaites glacier
```
geo, ds = PicoModel('Thwaites').compute_pico()
f, ax = plt.subplots(1,4, figsize=(20,4), sharey=True)
geo.draft.plot(ax=ax[0])
geo.rd.plot(ax=ax[1])
geo.box.plot(ax=ax[2])
ds.melt.plot(ax=ax[3])
```
### comparing the 6 currently implemented ice shelves
```
for i, glacier in enumerate(glaciers):
if glacier in ['Ross', 'FilchnerRonne']: # at the BedMachine resolution, these datasets are too big for laptop memory
continue
PicoModel(glacier).compute_pico()
compare_PICO()
```
### maps of Amundsen Sea and East Antarctica
```
proj = ccrs.SouthPolarStereo(true_scale_latitude=-71)
def fn_poly(glacier):
    """Return the path of the mask-polygon GeoJSON file for *glacier*."""
    return '../../data/mask_polygons/' + glacier + '_polygon.geojson'
x5, y5, _, _ = geopandas.read_file(fn_poly('MoscowUniversity'), crs='espg:3031').total_bounds
_, _, x6, y6 = geopandas.read_file(fn_poly('Totten') , crs='espg:3031').total_bounds
x3, _, _, y4 = geopandas.read_file(fn_poly('PineIsland') , crs='espg:3031').total_bounds
_, y3, x4, _ = geopandas.read_file(fn_poly('Dotson') , crs='espg:3031').total_bounds
import matplotlib.ticker as mticker
f = plt.figure(figsize=(8,12))
for i in range(2): # Amundsen Sea, Totten+MoscowUniversity
(x1,x2,y1,y2) = [(x3,x4,y3-1e4,y4+2e4),(x5-1e4,x6,y5,y6+1e4)][i]
shelves = [['PineIsland','Thwaites','Dotson'], ['Totten','MoscowUniversity']][i]
for s, shelf in enumerate(shelves):
(x,y) = [[(.65,.88),(.05,.55),(.05,.2)],[(.3,.8),(.4,.1)]][i][s]
name = [['Pine\nIsland','Thwaites','Dotson/\nCrosson'], ['Totten','Moscow\nUniversity']][i][s]
dsg = xr.open_dataset(RealGeometry(shelf).fn_PICO)
dsP = xr.open_dataset(PicoModel(shelf).fn_PICO_output)
lon, lat = dsg.lon, dsg.lat
for j in range(3):
q = [dsg.draft, dsg.box.where(dsg.mask), dsP.melt.where(dsg.mask)][j]
cmap = ['viridis', 'Spectral','inferno_r'][j]
(vmin,vmax) = [(-2000,0),(1,2),(0,25)][j]
ax = f.add_axes([j/3,.545-.54*i,.33,.45], projection=proj)
ax.set_frame_on(False)
ax.set_extent([x1,x2,y1,y2], crs=proj)
ax.coastlines()
gl = ax.gridlines()
gl.xlocator = mticker.FixedLocator(np.arange(-180,179,5))
gl.ylocator = mticker.FixedLocator(np.arange(-89,89))
im = ax.pcolormesh(lon, lat, q, transform=ccrs.PlateCarree(),
cmap=cmap, vmin=vmin, vmax=vmax)
if i==0: # colorbars
cax = f.add_axes([j/3+.02,.5,.29,.02])
label = ['draft [m]', 'box nr.', 'melt rate [m/yr]'][j]
plt.colorbar(im, cax=cax, orientation='horizontal', label=label)
if j==0: ax.text(x, y, name, weight='bold', transform=ax.transAxes)
if j==2: ax.text(x, y, f'{dsP.mk[0].values:.2f} m/yr', transform=ax.transAxes)
# f, ax = plt.subplots(4, 3, figsize=(15,15))
# for i, key in enumerate(list(ds.keys())[:-1]):
# if i<9: kwargs = {'cbar_kwargs':{'orientation':'horizontal'}}
# else: kwargs = {}
# ds[key].plot(ax=ax[int(i/3), i%3], **kwargs)
```
| github_jupyter |
### Import
```
from dataclasses import dataclass, field, asdict
from typing import List
from csv2shex.csvreader import (
csvreader,
_get_csvrow_dicts_list,
_get_corrected_csvrows_list,
_get_csvshape_dicts_list,
)
from csv2shex.csvrow import CSVRow
from csv2shex.utils import pprint_df
import pandas as pd
```
### Declare
```
@dataclass
class CSVTripleConstraint:
    """Instances hold TAP/CSV row elements that form a triple constraint."""
    propertyID: str = None
    valueConstraint: str = None
    valueShape: str = None
    # Bug fix: the annotation and default were swapped — `field(default_factory=dict)`
    # must be the *default*, so every instance gets its own fresh dict.
    extras: dict = field(default_factory=dict)
    # propertyLabel: str = None
    # mandatory: str = None
    # repeatable: str = None
    # valueNodeType: str = None
    # valueDataType: str = None
    # valueConstraintType: str = None
    # note: str = None
@dataclass
class CSVShape:
    """Instances hold TAP/CSV row elements that form a shape."""
    shapeID: str = None  # identifier for the shape (e.g. ':book')
    # shapeLabel: str = None
    # shapeClosed: str = None
    # start: bool = False
    # default_factory gives each instance its own list of triple constraints
    tripleconstraints_list: List[CSVTripleConstraint] = field(default_factory=list)
@dataclass
class CSVSchema:
    """Set of shapes."""
# Sample TAP rows: a row with an empty shapeID belongs to the most
# recently declared shape (here, dc:type belongs to :book).
csvrow_dicts_list = [{'shapeID': ':book',
                      'propertyID': 'dc:creator',
                      'valueConstraint': '',
                      'valueShape': ':author'},
                     {'shapeID': '',
                      'propertyID': 'dc:type',
                      'valueConstraint': 'so:Book',
                      'valueShape': ''},
                     {'shapeID': ':author',
                      'propertyID': 'foaf:name',
                      'valueConstraint': '',
                      'valueShape': ''}]
```
### For each row
#### 1. Initialize instance of CSVShape
```
# Build a dict of CSVShape objects keyed by shapeID, one per CSV row.
dict_of_shape_objs = dict()  # bug fix: was referenced below but never initialized
for row in csvrow_dicts_list:
    shape = CSVShape()
    shape.shapeID = row["shapeID"]
    shape.tripleconstraints_list = list()
    # Bug fix: the original indexed the (undefined) shape_dict here;
    # the key must be the shapeID of the shape just built from this row.
    dict_of_shape_objs[shape.shapeID] = shape
dict_of_shape_objs
```
#### 2. On finding new shapeID, capture shape-related elements in a shape_dict.
```
# Capture the shape-related elements of a newly encountered shapeID ("b")
# in a plain dict, including an empty list for its triple constraints.
shape_dict = {
    "shapeID": "b",
    "shapeLabel": "label",
    "shapeClosed": False,
    "start": True,
    "tripleconstraints_list": [],
}
shape_dict
```
#### 3. Assign CSVShape instance as value to key "shapeID" in dict_of_shape_objs
```
# Register a CSVShape instance under its shapeID.
dict_of_shape_objs = dict()
# NOTE(review): `cshap` is not defined anywhere in this notebook — presumably a
# previously constructed CSVShape instance was intended; confirm the intended object.
dict_of_shape_objs[shape_dict["shapeID"]] = cshap
dict_of_shape_objs
"b" in dict_of_shape_objs
# Triple constraints list for shape "b"
dict_of_shape_objs["b"].tripleconstraints_list
```
#### 4. Each new shape is added to dict_of_shape_dicts.
```
# Build a second shape dict ("c") and register it in dict_of_shape_objs.
shape_dict = dict()
shape_dict["shapeID"] = "c"
shape_dict["shapeLabel"] = "clabel"
shape_dict["shapeClosed"] = False
shape_dict["start"] = False
shape_dict["tripleconstraints_list"] = list()
# NOTE(review): the CSVShape dataclass defined above declares only shapeID and
# tripleconstraints_list, so CSVShape(**shape_dict) would raise TypeError on
# shapeLabel/shapeClosed/start — this works only with the full csv2shex CSVShape.
dict_of_shape_objs[shape_dict["shapeID"]] = CSVShape(**shape_dict)
dict_of_shape_objs
dict_of_shape_objs.keys()
# After first row, for rows that lack shapeIDs, get most-recently-inserted key from dict_of_shape_dicts
list(dict_of_shape_objs.keys())[-1]
```
#### 4.
```
# Problem: append multiple triple constraint dicts to tripleconstraints_list
tc_dict = dict()
tc_dict["propertyID"] = "dc:type"
tc_dict["valueConstraint"] = "foaf:Person"
dict_of_shape_objs["b"].tripleconstraints_list.append(tc_dict)
dict_of_shape_objs
# Problem: append multiple triple constraint dicts to tripleconstraints_list
tc_dict = dict()
tc_dict["propertyID"] = "dc:creator"
tc_dict["valueConstraint"] = "http://example.org/person1"
# Instantiate a CSVTripleConstraint from the dict via keyword expansion.
tc_obj = CSVTripleConstraint(**tc_dict)
tc_obj
CSVTripleConstraint(**tc_dict)
dict_of_shape_objs
# This is to pretty-print the entire CSVShape
# NOTE(review): the CSVShape dataclass defined above accepts only shapeID and
# tripleconstraints_list; the extra keywords below would raise TypeError there —
# this call matches the full csv2shex CSVShape instead.
vars(CSVShape(shapeID='b', shapeLabel='label', shapeClosed=False, start=True, tripleconstraints_list=[
    {'propertyID': 'dc:type', 'valueConstraint': 'foaf:Person'},
    {'propertyID': 'dc:creator', 'valueConstraint': 'http://example.org/person1'}]))
```
| github_jupyter |
```
import dtt2hdf
import matplotlib.pyplot as plt
import numpy as np
# Read the diaggui XML and extract the four sensor spectra above 0.01 Hz.
items = dtt2hdf.read_diaggui('./data/SR2_IP_noise_20200720a.xml')

def _psd(channel):
    # First PSD trace of the given channel.
    return items.results.PSD[channel].PSD[0]

f = items.results.PSD['K1:VIS-SR2_IP_BLEND_ACCL_IN1'].FHz
lvdtl = _psd('K1:VIS-SR2_IP_BLEND_LVDTL_IN1')
lvdtt = _psd('K1:VIS-SR2_IP_BLEND_LVDTT_IN1')
accl = _psd('K1:VIS-SR2_IP_BLEND_ACCL_IN1')
acct = _psd('K1:VIS-SR2_IP_BLEND_ACCT_IN1')
# Restrict all spectra to frequencies >= 0.01 Hz in one pass.
mask = f >= 1e-2
f, lvdtl, lvdtt, accl, acct = [arr[mask] for arr in (f, lvdtl, lvdtt, accl, acct)]
# Quick-look plot of the four masked spectra.
for trace in (lvdtl, lvdtt, accl, acct):
    plt.loglog(f, trace)
from kontrol.model import *
from kontrol import quad_sum
def low_noise_model(f, seis_N0, lvdt_N0, lvdt_fc):
    """Quadrature sum of a seismometer-noise slope and the LVDT noise model."""
    seismic_term = seis_N0 / f**2.5
    lvdt_term = lvdt_noise(f, lvdt_N0, lvdt_fc, exp=[-0.1, 0])
    return quad_sum(seismic_term, lvdt_term)
# Make weight to ignore non-noise components between 0.03 and 0.2 Hz
low_noise_weight = np.ones_like(f)
for i in range(len(low_noise_weight)):
if f[i]>0.03 and f[i]<0.2:
low_noise_weight[i]=0
# lvdtl_args = noise_fit(low_noise_model, f, lvdtl, weight = low_noise_weight)
# lvdtt_args = noise_fit(low_noise_model, f, lvdtt, weight = low_noise_weight)
plt.loglog(f, low_noise_model(f, *lvdtl_args), '--')
plt.loglog(f, low_noise_model(f, *lvdtt_args), '--')
geo_noise_weight = np.ones_like(f)
# Make weight to ignore non-noise components in 0.03-0.2 Hz, 0.3-1.5 Hz,
# and above 10 Hz (vectorized boolean mask instead of a per-sample loop).
ignore = ((f > 0.03) & (f < 0.2)) | ((f > 0.3) & (f < 1.5)) | (f > 10)
geo_noise_weight[ignore] = 0
def high_noise_model(f, geo_N0, geo_fc):
    """Geophone noise model used as the high-frequency sensor noise."""
    return geophone_noise(f, geo_N0, geo_fc, exp=[-3, -1])
geol_args = noise_fit(high_noise_model, f, accl, weight = geo_noise_weight)
geot_args = noise_fit(high_noise_model, f, acct, weight = geo_noise_weight)
plt.loglog(f, high_noise_model(f, *geol_args), '--')
plt.loglog(f, high_noise_model(f, *geot_args), '--')
# plt.loglog(f, low_noise_model(f, 1e-6, 1e-6, 1))
# data=np.loadtxt('./data/SR2_IP_noise_20200720a.txt')
# f, accltxt = data[:,0], data[:,4]
# plt.loglog(f,accltxt)
# len(accltxt)
# data
# plt.semilogx(np.angle(items.results.TF['K1:VIS-SR2_IP_BLEND_ACCL_IN1'].CSD[2]))
# Four-panel summary: each sensor spectrum together with its fitted noise model.
plt.figure(figsize=(15,10))
panels = [
    (221, lvdtl, 'LVDT L noise', low_noise_model, lvdtl_args),
    (222, lvdtt, 'LVDT T noise', low_noise_model, lvdtt_args),
    (223, accl, 'geophone L noise', high_noise_model, geol_args),
    (224, acct, 'geophone T noise', high_noise_model, geot_args),
]
for position, spectrum, spectrum_label, model, model_args in panels:
    plt.subplot(position)
    plt.loglog(f, spectrum, label=spectrum_label)
    plt.loglog(f, model(f, *model_args), '--', label='fit')
    plt.ylabel('ASD ($\mu\mathrm{m} / \sqrt{\mathrm{Hz}}$)')
    plt.xlabel('Frequency (Hz)')
    plt.legend(loc=0)
    plt.grid()
from kontrol.filter import (optimize_complementary_filter,
    complementary_modified_sekiguchi, complementary_sekiguchi, complementary_lucia)
# Optimize a complementary low-/high-pass filter pair that minimizes the
# blended sensor noise for the L direction.
res = optimize_complementary_filter(
    complementary_modified_sekiguchi,
    spectra=[low_noise_model(f,*lvdtl_args),high_noise_model(f,*geol_args)],
    f=f,
#     bounds=[(0.01,10),(0.01,10),(0.01,10),(0.01,10),(0.5,1000),(0.01,10),(0.5,1000)],
    x0=[0.32,0.32,0.32,0.32]
)
# res.x/2/np.pi
res.x/2/np.pi
lpfl, hpfl = complementary_modified_sekiguchi(res.x)
# lpfl, hpfl = complementary_lucia(res.x)
# lpfl, hpfl = complementary_sekiguchi(res.x)
plt.figure(figsize=[15,5])
plt.subplot(121)
plt.loglog(f, abs(lpfl.horner(2*np.pi*1j*f)[0][0]), label = 'Low-pass')
plt.loglog(f, abs(hpfl.horner(2*np.pi*1j*f)[0][0]), label = 'High-pass')
plt.ylabel('Magnitude')
plt.xlabel('Frequency (Hz)')
plt.legend(loc=0)
plt.grid()
plt.subplot(122)
plt.loglog(f, low_noise_model(f,*lvdtl_args), label = 'LVDT + seismometer noise, L')
# Typo fix in label: 'geohpone' -> 'geophone'
plt.loglog(f, high_noise_model(f,*geol_args), label = 'geophone noise, L')
plt.loglog(f, quad_sum(abs(lpfl.horner(2*np.pi*1j*f)[0][0])*low_noise_model(f,*lvdtl_args),
                       abs(hpfl.horner(2*np.pi*1j*f)[0][0])*high_noise_model(f,*geol_args)),
           label = 'blended noise')
plt.ylabel('Magnitude')
plt.xlabel('Frequency (Hz)')
plt.legend(loc=0)
plt.grid()
lpfl, hpfl
# Repeat the complementary-filter optimization for the T direction.
res = optimize_complementary_filter(
    complementary_modified_sekiguchi,
    spectra=[low_noise_model(f,*lvdtt_args),high_noise_model(f,*geot_args)],
    f=f,
#     bounds=[(0.01,10),(0.01,10),(0.01,10),(0.01,10),(0.5,1000),(0.01,10),(0.5,1000)],
    x0=[0.32,0.32,0.32,0.32]
)
lpft, hpft = complementary_modified_sekiguchi(res.x)
plt.figure(figsize=[15,5])
plt.subplot(121)
plt.loglog(f, abs(lpft.horner(2*np.pi*1j*f)[0][0]), label = 'Low-pass')
plt.loglog(f, abs(hpft.horner(2*np.pi*1j*f)[0][0]), label = 'High-pass')
plt.ylabel('Magnitude')
plt.xlabel('Frequency (Hz)')
plt.legend(loc=0)
plt.grid()
plt.subplot(122)
plt.loglog(f, low_noise_model(f,*lvdtt_args), label = 'LVDT + seismometer noise, T')
# Typo fix in label: 'geohpone' -> 'geophone'
plt.loglog(f, high_noise_model(f,*geot_args), label = 'geophone noise, T')
plt.loglog(f, quad_sum(abs(lpft.horner(2*np.pi*1j*f)[0][0])*low_noise_model(f,*lvdtt_args),
                       abs(hpft.horner(2*np.pi*1j*f)[0][0])*high_noise_model(f,*geot_args)),
           label = 'blended noise')
plt.ylabel('Magnitude')
plt.xlabel('Frequency (Hz)')
plt.legend(loc=0)
plt.grid()
lpft
lpft, hpft
```
| github_jupyter |
# Python Basics
```
# Printing a string
print("Hello, Python!")  # print() writes its arguments to stdout followed by a newline
print('Lab at SRM-AP')   # single and double quotes both delimit string literals
```
### Variables
```
# defining a variable : In Python there is no need to mention the data type
var1 = 10 # An integer assignment
var2 = 3.146 # A floating point
var3 = "Hello" # A string
print(var1,' ',var2,' ',var3)
pi = 3.14 # types are inferred at runtime from the assigned value
print ("Value of Pi is",pi)
```
### Assignment
```
# Assigning same value to multiple variables
var1 = var2 = var3 = 1 # chained assignment: every name gets the same value
print(var1,' ',var2,' ',var3)
# Assigning Different values to variable in a single expression
var1, var2, var3 = 1, 2.5, "john" # tuple unpacking
print(var1,' ',var2,' ',var3)
# Note: commas can be used for multi-assignments
```
### Slicing
```
# String operations
# Bug fix: the original variable was named `str`, shadowing the builtin type;
# renamed to `sample` (printed output is unchanged).
sample = 'Hello World!' # A string
print(sample) # Prints complete string
print(sample[0]) # Prints first character of the string
print(sample[2:5]) # Prints characters starting from 3rd to 5th element
print(sample[2:]) # Prints string starting from 3rd character
print(sample[:2]) # Prints the first two characters
print(sample * 2) # Prints string two times
print(sample + "TEST") # Prints concatenated string
```
### Data types
```
# Python Lists
# Bug fix: the originals were named `list` and `tuple`, shadowing the builtins;
# renamed (printed output is unchanged).
demo_list = [ 'abcd', 786 , 2.23, 'john', 70.2 ] # A list
demo_tuple = ( 'abcd', 786 , 2.23, 'john', 70.2 ) # A tuple. Tuples are immutable, i.e. cannot be edit later
print(demo_list) # Prints complete list
print(demo_list[0]) # Prints first element of the list
print(demo_tuple[1:3]) # Prints elements starting from 2nd till 3rd
# Lists are ordered sets of objects, whereas dictionaries are unordered sets. But the main difference is that items in dictionaries are accessed via keys and not via their position.
tel = {'jack': 4098, 'sape': 4139}
tel['guido'] = 4127 # insert a new key/value pair
print(tel)
print(tel['jack'])
del tel['sape'] # remove a key
tel['irv'] = 4127
print(tel)
print(tel.keys())
print(sorted(tel.keys()))
print(sorted(tel.values()))
print('guido' in tel) # membership tests look at the keys
print('jack' not in tel)
```
### Conditioning and looping
```
# Square of Even numbers: print the square of each even i in 0..9,
# and flag the odd ones.
for i in range(10):
    parts = ("Square of ", i, " is :", i * i) if i % 2 == 0 else (i, "is an odd number")
    print(*parts)
```
### Built-in Functions
```
print("Sum of array: ",sum([1,2,3,4]))  # sum of an iterable of numbers
print("Length of array: ",len([1,2,3,4]))  # number of elements
print("Absolute value: ",abs(-1234))
print("Round value: ",round(1.2234))  # rounds to the nearest integer
import math as mt # importing a package
print("Log value: ",mt.log(10))  # natural logarithm (base e)
```
### Functions
```
def area(length,width):
    """Return the area of a rectangle with the given side lengths."""
    rectangle_area = width * length
    return rectangle_area

are = area(10,20)
print("Area of rectangle:",are)
```
### Broadcasting
* Subject to certain constraints, the smaller array is “broadcast” across the larger array so that they have compatible shapes
### NumPy
* Numpy is the fundamental package for numerical computing with Python. It contains among other things:
* a powerful N-dimensional array object
* sophisticated (broadcasting) functions
* tools for integrating C/C++ and Fortran code
* useful linear algebra, Fourier transform, and random number capabilities
```
import numpy as np # Importing libraries
a = np.array([0, 1, 2])
b = np.array([5, 5, 5])
print("Matrix A\n", a)
print("Matrix B\n", b)
print("Regular matrix addition A+B\n", a + b)
print("Addition using Broadcasting A+5\n", a + 5) # the scalar 5 is broadcast to every element
```
### Broadcasting Rules
When operating on two arrays, NumPy compares their shapes element-wise. It starts with the trailing dimensions, and works its way forward. Two dimensions are compatible when
1. they are equal, or
2. one of them is 1
```
# Lets go for a 2D matrix
c = np.arange(9).reshape(3, 3)   # same values as [[0,1,2],[3,4,5],[6,7,8]]
d = np.array([[1, 2, 3]] * 3)    # three identical rows
e = np.array([1, 2, 3])
for label, m in (("Matrix C\n", c), ("Matrix D\n", d), ("Matrix E\n", e)):
    print(label, m)
print("Regular matrix addition C+D\n", c + d)
print("Addition using Broadcasting C+E\n", c + e)
M = np.ones((3, 3))
print("Matrix M:\n",M)
print("Dimension of M: ",M.shape)
print("Dimension of a: ",a.shape)
print("Addition using Broadcasting")
print(M + a)
# Broadcasting array with matrix
```
## All in one program
```
# Importing libraries
import timeit
# Usage of builtin functions
start = timeit.default_timer()
# Defining a list
array_list = [10,11,15,19,21,32]
array_np_list = []
# Print the list
print("Original List",array_list,"\n")
# Defining a function
def prime(num):
    """Print whether num is prime; record non-primes in array_np_list."""
    if num <= 1:
        # numbers below 2 are neither printed nor recorded (as in the original)
        return
    # smallest divisor in [2, num), or None when num is prime
    divisor = next((d for d in range(2, num) if num % d == 0), None)
    if divisor is None:
        print(num,"is a prime number")
    else:
        # Appending data to list
        array_np_list.append(num)
        print(num,"is not a prime number (",divisor,"times",num//divisor,"is",num,")")
# Iterating a list
for item in array_list:
    # Calling a function
    prime(item)
print("\nNon-prime List",array_np_list,"\n")
end = timeit.default_timer()
# Computing running time
print("Time Taken to run the program:",end - start, "seconds")
```
### Note:
* Python is a multi-paradigm language (it supports procedural, object-oriented, and functional styles)
* Two versions of Python 2 vs 3
* No braces. i.e. indentation
* No need to explicitly mention data type
## Unvectorized vs Vectorized Implementations
```
# Importing libraries
import numpy as np
# Defining matrices
# (plain nested lists; the unvectorized helpers below operate on these)
mat_a = [[6, 7, 8],[5, 4, 5],[1, 1, 1]]
mat_b = [[1, 2, 3],[1, 2, 3],[1, 2, 3]]
# Getting a row from matrix
def get_row(matrix, row):
    """Return row number `row` of `matrix`."""
    return matrix[row]

# Getting a coloumn from matrix
def get_column(matrix, column_number):
    """Return column `column_number` of `matrix` as a list."""
    return [matrix_row[column_number] for matrix_row in matrix]

# Multiply a row with coloumn
def unv_dot_product(vector_one, vector_two):
    """Dot product of two equal-length vectors; 0 when lengths differ."""
    if len(vector_one) != len(vector_two):
        return 0
    return sum(a * b for a, b in zip(vector_one, vector_two))

# Multiply two matrixes
def matrix_multiplication(matrix_one, matrix_two):
    """Multiply two matrices (lists of lists) without numpy."""
    return [
        [
            unv_dot_product(get_row(matrix_one, i), get_column(matrix_two, j))
            for j in range(len(matrix_two[0]))
        ]
        for i in range(len(matrix_one))
    ]
print("Matrix A: ", mat_a,"\n")
print("Matrix B: ", mat_b,"\n")
# Pure-Python implementation defined above
print("Unvectorized Matrix Multiplication\n",matrix_multiplication(mat_a,mat_b),"\n")
# Vectorized Implementation
npm_a = np.array(mat_a)
npm_b = np.array(mat_b)
print("Vectorized Matrix Multiplication\n",npm_a.dot(npm_b),"\n")
# A.dot(B) is a numpy built-in function for dot product
```
### Tip:
* Vectorization reduces number of lines of code
* Always prefer libraries and avoid coding from scratch
## Essential Python Packages: Numpy, Pandas, Matplotlib
```
# Load library
import numpy as np
# Create row vector
vector = np.array([1, 2, 3, 4, 5, 6])
print("Vector:",vector)
# Select second element (indexing is zero-based)
print("Element 2 in Vector is",vector[1])
# Create matrix
matrix = np.array([[1, 2, 3],
                   [4, 5, 6],
                   [7, 8, 9]])
print("Matrix\n",matrix)
# Select second row
print("Second row of Matrix\n",matrix[1,:])
print("Third coloumn of Matrix\n",matrix[:,2])
# Create Tensor (a 4-dimensional array)
tensor = np.array([ [[[1, 1], [1, 1]], [[2, 2], [2, 2]]],
                    [[[3, 3], [3, 3]], [[4, 4], [4, 4]]] ])
print("Tensor\n",tensor)
```
### Matrix properties
```
# Create matrix
matrix = np.array([[1, 2, 3],
                   [4, 5, 6],
                   [7, 8, 9]])
print("Matrix Shape:",matrix.shape)
print("Number of elements:",matrix.size)
# Typo fix: "dimentions" -> "dimensions"
print("Number of dimensions:",matrix.ndim)
print("Average of matrix:",np.mean(matrix))
print("Maximum number:",np.max(matrix))
# Bug fix: np.min(matrix, axis=1) gives the minimum of each ROW,
# not a column of minimum numbers as the original label claimed.
print("Minimum of each row:",np.min(matrix, axis=1))
# Typo fix: "Diagnol" -> "Diagonal"
print("Diagonal of matrix:",matrix.diagonal())
print("Determinant of matrix:",np.linalg.det(matrix))
```
### Matrix Operations
```
print("Flattened Matrix\n",matrix.flatten())
print("Reshaping Matrix\n",matrix.reshape(9,1))
print("Transposed Matrix\n",matrix.T)
# Create matrix
matrix_a = np.array([[1, 1, 1],
                     [1, 1, 1],
                     [1, 1, 2]])
# Create matrix
matrix_b = np.array([[1, 3, 1],
                     [1, 3, 1],
                     [1, 3, 8]])
print("Matrix Addition\n",np.add(matrix_a, matrix_b))
# Bug fix: np.multiply is element-wise (Hadamard) multiplication,
# not scalar multiplication as the original label claimed.
print("Element-wise Multiplication\n",np.multiply(matrix_a, matrix_b))
print("Matrix Multiplication\n",np.dot(matrix_a, matrix_b))
```
### Pandas
```
import pandas as pd
df=pd.read_csv("Income.csv") # expects Income.csv in the working directory
print("Data\n")
df
print("Top Elements\n")
df.head(3) # first 3 rows
print("Bottom Elements\n")
df.tail(3) # last 3 rows
print("Specific Coloumn\n")
df['State'].head(3)
print("Replace negative numbers with NaN\n")
df.replace(-999,np.nan) # returns a new DataFrame; df itself is unchanged
```
## Matplotlib
```
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
```
### Line Plot
```
# Line plot
plt.plot([1,2,3,4],[3,4,5,6]) # first list is x values, second is y values
plt.xlabel('some numbers')
plt.ylabel('some numbers')
plt.show()
### Adding elements to line plots
t = np.arange(0.0, 2.0, 0.01) # Generate equally space numbers between 0 and 2
s = 1 + np.sin(2*np.pi*t) # Apply sin function to the random numbers
plt.plot(t, s)
plt.xlabel('time (s)')
plt.ylabel('voltage (mV)')
plt.title('About as simple as it gets, folks')
plt.grid(True)
plt.savefig("test.png") # Save a plot. Check the directory
plt.show()
```
### Bar Plot
```
# Bar chart: one green bar per sample value.
heights = [3, 10, 7, 5, 3, 4.5, 6, 8.1]
positions = range(len(heights))
bar_width = 1/1.5
plt.bar(positions, heights, bar_width, color="green")
plt.show()
```
### Scatter Plot
```
N = 50
# Generate random numbers
x = np.random.rand(N) # N uniform samples in [0, 1)
y = np.random.rand(N)
colors = np.random.rand(N)
area = np.pi * (15 * np.random.rand(N))**2 # 0 to 15 point radii
plt.scatter(x, y, s=area, c=colors, alpha=0.5) # s: marker area, c: colors, alpha: transparency
plt.show()
```
### Histogram
```
mu, sigma = 100, 15
x = mu + sigma*np.random.randn(10000) # Generate random values with some distribution
# the histogram of the data
# Fix: the `normed` keyword was removed in matplotlib 3.x; `density=True` is the replacement.
n, bins, patches = plt.hist(x, 50, density=True, facecolor='green', alpha=0.75)
# add a 'best fit' line
# Fix: matplotlib.mlab.normpdf was removed; compute the Gaussian pdf with numpy instead.
y = np.exp(-0.5 * ((bins - mu) / sigma) ** 2) / (sigma * np.sqrt(2 * np.pi))
l = plt.plot(bins, y, 'r--', linewidth=1)
plt.xlabel('Smarts')
plt.ylabel('Probability')
plt.title(r'$\mathrm{Histogram\ of\ IQ:}\ \mu=100,\ \sigma=15$')
plt.axis([40, 160, 0, 0.03])
plt.grid(True)
plt.show()
```
### Pie Chart
```
# Pie chart, where the slices will be ordered and plotted counter-clockwise:
labels = 'Frogs', 'Hogs', 'Dogs', 'Logs'
sizes = [15, 30, 45, 10]
explode = (0, 0.1, 0, 0) # only "explode" the 2nd slice (i.e. 'Hogs')
fig1, ax1 = plt.subplots()
ax1.pie(sizes, explode=explode, labels=labels, autopct='%1.1f%%',
        shadow=True, startangle=90) # autopct formats each slice's percentage label
ax1.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.
plt.show()
```
| github_jupyter |
<img src="images/logo.jpg" style="display: block; margin-left: auto; margin-right: auto;" alt="לוגו של מיזם לימוד הפייתון. נחש מצויר בצבעי צהוב וכחול, הנע בין האותיות של שם הקורס: לומדים פייתון. הסלוגן המופיע מעל לשם הקורס הוא מיזם חינמי ללימוד תכנות בעברית.">
# <span style="text-align: right; direction: rtl; float: right; clear: both;">תרגילים</span>
## <span style="text-align: right; direction: rtl; float: right; clear: both;">הגדרה</span>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
פונקציית גיבוב היא פונקציה המקבלת קלט כלשהו ומחזירה ערך באורך קבוע.<br>
קיימות פונקציות גיבוב רבות, ולהן שימושים מגוונים.
</p>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
נמנה כמה תכונות נפוצות של פונקציות גיבוב:
</p>
<ul style="text-align: right; direction: rtl; float: right; clear: both;">
<li>דטרמיניזם – עבור קלט מסוים, הערך המוחזר יהיה תמיד זהה.</li>
<li>אחידות – כיוון שהערך המוחזר הוא בגודל קבוע, יש קלטים שעבורם הערך המוחזר זהה. פונקציית גיבוב טובה תנסה למזער את כמות הערכים המוחזרים הזהים.</li>
<li>חד־כיווניות – עבור ערך מסוים שהפונקציה החזירה, קשה למצוא מה הקלט שיצר אותו.</li>
</ul>
## <span style="text-align: right; direction: rtl; float: right; clear: both;">מימוש בסיסי</span>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
בתרגיל זה נממש גרסה פשוטה של פונקציית גיבוב (hash function).<br>
פונקציית הגיבוב שלנו תקבל מחרוזת ותחזיר תמיד פלט באורך זהה.
</p>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
תחילה נכיר את הפונקציה <code dir="ltr" style="direction: ltr">ord(תו)</code>.<br>
פונקציה זו מקבלת תו, ומחזירה ערך מספרי המייצג אותו:<br>
</p>
```
ord('a')  # -> 97, the code point of the character 'a'
```
<p style="text-align: right; direction: rtl; float: right; clear: both;">
פונקציית הגיבוב שלנו תעבוד כך:<br>
</p>
<ol style="text-align: right; direction: rtl; float: right; clear: both;">
<li>נאתחל משתנה עזר בשם <var>hash</var> כך שערכו יהיה 1.</li>
<li>עבור כל אות במחרוזת:</li>
<ol>
<li>נחשב את <code dir="ltr" style="direction: ltr;">ord(letter)</code> ונכפיל במשתנה העזר <var>hash</var>.</li>
<li>נכפיל את הערך שקיבלנו במיקום של האות הבאה במחרוזת.
<li>לתוצאה הזו נבצע מודולו 397,643, ונשמור על <var>hash</var>.</li>
</ol>
<li>כדי שהפלט תמיד יהיה באורך זהה, נשתמש במודולו 100,297 על התוצאה (חשבו: איך מודולו גורם לזה לקרות?)</li>
</ol>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
פונקציית הגיבוב שיצרנו מחזירה תמיד ערכים באורך קבוע (בין 0 ל־100,297), כפי שפונקציית גיבוב צריכה להחזיר (לאו דווקא באורך זה, אבל הפלט חייב להיות באורך קבוע).
</p>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
דוגמה:<br>
</p>
<code style="text-align: left; direction: ltr; float: left; clear: both;">myhash('aba')</code><br>
<samp style="text-align: left; direction: ltr; float: left; clear: both;">62242</samp><br>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
החישוב התבצע כך:<br>
</p>
<code style="text-align: left; direction: ltr; float: left; clear: both;">temp_hash = 1</code><br>
<code style="text-align: left; direction: ltr; float: left; clear: both;">temp_hash = (temp_hash * ord('a') * 1) % 397643</code>
<samp style="text-align: left; direction: ltr; float: left; clear: both;"># temp_hash = (1 * 97 * 1) % 397643 = 97</samp><br>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
שימו לב שכאן הכפלנו ב־1, כיוון שמיקום האות הוא 0 ואנו מכפילים
<em>באינדקס האות הבאה.</em>
</p>
<code style="text-align: left; direction: ltr; float: left; clear: both;">temp_hash = (temp_hash * ord('b') * 2) % 397643</code>
<samp style="text-align: left; direction: ltr; float: left; clear: both;"># temp_hash = (97 * 98 * 2) % 397643 = 19012</samp>
<code style="text-align: left; direction: ltr; float: left; clear: both;">temp_hash = (temp_hash * ord('a') * 3) % 397643</code>
<samp style="text-align: left; direction: ltr; float: left; clear: both;"># temp_hash = (19012 * 97 * 3) % 397643 = 363133</samp>
<code style="text-align: left; direction: ltr; float: left; clear: both;">return temp_hash % 100297</code>
<samp style="text-align: left; direction: ltr; float: left; clear: both;"># temp_hash = 363133 % 100297 = <b>62242</b></samp>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
ממשו את פונקציית הגיבוב.<br>
כדי לבדוק את עצמכם ודאו שקיבלתם את הערכים הבאים עבור המחרוזות הבאות:<br>
</p>
<ul style="text-align: left; direction: ltr; float: left; clear: both;">
<li>
<code>myhash('python course')</code><br>
<samp>75273</samp>
</li>
<li>
<code>myhash('hashing is sababa')</code><br>
<samp>38166</samp>
</li>
<li>
<code>myhash('i calculate hashes for fun')</code><br>
<samp>68720</samp>
</li>
</ul>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
שימו לב שזוהי פונקציית גיבוב מומצאת.<br>
לעולם לא נשתמש בפונקציות גיבוב שהמצאנו בסביבות אמיתיות שאנחנו מתכנתים(!), משום שדבר כזה יסכן בוודאות את המשתמשים במערכת.<br>
זה עולם שלם שנחקר רבות, ואנחנו תמיד נשתמש רק בפונקציות גיבוב שנחקרו במוסדות הרלוונטיים ופורסמו מטעמם.
</p>
```
# כתבו את הפונקציה שלכם כאן
```
## <span style="text-align: right; direction: rtl; float: right; clear: both;">איקס־עיגול</span>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
נזכיר את החוקים של המשחק המוכר איקס־עיגול:<br>
</p>
<ul style="text-align: right; direction: rtl; float: right; clear: both;">
<li>המשחק מתרחש על לוח בגודל 3 על 3.</li>
<li>
במשחק משתתפים שני שחקנים: שחקן שמשתמש באות <em>X</em>
ושחקן שמשתמש באות <em>O</em>.
</li>
<li>
השחקן שמשתמש באות <em>O</em> תמיד מתחיל.
</li>
<li>המשחק נמשך כל עוד יש מקום פנוי בלוח, או עד שהוכרז מנצח.</li>
<li>מנצח הוא מי שהציב את האות השייכת לו בכל התאים הנמצאים באלכסון, בשורה או בטור על הלוח.</li>
</ul>
### <span style="text-align: right; direction: rtl; float: right; clear: both;">מימוש המשחק</span>
#### <span style="text-align: right; direction: rtl; float: right; clear: both;">ייצוג הלוח</span>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
את הלוח נייצג באמצעות רשימה של רשימות.<br>
כל רשימה תייצג שורה בלוח שלנו: הרשימה במיקום 0 תייצג את השורה הראשונה בלוח, הרשימה בשורה 1 את השורה השנייה וכך הלאה.<br>
הרשימות המייצגות את השורות יהיו רשימות של תווים, כאשר בכל תא יהיה אחד מבין התווים האפשריים – '<i>O</i>', '<i>X</i>' או '<i>-</i>'
</p>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
לדוגמה, כך נראה לוח ריק:
</p>
<samp style="float: left; clear: both;">[['-', '-', '-'], ['-', '-', '-'], ['-', '-', '-']]</samp>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
נוח לדמיין את הרשימה הזו כתובה במאונך:
</p>
<samp style="float: left; clear: both;">
[
['-', '-', '-'],
['-', '-', '-'],
['-', '-', '-']
]
</samp>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
כעת נראה איך נראה הלוח כאשר יש <samp>'X'</samp>־ים באלכסון:
</p>
<samp style="float: left; clear: both;">
[
['X', '-', '-'],
['-', 'X', '-'],
['-', '-', 'X']
]</samp>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
וללא ההדפסה לאורך:
</p>
<samp style="float: left; clear: both;">[['X', '-', '-'], ['-', 'X', '-'], ['-', '-', 'X']]</samp>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
תחילה נממש פונקציה המקבלת את הלוח ובודקת אם יש מנצח כלשהו
(<em>X</em> או <em>O</em>),
ומחזירה את האות של המנצח
(<samp>'X'</samp> או <samp>'O'</samp>)
אם יש מנצח, או <samp>''</samp> (מחרוזת ריקה) אחרת.
</p>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
חִשבו אילו בדיקות נידרש לבצע כדי למצוא אם יש בלוח מצב של ניצחון. ממשו את הפונקציה <code>check_board(board)</code> כך שתשתמש בכמה שיותר פונקציות עזר.
</p>
```
# check_board(board) כתבו את הפונקציה שלכם כאן
```
<p style="text-align: right; direction: rtl; float: right; clear: both;">
לפני שתמשיכו הלאה, חשוב לוודא שהפונקציה שכתבתם עובדת, לכן נכתוב עבורה סדרה של בדיקות.<br>
כתבו בדיקות עבור המקרים הבאים:
</p>
<ul style="text-align: right; direction: rtl; float: right; clear: both;">
<li>ניצחון באלכסון הראשי (שמאל למעלה–ימין למטה).</li>
<li>ניצחון באלכסון המשני (ימין למעלה–שמאל למטה).</li>
<li>ניצחון בכל אחד מהטורים.</li>
<li>ניצחון בכל אחת מהשורות.</li>
<li>לוח מלא ללא ניצחון.</li>
<li>לוח לא מלא ללא ניצחון (במקומות לא מסומנים יופיע הסימן <samp>'-'</samp>).</li>
</ul>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
בסך הכול תצטרכו לכתוב 10 בדיקות. בכל בדיקה הפעילו את הפונקציה שלכם <code>check_board(board)</code> על לוח כפי שמתואר ובדקו אם הפלט שמוחזר תואם לפלט שציפיתם לקבל.
</p>
```
# בדיקה עבור אלכסון ראשי
# בדיקה עבור אלכסון משני
# בדיקה עבור טור שמאלי
# בדיקה עבור טור אמצעי
# בדיקה עבור טור ימני
# בדיקה עבור שורה עליונה
# בדיקה עבור שורה אמצעית
# בדיקה עבור שורה תחתונה
# בדיקה עבור לוח מלא ללא ניצחון
# בדיקה עבור לוח לא מלא ללא ניצחון
```
#### <span style="text-align: right; direction: rtl; float: right; clear: both;">פונקציות לבדיקת תקינות קלט</span>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
במהלך המשחק אנו נקלוט מהשחקנים במשחק (המשתמשים) את המקומות בלוח, שבהם הם ירצו למקם את האות שלהם.<br>
מקומות אלו יהיו שני מספרים בתחום 0–2 המציינים את השורה והעמודה שבה יש למקם את האות.<br>
לדוגמה, עבור:
</p>
<samp style="float: left; clear: both;">1 2</samp>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
נמקם את האות המתאימה לשחקן, נניח <em>X</em>, בשורה 1 ובעמודה 2, כך:
</p>
<samp style="float: left; clear: both;">[['-', '-', '-'], ['-', '-', 'X'], ['-', '-', '-']]</samp>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
זכרו כי הספירה מתחילה מ־0, ולכן מדובר בשורה האמצעית ובעמודה הימנית.
</p>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
כעת כתבו פונקציה המקבלת את הלוח ואת האות שמייצגת את השחקן (<em>'X'</em> או <em>'O'</em>). כמו כן, הפונקציה תקלוט מהמשתמש שני מספרים.<br>
הפונקציה תבדוק אם התנאים הבאים מתקיימים, ואם הם מתקיימים היא תמקם את האות הנתונה במיקום המבוקש, ותעדכן את הלוח:<br>
</p>
<ul style="text-align: right; direction: rtl; float: right; clear: both;">
<li>מספר השורה הוא בין 0 ל־2.</li>
<li>מספר העמודה הוא בין 0 ל־2.</li>
<li>המקום המבוקש לא תפוס על ידי אות כלשהי (כלומר יש בו <samp>'-'</samp>).</li>
</ul>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
שימו לב, <b>עליכם לחייב את המשתמש להכניס ערכים חוקיים</b>. כלומר, המשחק לא ימשיך עד שיתקבל קלט תקין (חשבו על דרך שבה התוכנית תמשיך לבקש מהמשתמש ערכים עד שיוכנסו ערכים חוקיים).<br>
כאשר נגלה בשלב מוקדם יותר שהלוח לא ניתן יותר למילוי – המשחק יסתיים.
</p>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
רמז: <span style="direction: rtl; background: #000; text: #000">השתמשו בלולאת while</span>
</p>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
לוח לדוגמה:
</p>
<samp style="float: left; clear: both;">
board = [['-', '-', '-'], ['-', 'O', 'X'], ['-', '-', '-']]
</samp>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
הזנה חוקית:
</p>
<code style="float: left; clear: both;">make_turn('X', board)</code>
<br style="clear: both;">
<samp style="float: left; clear: both;">Player 'X' Please choose cell:
0 2
</samp>
<samp style="float: left; clear: both;">[['-', '-', 'X'], ['-', 'O', 'X'], ['-', '-', '-']]</samp>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
הזנה לא חוקית:
</p>
<br style="clear: both;">
<samp style="float: left; clear: both;">Player 'X' Please choose cell:
5 -2
Invalid line chosen (5)
Invalid column chosen (-2)
[['-', '-', '-'], ['-', 'O', 'X'], ['-', '-', '-']]
</samp>
<br style="clear: both;">
<samp style="float: left; clear: both; padding-bottom: 5px;">Player 'X' Please choose cell:
1 2
Cell (1,2) is taken, use other.
[['-', '-', '-'], ['-', 'O', 'X'], ['-', '-', '-']]
</samp>
<br style="clear: both;">
<samp style="float: left; clear: both;">Player 'X' Please choose cell:
2 -2
Invalid column chosen (-2)
[['-', '-', '-'], ['-', 'O', 'X'], ['-', '-', '-']]
</samp>
<br style="clear: both;">
<samp style="float: left; clear: both;">Player 'X' Please choose cell:
1 1
Cell (1,1) is taken, use other.
[['-', '-', '-'], ['-', 'O', 'X'], ['-', '-', '-']]
</samp>
<br style="clear: both;">
<p style="text-align: right; direction: rtl; float: right; clear: both;">
שימו לב שלאחר כל פעולה אנו מדפיסים את הלוח, בין אם הצליחה ובין אם לאו.
</p>
```
# make_turn(player_char,board) כתבו את הפונקציה שלכם כאן
```
<p style="text-align: right; direction: rtl; float: right; clear: both;">
<b>בונוס</b>: ממשו פונקציה שמדפיסה את הלוח לאורך.
</p>
<code style="float: left; clear: both;">print_board(board)</code>
<br>
<samp style="float: left; clear: both;">
[
['-', '-', '-'],
['-', '-', '-'],
['-', '-', '-']
]
</samp>
#### <span style="text-align: right; direction: rtl; float: right; clear: both;">מימוש המשחק</span>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
כאמור המשחק ממשיך כל עוד אין מנצח ונותר מקום פנוי בלוח.<br>
נשים לב כי מספר המהלכים החוקיים יכול להיות לכל היותר כגודל הלוח.<br>
כלומר – אם לא הוכרז מנצח במהלך המשחק, המשחק ייגמר לאחר 9 מהלכים עבור לוח בגודל $3\times3$.<br>
נספור כמה מהלכים חוקיים יש במשחק. עבור מספרי מהלך זוגיים (0, 2, 4, ...) ישחק השחקן <em>O</em>, ועבור מספרי מהלך אי־זוגיים ישחק השחקן <em>X</em>.<br>
נתאר את מהלך המשחק בפסאודו־קוד:<br>
</p>
<ol style="text-align: right; direction: rtl; float: right; clear: both;">
<li>אתחל את מונה המהלכים ל־0.</li>
<li>כל עוד אין מנצח וגם הלוח לא מלא:
<ul>
<li>אם מספר המהלך זוגי – בצע מהלך שחקן <em>O</em>.</li>
<li>אם מספר המהלך אי־זוגי – בצע מהלך שחקן <em>X</em>.</li>
</ul>
</li>
<li>הגדל את מונה המהלכים ב־1. </li>
<li>אם יש ניצחון – הכרז על המנצח, אחרת הכרז תיקו.</li>
</ol>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
ממשו את המשחק על פי הפונקציות שיצרתם ועל פי תיאור מהלך המשחק.
</p>
```
# tic_tac_toe()
```
<p style="text-align: right; direction: rtl; float: right; clear: both;">
כעת שחקו עם בני משפחה וחברים ;)
</p>
## <span style="text-align: right; direction: rtl; float: right; clear: both;">בנק 2.0</span>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
מנהלי הבנק היו מרוצים מאוד מהתוכנה הקודמת לניהול בנק שכתבתם וכעת הם מעוניינים לשפר אותה.<br>
תחילה הם הביעו דאגה מחוזק הסיסמאות. מנגנון הסיסמאות הקודם היה חלש ומנהלי הבנק מפחדים שייעשה בו שימוש לרעה.<br>
שמועות מתפשטות מהר מאוד ומנהלי הבנק שמעו שמימשתם גרסה לפונקציית גיבוב. הם מעוניינים להשתמש בה במנגנון הסיסמאות החדש.
</p>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
מנגנון הסיסמאות ינוהל כך:
</p>
<ol style="text-align: right; direction: rtl; float: right; clear: both;">
<li>הבנק ינהל קובץ סיסמאות שייקרא bank_passwd.txt.</li>
<li>כאשר משתמש יפתח חשבון בנק, הוא יכניס את הסיסמה שהוא מעוניין שתשמש אותו בהתחברות הבאה.</li>
<li>נחשב את ה־hash של הסיסמה הזו באמצעות פונקציית ה־hash שכתבנו בתחילת מחברת זו.</li>
<li>הבנק ישמור את שם המשתמש ותוצאת ה־hash בקובץ הסיסמאות בצורה קבועה מראש.</li>
</ol>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
כל סיסמה של משתמש תישמר בקובץ בצורה: <samp>username:62242</samp>,<br>
כאשר <samp>username</samp> הוא שם המשתמש שנרשם בפתיחת החשבון, ו־<samp>62242</samp> הוא תוצאת ה־hash עבור הסיסמה שבחר.
</p>
### <span style="text-align: right; direction: rtl; float: right; clear: both;">פתיחת חשבון בנק</span>
<p style="text-align:right;direction:rtl;">בסעיף זה נממש את פונקציית פתיחת חשבון הבנק.<br>
הפונקציה תקבל שם משתמש וסיסמה, ותיצור רשומה חדשה בקובץ הסיסמאות עבור המשתמש שביקש להצטרף לבנק.<br>
כיוון שהבנק שלנו לא עבר לעבוד עם מספרי חשבון, עלינו לוודא ששם המשתמש שנבחר לא קיים כבר בבנק. אם קיים כזה, נדפיס שגיאה ולא נבצע דבר.<br>
אם לא קיים שם משתמש כזה, נחשב את ה־hash של הסיסמה שהזין, ונוסיף לסוף הקובץ רשומה חדשה בייצוג שצוין בתחילת השאלה.
</p>
#### <span style="text-align: right; direction: rtl; float: right; clear: both;">דוגמאות</span>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
תוכן קובץ הסיסמאות (לצורך הדוגמה):
</p>
<samp>FreddieMercury:56443</samp><br>
<samp>BBKing:33471</samp><br>
<samp>DonaldDuck:17743</samp><br><br>
<code>register_to_bank('FreddieMercury', 'ILoveQueen')</code><br>
<samp>An account with name "FreddieMercury" already exists.<br>Account was not created.</samp><br><br>
<code>register_to_bank('Simba', 'ILoveNala')</code><br>
<samp>Account with name "Simba" was created and added to the bank.</samp><br><br>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
תוכן קובץ הסיסמאות כעת:
</p>
<samp>FreddieMercury:56443</samp><br>
<samp>BBKing:19463</samp><br>
<samp>DonaldDuck:17743</samp><br>
<samp>Simba:6362</samp><br>
#### <span style="text-align: right; direction: rtl; float: right; clear: both;">תרגיל</span>
<p style="text-align: right; direction: rtl; float: right; clear: both;">ממשו את פונקציית הרישום.</p>
<p style="text-align: right; direction: rtl; float: right; clear: both;">רמזים:</p>
<ul style="text-align: right; direction: rtl; float: right; clear: both;">
<li style="direction: rtl; background: #000; color: #000">קראו את המידע מהקובץ לתוך רשימה, זכרו שכל שורה היא מחרוזת וניתן להפעיל עליה פעולות של מחרוזות כדי לחלץ את שם המשתמש.</li>
<li style="direction: rtl; background: #000; color: #000">כאשר פותחים קובץ במצב הוספה, ניתן להוסיף אליו נתונים מבלי למחוק את הקיימים. קראו על פונקציית open וחפשו כיצד עושים זאת.</li>
</ul>
```
# ממשו את פונקציית הרישום כאן register_to_bank(username,password)
```
### <span style="text-align: right; direction: rtl; float: right; clear: both;">מערכת הזדהות</span>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
בעקבות השיפור במנגנון חוזק הסיסמאות, הצטרפו לקוחות רבים לבנק.<br>
המנהלים מרוצים וכעת רוצים שתממשו עבורם מערכת הזדהות שעובדת עם קובץ הסיסמאות.
</p>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
מערכת ההזדהות תעבוד כך:
</p>
<ul style="text-align: right; direction: rtl; float: right; clear: both;">
<li>הלקוח יזין שם משתמש וסיסמה.
<ul>
<li>אם המשתמש לא קיים, נדפיס <samp>Account not registered</samp>, ונסיים.</li>
<li>אם הלקוח קיים, אך הסיסמה שהזין שגויה – יש ללקוח עוד 2 נסיונות להזין סיסמה נכונה.</li>
</ul>
</li>
<li>למשתמש ינתנו 3 ניסיונות בסך הכול להכניס סיסמה נכונה.
<ul>
<li>אם הזין סיסמה שגויה בשלושתם – הרשומה שלו תימחק מקובץ הסיסמאות, ויהיה עליו להירשם מחדש בעתיד.</li>
<li>אחרת, הזיהוי הצליח.</li>
</ul>
</li>
</ul>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
מערכת ההזדהות הינה פונקציה המקבלת שם משתמש וסיסמה.<br>
היא מחזירה <samp>True</samp> אם הזיהוי הצליח, ו־<samp>False</samp> אחרת.
</p>
#### <span style="text-align: right; direction: rtl; float: right; clear: both;">דוגמאות</span>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
תוכן קובץ הסיסמאות (לצורך הדוגמה):
</p>
<br>
<samp>FreddieMercury:61875</samp><br>
<samp>BBKing:33471</samp><br>
<samp>DonaldDuck:17743</samp><br>
<samp>Simba:6362</samp><br><br>
<code>authenticate('FreddieMercury', 'ILoveQueen')</code><br>
<samp>Wrong password (1/3). # myhash('ILoveQueen') = 99597 != 61875 (in password file)</samp><br><br>
<code>authenticate('FreddieMercury', 'LetItBe')</code><br>
<samp>Wrong password (2/3). # myhash('LetItBe') = 58060 != 61875 (in password file)</samp><br><br>
<code>authenticate('FreddieMercury', 'HeyJude')</code><br>
<samp>Wrong password (3/3). 'FreddieMercury' was removed. # myhash('HeyJude') = 8309 != 61875 (in password file)</samp><br><br>
<code>authenticate('FreddieMercury', 'IHatePasswords!')</code><br>
<samp>Account not registered. # FreddieMercury was removed in the previous example</samp><br><br>
<code>authenticate('Simba', 'ILoveNala')</code><br>
<samp>Welcome 'Simba'. # myhash('ILoveNala') = 6362 == 6362 (in password file)</samp><br><br>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
תוכן קובץ הסיסמאות כעת:
</p>
<br>
<samp>BBKing:19463</samp><br>
<samp>DonaldDuck:17743</samp><br>
<samp>Simba:6362</samp><br>
#### <span style="text-align: right; direction: rtl; float: right; clear: both;">תרגול</span>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
ממשו את מערכת ההזדהות.
</p>
```
# authenticate(username,password) ממשו את הפונקציה כאן
```
| github_jupyter |
```
import time
start = time.time()  # wall-clock start; used below to enforce the max_time runtime budget
import numpy as np
import scipy as sc
import bsplines as bsp
import utilitis_opt as utils_opt
import utilitis_pic_Rel
#====================================================================================
# calling epyccel
#====================================================================================
from pyccel.epyccel import epyccel
# Compile the particle (PIC) kernels with pyccel, enabling OpenMP threading.
utils_pic_fast = epyccel(utilitis_pic_Rel, accelerator='openmp')
#utils_pic_fast = epyccel(utilitis_pic_Rel)
print('pyccelization of pic functions done!')
#====================================================================================
#===== Is this run a restart? (restart = 0: no, restart = 1: yes) ===================
restart = 1
max_time = 3440. # maximum runtime in minutes
time_restart_files = 400. # time after which the current configuration is saved in minutes
# Files used to resume an interrupted run (written periodically in the main loop).
name_particles = 'restart_files/particles6.npy'
name_fields = 'restart_files/fields6.npy'
name_time_step = 'restart_files/time_step6.npy'
name_control = 'restart_files/control_variate6.npy'
#====================================================================================
#===== saving data? (save = 1: yes, save = 0: no). If yes, name directory ===========
save = 1
title = 'run_nel1800_np8e6.txt' # output text file (opened in binary append mode below)
saving_step = 1 # save data only every saving_stepth time step
time_integr = 1 # do time integration? (1 : yes, 0: no)
#====================================================================================
#===== physical parameters ==========================================================
# All quantities are in normalized units (eps0 = mu0 = c = 1 below).
eps0 = 1.0 # vacuum permittivity
mu0 = 1.0 # vacuum permeability
c = 1.0 # speed of light
qe = -1.0 # electron charge
me = 1.0 # electron mass
B0z = 1.0 # minimum of background magnetic field in z-direction
wce = qe*B0z/me # electron cyclotron frequency (negative, since qe < 0)
wpe = 5*np.abs(wce) # cold electron plasma frequency
nuh = 6e-3 # ratio of cold/hot electron densities (nh/nc)
nh = nuh*wpe**2 # hot electron density
wpar = 0.2*c # parallel thermal velocity of energetic particles
wperp = 0.53*c # perpendicular thermal velocity of energetic particles
xi = 8.62e-5 # inhomogeneity factor of background magnetic field
bcs_d = 1 # damping of wave fields at boundaries? (1: yes, 0: no)
bcs_g = 1 # field line dependence of initial distribution function? (1: yes, 0: no)
#====================================================================================
#===== initial conditions ===========================================================
k = 2. # wavenumber of initial wave field perturbations
amp = 1e-4 # amplitude of initial wave field perturbations
eps = 0. # amplitude of spatial perturbation of initial distribution function
Ex0 = lambda z : 0*z # initial Ex
Ey0 = lambda z : 0*z # initial Ey
Bx0 = lambda z : 0*z # initial Bx
By0 = lambda z : 0*z # initial By
jx0 = lambda z : 0*z # initial jcx
jy0 = lambda z : 0*z # initial jcy
#====================================================================================
#===== numerical parameters =========================================================
Lz = 327.7 # length of z-domain
Nel = 1800 # number of elements z-direction
T = 5000. # simulation time
dt = 0.04 # time step
p = 3 # degree of B-spline basis functions in V0
Np = np.int(8e6) # number of markers
control = 1 # control variate for noise reduction? (1: yes, 0: no)
Ld = 0.046*Lz # length of damping region at each end
#====================================================================================
#===== evaluation points for the magnetic field======================================
#eva_points_Bx = np.linspace(40., 280., 7)
eva_points_Bx = np.array([100., 140., 180., 220.])
#====================================================================================
#====== create parameter list =======================================================
# Flat array of run parameters (saved/consumed elsewhere).
pa = np.zeros(1*(Nel + p - 1) + 5)
pa[0] = eps0
pa[1] = mu0
pa[2] = c
pa[3] = qe
pa[4] = me
pa[5] = B0z
pa[6] = wce
pa[7] = wpe
pa[8] = nuh
pa[9] = nh
pa[10] = wpar
pa[11] = wperp
pa[12] = k
pa[13] = amp
pa[14] = eps
pa[15] = Lz
pa[16] = Nel
pa[17] = T
pa[18] = dt
pa[19] = p
pa[20] = Np
pa[21] = control
pa[22] = saving_step
pa[23] = xi
pa[24] = Ld
# NOTE(review): indices jump from 24 to 29 — presumably slots 25-28 are reserved; confirm.
pa[29] = bcs_d
pa[30] = bcs_g
#====================================================================================
#===== discretization of spatial domain =============================================
dz = Lz/Nel # element size
el_b = np.linspace(0, Lz, Nel + 1) # element boundaries
Nbase0 = Nel + p # total number of basis functions in V0
Nbase0_0 = Nbase0 - 2 # number of degrees of freedom in V0 (the two boundary coefficients are held at zero below)
Nbase1 = Nbase0 - 1 # total number of basis functions in V1
Nbase1_0 = Nbase1 # number of degrees of freedom in V1
#====================================================================================
#===== some diagnostic values =======================================================
Eh_eq = Lz*nh*me/2*(wpar**2 + 2*wperp**2) # equilibrium energetic electron energy
en_E = np.array([]) # electric field energy
en_B = np.array([]) # magnetic field energy
en_C = np.array([]) # cold plasma energy
en_H = np.array([]) # energetic electron energy
#====================================================================================
#===== background field in z-direction ==============================================
# Parabolic profile with minimum B0z at the domain centre z = Lz/2.
B_background_z = lambda z : B0z*(1 + xi*(z - Lz/2)**2)
#====================================================================================
#===== initial energetic electron distribution function =============================
def fh0(z, vx, vy, vz):
    """Initial energetic-electron distribution: a bi-Maxwellian with an
    optional field-line dependence (bcs_g) and a cos(k*z) density modulation.
    """
    # Mirror factor of the background field; bcs_g switches the field-line
    # dependence of the perpendicular temperature on (1) or off (0).
    mirror = 1 - B0z/B_background_z(z)
    aniso = 1 + (wperp**2/wpar**2 - 1)*mirror*bcs_g
    density_mod = 1 + eps*np.cos(k*z)
    norm = nh/((2*np.pi)**(3/2)*wpar*wperp**2)
    v_par_term = vz**2/(2*wpar**2)
    v_perp_term = aniso*(vx**2 + vy**2)/(2*wperp**2)
    return density_mod*norm*np.exp(-v_par_term - v_perp_term)
#====================================================================================
#===== Maxwellian for control variate ===============================================
# Analytic bi-Maxwellian used as the known part of the control variate.
maxwell = lambda vx, vy, vz : nh/((2*np.pi)**(3/2)*wpar*wperp**2)*np.exp(-vz**2/(2*wpar**2) - (vx**2 + vy**2)/(2*wperp**2))
#====================================================================================
#===== sampling distribution for initial markers ====================================
# Same velocity shape without the nh factor, uniform in z (factor 1/Lz).
g_sampling = lambda vx, vy, vz : 1/((2*np.pi)**(3/2)*wpar*wperp**2)*np.exp(-vz**2/(2*wpar**2) - (vx**2 + vy**2)/(2*wperp**2))*1/Lz
#====================================================================================
#===== masking function to damp wave fields near boundaries =========================
def damp(z):
    """Masking profile for the wave fields: smoothly ramps from 0 at the
    domain ends to 1 inside, over damping regions of width Ld."""
    # Interior points are left untouched.
    if z > Ld and z < Lz - Ld:
        return 1.0
    # Otherwise ramp with a quarter sine over the distance to the nearer boundary.
    edge_dist = z if z <= Ld else Lz - z
    return np.sin(np.pi*edge_dist/(2*Ld))
#====================================================================================
#===== spline knot vector, global mass matrices (in V0 and V1) and gradient matrix ==
Tz = bsp.make_knots(el_b, p, False)  # knot vector for V0 (False: non-periodic — confirm against bsplines.make_knots)
tz = Tz[1:-1]                        # reduced knot vector for V1 (degree p - 1)
M0, C0 = utils_opt.matrixAssembly_V0(p, Nbase0, Tz, False)
M1 = utils_opt.matrixAssembly_V1(p, Nbase0, Tz, False)
Mb = utils_opt.matrixAssembly_backgroundField(p, Nbase0, Tz, False, B_background_z)
G = utils_opt.GRAD_1d(p, Nbase0, False)
# Collocation matrix used only for diagnostics: evaluates Bx at eva_points_Bx.
D = bsp.collocation_matrix(tz, p - 1, eva_points_Bx, False, normalize=True)
print('matrix assembly done!')
#====================================================================================
#===== reserve memory for unknowns ==================================================
ex = np.empty(Nbase0)  # E_x spline coefficients (V0)
ey = np.empty(Nbase0)  # E_y spline coefficients (V0)
bx = np.empty(Nbase1)  # B_x spline coefficients (V1)
by = np.empty(Nbase1)  # B_y spline coefficients (V1)
yx = np.empty(Nbase0)  # cold current jc_x coefficients (V0)
yy = np.empty(Nbase0)  # cold current jc_y coefficients (V0)
uj = np.empty(4*Nbase0_0 + 2*Nbase1_0)  # stacked vector of all interior dofs
z_old = np.empty(Np)                    # particle positions of the previous step
#====================================================================================
#===== initial coefficients with commuting projectors ===============================
proj = utils_opt.projectors_1d(p, Nbase0, Tz, False)
ex[:] = proj.PI_0(Ex0)
ey[:] = proj.PI_0(Ey0)
bx[:] = proj.PI_1(Bx0)
by[:] = proj.PI_1(By0)
yx[:] = proj.PI_0(jx0)
yy[:] = proj.PI_0(jy0)
# Interior dofs only: the V0 boundary coefficients are held at zero.
uj[:] = np.concatenate((ex[1:-1], ey[1:-1], bx, by, yx[1:-1], yy[1:-1]))
print('projection of initial fields done!')
#====================================================================================
#===== construct block matrices for field update ====================================
ZERO_00 = np.zeros((Nbase0_0, Nbase0_0))
ZERO_01 = np.zeros((Nbase0_0, Nbase1_0))
ZERO_11 = np.zeros((Nbase1_0, Nbase1_0))
# A1: block-diagonal mass matrix of the stacked system (identity on the B blocks).
A1 = np.diag(np.ones(4*Nbase0_0 + 2*Nbase1_0))
A1[0:Nbase0_0, 0:Nbase0_0] = M0
A1[Nbase0_0:2*Nbase0_0, Nbase0_0:2*Nbase0_0] = M0
A1[2*Nbase0_0 + 2*Nbase1_0:3*Nbase0_0 + 2*Nbase1_0, 2*Nbase0_0 + 2*Nbase1_0:3*Nbase0_0 + 2*Nbase1_0] = M0
A1[3*Nbase0_0 + 2*Nbase1_0:4*Nbase0_0 + 2*Nbase1_0, 3*Nbase0_0 + 2*Nbase1_0:4*Nbase0_0 + 2*Nbase1_0] = M0
# A2: coupling blocks of the linear cold-plasma/Maxwell system (curl, current, background field).
A2 = np.block([[ZERO_00, ZERO_00, ZERO_01, c**2*np.dot(G.T, M1), -mu0*c**2*M0, ZERO_00], [ZERO_00, ZERO_00, -c**2*np.dot(G.T, M1), ZERO_01, ZERO_00, -mu0*c**2*M0], [ZERO_01.T, G, ZERO_11, ZERO_11, ZERO_01.T, ZERO_01.T], [-G, ZERO_01.T, ZERO_11, ZERO_11, ZERO_01.T, ZERO_01.T], [eps0*wpe**2*M0, ZERO_00, ZERO_01, ZERO_01, ZERO_00, qe/me*Mb], [ZERO_00, eps0*wpe**2*M0, ZERO_01, ZERO_01, -qe/me*Mb, ZERO_00]])
# Crank-Nicolson: (A1 - dt/2 A2) u^{n+1} = (A1 + dt/2 A2) u^n + dt*Fh.
# NOTE(review): relies on `import scipy as sc` exposing sc.sparse/sc.sparse.linalg
# (lazy submodule loading in recent SciPy) — older versions need explicit imports.
LHS = sc.sparse.csc_matrix(A1 - 1/2*dt*A2)
RHS = sc.sparse.csc_matrix(A1 + 1/2*dt*A2)
LU = sc.sparse.linalg.splu(LHS)  # factorize once, reuse every time step
print('LU factorization done!')
if bcs_d == 1:
    # Damping operator: interpolate at Greville points, scale by damp(), transform back.
    grev = bsp.greville(Tz, p, False)
    coll = bsp.collocation_matrix(Tz, p, grev, False)[1:-1, 1:-1]
    gi = np.zeros(Nbase0)
    for i in range(Nbase0):
        gi[i] = damp(grev[i])
    Gi = np.diag(gi[1:-1])
    DAMP = np.dot(np.dot(np.linalg.inv(coll), Gi), coll)
else:
    DAMP = np.identity(Nbase0_0)
# B-field dofs are not damped (identity blocks in the middle).
DAMP_block = sc.linalg.block_diag(DAMP, DAMP, np.identity(Nbase1_0), np.identity(Nbase1_0), DAMP, DAMP)
print('damping assembly done!')
#====================================================================================
#===== create particles (z,vx,vy,vz,wk) and sample according to sampling distribution
particles = np.zeros((Np, 5), order='F')  # Fortran order for the pyccel kernels
particles[:, 0] = np.random.rand(Np)*Lz       # positions: uniform in [0, Lz)
particles[:, 1] = np.random.randn(Np)*wperp   # vx ~ N(0, wperp^2)
particles[:, 2] = np.random.randn(Np)*wperp   # vy ~ N(0, wperp^2)
particles[:, 3] = np.random.randn(Np)*wpar    # vz ~ N(0, wpar^2)
jh = np.zeros(2*Nbase0)                   # hot current (x and y components interleaved)
Fh = np.zeros(4*Nbase0_0 + 2*Nbase1_0)    # hot-current contribution to the RHS
#====================================================================================
#===== parameters for control variate ===============================================
g0 = g_sampling(particles[:, 1], particles[:, 2], particles[:, 3])
w0 = fh0(particles[:, 0], particles[:, 1], particles[:, 2], particles[:, 3])/g_sampling(particles[:, 1], particles[:, 2], particles[:, 3])
#====================================================================================
#===== initialize velocities by pushing back by -dt/2 and compute weights ===========
timea = time.time()
z_old[:] = particles[:, 0]
# Half-step backwards so that velocities live at t = -dt/2 (leap-frog staggering);
# positions are restored afterwards.
utils_pic_fast.borisGemRel_bc_2(particles, -dt/2, qe, me, Tz, tz, p, ex, ey, bx, by, B0z, xi, Lz, c)
particles[:, 0] = z_old
particles[:, 4] = w0 - control*maxwell(particles[:, 1], particles[:, 2], particles[:, 3])/g0
timeb = time.time()
print('time for particle push: ' + str(timeb - timea))
#====================================================================================
#===== test timing for hot current computation ======================================
timea = time.time()
utils_pic_fast.hotCurrentRel_bc_2(particles[:, 0], particles[:, 1:], Tz, p, qe, jh, c)
timeb = time.time()
print('time for hot current computation: ' + str(timeb - timea))
#====================================================================================
#===== test timing for linear solver ================================================
timea = time.time()
LU.solve(RHS.dot(uj) + dt*Fh)
timeb = time.time()
print('time for solving linear system: ' + str(timeb - timea))
#====================================================================================
#===== time integration by a time step dt ===========================================
def update():
    """Advance particles and fields by one time step dt.

    Mutates module-level state: particles, z_old, jh, Fh, uj, ex, ey, bx, by, yx, yy.
    Particle push is leap-frog (Boris), field update is Crank-Nicolson.
    """
    # ... save old positions
    z_old[:] = particles[:, 0]
    # ...
    # ... update particle velocities from n-1/2 to n+1/2 with fields at time n and positions from n to n+1 with velocities at n+1/2
    utils_pic_fast.borisGemRel_bc_2(particles, dt, qe, me, Tz, tz, p, ex, ey, bx, by, B0z, xi, Lz, c)
    # ...
    # ... update weights with control variate
    particles[:, 4] = w0 - control*maxwell(particles[:, 1], particles[:, 2], particles[:, 3])/g0
    # ...
    # ... compute hot electron current densities (deposited at mid-point positions)
    utils_pic_fast.hotCurrentRel_bc_2(1/2*(z_old + particles[:, 0]), particles[:, 1:], Tz, p, qe, jh, c)
    # ...
    # ... assemble right-hand side of weak formulation
    # jh interleaves x/y components; the boundary entries (first/last two) are dropped.
    Fh[:Nbase0_0] = -c**2*mu0*jh[2:-2][0::2]
    Fh[Nbase0_0:2*Nbase0_0] = -c**2*mu0*jh[2:-2][1::2]
    # ...
    # ... time integration of E, B, jc from n to n+1 with Crank-Nicolson method (use hot current density at n+1/2)
    uj[:] = np.dot(DAMP_block, LU.solve(RHS.dot(uj) + dt*Fh))
    # Unpack the stacked dof vector; V0 fields get zero boundary coefficients re-attached.
    ex[:] = np.array([0] + list(uj[:Nbase0_0]) + [0])
    ey[:] = np.array([0] + list(uj[Nbase0_0:2*Nbase0_0]) + [0])
    bx[:] = uj[2*Nbase0_0:2*Nbase0_0 + Nbase1_0]
    by[:] = uj[2*Nbase0_0 + Nbase1_0:2*Nbase0_0 + 2*Nbase1_0]
    yx[:] = np.array([0] + list(uj[2*Nbase0_0 + 2*Nbase1_0:3*Nbase0_0 + 2*Nbase1_0]) + [0])
    yy[:] = np.array([0] + list(uj[3*Nbase0_0 + 2*Nbase1_0:4*Nbase0_0 + 2*Nbase1_0]) + [0])
    # ...
#====================================================================================
if restart == 0:
    #===== create data file and save initial fields and energies ====================
    # First run: append the t = 0 diagnostics row.
    # (`file` shadows a legacy builtin name; harmless in Python 3.)
    if save == 1:
        file = open(title, 'ab')
    en_E = eps0/2 * (ex[1:-1].dot(M0.dot(ex[1:-1])) + ey[1:-1].dot(M0.dot(ey[1:-1])))
    en_B = 1/(2*mu0) * (bx.dot(M1.dot(bx)) + by.dot(M1.dot(by)))
    en_C = 1/(2*eps0*wpe**2) * (yx[1:-1].dot(M0.dot(yx[1:-1])) + yy[1:-1].dot(M0.dot(yy[1:-1])))
    en_H = me/(2*Np) * particles[:, 4].dot(particles[:, 1]**2 + particles[:, 2]**2 + particles[:, 3]**2) + control*Eh_eq
    Bx_save = D.dot(bx)
    if save == 1:
        # Row layout: Bx at eva_points_Bx, then en_E, en_B, en_C, en_H, time.
        data = np.append(Bx_save, np.array([en_E, en_B, en_C, en_H, 0.]))
        np.savetxt(file, np.reshape(data, (1, 5 + len(eva_points_Bx))), fmt = '%1.10e')
    #==================================================================================
else:
    #===== load restart data ==========================================================
    if save == 1:
        file = open(title, 'ab')
    particles[:] = np.load(name_particles)
    uj[:] = np.load(name_fields)
    # NOTE(review): the control-variate file is read twice; loading once into a
    # temporary would avoid the second disk read.
    w0 = np.load(name_control)[0]
    g0 = np.load(name_control)[1]
    ex[:] = np.array([0] + list(uj[:Nbase0_0]) + [0])
    ey[:] = np.array([0] + list(uj[Nbase0_0:2*Nbase0_0]) + [0])
    bx[:] = uj[2*Nbase0_0:2*Nbase0_0 + Nbase1_0]
    by[:] = uj[2*Nbase0_0 + Nbase1_0:2*Nbase0_0 + 2*Nbase1_0]
    yx[:] = np.array([0] + list(uj[2*Nbase0_0 + 2*Nbase1_0:3*Nbase0_0 + 2*Nbase1_0]) + [0])
    yy[:] = np.array([0] + list(uj[3*Nbase0_0 + 2*Nbase1_0:4*Nbase0_0 + 2*Nbase1_0]) + [0])
    time_step, counter = np.load(name_time_step)
    #==================================================================================
#===== time integration =============================================================
if time_integr == 1:
    print('start time integration! (number of time steps : ' + str(int(T/dt)) + ')')
    last_time = time.time()
    if restart == 0:
        time_step = 0
        counter = 1
    while True:
        try:
            # Stop when simulation time T is reached or the wall-clock budget
            # (max_time) is exhausted; in both cases write restart files first.
            if (time_step*dt >= T) or ((time.time() - start)/60 > max_time):
                if save == 1:
                    file.close()
                np.save('restart_files/particles' + str(counter), particles)
                np.save('restart_files/control_variate' + str(counter), np.vstack((w0, g0)))
                np.save('restart_files/fields' + str(counter), uj)
                np.save('restart_files/time_step' + str(counter), np.array([time_step, counter]))
                break
            if time_step%50 == 0:
                print('time steps finished: ' + str(time_step))
            # Periodic checkpoint every time_restart_files minutes.
            if (time.time() - last_time)/60 > time_restart_files:
                np.save('restart_files/particles' + str(counter), particles)
                np.save('restart_files/control_variate' + str(counter), np.vstack((w0, g0)))
                np.save('restart_files/fields' + str(counter), uj)
                np.save('restart_files/time_step' + str(counter), np.array([time_step, counter]))
                last_time = time.time()
                counter += 1
            update()
            if time_step%saving_step == 0:
                #================================= add data to file ===================================================
                en_E = eps0/2 * (ex[1:-1].dot(M0.dot(ex[1:-1])) + ey[1:-1].dot(M0.dot(ey[1:-1])))
                en_B = 1/(2*mu0) * (bx.dot(M1.dot(bx)) + by.dot(M1.dot(by)))
                en_C = 1/(2*eps0*wpe**2) * (yx[1:-1].dot(M0.dot(yx[1:-1])) + yy[1:-1].dot(M0.dot(yy[1:-1])))
                en_H = me/(2*Np) * particles[:, 4].dot(particles[:, 1]**2 + particles[:, 2]**2 + particles[:, 3]**2) + control*Eh_eq
                Bx_save = D.dot(bx)
                if save == 1:
                    data = np.append(Bx_save, np.array([en_E, en_B, en_C, en_H, (time_step + 1)*dt]))
                    np.savetxt(file, np.reshape(data, (1, 5 + len(eva_points_Bx))), fmt = '%1.10e')
                #=======================================================================================================
            time_step += 1
        except KeyboardInterrupt:
            # Ctrl-C pauses the run; ENTER resumes, typing 'quit' stops it.
            print('Pausing... (Hit ENTER to continue, type quit to exit.)')
            if save == 1:
                file.close()
            try:
                response = input()
                if response == 'quit':
                    break
                print('Resuming...')
                if save == 1:
                    file = open(title, 'ab')
            except KeyboardInterrupt:
                print('Resuming...')
                if save == 1:
                    file = open(title, 'ab')
            continue
    if save == 1:
        file.close()
#====================================================================================
# Small interactive test cell for the HyCho_PIC kernels: build a coarse spline
# space and plot the basis functions of both spaces.
import HyCho_PIC
import Bspline as bspline
import bsplines as bsp
from pyccel.epyccel import epyccel
HyCho_PIC_fast = epyccel(HyCho_PIC)
import numpy as np
import matplotlib.pyplot as plt
Nel = 8            # number of elements
p = 3              # spline degree in V0
L = 4.             # domain length
el_b = np.linspace(0., L, Nel + 1)
bc = False         # periodic boundary conditions? (False: non-periodic)
Nbase_0 = Nel + p - bc*p   # number of basis functions in V0
Nbase_1 = Nbase_0 - 1 + bc # number of basis functions in V1
delta = L/Nel              # element size
T0 = bsp.make_knots(el_b, p, bc)
T1 = T0[1:-1]              # reduced knots for V1 (degree p - 1)
spline_space_0 = bspline.Bspline(T0, p)
spline_space_1 = bspline.Bspline(T1, p - 1)
# Plot the V0 basis and mark the element boundaries.
xplot = np.linspace(0., L, 200)
for i in range(spline_space_0.N):
    plt.plot(xplot, spline_space_0(xplot, i))
plt.plot(el_b, np.zeros(len(el_b)), 'r+')
# Plot the V1 basis rescaled by p/(knot span), as used in the derivative formula.
xplot = np.linspace(0., L, 200)
for j in range(spline_space_1.N):
    plt.plot(xplot, p*spline_space_1(xplot, j)/(T1[j + p] - T1[j]))
Np = np.int(5)
wperp = 0.5   # perpendicular thermal velocity for the test markers
wpar = 0.2    # parallel thermal velocity for the test markers
particles = np.zeros((Np, 5), order='F')
particles[:, 0] = np.random.rand(Np)*L
particles[:, 1] = np.random.randn(Np)*wperp
particles[:, 2] = np.random.randn(Np)*wperp
particles[:, 3] = np.random.randn(Np)*wpar
particles[:, 4] = np.ones(Np)   # unit weights
np.save('particles', particles)
# Knot-span index of each particle (offset by the degree p).
spans = np.floor(particles[:, 0]*Nel/L).astype(int) + p
spans
# NOTE(review): `spline_space` and `T` are not defined in this cell (only
# spline_space_0/spline_space_1 and T0/T1 exist), so the lines below raise
# NameError as written — presumably spline_space_0 and T0 were intended; confirm.
jh_x = np.empty(spline_space.N - bc*p, dtype=float)
jh_y = np.empty(spline_space.N - bc*p, dtype=float)
HyCho_PIC_fast.current(particles, T, p, spans, jh_x, jh_y, Nel + p - bc*p, 0)
jh_x
jh_y
# Slow pure-Python reference deposition for comparison with the kernel output.
jx_test = np.zeros(spline_space.N)
jy_test = np.zeros(spline_space.N)
for ip in range(Np):
    #gamma = np.sqrt(1. + particles[ip, 1]**2 + particles[ip, 2]**2 + particles[ip, 3]**2)
    gamma = 1.   # non-relativistic reference (relativistic gamma commented out above)
    for i in range(spline_space.N):
        jx_test[i%Nel] -= particles[ip, 1]/gamma*particles[ip, 4]*spline_space(particles[ip, 0], i)
        jy_test[i%Nel] -= particles[ip, 2]/gamma*particles[ip, 4]*spline_space(particles[ip, 0], i)
Np = np.int(1e6)
pos = np.random.rand(Np)*L
spans_0 = np.floor(pos*Nel/L).astype(int) + p
res_0 = np.empty(Np, dtype=float)   # kernel output: V0 spline evaluated at pos
res_1 = np.empty(Np, dtype=float)   # kernel output: V1 spline evaluated at pos
coeff_0 = np.random.rand(Nbase_0)
coeff_1 = np.random.rand(Nbase_1)
# Piecewise-polynomial coefficient tables for uniform cubic (pp_0) and
# quadratic (pp_1) B-splines — presumably a pp-form used inside the kernel; confirm.
pp_0 = np.asfortranarray([[1/6, -1/(2*delta), 1/(2*delta**2), -1/(6*delta**3)], [2/3, 0., -1/delta**2, 1/(2*delta**3)], [1/6, 1/(2*delta), 1/(2*delta**2), -1/(2*delta**3)], [0., 0., 0., 1/(6*delta**3)]])
pp_1 = np.asfortranarray([[1/2, -1/delta, 1/(2*delta**2)], [1/2, 1/delta, -1/delta**2], [0., 0., 1/(2*delta**2)]])
HyCho_PIC_fast.pusher_reflecting(pos, T0, T1, p, spans_0, L, delta, coeff_0, coeff_1, pp_0, pp_1, res_0, res_1)
res_0
res_1
# Slow pure-Python reference evaluation for comparison with the kernel output.
res0_test = np.zeros(len(pos), dtype=float)
for ip in range(len(pos)):
    for i in range(spline_space_0.N):
        res0_test[ip] += coeff_0[i%Nbase_0]*spline_space_0(pos[ip], i)
res1_test = np.zeros(len(pos), dtype=float)
for ip in range(len(pos)):
    for i in range(spline_space_1.N):
        # NOTE(review): indexing coeff_1 with i%Nbase_0 — coeff_1 has Nbase_1
        # entries, so i%Nbase_1 looks intended; confirm.
        res1_test[ip] += coeff_1[i%Nbase_0]*spline_space_1(pos[ip], i)*p/(T1[i + p] - T1[i])
Lz = 10.
# ... unit test for relativistic boris pusher
# Two identical particles start off-centre with perpendicular (vx) and parallel
# (vz) momentum in the parabolic mirror field; the computed trajectory is
# compared against an analytic reference orbit plotted at the end.
test_particle = np.zeros((2, 5), order='F')
test_particle[:, 0] = Lz/2 - 2.62
gamma = 1/np.sqrt(1 - (0.117**2 + 0.0795**2))  # Lorentz factor of the initial velocity
test_particle[:, 1] = 0.117*gamma   # u_x = gamma*v_x
test_particle[:, 3] = 0.0795*gamma  # u_z = gamma*v_z
spans = np.floor(test_particle[:, 0]*Nel/Lz).astype(int) + p
xi_test = 0.862*0.01  # field inhomogeneity used for this test
T = 150.*4
dt = 0.02
pp_0 = np.asfortranarray([[1/6, -1/(2*delta), 1/(2*delta**2), -1/(6*delta**3)], [2/3, 0., -1/delta**2, 1/(2*delta**3)], [1/6, 1/(2*delta), 1/(2*delta**2), -1/(2*delta**3)], [0., 0., 0., 1/(6*delta**3)]])
pp_1 = np.asfortranarray([[1/2, -1/delta, 1/(2*delta**2)], [1/2, 1/delta, -1/delta**2], [0., 0., 1/(2*delta**2)]])
Nt = int(T/dt)
tn = np.linspace(0., T, Nt + 1)
# Zero wave fields: the particle only feels the background field.
ex = np.zeros(Nbase_0)
ey = np.zeros(Nbase_0)
bx = np.zeros(Nbase_1)
by = np.zeros(Nbase_1)
# One initial push with restored positions (velocity staggering).
z_old = np.copy(test_particle[:, 0])
HyCho_PIC_fast.pusher_reflecting(test_particle, dt, T0, T1, p, spans, Lz, delta, ex, ey, bx, by, pp_0, pp_1, xi_test)
test_particle[:, 0] = z_old
positions = np.empty(Nt + 1)
gammas = np.empty(Nt + 1)
positions[0] = test_particle[0, 0]
gammas[0] = gamma
for i in range(Nt):
    HyCho_PIC_fast.pusher_reflecting(test_particle, dt, T0, T1, p, spans, Lz, delta, ex, ey, bx, by, pp_0, pp_1, xi_test)
    positions[i + 1] = test_particle[0, 0]
    gammas[i + 1] = np.sqrt(1 + test_particle[0, 1]**2 + test_particle[0, 2]**2 + test_particle[0, 3]**2)
# ...
# Analytic reference: bounce oscillation with frequency ob about the field
# minimum at Lz/2 — presumably a harmonic (small-oscillation) approximation; confirm.
omega = 1.*(1 + xi_test*(2.62)**2)
rho = -np.cross(np.array([0.117*gamma, 0., 0.0795*gamma]), np.array([0., 0., 1.]))/omega
B = np.array([-xi_test*rho[0]*(-2.62), -xi_test*rho[1]*(-2.62), (1 + xi_test*(2.62)**2)])
ob = 0.117*np.sqrt(xi_test/np.linalg.norm(B))
phi = np.arctan(0.0795/((Lz/2 - 2.62)*ob))
A = (Lz/2 - 2.62)/np.sin(phi)
plt.plot(tn, (-2.62*np.cos(ob*tn) + 0.0795/ob*np.sin(ob*tn)) + Lz/2)
plt.plot(tn[0::200], positions[0::200], 'k+')
#plt.ylim((Lz/2 - 4, Lz/2 + 6))
#plt.plot(tn, gammas/gamma + 24.)
elif rel == 1:
#$ omp parallel
#$ omp do reduction ( + : jh_x, jh_y ) private ( ip, pos, span, left, right, values, ux, uy, uz, wp_over_gamma, il, i, bi )
for ip in range(np):
pos = particles[ip, 0]
span = spans[ip]
basis_funs(knots, degree, pos, span, left, right, values)
ux = particles[ip, 1]
uy = particles[ip, 2]
uz = particles[ip, 3]
wp_over_gamma = particles[ip, 4]/sqrt(1. + ux**2 + uy**2 + uz**2)
for il in range(degree + 1):
i = (span - il)%n_base
bi = values[degree - il]*wp_over_gamma
jh_x[i] -= ux*bi
jh_y[i] -= uy*bi
#$ omp end do
#$ omp end parallel
```
| github_jupyter |
介绍如何在pytorch环境下,使用FGSM算法攻击基于ImageNet数据集预训练的alexnet模型。
Jupyter notebook中使用Anaconda中的环境需要单独配置,默认情况下使用的是系统默认的Python环境,以使用advbox环境为例。
首先在默认系统环境下执行以下命令,安装ipykernel。
conda install ipykernel
conda install -n advbox ipykernel
在advbox环境下激活,这样启动后就可以在界面上看到advbox了。
python -m ipykernel install --user --name advbox --display-name advbox
```
import logging
logging.basicConfig(level=logging.INFO,format="%(filename)s[line:%(lineno)d] %(levelname)s %(message)s")
logger=logging.getLogger(__name__)
import torch
import torchvision
from torchvision import datasets, transforms
from torch.autograd import Variable
import torch.utils.data.dataloader as Data
import torch.nn as nn
from torchvision import models
from advbox.adversary import Adversary
from advbox.attacks.gradient_method import FGSMT
from advbox.attacks.gradient_method import FGSM
from advbox.models.pytorch import PytorchModel
import numpy as np
import cv2
from tools import show_images_diff
# Path of the image to attack
image_path="tutorials/cropped_panda.jpg"
# Define what device we are using
logging.info("CUDA Available: {}".format(torch.cuda.is_available()))
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
#cv2默认读取格式为bgr bgr -> rgb
orig = cv2.imread(image_path)[..., ::-1]
#转换成224*224
orig = cv2.resize(orig, (224, 224))
img = orig.copy().astype(np.float32)
#图像数据标准化
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
img /= 255.0
img = (img - mean) / std
#pytorch中图像格式为CHW
#[224,224,3]->[3,224,224]
img = img.transpose(2, 0, 1)
img = Variable(torch.from_numpy(img).to(device).float().unsqueeze(0)).cpu().numpy()
# Initialize the network
#Alexnet
model = models.alexnet(pretrained=True).to(device).eval()
#print(model)
#设置为不保存梯度值 自然也无法修改
for param in model.parameters():
param.requires_grad = False
# advbox demo
m = PytorchModel(
model, None,(-3, 3),
channel_axis=1)
attack = FGSMT(m)
# 静态epsilons
attack_config = {"epsilons": 0.001, "epsilon_steps": 1, "steps": 100}
inputs=img
labels = None
print(inputs.shape)
adversary = Adversary(inputs, labels)
tlabel = 538
adversary.set_target(is_targeted_attack=True, target_label=tlabel)
adversary = attack(adversary, **attack_config)
if adversary.is_successful():
print(
'attack success, adversarial_label=%d'
% (adversary.adversarial_label))
adv=adversary.adversarial_example[0]
else:
print('attack failed')
print("fgsm attack done")
#格式转换
adv = adv.transpose(1, 2, 0)
adv = (adv * std) + mean
adv = adv * 256.0
adv = np.clip(adv, 0, 255).astype(np.uint8)
#显示原始图片 抵抗样本 以及两张图之间的差异 其中灰色代表没有差异的像素点
show_images_diff(orig,adversary.original_label,adv,adversary.adversarial_label)
```
| github_jupyter |
```
# Cloning Mask_RCNN model
# !git clone https://github.com/matterport/Mask_RCNN.git
# !pip3 install imutils
# !pip install -U scikit-image
import numpy as np
from numpy import zeros
from numpy import asarray
import colorsys
import argparse
import imutils
import random
import cv2
import os
import time
import sys
from sklearn.cluster import KMeans
from scipy import ndimage
import matplotlib.pyplot as plt
# Root directory of the project
ROOT_DIR = os.path.abspath("Mask_RCNN")
import warnings
warnings.filterwarnings("ignore")
# Import Mask RCNN
sys.path.append(ROOT_DIR) # To find local version of the library
from mrcnn.config import Config
from mrcnn import model as modellib
from mrcnn import visualize
import mrcnn
from mrcnn.utils import Dataset
from mrcnn.model import MaskRCNN
from matplotlib import pyplot
from matplotlib.patches import Rectangle
from keras.models import load_model
%matplotlib inline
from os import listdir
from xml.etree import ElementTree
# Creating a class for training on labelled data sets
# Creating a class for training on labelled data sets.
# Subclasses the mrcnn Config to override training hyper-parameters.
class trashMaskRCNNConfig(Config):
    # Giving configuration a recognisable name
    NAME = "Mask_RCNN_config"
    # Number of GPUs to use, and number of images per GPU
    GPU_COUNT = 1
    IMAGES_PER_GPU = 1
    # Number of classes (normally add + 1 to background)
    # Thus trash + background
    NUM_CLASSES = 1 + 1
    # Number of training steps per epoch
    STEPS_PER_EPOCH = 131
    # Learning rate
    LEARNING_RATE = 0.006
    # Skip detections with <90% confidence
    DETECTION_MIN_CONFIDENCE = 0.9
    # Setting max ground truth instances (max object instances per image)
    MAX_GT_INSTANCES = 10
# Creating TrashMaskRCNNConfig instance
config = trashMaskRCNNConfig()
# Displaying all config values
config.display()
# Building custom trash data set
# Building custom trash data set on top of the mrcnn Dataset base class.
class TrashDataset(Dataset):
    # Loading dataset definitions
    # BUG FIX: the def line was missing its trailing colon, which made the
    # whole cell a syntax error.
    def load_dataset(self, dataset_dir, is_train=True):
        """Register the dataset's classes (here only the single 'Trash' class).

        dataset_dir: root directory of the labelled images.
        is_train: whether this instance is the training split.
        """
        # Add classes
        # Only one class to add in our case
        self.add_class("dataset", 1, "Trash")
#
# Image segmentation based on clustering
# Separating image into foregrounds and backgrounds
pic = plt.imread('malaga-13-sorted/andres_trash/img_CAMERA1_1261233382.061621_right.jpg')/255 # Dividing by 255 to bring pixel values between 0 and 1
print(pic.shape)
plt.imshow(pic)
# Converting into 2d array to do k-means clustering on (pixels x channels)
pic_2d = pic.reshape(pic.shape[0] * pic.shape[1], pic.shape[2])
pic_2d.shape
# Using kmeans clustering: group pixel colours into 10 clusters
kmeans = KMeans(n_clusters=10, random_state=0).fit(pic_2d)
# Replace each pixel by the centre of its cluster to visualise the quantised image.
pic2show = kmeans.cluster_centers_[kmeans.labels_]
print(pic2show.shape)
print(pic2show)
print('\n')
test = kmeans.cluster_centers_
print(test.shape)
print(test)
print('\n')
labels = kmeans.labels_
print(labels.shape)
print(labels)
# Reshaping image back to 3d and plotting results
cluster_pic = pic2show.reshape(pic.shape[0], pic.shape[1], pic.shape[2])
plt.imshow(cluster_pic, cmap = 'viridis', interpolation='nearest')
# Grabbing only the foreground
# Choosing only one cluster center by changing the rest to 1, or white
centers = kmeans.cluster_centers_
# NOTE(review): this comparison is element-wise per colour channel, so a
# channel of another centre that happens to equal the same channel of
# centers[9] is kept as well — confirm that is intended.
chosen_center = np.where(centers!= centers[9], 1, centers)
print(chosen_center)
# Grabbing only that cluster
foreground = chosen_center[kmeans.labels_]
# Reshaping image back to 3d and plotting results
foreground_pic = foreground.reshape(pic.shape[0], pic.shape[1], pic.shape[2])
plt.imshow(foreground_pic, cmap = 'viridis', interpolation='nearest')
# Defining Laplacian filter for convolution (edge detector: centre -8, ring of 1s)
kernel_laplace = np.array([np.array([1,1,1]), np.array([1, -8, 1]), np.array([1,1,1])])
print(kernel_laplace, 'is a laplacian kernel')
# Convolving over image and then reshaping it back
# NOTE(review): `foreground` is still the flat (pixels x 3) array here, so the
# 3x3 kernel slides over the pixel-list layout, not over image rows/columns —
# verify whether the convolution should use foreground_pic instead.
convolved = ndimage.convolve(foreground, kernel_laplace, mode = 'reflect')
# Reshaping image back to 3d and plotting results
convolved_pic = convolved.reshape(pic.shape[0], pic.shape[1], pic.shape[2])
plt.imshow(convolved_pic, cmap = 'viridis', interpolation='nearest')
# Drawing bounding boxes over convolved features
# Separate image into foreground and background
# Use Laplacian filter on foreground
```
| github_jupyter |
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sb
from sklearn.metrics import confusion_matrix,classification_report,roc_curve
from sklearn.model_selection import train_test_split,GridSearchCV
from sklearn.feature_selection import SelectKBest,chi2
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.ensemble import RandomForestClassifier,AdaBoostClassifier,BaggingClassifier
from sklearn.linear_model import LogisticRegression
# Load the DBSCAN-labelled bank dataset.
bank=pd.read_csv("D://project/bank/bank_final/bank_DBSCAN_label.csv")
bank.head()
# Drop the leftover CSV index column.
bank.drop("Unnamed: 0",axis=1, inplace=True)
bank.head()
# Correlation heatmap of all features.
plt.figure(figsize=(11,9))
sb.heatmap(bank.corr(),annot=True)
# Create a sub dataset with the most important features and make train/test splits.
x=bank.drop(["Label_db","DFACD","DBNCD"],axis=1) # drop DBNCD and DFACD because chi2 requires non-negative inputs
y=bank.Label_db
# Univariate feature selection: keep the 4 best features by chi-squared score.
skb=SelectKBest(chi2,k=4)
x_new=skb.fit_transform(x,y)
skb.pvalues_
skb.scores_
x_new
# Rebuild the full feature matrix (keeping DFACD this time) for comparison runs.
x=bank.drop(["Label_db","DBNCD"],axis=1)
# 70/30 splits on both the full and the chi2-reduced feature sets (same seed so they are comparable).
xtrain,xtest,ytrain,ytest=train_test_split(x,y,random_state=33,test_size=0.3)
xnew_train,xnew_test,ynew_train,ynew_test=train_test_split(x_new,y,random_state=33,test_size=0.3)
## Create the models: SVM, KNN, ANN (MLP), logistic regression, random forest, AdaBoost, bagging.
#svc
## Find the best hyper-parameters for the SVM.
param_grids={"kernel":["linear","rbf"],
             "C":[5,10,15,30],
             "random_state":[42,33,0,303]
            }
# Find best params for SVC with grid-search cross-validation.
grid=GridSearchCV(SVC(),param_grids)
grid.fit(xtrain,ytrain)
grid.best_estimator_
grid.best_score_
grid.best_params_
# Refit with the chosen kernel/C; probability=True enables predict_proba.
svc=SVC(kernel="rbf",C=30,probability=True)
svc.fit(xtrain,ytrain)
print("SVC","\n",'-'*40)
print("score train:: ",svc.score(xtrain,ytrain)," || score test::",svc.score(xtest,ytest))
ypred=svc.predict(xtest)
print("-"*40)
print(classification_report(ytest,ypred))
print("-"*40)
print(confusion_matrix(ytest,ypred))
# Repeat the search on the chi2-reduced feature set.
grid.fit(xnew_train,ynew_train)
grid.best_params_
# Same SVC hyper-parameters as above, refit on the reduced features.
svc.fit(xnew_train,ynew_train)
print("SVC","\n",'-'*40)
print("score train:: ",svc.score(xnew_train,ynew_train)," || score test::",svc.score(xnew_test,ynew_test))
ynew_pred=svc.predict(xnew_test)
print("-"*40)
print(classification_report(ynew_test,ynew_pred))
print("-"*40)
print(confusion_matrix(ynew_test,ynew_pred))
#knn
# Grid-search the neighbourhood size and weighting scheme.
knn_param={"n_neighbors":[3,4,5,6,7,8,9,15,20,30]
           ,"weights":['uniform',"distance"],
          }
grid=GridSearchCV(KNeighborsClassifier(),knn_param)
grid.fit(xtrain,ytrain)
grid.best_score_
grid.best_params_
# Refit with the selected hyper-parameters on the full feature set.
knn=KNeighborsClassifier(n_neighbors=30,weights="distance",metric="euclidean")
knn.fit(xtrain,ytrain)
print("train score: ",knn.score(xtrain,ytrain),"test score: ",knn.score(xtest,ytest))
print("*"*40)
ypred=knn.predict(xtest)
print(classification_report(ytest,ypred))
print("*"*40)
print(confusion_matrix(ytest,ypred))
# Repeat on the chi2-reduced feature set.
grid.fit(xnew_train,ynew_train)
grid.best_score_
grid.best_params_
knn.fit(xnew_train,ynew_train)
print("score train: ",knn.score(xnew_train,ynew_train),"score test: ",knn.score(xnew_test,ynew_test))
print("#"*40)
ypred=knn.predict(xnew_test)
print(classification_report(ynew_test,ypred))
print(confusion_matrix(ynew_test,ypred))
#mlpclassifier
# NOTE(review): this parameter grid is defined but never passed to a
# GridSearchCV — the MLP below uses hand-picked settings instead. Confirm
# whether a grid search was intended here.
param_mlp={
    "hidden_layer_sizes":[(10,)],
    "activation":["tanh", "relu"]
}
# Two hidden layers (20, 10) with logistic activation; fixed seed for reproducibility.
mlp=MLPClassifier((20,10),activation="logistic",random_state=42)
mlp.fit(xtrain,ytrain)
print("score train: ",mlp.score(xtrain,ytrain),"score test: ",mlp.score(xtest,ytest))
print("*"*40)
ypred=mlp.predict(xtest)
print(classification_report(ytest,ypred))
print(confusion_matrix(ytest,ypred))
#RandomForestClassifier
# Grid-search forest size, split criterion and depth.
rf_param={"n_estimators":[20,50,100],
          "criterion":["gini","entropy"],
          "max_depth":[5,10,20,100],
         }
grid=GridSearchCV(RandomForestClassifier(random_state=42),rf_param)
grid.fit(xtrain,ytrain)
grid.best_score_
grid.best_params_
# Refit with the selected hyper-parameters.
rfc=RandomForestClassifier(n_estimators=20,criterion="entropy",max_depth=20)
rfc.fit(xtrain,ytrain)
print("score train: ",rfc.score(xtrain,ytrain),"score test: ",rfc.score(xtest,ytest))
print("#"*20)
ypred=rfc.predict(xtest)
print(classification_report(ytest,ypred))
print(confusion_matrix(ytest,ypred))
#The importance of features in determining the output of the model
rfc.feature_importances_
# Repeat on the chi2-reduced feature set.
rfc.fit(xnew_train,ynew_train)
print("score train: ",rfc.score(xnew_train,ynew_train),"score test: ",rfc.score(xnew_test,ynew_test))
print("#"*20)
ypred=rfc.predict(xnew_test)
# BUG FIX: report the random-forest predictions just computed (ypred), not
# the stale SVC predictions left in ynew_pred by an earlier cell.
print(classification_report(ynew_test,ypred))
print(confusion_matrix(ynew_test,ypred))
rfc.feature_importances_
#AdaBoostClassifier
# Grid-search only the ensemble size.
ada_param={"n_estimators":[20,25,30,50,100]
          }
grid=GridSearchCV(AdaBoostClassifier(random_state=42),ada_param)
grid.fit(xtrain,ytrain)
grid.best_params_
grid.best_score_
# Refit with the selected ensemble size.
ada=AdaBoostClassifier(n_estimators=25,random_state=42)
ada.fit(xtrain,ytrain)
print("score train: ",ada.score(xtrain,ytrain),"score test: ",ada.score(xtest,ytest))
print("#"*20)
ypred=ada.predict(xtest)
print(classification_report(ytest,ypred))
print(confusion_matrix(ytest,ypred))
ada.feature_importances_
# Repeat on the chi2-reduced feature set.
ada.fit(xnew_train,ynew_train)
print("score train: ",ada.score(xnew_train,ynew_train),"score test: ",ada.score(xnew_test,ynew_test))
print("#"*20)
ypred=ada.predict(xnew_test)
# BUG FIX: evaluate the AdaBoost predictions just computed (ypred), not the
# stale SVC predictions left in ynew_pred by an earlier cell.
print(classification_report(ynew_test,ypred))
print(confusion_matrix(ynew_test,ypred))
ada.feature_importances_
#logistic Regression
logr=LogisticRegression(C=10)
logr.fit(xtrain,ytrain)
print("score train: ",logr.score(xtrain,ytrain),"score test: ",logr.score(xtest,ytest))
print("#"*20)
ypred=logr.predict(xtest)
print(classification_report(ytest,ypred))
print(confusion_matrix(ytest,ypred))
# Repeat on the chi2-reduced feature set.
logr.fit(xnew_train,ynew_train)
print("score train: ",logr.score(xnew_train,ynew_train),"score test: ",logr.score(xnew_test,ynew_test))
print("#"*20)
ypred=logr.predict(xnew_test)
# BUG FIX: evaluate the logistic-regression predictions just computed
# (ypred), not the stale SVC predictions left in ynew_pred earlier.
print(classification_report(ynew_test,ypred))
print(confusion_matrix(ynew_test,ypred))
#bagging with knn , svm , logreg
bag_param={"base_estimator":[SVC(),KNeighborsClassifier(),LogisticRegression()],
           "n_estimators":[5,11,21,25,33],
           "random_state":[0,42,33]
          }
# BUG FIX: the original line was `GridSearchCV(,bag_param)`, a syntax error.
# The grid needs a BaggingClassifier whose base_estimator / n_estimators /
# random_state are filled in from bag_param.
grid=GridSearchCV(BaggingClassifier(),bag_param)
grid.fit(xtrain,ytrain)
grid.best_params_
grid.best_score_
# Refit with the SVC base estimator and the selected ensemble size.
bag=BaggingClassifier(SVC(kernel="rbf",C=30,probability=True),n_estimators=33,random_state=42)
bag.fit(xtrain,ytrain)
print("score train: ",bag.score(xtrain,ytrain),"score test: ",bag.score(xtest,ytest))
print("#"*20)
ypred=bag.predict(xtest)
print(classification_report(ytest,ypred))
print(confusion_matrix(ytest,ypred))
# Repeat on the chi2-reduced feature set.
bag.fit(xnew_train,ynew_train)
print("score train: ",bag.score(xnew_train,ynew_train),"score test: ",bag.score(xnew_test,ynew_test))
print("#"*20)
ypred=bag.predict(xnew_test)
print(classification_report(ynew_test,ypred))
print(confusion_matrix(ynew_test,ypred))
```
## end
| github_jupyter |
# What is Principal Component Analysis?
Principal Component Analysis (PCA) is a simple yet popular and useful linear transformation technique that is used in numerous applications, such as stock market predictions, the analysis of gene expression data, and many more.
The sheer size of data in the modern age is not only a challenge for computer hardware but also a main bottleneck for the performance of many machine learning algorithms. The main goal of a PCA analysis is to identify patterns in data; PCA aims to detect the correlation between variables. If a strong correlation between variables exists, attempting to reduce the dimensionality makes sense. In a nutshell, this is what PCA is all about: finding the directions of maximum variance in high-dimensional data and projecting it onto a smaller-dimensional subspace while retaining most of the information.
```
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix, accuracy_score, classification_report
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
fig = plt.figure(figsize=(10, 10))
%matplotlib inline
import warnings
warnings.filterwarnings('ignore')
# Importing the dataset into a pandas dataframe
# (Wine.csv: 13 feature columns plus a final class-label column — see shapes below.)
df = pd.read_csv("Wine.csv")
df.head(10)
df.tail(10)
```
<h4>So Dependent Variable of the dataset is splitted into 3 classes namely(1, 2, 3)</h4>
```
df.shape
# First 13 columns are the features; column 13 holds the class label (1, 2, 3).
X = df.iloc[:, 0:13].values
Y = df.iloc[:, 13].values
# Splitting the dataset into the Training set and Test set (80/20, fixed seed)
X_train, X_test, Y_train, Y_test = train_test_split(X,
                                                    Y,
                                                    test_size = 0.2,
                                                    random_state = 0)
print("Size of X_train: {}".format(X_train.shape))
print("Size of X_test: {}".format(X_test.shape))
print("Size of Y_train: {}".format(Y_train.shape))
print("Size of Y_test: {}".format(Y_test.shape))
# Feature Scaling: fit on the training set only, then apply to both splits,
# so no test-set statistics leak into training.
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
```
After fitting PCA we can inspect the explained variances: the first two components capture the highest variance.
So we will set n_components in PCA to 2.
```
# Applying PCA (keep the two components with the highest variance)
pca = PCA(n_components = 2)
X_train = pca.fit_transform(X_train)
X_test = pca.transform(X_test)
#Checking The Variances
# BUG FIX: the original read pca.explained_variance_ BEFORE `pca` was
# created, which raised NameError; the fit above must come first.
Variances = pca.explained_variance_
Variances
# Fitting Logistic Regression to the Training set
classifier = LogisticRegression(random_state = 0)
classifier.fit(X_train, Y_train)
#Predicting The Results
y_pred = classifier.predict(X_test)
y_pred
#Comparing the results
cm = confusion_matrix(Y_test, y_pred)
cm
#Checking The Accuracy score
acc = accuracy_score(Y_test, y_pred)
print("The Accuracy on the model is: {}%".format((acc*100).astype('int32')))
```
<h3>Building a text report showing the main classification metrics</h3>
```
# Text report of precision/recall/F1 per class on the test set.
cr = classification_report(Y_test, y_pred)
print(cr)
# Visualising the Training set results
x_set, y_set = X_train, Y_train
# Creating the grid of minimum and maximum values from X_train
# (step 0.01 gives a dense mesh over the two principal components).
X1, X2 = np.meshgrid(np.arange(start = x_set[:, 0].min() - 1,
                               stop = x_set[:, 0].max() + 1,
                               step = 0.01),
                     np.arange(start = x_set[:, 1].min() - 1,
                               stop = x_set[:, 1].max() + 1,
                               step = 0.01))
# Plotting the classifier's decision regions: predict every grid point,
# reshape back to the mesh, and colour by predicted class.
plt.contourf(X1,
             X2,
             classifier.predict(np.array([X1.ravel(),
                                          X2.ravel()]).T).reshape(X1.shape),
             alpha = 0.4,
             cmap = ListedColormap(('red', 'green', 'blue')))
# Plotting the data points, coloured by their true class.
for i,j in enumerate(np.unique(y_set)):
    plt.scatter(x_set[y_set == j, 0],
                x_set[y_set == j, 1],
                c = ListedColormap(('red', 'green', 'blue'))(i),
                label = j
               )
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
plt.title("PCA + Logistic Regression (Training Set)")
plt.xlabel('PCA1')
plt.ylabel('PCA2')
plt.legend()
# Visualising the Test set results (same procedure as above, on X_test)
x_set, y_set = X_test, Y_test
# Creating the grid of minimum and maximum values from X_test
X1, X2 = np.meshgrid(np.arange(start = x_set[:, 0].min() - 1,
                               stop = x_set[:, 0].max() + 1,
                               step = 0.01),
                     np.arange(start = x_set[:, 1].min() - 1,
                               stop = x_set[:, 1].max() + 1,
                               step = 0.01))
# Plotting the classifier's decision regions.
plt.contourf(X1,
             X2,
             classifier.predict(np.array([X1.ravel(),
                                          X2.ravel()]).T).reshape(X1.shape),
             alpha = 0.4,
             cmap = ListedColormap(('red', 'green', 'blue')))
# Plotting the data points, coloured by their true class.
for i,j in enumerate(np.unique(y_set)):
    plt.scatter(x_set[y_set == j, 0],
                x_set[y_set == j, 1],
                c = ListedColormap(('red', 'green', 'blue'))(i),
                label = j
               )
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
plt.title("PCA + Logistic Regression (Test Set)")
plt.xlabel('PCA1')
plt.ylabel('PCA2')
plt.legend()
```
| github_jupyter |
# Independent expenditures experiments
By Ben Welsh
## Import tools
```
import os
import requests
from datetime import datetime
from clint.textui import progress
import pandas
pandas.set_option('display.float_format', lambda x: '%.2f' % x)
pandas.set_option('display.max_columns', None)
import matplotlib.pyplot as plt
import matplotlib.dates as dates
%matplotlib inline
def download_csv_to_dataframe(name):
    """
    Accepts the name of a calaccess.download CSV and returns it as a pandas dataframe.

    The file is downloaded (with a progress bar) into the current working
    directory on first use and reused as a local cache afterwards.
    """
    path = os.path.join(os.getcwd(), '{}.csv'.format(name))
    if not os.path.exists(path):
        url = "http://calaccess.download/latest/{}.csv".format(name)
        r = requests.get(url, stream=True)
        # BUG FIX: the streamed chunks are bytes, so the cache file must be
        # opened in binary mode ('wb'); text mode corrupts the download on
        # Windows and raises TypeError on Python 3.
        with open(path, 'wb') as f:
            total_length = int(r.headers.get('content-length'))
            for chunk in progress.bar(r.iter_content(chunk_size=1024), expected_size=(total_length/1024) + 1):
                if chunk:
                    f.write(chunk)
                    f.flush()
    return pandas.read_csv(path)
def remove_amended_filings(df):
    """
    Accepts a dataframe with FILING_ID and AMEND_ID columns.

    Returns only the rows belonging to the highest amendment for each unique
    filing id, discarding superseded (amended) filings, and prints how many
    rows were dropped.
    """
    # For each filing, the latest amendment supersedes all earlier ones.
    max_amendments = df.groupby('FILING_ID')['AMEND_ID'].agg("max").reset_index()
    merged_df = pandas.merge(df, max_amendments, how='inner', on=['FILING_ID', 'AMEND_ID'])
    # print(...) with a single argument behaves identically on Python 2 and 3,
    # unlike the `print x` statement form originally used here.
    print("Removed {} amendments".format(len(df)-len(merged_df)))
    print("DataFrame now contains {} rows".format(len(merged_df)))
    return merged_df
```
## Download quarterly filings
Independent expenditure committees are required to file [Form 461](http://calaccess.californiacivicdata.org/documentation/calaccess-forms/f461/). The summary totals from those filings are found in the [SMRY_CD](http://calaccess.californiacivicdata.org/documentation/calaccess-files/smry-cd/) file.
```
quarterly_df = download_csv_to_dataframe("smry_cd")
```
### Basic information about the file
```
quarterly_df.info()
quarterly_df.head()
```
### Frequency counts on the fields
```
quarterly_df.groupby(['FORM_TYPE'])['FILING_ID'].agg(['count'])
```
### Filter down to only Form 461 filings
```
quarterly_ie_filings = quarterly_df[quarterly_df['FORM_TYPE'] == 'F461']
print len(quarterly_ie_filings)
```
### Check what line items are recorded for this form
```
quarterly_ie_filings.groupby(['LINE_ITEM'])['FILING_ID'].agg(['count'])
```
Here's the paper form where those five line-items can be seen in the right-hand summary section. It looks to be that line three is the total made during the current reporting period.

### Filter down to those total line items
```
quarterly_ie_totals = quarterly_ie_filings[quarterly_ie_filings['LINE_ITEM'] == '3']
print len(quarterly_ie_totals)
```
### Prepare the table for analysis
```
real_ie_totals = remove_amended_filings(quarterly_ie_totals)
```
## Download itemized independent expenditures
- Download EXPN_CD table
- Filter it down to independent expenditures
- Remove all amended filings
- Check if they all come from Form 461 filings, or if other forms can disclosure IEs
- If they all come from 461s, try to walk them back to their filers via the cover sheet or filer_filings or something
- If you can do that, reconcile them with the late filings with some kind of date trick
```
itemized_df = download_csv_to_dataframe("expn_cd")
itemized_df.info()
itemized_ies = itemized_df[itemized_df['EXPN_CODE'] == 'IND']
print len(itemized_ies)
real_itemized_ies = remove_amended_filings(itemized_ies)
real_itemized_ies.sort_values('EXPN_DATE', ascending=False).head()
```
## Download late independent expenditure filings
They are filed via [Form 496](http://calaccess.californiacivicdata.org/documentation/calaccess-forms/f496/) for all independent expenditures over $1,000 in the 90 days prior to an election. The data are recorded in the [S496 file](http://calaccess.californiacivicdata.org/documentation/calaccess-files/s496-cd/).
```
late_df = download_csv_to_dataframe("s496_cd")
```
### Convert the date field to a datetime object
```
late_df['EXP_DATE'] = pandas.to_datetime(
late_df['EXP_DATE'],
errors="coerce"
)
```
### Basic information about the file
```
late_df.info()
late_df.head()
```
### Frequency counts on the fields
```
late_df.groupby(['REC_TYPE'])['FILING_ID'].agg(['count'])
late_df.groupby(['FORM_TYPE'])['FILING_ID'].agg(['count'])
late_df.groupby(['LINE_ITEM'])['FILING_ID'].agg(['count'])
```
### Preparing the file for analysis
```
real_late_filings = remove_amended_filings(late_df)
```
### Figure out what to do with the different line numbers
Let's start by pulling the filing with the most line items.
```
real_late_filings[real_late_filings['LINE_ITEM'] == 104]
longest_late_filing = real_late_filings[real_late_filings['FILING_ID'] == 1717649]
longest_late_filing
```
That filing can be reviewed [here](http://cal-access.ss.ca.gov/PDFGen/pdfgen.prg?filingid=1717649&amendid=0). The expenditure count is 104, which matches this data. There are three late contributions at the bottom that do not appear in this table and must be recorded elsewhere.
## Summing up the money
```
real_late_filings['AMOUNT'].sum()
```
### Spending by year
```
def trim_to_year(row):
    """Return the calendar year of the row's EXP_DATE, or NaN when unavailable."""
    try:
        return row['EXP_DATE'].year
    except TypeError:
        # BUG FIX: pandas exposes no `NaN` attribute (pandas.NaN raised
        # AttributeError); return the stdlib float NaN instead.
        return float('nan')
# Derive a year column from each expenditure date, sum amounts per year,
# and plot the yearly totals.
real_late_filings["year"] = real_late_filings.apply(trim_to_year, axis=1)
late_by_year = real_late_filings.groupby('year')['AMOUNT'].agg('sum')
late_by_year = late_by_year.to_frame('sum').reset_index()
fig = plt.figure(1, figsize=(16,8))
ax1 = fig.add_subplot(211)
ax1.plot(late_by_year['year'], late_by_year['sum'])
ax1.set_title('Independent expenditure spending')
ax1.set_xlabel('Year')
ax1.set_ylabel('Sum')
```
### Spending by month
```
def trim_to_month(row):
    """Return the row's EXP_DATE truncated to the first day of its month, or NaT if the date is unusable."""
    try:
        exp = row['EXP_DATE']
        return datetime(exp.year, exp.month, day=1)
    except TypeError:
        return pandas.NaT
# Derive a month column (first-of-month timestamps), sum amounts per month,
# and plot the monthly totals.
real_late_filings["month"] = real_late_filings.apply(trim_to_month, axis=1)
late_by_month = real_late_filings.groupby('month')['AMOUNT'].agg('sum')
late_by_month = late_by_month.to_frame('sum').reset_index()
fig = plt.figure(1, figsize=(16,8))
ax1 = fig.add_subplot(211)
ax1.plot(late_by_month['month'], late_by_month['sum'])
ax1.set_title('Independent expenditure spending')
ax1.set_xlabel('Month')
ax1.set_ylabel('Sum')
```
## Summing up the expenditure types
```
# Total spending per free-text expenditure description, largest first.
by_description = real_late_filings.groupby('EXPN_DSCR')['AMOUNT'].agg('sum')
by_description = by_description.to_frame('sum').reset_index()
by_description.sort_values("sum", ascending=False)
```
| github_jupyter |
```
import itertools
import numpy as np
import pandas as pd
import random
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
from pathlib import Path
from matplotlib import ticker
import matplotlib.patheffects as pe
from matplotlib.offsetbox import OffsetImage, AnnotationBbox
from multiprocessing import Pool
from tqdm.auto import tqdm
from omegaconf import OmegaConf
import scipy
from scipy.spatial.distance import pdist, squareform
from skimage import exposure, transform
from skimage.draw import line_aa
from skimage.color import gray2rgb, rgb2gray
from skimage.io import imread, imsave
from skimage.transform import resize
from skimage.util import montage
from sklearn.metrics import jaccard_score
from sklearn.preprocessing import StandardScaler
from datasets import PerineuronalNetsDataset, CellsDataset, PerineuronalNetsRankDataset
from methods.points.match import match
from methods.points.metrics import detection_and_counting
from methods.points.utils import draw_points, draw_groundtruth_and_predictions
tqdm.pandas()
```
# VGG Dataset
```
common = dict(root='data/vgg-cells')
detection_kws = {
'target_': 'detection',
'target_params': {
'side': 12
}
}
density_kws = {
'target_': 'density',
'target_params': {
'mode': 'reflect',
'k_size': 51,
'sigma': 5
}
}
segmentation_kws = {
'target_': 'segmentation',
'target_params': {
'radius': 5,
'radius_ignore': 6,
'v_bal': 0.1,
'sigma_bal': 3,
'sep_width': 1,
'sigma_sep': 3,
'lambda_sep': 50
}
}
detection_dataset = CellsDataset(**detection_kws, **common)
density_dataset = CellsDataset(**density_kws, **common)
segmentation_dataset = CellsDataset(**segmentation_kws, **common)
sample_idx = 0
sample = density_dataset[sample_idx][0][:, :, 0]
detections = detection_dataset[sample_idx][0][1]
density_map = density_dataset[sample_idx][0][:, :, 1]
segmentation_map = segmentation_dataset[sample_idx][0][:, :, 1]
weights_map = segmentation_dataset[sample_idx][0][:, :, 2]
detections = np.clip(detections.astype(int), 0, sample.shape[0] - 1)
sample = matplotlib.cm.jet(sample)
density_map = gray2rgb(density_map / density_map.max())
segmentation_map = gray2rgb(segmentation_map)
sample_with_boxes = sample.copy()
for y0, x0, y1, x1 in detections:
rect = ((y0, x0, y0, x1),
(y0, x1, y1, x1),
(y1, x1, y1, x0),
(y1, x0, y0, x0))
for r0, c0, r1, c1 in rect:
rr, cc, val = line_aa(r0, c0, r1, c1)
sample_with_boxes[rr, cc, 0] = val
fig, axes = plt.subplots(1, 4, figsize=(20, 20))
axes = axes.flatten()
axes[0].imshow(sample)
axes[1].imshow(sample_with_boxes)
axes[2].imshow(density_map)
axes[3].imshow(segmentation_map)
for ax in axes:
ax.set_axis_off()
plt.imsave('figures/vgg-sample.png', sample)
plt.imsave('figures/vgg-boxes.png', sample_with_boxes)
plt.imsave('figures/vgg-density.png', density_map)
plt.imsave('figures/vgg-segmentation.png', segmentation_map)
ncells = [len(detection_dataset[i][0][1]) for i in range(len(detection_dataset)) ]
f'{sum(ncells)} cells {np.mean(ncells):.2f}$\pm${np.std(ncells):.2f} cells/image'
def collect_runs(model_name, run, csv_file):
    """Load one run's test-prediction CSV and tag each row with run metadata.

    Reads num_samples and split_seed from the run's hydra config, then loads
    <run>/test_predictions/<csv_file>. If that CSV is missing, prints a
    notice and returns an empty DataFrame instead.
    """
    run_dir = Path(run)
    cfg = OmegaConf.load(run_dir / '.hydra' / 'config.yaml')
    validation_cfg = cfg['data']['validation']
    num_samples = validation_cfg['num_samples'][0]
    seed = validation_cfg['split_seed']
    csv_path = run_dir / 'test_predictions' / csv_file
    if not csv_path.exists():
        print(f'Skipping not found: {csv_path}')
        return pd.DataFrame()
    data = pd.read_csv(csv_path, index_col=0)
    # Attach the identifying metadata so runs can be grouped later.
    data['model'] = model_name
    data['num_samples'] = num_samples
    data['split_seed'] = seed
    return data
# Collect per-run metric CSVs for each model family under the VGG experiment.
runs = {
    'S-UNet': Path('runs/experiment=vgg-cells/segmentation/').glob('unet_*'),
    'FRCNN': Path('runs/experiment=vgg-cells/detection/').glob('fasterrcnn_*'),
    'D-CSRNet': Path('runs/experiment=vgg-cells/density/').glob('csrnet_*')
}
metrics = pd.concat([collect_runs(k, r, 'all_metrics.csv.gz') for k, v in runs.items() for r in v], ignore_index=True)
# Mean count-MAE per (model, num_samples, split_seed, threshold), then pick
# the best threshold per run and report mean +/- std over seeds as LaTeX.
mean_metrics = metrics.groupby(['model', 'num_samples', 'split_seed', 'thr'])['count/mae'].mean()
best_configs = mean_metrics.groupby(['model', 'num_samples', 'split_seed']).idxmin()
table = mean_metrics.loc[best_configs].groupby(['model', 'num_samples']).apply(lambda x: pd.Series({'mean': x.mean(), 'std': x.std()}))
table = table.unstack(2).apply(lambda x: f'{x["mean"]:.1f} $\pm$ {x["std"]:.1f}', axis=1).unstack(1)
print(table.to_latex(escape=False))
table
```
# MBM Dataset
```
common = dict(root='data/mbm-cells')
detection_kws = {
'target_': 'detection',
'target_params': {
'side': 20
}
}
density_kws = {
'target_': 'density',
'target_params': {
'mode': 'reflect',
'k_size': 51,
'sigma': 10
}
}
segmentation_kws = {
'target_': 'segmentation',
'target_params': {
'radius': 12,
'radius_ignore': 15,
'v_bal': 0.1,
'sigma_bal': 5,
'sep_width': 1,
'sigma_sep': 4,
'lambda_sep': 50
}
}
detection_dataset = CellsDataset(**detection_kws, **common)
density_dataset = CellsDataset(**density_kws, **common)
segmentation_dataset = CellsDataset(**segmentation_kws, **common)
sample_idx = 5
# sample = density_dataset[sample_idx][0][:, :, 0]
sample = imread('data/mbm-cells/BM_GRAZ_HE_0001_02_001_cell.png')
detections = detection_dataset[sample_idx][0][1]
density_map = density_dataset[sample_idx][0][:, :, 1]
segmentation_map = segmentation_dataset[sample_idx][0][:, :, 1]
weights_map = segmentation_dataset[sample_idx][0][:, :, 2]
detections = np.clip(detections.astype(int), 0, sample.shape[0] - 1)
sw_map = np.stack((segmentation_map, weights_map, np.zeros_like(weights_map)), axis=-1)
a = np.stack((rgb2gray(sample), segmentation_map, np.zeros_like(segmentation_map)), axis=-1)
density_map = gray2rgb(density_map / density_map.max())
segmentation_map = gray2rgb(segmentation_map)
sample_with_boxes = sample.copy()
for y0, x0, y1, x1 in detections:
rect = ((y0, x0, y0, x1),
(y0, x1, y1, x1),
(y1, x1, y1, x0),
(y1, x0, y0, x0))
for r0, c0, r1, c1 in rect:
rr, cc, val = line_aa(r0, c0, r1, c1)
sample_with_boxes[rr, cc, 0] = val
fig, axes = plt.subplots(1, 4, figsize=(20, 20))
axes = axes.flatten()
axes[0].imshow(sample)
axes[1].imshow(sample_with_boxes)
axes[2].imshow(density_map)
axes[3].imshow(segmentation_map)
for ax in axes:
ax.set_axis_off()
plt.subplots_adjust(wspace=0)
plt.imsave('figures/mbm-sample.png', sample)
plt.imsave('figures/mbm-boxes.png', sample_with_boxes)
plt.imsave('figures/mbm-density.png', density_map)
plt.imsave('figures/mbm-segmentation.png', segmentation_map)
runs = {
'S-UNet': Path('runs/experiment=mbm-cells/segmentation/').glob('unet_*'),
'FRCNN': Path('runs/experiment=mbm-cells/detection/').glob('fasterrcnn_*'),
'D-CSRNet': Path('runs/experiment=mbm-cells/density/').glob('csrnet_*')
}
metrics = pd.concat([collect_runs(k, r, 'all_metrics.csv.gz') for k, v in runs.items() for r in v], ignore_index=True)
mean_metrics = metrics.groupby(['model', 'num_samples', 'split_seed', 'thr'])['count/mae'].mean()
best_configs = mean_metrics.groupby(['model', 'num_samples', 'split_seed']).idxmin()
table = mean_metrics.loc[best_configs].groupby(['model', 'num_samples']).apply(lambda x: pd.Series({'mean': x.mean(), 'std': x.std()}))
table = table.unstack(2).apply(lambda x: f'{x["mean"]:.1f} $\pm$ {x["std"]:.1f}', axis=1).unstack(1)
print(table.to_latex(escape=False))
table
```
# PNN Dataset
## Examples
```
common = dict(split='train-half1', random_offset=0, patch_size=640)
detection_kws = {
'target_': 'detection',
'target_params': {
'side': 45
}
}
density_kws = {
'target_': 'density',
'target_params': {
'mode': 'reflect',
'k_size': 151,
'sigma': 15
}
}
segmentation_kws = {
'target_': 'segmentation',
'target_params': {
'radius': 20,
'radius_ignore': 25,
'v_bal': 0.1,
'sigma_bal': 10,
'sep_width': 1,
'sigma_sep': 6,
'lambda_sep': 50
}
}
detection_dataset = PerineuronalNetsDataset(**detection_kws, **common)
density_dataset = PerineuronalNetsDataset(**density_kws, **common)
segmentation_dataset = PerineuronalNetsDataset(**segmentation_kws, **common)
random_samples = np.random.randint(0, len(detection_dataset), 10000)
cells_per_sample = [len(detection_dataset[x][0][1]) for x in tqdm(random_samples)]
best_samples = np.argsort(cells_per_sample)[::-1]
sample_idx = random_samples[best_samples[31]]
sample = density_dataset[sample_idx][0][:, :, 0]
detections = detection_dataset[sample_idx][0][1]
density_map = density_dataset[sample_idx][0][:, :, 1]
segmentation_map = segmentation_dataset[sample_idx][0][:, :, 1]
weights_map = segmentation_dataset[sample_idx][0][:, :, 2]
detections = np.clip(detections.astype(int), 0, sample.shape[0] - 1)
sw_map = np.stack((segmentation_map, weights_map, np.zeros_like(sample)), axis=-1)
sample = matplotlib.cm.viridis(sample)
density_map = gray2rgb(density_map / density_map.max())
segmentation_map = gray2rgb(segmentation_map)
sample_with_boxes = sample.copy()
for y0, x0, y1, x1 in detections:
rect = ((y0, x0, y0, x1),
(y0, x1, y1, x1),
(y1, x1, y1, x0),
(y1, x0, y0, x0))
for r0, c0, r1, c1 in rect:
rr, cc, val = line_aa(r0, c0, r1, c1)
sample_with_boxes[rr, cc, 0] = val
fig, axes = plt.subplots(1, 4, figsize=(20, 20))
axes = axes.flatten()
axes[0].imshow(sample)
axes[1].imshow(sample_with_boxes)
axes[2].imshow(density_map)
axes[3].imshow(segmentation_map)
for ax in axes:
ax.set_axis_off()
plt.imsave('figures/pnn-sample.png', sample)
plt.imsave('figures/pnn-boxes.png', sample_with_boxes)
plt.imsave('figures/pnn-density.png', density_map)
plt.imsave('figures/pnn-segmentation.png', segmentation_map)
```
## Groundtruth Properties
```
# Load the multi-rater test annotations; columns AV..VT are presumably one
# per rater (TODO confirm), so their row sum is the number of raters who
# agreed on each annotation.
gt = pd.read_csv('data/perineuronal-nets/test/annotations.csv')
gt['agreement'] = gt.loc[:, 'AV':'VT'].sum(axis=1)
gt.groupby('imgName').X.count()
```
### Distribution of Agreement in the (Multi-Rater) Test Set
```
sns.set_theme(context='notebook', style='ticks')
data = gt.agreement.value_counts().sort_index()
_, _, autotexts = plt.pie(data.values, labels=data.index,
autopct='{:.2g}%'.format, pctdistance=0.75,
# colors=sns.color_palette('rocket', 7)
colors=sns.color_palette('rocket', as_cmap=True)(np.linspace(0, 1, 8)[1:])
)
plt.ylabel('agreement')
plt.setp(autotexts, size=12)
for t in autotexts[:4]:
t.set_color('white')
pnn_cells = PerineuronalNetsRankDataset(mode='patches')
x = enumerate(pnn_cells.annot.agreement.values)
x = sorted(x, key=lambda x: x[1])
x = itertools.groupby(x, key=lambda x: x[1])
means = []
for agreement, group in x:
samples = [i for i, _ in group]
images = [pnn_cells[i][0].astype(np.float32) / 255. for i in samples]
mean = np.mean(images, axis=0)
means.append(mean)
sorted_samples = np.sum((images * mean), axis=(1,2)).argsort()
sorted_samples = np.array(samples)[sorted_samples][::-1]
print(f'{agreement}:', sorted_samples[:12].tolist(), ',')
pnn_means = np.stack(means)
pnn_means = (pnn_means - np.min(pnn_means)) / (np.max(pnn_means) - np.min(pnn_means))
pnn_means = matplotlib.cm.viridis(pnn_means)[:,:,:,:3]
a = sorted(gt.agreement.values) + [100]
a = np.array(a).reshape((112, -1))
fig, ax = plt.subplots(figsize=(8, 5))
ax = sns.heatmap(a, vmin=0, vmax=7, square=True,
linewidths=0,
antialiased=True,
rasterized=True,
cbar=False, cbar_kws={"orientation": "horizontal", 'pad': 0.05, 'ticks': range(0, 8), 'drawedges': True})
plt.xticks([])
plt.ylabel('Rater\'s Agreement')
def find_pos(a):
    """Return the midpoint position of each run of equal values in *a*.

    Assumes *a* is sorted so equal values are contiguous; used to place
    heatmap tick labels at the center of each agreement band.
    """
    _, counts = np.unique(a, return_counts=True)
    edges = np.insert(counts.cumsum(), 0, 1)
    return (edges[:-1] + edges[1:]) / 2
l_pos = find_pos(a[:, 0])
r_pos = find_pos(a[:, 1])
_ = plt.yticks(l_pos, range(1, 8), rotation=0)
_, x_limit = plt.xlim()
y_limit, _ = plt.ylim()
def shuf(l):
    """Return a shuffled copy of list *l* without mutating the original."""
    shuffled = list(l)
    random.shuffle(shuffled)
    return shuffled
sample_indices = {
1: [1565, 2300, 1913, 311, 1799, 763, 72],
2: [2309, 386, 983, 56, 286, 951, 1774],
3: [198, 1874, 392, 872, 78, 390, 1103],
4: [777, 219, 1944, 1066, 217, 1115, 96],
5: [220, 2174, 945, 389, 385, 1633, 593],
6: [2218, 2058, 1436, 2212, 2034, 1433, 207],
7: [453, 7, 6, 4, 12, 644, 20]
}
pct = gt.agreement.value_counts()
pct = pct / pct.sum()
pct = pct.sort_index().values
cell_x = 1.51
nr = 1
for i, (ry, ly) in enumerate(zip(r_pos, l_pos), start=1):
cell_y = 1 - (i / len(sample_indices))
# percentage
ax.annotate(f'{pct[i - 1]:.0%}', (0.5, 1 - (ry+ly) / (2*y_limit)), xycoords='axes fraction',
color='white' if i < 4 else 'black', ha='center', va='center')
# connector
ax.annotate('', xy=(1, 1 - ry / y_limit), xycoords='axes fraction',
xytext=(cell_x, cell_y + 0.06), textcoords='axes fraction',
arrowprops=dict(arrowstyle='-', color='0.2', connectionstyle='arc,angleA=0,angleB=0,armA=-7,armB=7,rad=0'))
# mean image
imagebox = OffsetImage(pnn_means[i - 1], zoom=0.5, origin='upper')
ab = AnnotationBbox(imagebox, (cell_x, cell_y), xycoords='axes fraction', frameon=False, box_alignment=(0,0))
ax.add_artist(ab)
# samples
cell_images = [pnn_cells[j][0] for j in sample_indices[i]]
cell_images = [matplotlib.cm.viridis(c) for c in cell_images]
cell_images = np.stack(cell_images)[:,:,:,:3]
image = montage(cell_images, grid_shape=(nr, len(cell_images) / nr), padding_width=5, fill=(1, 1, 1), multichannel=True)
image = image[5:-5, ...]
imagebox = OffsetImage(image, zoom=0.5, origin='upper')
ab = AnnotationBbox(imagebox, (cell_x + 0.8, cell_y), xycoords='axes fraction', frameon=False, box_alignment=(0,0))
ax.add_artist(ab)
ax.annotate('mean', (cell_x, 1), xycoords='axes fraction')
ax.annotate('samples', (cell_x + 2, 1), xycoords='axes fraction')
plt.savefig('figures/pnn-mr-breakdown.pdf', bbox_inches='tight')
sample_id = gt.imgName.unique()[2]
img = plt.imread('data/perineuronal-nets/test/fullFrames/' + sample_id)
p2, p98 = np.percentile(img, (0.1, 99.9))
img = exposure.rescale_intensity(img, in_range=(p2, p98))
img = transform.resize(img, (1024, 1024))
img = matplotlib.cm.viridis(img)[:,:,:3]
scale_f = img.shape[0] / 2000
plt.figure(figsize=(8, 8))
plt.imshow(img)
ax = plt.gca()
ax.set_axis_off()
colors = sns.color_palette('rocket', as_cmap=True)(np.linspace(0, 1, 8)[1:])
for agreement, group in gt.set_index('imgName').loc[sample_id].groupby('agreement'):
color = colors[agreement - 1]
xs, ys = (group[['X', 'Y']].values * scale_f).astype(int).T
ax.plot(xs, ys, 'o', ms=25*scale_f, mec=color, mfc='none', mew=0.8)
plt.savefig('figures/pnn-mr-sample.pdf', bbox_inches='tight')
gt_sr = pd.read_csv('data/perineuronal-nets/train/annotations.csv')
sample_id = '034_B4_s06_C1.tif'
img = imread('data/perineuronal-nets/train/fullFrames/' + sample_id)
new_shape = (np.array(img.shape) / 10).astype(int)
scale_f = new_shape[0] / img.shape[0]
img = transform.resize(img, new_shape)
p2, p98 = np.percentile(img, (0.1, 99.9))
img = exposure.rescale_intensity(img, in_range=(p2, p98))
img = matplotlib.cm.viridis(img)[:,:,:3]
plt.figure(figsize=(14, 14))
plt.imshow(img)
ax = plt.gca()
ax.set_axis_off()
xy = gt_sr.set_index('imageName').loc[sample_id, ['X', 'Y']].values * scale_f
xs, ys = xy.astype(int).T
ax.plot(xs, ys, 'o', ms=35 * scale_f, mec='red', mfc='none', mew=0.8)
plt.savefig('figures/pnn-sr-sample.pdf', bbox_inches='tight')
```
### Agreement between Raters in the Test Set
```
raters = gt.loc[0, 'AV':'VT'].index.values
raters = np.array(raters).reshape(-1, 1)
def agree(r1, r2):
    """Jaccard similarity between the binary annotation vectors of two raters.

    Reads columns *r1* and *r2* of the global `gt` dataframe (one 0/1 flag
    per annotated dot per rater).
    """
    a, b = gt[r1], gt[r2]
    return jaccard_score(a, b)
raters_agreement = pdist(raters, agree)
raters_agreement = squareform(raters_agreement)
mask = 1 - np.tri(len(raters), k=-1)
raters_agreement = raters_agreement[1:, :-1]
mask = mask[1:, :-1]
ylabels = [f'R{i+2}' for i in range(len(raters)-1)]
xlabels = [f'R{i+1}' for i in range(len(raters)-1)]
sns.heatmap(raters_agreement, mask=mask, annot=True, square=True,
xticklabels=xlabels, yticklabels=ylabels, cmap='viridis',
cbar_kws=dict(location='left', label='Jaccard Index'))
plt.savefig('figures/raters-agreement.pdf', bbox_inches='tight')
```
### Total Cells counted by each Rater
```
counts = gt.loc[:, 'AV':'VT'].sum(axis=0)
counts.index = [f'R{i+1}' for i in range(len(counts))]
counts = counts.to_frame(name='count')
counts = counts.reset_index().rename({'index': 'rater'}, axis=1)
ax = sns.barplot(data=counts, x='rater', y='count')
mean = counts.mean().item()
ax.axhline(mean, c='k', ls='--', lw=1.5)
ax.set_ylim(1200, 1650)
sns.despine()
```
# Stage 1: Localization/Counting Models
## Model Evaluation
```
metric_order = ('count/mare', 'count/game-3', 'pdet/f1_score')
model_order = ('S-UNet', 'FRCNN', 'D-CSRNet')
scorer_order = ('simple_regression', 'simple_classification', 'ordinal_regression', 'pairwise_balanced')
runs = {
'S-UNet': list(Path('runs/experiment=perineuronal-nets/segmentation/').glob('unet_*')),
'FRCNN' : list(Path('runs/experiment=perineuronal-nets/detection/').glob('fasterrcnn_*')),
'D-CSRNet': list(Path('runs/experiment=perineuronal-nets/density/').glob('csrnet_*')),
}
def collect(model_name, run, csv_file):
    """Load one run's CSV and tag rows with the model name and patch size.

    The patch size is read from the run's hydra config. Returns an empty
    DataFrame (after printing a warning) when the CSV is missing, so the
    concatenation over all runs still works.
    """
    run = Path(run)
    cfg = OmegaConf.load(run / '.hydra' / 'config.yaml')
    # validation-time patch size recorded in the run's hydra config
    patch_size = cfg['data']['validation']['patch_size']
    csv_path = run / 'test_predictions' / csv_file
    if not csv_path.exists():
        print(f'Skipping not found: {csv_path}')
        return pd.DataFrame()
    data = pd.read_csv(csv_path, index_col=0)
    data['model'] = model_name
    data['patch_size'] = patch_size
    return data
metrics = pd.concat([collect(k, r, 'all_metrics.csv.gz') for k, v in runs.items() for r in v], ignore_index=True)
predictions = pd.concat([collect(k, r, 'all_gt_preds.csv.gz') for k, v in runs.items() for r in v], ignore_index=True)
predictions['agreement'] = predictions['agreement'].fillna(0)
```
## What's the best patch size?
Show trade-off between patch size and detection/counting performance.
```
sns.set_theme(context='talk', style='ticks', font_scale=1.0)
def compare_patch_sizes_plot(data, metric, metric_label, mode, fmt='.3f', ylim=(0,1), legend_bbta=(1,1)):
    """Plot metric-vs-threshold curves, one facet per model, one line per patch size.

    Also marks and annotates each model's best mean operating point
    (minimum when mode == 'min', maximum otherwise). Returns the seaborn
    FacetGrid so callers can chain `.savefig(...)`.
    """
    data = data.rename({'patch_size': 'Patch Size'}, axis=1)
    g = sns.relplot(data=data, kind='line', col='model',
                    x='thr', y=metric, hue='Patch Size', ci=None,
                    facet_kws=dict(margin_titles=True, legend_out=True),
                    aspect=1.2, height=4.5)
    # average over images, then pick the best (Patch Size, thr) per model
    data = data.groupby(['model', 'Patch Size', 'thr']).mean()
    best_points = data.groupby('model')[metric]
    best_points = best_points.idxmin() if mode == 'min' else best_points.idxmax()
    g.set(ylim=ylim, xlim=(0, 1))
    g.set_titles(col_template="{col_name}")
    g.set_axis_labels(x_var='threshold', y_var=metric_label)
    for model, ax in g.axes_dict.items():
        ax.grid(True, which='major')
        ax.grid(True, which='minor', ls='dotted')
        ax.get_xaxis().set_minor_locator(ticker.AutoMinorLocator(2))
        ax.get_yaxis().set_minor_locator(ticker.AutoMinorLocator(2))
        ax.xaxis.set_major_formatter('{x:g}')
        # highlight this facet's best operating point with an 'X' marker
        best_point = data.loc[best_points[model]]
        _, patch_size, thr = best_point.name
        value = best_point[metric]
        print(f'[{metric}] {model} ps={patch_size} thr={thr} value={value:.2f}')
        ax.plot([thr], [value], 'X', c='k', ms=9, mec='k', mfc='w')
        # annotate below the marker when minimizing, above when maximizing
        xytext = (5, -3) if mode == 'min' else (5, 3)
        va='top' if mode == 'min' else 'bottom'
        ax.annotate(f'{value:{fmt}}', xy=(thr, value), xytext=xytext,
                    textcoords='offset points', fontsize='small',
                    va=va, ha='left')
    sns.move_legend(g, "center right", bbox_to_anchor=legend_bbta, frameon=False,
                    labelspacing=0.25, fontsize='small', title_fontsize='small')
    return g
data = metrics[metrics.thr.between(0, 1)]
compare_patch_sizes_plot(data, 'count/mae', 'MAE', 'min', fmt='.2f', ylim=(0, 200), legend_bbta=(.85, .5)) \
.savefig('figures/pnn-mae.pdf', bbox_inches='tight')
compare_patch_sizes_plot(data, 'count/mare', 'MARE', 'min', fmt='.1%', legend_bbta=(.85, .50)) \
.savefig('figures/pnn-mare.pdf', bbox_inches='tight')
compare_patch_sizes_plot(data, 'count/game-3', 'GAME(3)', 'min', fmt='.1f', ylim=(45, 200), legend_bbta=(.85, .5)) \
.savefig('figures/pnn-game3.pdf', bbox_inches='tight')
compare_patch_sizes_plot(data, 'pdet/f1_score', r'$F_1$-score', 'max', fmt='.1%', legend_bbta=(.85, .63))\
.savefig('figures/pnn-f1-score.pdf', bbox_inches='tight')
# PR Curves
sns.set_theme(context='talk', style='ticks')
def plot_pr(data, label, color):
    """Plot the mean precision-recall curve for one (model, patch size) group.

    The curve is the PR averaged over thresholds; the legend shows the mean
    average precision computed per image and then averaged.
    """
    mean_pr = data.groupby('thr').mean().reset_index().sort_values('pdet/recall', ascending=False)
    mean_recalls = mean_pr['pdet/recall'].values
    mean_precisions = mean_pr['pdet/precision'].values
    aps = []
    for group_key, img_group in data.groupby('imgName'):
        img_group = img_group.reset_index().sort_values('pdet/recall', ascending=False)
        recalls = img_group['pdet/recall'].values
        precisions = img_group['pdet/precision'].values
        # step-wise integration of the PR curve, as in sklearn's AP
        average_precision = - np.sum(np.diff(recalls) * precisions[:-1])  # sklearn's ap
        aps.append(average_precision)
    mean_ap = np.mean(aps)
    plt.plot(mean_recalls, mean_precisions, label=f'{label} ({mean_ap:.1%})', color=color)
data = metrics.copy()
data.loc[data['pdet/recall'] == 0, 'pdet/precision'] = 1.0
grid = sns.FacetGrid(data=data, hue='patch_size', col='model', height=4, xlim=(0,1), ylim=(0,1.05), aspect=1.2)
grid.map_dataframe(plot_pr)
grid.set_xlabels('Recall')
grid.set_ylabels('Precision')
grid.set_titles(col_template="{col_name}")
f_scores = np.linspace(0.1, 0.9, num=9)
for ax in grid.axes.flatten():
ax.legend(title='Patch Size', loc='lower left', ncol=1, fontsize='x-small', title_fontsize='x-small')
for i, f_score in enumerate(f_scores):
label_it = i % 2 != 0
ls = '-' if label_it else '--'
lw = 1 if label_it else 0.8
x = np.linspace(0.01, 1)
y = f_score * x / (2 * x - f_score)
l, = ax.plot(x[y >= 0], y[y >= 0], color='gray', alpha=0.2, ls=ls, lw=lw)
if label_it:
ax.annotate(r'$F_1$={0:0.1f}'.format(f_score), xy=(0.85, y[45] + 0.02), fontsize='xx-small')
grid.savefig('figures/pnn-pr-curves.pdf', bbox_inches='tight')
```
### Density-based Metrics
```
density_metrics = pd.concat([collect(k, r, 'dmap_metrics.csv.gz') for k, v in runs.items() for r in v], ignore_index=True)
density_metrics.groupby(['model', 'patch_size']).mean()
```
## How's performance on different agreement levels?
Show the best counting metrics obtained when practitioners build different ground truths by keeping only dots above a given agreement level.
```
# common funcs
from multiprocessing import Pool, cpu_count
def _temp_func(args):
func, name, group = args
return func(group), name
def applyParallel(dfGrouped, func):
    """Parallel replacement for GroupBy.apply.

    Maps *func* over the groups of *dfGrouped* with a process pool (one
    worker per CPU), then re-concatenates the results keyed by group name.
    """
    with Pool(cpu_count()) as p:
        gen = [(func, name, group) for name, group in dfGrouped]
        ret_list = p.map(_temp_func, tqdm(gen))
    retLst, top_index = zip(*ret_list)
    return pd.concat(retLst, keys=top_index)
def _compute(x):
    """Compute detection+counting metrics for one prediction group.

    Module-level so it can be pickled for multiprocessing; image size is
    fixed at 2000x2000 px (the full-frame size used in this notebook).
    """
    x = detection_and_counting(x, image_hw=(2000, 2000))
    return pd.Series(x)
def drop_empty_gp(x):
    """Drop rows where both the groundtruth (X) and prediction (Xp) are missing."""
    keep = x.X.notna() | x.Xp.notna()
    return x[keep]
def compute_metrics_by_agreement(data, grouping, parallel=True):
    """Compute detection/counting metrics at each minimum-agreement level.

    For every min_raters in 1..7, groundtruth dots whose agreement is below
    the threshold are removed (X set to None), the seven filtered copies are
    stacked with a `min_raters` column, and metrics are computed per
    *grouping* group — in parallel via `applyParallel` by default.
    """
    # NOTE(review): unused; presumably a leftover from an earlier version
    is_positive = ~data.Xp.isna()
    filtered = []
    for i in range(1, 8):
        tmp = data.copy()
        # discard GT dots with agreement below the current threshold
        tmp.loc[(tmp.agreement < i), 'X'] = None
        tmp = drop_empty_gp(tmp)
        tmp = tmp.assign(min_raters=i)
        filtered.append(tmp)
    data = pd.concat(filtered, ignore_index=True)
    data = data.groupby(grouping)
    if parallel:
        data = applyParallel(data, _compute)
        data = data.unstack()
        data.index.names = grouping
        return data
    return data.progress_apply(_compute)
def compute_ap(data):
    """Average precision of one image's PR points (sklearn-style step integration).

    Expects 'pdet/recall' and 'pdet/precision' columns, one row per threshold.
    """
    ordered = data.sort_values('pdet/recall', ascending=False)
    recalls = ordered['pdet/recall'].to_numpy()
    precisions = ordered['pdet/precision'].to_numpy()
    return -np.sum(np.diff(recalls) * precisions[:-1])
def build_map_table(data):
    """Build a mean-AP table per (model config, min_raters).

    Computes AP per image via `compute_ap`, then averages over images.
    Optional 'seed' and 'scorer' columns extend the model grouping when present.
    """
    data = data.copy().reset_index()
    model_grouper = ['model', 'patch_size']
    if 'seed' in data.columns:
        model_grouper.append('seed')
    if 'scorer' in data.columns:
        model_grouper.append('scorer')
    # NOTE(review): assumes a 're_thr_quantile' level/column exists after
    # reset_index — confirm for stage-1 inputs
    data['thr'] = data['re_thr_quantile']
    aps = data.groupby(model_grouper + ['min_raters', 'imgName']).apply(compute_ap)
    mean_aps = aps.reset_index().groupby(model_grouper + ['min_raters']).mean()
    mean_aps = mean_aps.rename(columns={0: 'mean_ap'})
    return mean_aps
def build_metrics_table(
    data,
    metric=['count/mare', 'count/game-3', 'pdet/f1_score'],
    best_metric=None,
    mode='min',
    ci=False,
    return_config=False
):
    """Build a long-format table of best metric values per (model config, min_raters).

    For each requested *metric*, the best threshold is selected by optimizing
    the corresponding *best_metric* ('min' or 'max' per *mode*). With ci=True
    each cell becomes a (mean, std) tuple; with return_config=True the chosen
    best-point indices are returned as well.
    NOTE(review): the default `metric` list is never mutated, so the mutable
    default is harmless here.
    """
    # normalize scalar arguments to aligned lists
    metric = metric if isinstance(metric, list) else [metric]
    best_metric = metric if best_metric is None else best_metric
    best_metric = best_metric if isinstance(best_metric, list) else [best_metric] * len(metric)
    mode = mode if isinstance(mode, list) else [mode] * len(metric)
    assert len(metric) == len(best_metric), 'best_metric must be 1 or of the same size of metric'
    assert len(metric) == len(mode), 'mode must be 1 or of the same size of metric'
    data = data.copy().reset_index()
    model_grouper = ['model', 'patch_size']
    if 'seed' in data.columns:
        model_grouper.append('seed')
    if 'scorer' in data.columns:
        model_grouper.append('scorer')
    data['thr'] = data['re_thr_quantile']
    grouped = data.groupby(model_grouper + ['thr', 'min_raters'])
    # mean and std over images (and seeds) for each configuration
    m, s = grouped.mean(), grouped.std()
    tables = []
    configs = []
    for metr, best_metr, mod in zip(metric, best_metric, mode):
        best_points = m # if not ci else (m + s) if mod == 'min' else (m - s)
        best_points = best_points.groupby(model_grouper + ['min_raters'])[best_metr]
        best_points = best_points.idxmin() if mod == 'min' else best_points.idxmax()
        table = m.loc[best_points, [metr]]
        if ci:
            # pair each mean with its std as a (mean, std) tuple
            table = table.combine(s.loc[best_points, [metr]], lambda x,y: x.combine(y, lambda w,z: (w,z)))
        table = table.reset_index().melt(id_vars=model_grouper + ['thr', 'min_raters'], var_name='metric')
        tables.append(table)
        configs.append(best_points)
    table = pd.concat(tables)
    if return_config:
        return table, configs
    return table
# let's compute metrics
p1_metrics = compute_metrics_by_agreement(predictions, ['model', 'patch_size', 'thr', 'imgName', 'min_raters'])
def cimax(args):
    """Pick from a Series of (mean, std) tuples the one with the largest mean.

    The `- std * 0` term keeps the std hook available but contributes nothing.
    """
    best_idx = args.map(lambda pair: pair[0] - pair[1] * 0).idxmax()
    return args.loc[best_idx]
def cimin(args):
    """Pick from a Series of (mean, std) tuples the one with the smallest mean.

    The `+ std * 0` term keeps the std hook available but contributes nothing.
    """
    best_idx = args.map(lambda pair: pair[0] + pair[1] * 0).idxmin()
    return args.loc[best_idx]
metr = ['count/mae', 'count/mare', 'count/game-3', 'pdet/f1_score']
ci = True
modes = ['min', 'min', 'min', 'max']
aggr = [cimin, cimin, cimin, cimax] if ci else modes
p1_table, configs = build_metrics_table(p1_metrics, metric=metr, mode=modes, ci=ci, return_config=True)
prec = 1
xfm = {
'count/mae': lambda x: f'{x[0]:.1f};{x[1]:.1f}',
'count/mare': lambda x: f'{100*x[0]:.{prec}f};{100*x[1]:.{prec}f}',
'count/game-3': lambda x: f'{x[0]:.1f};{x[1]:.1f}',
'pdet/f1_score': lambda x: f'{100*x[0]:.{prec}f};{100*x[1]:.{prec}f}',
} if ci else {
'count/mae': '{:.1f}'.format,
'count/mare': lambda x: f'{100*x:.1f}',
'count/game-3': '{:.1f}'.format,
'pdet/f1_score': lambda x: f'{100*x:.1f}', # '{:.0%}'.format,
}
aggr_per_metric = {k: v for k, v in zip(metr, aggr)}
def take_best(a):
    """Reduce a (model, min_raters, metric) group to its best value.

    The aggregator is chosen per metric from the global `aggr_per_metric`
    map (cimin/cimax when ci tuples are used, 'min'/'max' otherwise).
    """
    return a['value'].aggregate(aggr_per_metric[a.name[-1]])
p1_table = p1_table.groupby(['model', 'min_raters', 'metric']).apply(take_best).rename('value')
p1_table = p1_table.unstack('metric').transform(xfm).rename_axis('metric', axis=1).stack().rename('value')
p1_table = p1_table.reset_index().pivot(index=['metric', 'model'], columns='min_raters', values='value')
p1_table = p1_table.reindex(metr, level=0).reindex(model_order, level=1)
print(p1_table.to_latex(escape=False, multirow=True))
display(p1_table)
p1_table = p1_table[[1,4,5,7]]
print(p1_table.to_latex(escape=False, multirow=True))
display(p1_table)
def find_best_worst_image(x, metric, mode):
    """Return a Series with the imgName of the best and worst image for *metric*.

    mode='min' means lower values are better; anything else means higher is better.
    """
    frame = x.reset_index()
    values = frame[metric]
    lo, hi = values.idxmin(), values.idxmax()
    best_idx, worst_idx = (lo, hi) if mode == 'min' else (hi, lo)
    return pd.Series({
        'best': frame.loc[best_idx, 'imgName'],
        'worst': frame.loc[worst_idx, 'imgName'],
    })
best_metric = 'pdet/f1_score'
best_metric_configs = configs[-1]
mode = 'max'
best_worst_images = p1_metrics \
.reset_index().set_index(['model', 'patch_size', 'thr', 'min_raters']) \
.loc[best_metric_configs] \
.groupby(['model', 'patch_size', 'thr', 'min_raters']).apply(find_best_worst_image, best_metric, mode) \
.mode()
tmp = best_metric_configs.reset_index()
selector = (((tmp.model == 'S-UNet') & (tmp.patch_size == 320)) |
((tmp.model == 'FRCNN') & (tmp.patch_size == 640)) |
((tmp.model == 'D-CSRNet') & (tmp.patch_size == 640)) )
selector = selector & tmp.min_raters.isin([1, 7])
best_configs = tmp[selector][best_metric].values
best_configs
indexed_preds = predictions.set_index(['model', 'patch_size', 'thr', 'imgName'])
for img in ('best', 'worst'):
imgName = best_worst_images.loc[0, img]
image = imread('data/perineuronal-nets/test/fullFrames/' + imgName)
image = matplotlib.cm.viridis(image)[:,:,:3]
image = resize(image, (500, 500))
image = (255 * image).astype(np.uint8)
image = image[:250, 125:375, :]
imsave(f'figures/{img}_clean.png', image)
for model, patch_size, thr, min_raters in best_configs:
preds = indexed_preds.loc[(model, patch_size, thr, imgName)].reset_index().copy()
preds.loc[(preds.agreement < min_raters), ['X', 'Y']] = None
preds = drop_empty_gp(preds)
preds.loc[:, ['X', 'Y']] /= 4
preds.loc[:, ['Xp', 'Yp']] /= 4
sel = ( (preds.X.isna() | (preds.X.between(125, 375) & preds.Y.between(0, 250))) |
(preds.Xp.isna() | (preds.Xp.between(125, 375) & preds.Yp.between(0, 250))) )
preds = preds[sel]
preds = drop_empty_gp(preds)
preds.loc[:, 'X'] -= 125
preds.loc[:, 'Xp'] -= 125
drawn = draw_groundtruth_and_predictions(image, preds, radius=5)
fname = f'figures/{img}_img_{model.lower()}_{patch_size}_raters_{min_raters}.png'
imsave(fname, drawn)
gt_only_img = f'figures/{img}_gt_raters_{min_raters}.png'
if not Path(gt_only_img).exists():
gt_sel = ( (predictions.imgName == imgName)
& (~predictions.agreement.isna())
& (predictions.agreement >= min_raters)
)
gt_yx = predictions[gt_sel][['Y', 'X']].drop_duplicates().dropna()
gt_yx.loc[:, 'X'] = (gt_yx.X / 4) - 125
gt_yx.loc[:, 'Y'] = (gt_yx.Y / 4)
gt_yx = gt_yx[gt_yx.X.between(0, 250) & gt_yx.Y.between(0, 250)].values
gt_only = draw_points(image, gt_yx, radius=5, marker='square', color=[255,255,0]) # YELLOW
imsave(gt_only_img, gt_only)
```
## How much does rescoring (stage 2) increase performance?
Compare counting and detection metrics of stage-1-only models against models with stage-2 rescoring refinement.
```
runs_score_path = Path('runs_score')
runs_score = {
#'AR': runs_score_path.glob('method=simple_regression,seed=*'),
#'AC': runs_score_path.glob('method=simple_classification,seed=*'),
#'OR': runs_score_path.glob('method=ordinal_regression,seed=*'),
#'RL': runs_score_path.glob('method=pairwise_balanced,seed=*'),
'Agreement Regression': runs_score_path.glob('method=simple_regression,seed=*'),
'Agreement Classification': runs_score_path.glob('method=simple_classification,seed=*'),
'Ordinal Regression': runs_score_path.glob('method=ordinal_regression,seed=*'),
'Rank Learning': runs_score_path.glob('method=pairwise_balanced,seed=*'),
}
def collect_scores(model_name, run):
    """Load one rescoring run's predictions, tagging model name and seed.

    The seed is parsed from the run directory name (format '...seed=<int>').
    """
    run = Path(run)
    csv_path = run / 'test_predictions' / 'all_gt_preds.csv.gz'
    data = pd.read_csv(csv_path, index_col=0)
    data['model'] = model_name
    data['seed'] = int(run.name.split('=')[-1])
    return data
score_data = [collect_scores(k, run) for k, runs in runs_score.items() for run in runs]
score_data = pd.concat(score_data, ignore_index=True)
test_images = score_data.groupby('seed').imgName.unique().to_dict()
# best configs for maximum recall
def max_recall(data):
    """Index of the configuration with the highest recall (ties broken by precision)."""
    ranked = data.sort_values(['pdet/recall', 'pdet/precision'], ascending=[False, False])
    return ranked.index.values[:1]
p1_metrics.xs(1, level='min_raters').groupby(['model', 'patch_size', 'thr']).mean().groupby('model').apply(max_recall)
```
### Samples per Scorer
```
dataset = PerineuronalNetsRankDataset(mode='patches')
sns.set_theme(context='notebook', style='ticks', font_scale=1)
p2i = dataset.annot.reset_index().set_index(['imgName','X','Y'])
so = ('Pair-wise Regression', 'Ordinal Regression', 'Agreement Classification', 'Agreement Regression')
# For each (model, agreement) pair, take the 10 highest-scoring detections and
# map their (imgName, X, Y) coordinates back to dataset sample indices via p2i.
# NOTE: the original had a stray trailing '\' after the final .apply(...), which
# glued the following `nr=1` statement onto the expression (SyntaxError).
sample_idx = rank_data.groupby(['model', 'agreement'])\
    .apply(lambda x: x.nlargest(10, 'score')).droplevel(-1)\
    .apply(lambda x: p2i.loc[tuple(x[['imgName', 'X', 'Y']].values), 'index'], axis=1)
# single-row montages
nr = 1
fig, axes = plt.subplots(7, len(so), figsize=(17,4))
for i, scorer in enumerate(so):
axes[0, i].set_title(scorer)
for j, agreement in enumerate(range(7, 0, -1)):
samples = sample_idx.loc[(scorer, agreement)]
cell_images = [dataset[i][0] for i in samples]
cell_images = [matplotlib.cm.viridis(c) for c in cell_images]
cell_images = np.stack(cell_images)[:,:,:,:3]
image = montage(cell_images, grid_shape=(nr, len(cell_images) / nr), padding_width=5, fill=(1, 1, 1), multichannel=True)
axes[j, i].imshow(image)
axes[j, i].set_axis_off()
for j, agreement in enumerate(range(7, 0, -1)):
axes[j, 0].set_ylabel(str(agreement))
plt.subplots_adjust(wspace=0.1, hspace=0)
```
### Stage-1 Only Metrics
```
# get best patch_size per method, all thresholds
tmp = p1_metrics.reset_index()
selector = (((tmp.model == 'S-UNet') & (tmp.patch_size == 320)) |
((tmp.model == 'FRCNN') & (tmp.patch_size == 640)) |
((tmp.model == 'D-CSRNet') & (tmp.patch_size == 640)) )
tmp = tmp[selector]
# keep only test set
tmp = pd.concat([tmp[tmp.imgName.isin(images)].assign(seed=seed) for seed, images in test_images.items()], ignore_index=True)
p1_test_metrics = tmp.set_index(p1_metrics.index.names + ['seed'])
p1t_table = build_metrics_table(p1_test_metrics, metric=metr, mode=modes)
p1t_table = p1t_table.groupby(['model', 'patch_size', 'min_raters', 'metric']).value.aggregate(['mean', 'std'])
p1t_table = p1t_table.unstack('metric')
pct_f = lambda x: f'{100*x:.1f}'
flo_f = '{:.1f}'.format
p1t_table = p1t_table.transform({
('mean', 'count/mae' ): flo_f,
('std' , 'count/mae' ): flo_f,
('mean', 'count/mare' ): pct_f,
('std' , 'count/mare' ): pct_f,
('mean', 'count/game-3' ): flo_f,
('std' , 'count/game-3' ): flo_f,
('mean', 'pdet/f1_score'): pct_f,
('std' , 'pdet/f1_score'): pct_f,
})
p1t_table = p1t_table['mean'] + ' $\pm$ ' + p1t_table['std']
p1t_table = p1t_table.rename_axis('metric', axis=1).stack().rename('value').reset_index()
p1t_table = p1t_table.pivot(index=['metric', 'model'], columns='min_raters', values='value')
p1t_table = p1t_table.reindex(metr, level=0).reindex(model_order, level=1)
p1t_table
display(p1_table, p1t_table)
p1_map_table = build_map_table(p1_metrics)\
.unstack('min_raters')\
.applymap(lambda x: f'{100*x:.1f}')
p1t_map_table = build_map_table(p1_test_metrics)\
.reset_index()\
.groupby(['model', 'patch_size', 'min_raters'])\
.mean_ap.aggregate(['mean', 'std'])\
.applymap(lambda x: f'{100*x:.1f}')
p1t_map_table = p1t_map_table['mean'] + ' $\pm$ ' + p1t_map_table['std']
p1t_map_table = p1t_map_table\
.rename('value')\
.reset_index()\
.pivot(index=['model', 'patch_size'], columns='min_raters', values='value')
display(p1_map_table, p1t_map_table)
```
### Score vs Agreement Correlation
```
# get best config per model, maximum recall
selector = (((predictions.model == 'S-UNet') & (predictions.patch_size == 320) & (predictions.thr == 0.1)) |
((predictions.model == 'FRCNN') & (predictions.patch_size == 640) & (predictions.thr == 0.0)) |
((predictions.model == 'D-CSRNet') & (predictions.patch_size == 640) & (predictions.thr == 0.0)))
# keep only test sets
keep = np.unique(np.concatenate(list(test_images.values()))).tolist()
selector = selector & predictions.imgName.isin(keep)
p1_data = predictions[selector].copy()
p1_data['agreement'] = p1_data['agreement'].fillna(0)
p1_data['seed'] = 23
p2_data = score_data.copy()
p2_data['patch_size'] = -1
rdata = pd.concat([p1_data, p2_data], ignore_index=True)
def normalize_scores(data):
    """Z-score the 'score' column of a group and return the group.

    NOTE(review): mutates the passed group in place; applied per
    (model, patch_size, seed) via groupby().apply above.
    """
    data['score'] = StandardScaler().fit_transform(data['score'].values.reshape(-1, 1)) # * 0.5 + 0.5
    return data
rdata = rdata.groupby(['model', 'patch_size', 'seed']).apply(normalize_scores)
rdata
sns.set_theme(context='talk', style='ticks', font_scale=1.0)
plot_data = rdata[~rdata.score.isna() & (rdata.agreement > 0)].copy()
plot_data['agreement'] = plot_data.agreement.astype(int)
plot_data = plot_data[['score', 'agreement', 'model']]
order = [
'S-UNet',
'FRCNN',
'D-CSRNet',
'Agreement Regression',
'Agreement Classification',
'Ordinal Regression',
'Rank Learning',
#'AR',
#'AC',
#'OR',
#'RL',
]
fig, ax = plt.subplots(figsize=(16, 6))
width = 0.8
sns.boxenplot(data=plot_data, y='score', x='model', hue='agreement', order=order, palette='rocket', ax=ax, width=width, showfliers=False)
ax.set_ylim([-3.5, 2.5])
ax.set_yticks(range(-3, 3))
ax.set_yticklabels(range(-3, 3))
ax.axhline(xmax=.95, c='k', zorder=-10, lw=1.5)
def corr_coeff(data, **kws):
    """Pearson correlation between 'score' and 'agreement' over rows where both are present.

    Extra keyword arguments are accepted (seaborn-style callback) but unused.
    """
    valid = data.score.notna() & data.agreement.notna()
    scores = data.loc[valid, 'score']
    agreements = data.loc[valid, 'agreement']
    r, _ = scipy.stats.pearsonr(scores, agreements)
    return r
def lin_fit(data, **kws):
    """Fit a class-balanced linear trend of score vs agreement.

    Subsamples each agreement group 50 times down to the size of the smallest
    group (so every agreement level weighs equally), fits a degree-1
    polynomial on each subsample, and returns a poly1d built from the
    averaged coefficients.
    NOTE(review): sampling is unseeded, so results vary from run to run.
    """
    sel = (~data.score.isna()) & (~data.agreement.isna())
    p = []
    grouped = data[sel].groupby('agreement')
    # size of the smallest agreement group: the balanced sample size
    min_num = grouped.model.count().min()
    for _ in range(50):
        y, x = grouped.sample(min_num)[['score', 'agreement']].values.T
        z = np.polyfit(x, y, 1)
        p.append(z)
    p = np.mean(p, axis=0)
    p = np.poly1d(p)
    return p
grouped = plot_data.groupby('model')
corrs = grouped.apply(corr_coeff)
linfits = grouped.apply(lin_fit)
display(corrs)
labels = [l.get_text() for l in ax.get_xticklabels()]
labels = ['{}\n$r$={:.2f}'.format(l.replace(' ', '\n'), corrs[l]) for l in labels]
ax.set_xticklabels(labels)
ax.set_xlabel(None)
ax.set_ylabel('z-score')
ax.grid(which='major', axis='y', ls='-', lw=.75, zorder=-10)
ax.tick_params(axis='x', color='white', labelbottom=False, labeltop=True)
offsets = (0.18, 0.18, 0.1, 0.1, 0.1, 0.1, 0.1)
for label, offset in zip(ax.get_xticklabels(), offsets):
label.set_y(label.get_position()[1] - offset)
handles, labels = ax.get_legend_handles_labels()
legend_order = (1, 2, 3, 4, 5, 6, 7)
handles = [handles[i-1] for i in legend_order]
labels = [labels[i-1] for i in legend_order]
ax.legend(handles, labels, title='agreement',
ncol=7, loc='lower right', bbox_to_anchor=(1, 0), # bbox_to_anchor=(.12,.12,.77,1),
fontsize='x-small', title_fontsize='x-small',
labelspacing=0.2, columnspacing=1, framealpha=1, fancybox=False)
for i, line in enumerate(linfits[order]):
x = [i - 4 * width / 7, i + 4 * width / 7]
y = line([0, 8])
ax.plot(x, y, c='w', ls='--', path_effects=[pe.Stroke(linewidth=4, foreground='k'), pe.Normal()])
sns.despine(bottom=True)
plt.savefig('figures/score-vs-agreement.pdf', bbox_inches='tight')
sns.set_theme(context='notebook', style='ticks', font_scale=1.5)
plot_data = rdata.fillna({'agreement': 0, 'score': -100000}).groupby(['model', 'X', 'Y', 'agreement']).score.mean().reset_index()
sorted_samples = plot_data.groupby('model')\
.apply(lambda x: \
x.sort_values(['score', 'Y', 'X'], ascending=[False, True, True]).agreement)
sorted_samples = sorted_samples.droplevel(-1).reset_index()
sorted_samples['model'] = sorted_samples['model'].str.replace(' ', '\n')
order = [
'S-UNet',
'FRCNN',
'D-CSRNet',
'Agreement\nRegression',
'Agreement\nClassification',
'Ordinal\nRegression',
'Rank\nLearning',
]
def heatmap_plot(data, color, **kws):
    """Render a group's agreement values as a 32-column heatmap strip.

    Pads the flat agreement vector to a multiple of `rows`, reshapes it to
    (n, rows), and masks the padded cells. `color` is accepted because
    FacetGrid passes it, but unused.
    """
    data = data.agreement.values
    rows = 32
    # pad so the vector reshapes cleanly into full rows of length `rows`
    pad = (-data.size) % rows
    mask = np.zeros_like(data)
    mask = np.pad(mask, (0, pad), constant_values=1).reshape(-1, rows)
    # padded cells get sentinel value 100 but are hidden by the mask anyway
    data = np.pad(data, (0, pad), constant_values=100).reshape(-1, rows)
    sns.heatmap(ax=plt.gca(), data=data, mask=mask, **kws)
g = sns.FacetGrid(data=sorted_samples, col='model', aspect=.45, height=5, col_order=order)
cbar_ax = g.fig.add_axes([.99, .2, .01, .60]) # create a colorbar axes
g = g.map_dataframe(heatmap_plot, vmin=0, vmax=7, square=True, antialiased=True, rasterized=True,
cbar_ax=cbar_ax, cbar_kws=dict(
ticks=range(8),
ticklocation='right', orientation='vertical',
label='agreement',
))
for ax in g.axes.flatten():
ax.axis('off')
g.set_titles(col_template="{col_name}")
g.tight_layout()
g.fig.subplots_adjust(wspace=.05)#, hspace=0.05)
g.savefig('figures/score-gradient.pdf', bbox_inches='tight')
```
### Stage-2 Metrics
```
rescored_runs = {
'S-UNet': [
('runs/experiment=perineuronal-nets/segmentation/unet_320', 0.1),
],
'FRCNN' : [
('runs/experiment=perineuronal-nets/detection/fasterrcnn_640', 0.00),
],
'D-CSRNet': [
('runs/experiment=perineuronal-nets/density/csrnet_640', 0.00),
],
}
def collect_rescored(model_name, run, thr):
    """Load all rescored prediction CSVs of one run at detection threshold *thr*.

    File names follow 'all_gt_preds_rescored_<method>-seed<int>_imgsplit.csv.gz';
    the rescoring method and seed are parsed from the name. Rows are kept only
    for the seed's test images (global `test_images`). Returns one DataFrame
    tagged with model, patch_size, scorer and seed.
    """
    run = Path(run)
    cfg = OmegaConf.load(run / '.hydra' / 'config.yaml')
    patch_size = cfg['data']['validation']['patch_size']
    preds = []
    csv_paths = (run / 'test_predictions').glob('all_gt_preds_rescored_*seed*_imgsplit.csv.gz')
    for csv_path in csv_paths:
        # strip the fixed prefix/suffix, leaving '<method>-seed<int>'
        method_and_seed = csv_path.name[len('all_gt_preds_rescored_'):-len('_imgsplit.csv.gz')]
        rescore_method, seed = method_and_seed.split('-')
        seed = int(seed[len('seed'):])
        data = pd.read_csv(csv_path)
        data = data[(data.thr == thr) & (data.imgName.isin(test_images[seed]))]
        data['model'] = model_name
        data['patch_size'] = patch_size
        data['scorer'] = rescore_method
        data['seed'] = seed
        preds.append(data)
    return pd.concat(preds, ignore_index=True)
rescored_predictions = pd.concat([collect_rescored(k, r, t) for k, v in rescored_runs.items() for r, t in v], ignore_index=True)
rescored_predictions['agreement'] = rescored_predictions.agreement.fillna(0)
def apply_percentile_thresholds(gp):
    """Sweep 201 quantile-based rescore thresholds over one prediction group.

    For each threshold, predictions whose rescore falls below it (or is NaN)
    are dropped (Xp set to None), rows with neither GT nor prediction are
    removed, and the thresholded copy is tagged with 're_thr' and
    're_thr_quantile'. An extra final threshold above the maximum (quantile
    sentinel 2.0) drops every prediction. Returns all copies stacked.
    """
    quantiles = np.linspace(0, 1, 201)
    if gp.scorer.iloc[0] == 'no_rescore':
        # scores are already in [0, 1]: use the quantile grid directly
        re_thrs = quantiles.tolist()
    else:
        re_thrs = gp.rescore.quantile(quantiles).tolist()
    quantiles = quantiles.tolist()
    # sentinel point above all scores: discards every prediction
    quantiles.append(2.)
    re_thrs.append(re_thrs[-1] + 1)
    all_thresholded = []
    for re_thr, q in zip(re_thrs, quantiles):
        thresholded = gp.copy()
        thresholded.loc[(gp.rescore < re_thr) | gp.rescore.isna(), 'Xp'] = None
        thresholded = thresholded[~(thresholded.X.isna() & thresholded.Xp.isna())]
        thresholded['re_thr'] = re_thr
        thresholded['re_thr_quantile'] = q
        all_thresholded.append(thresholded)
    return pd.concat(all_thresholded, ignore_index=True)
rescored_predictions = rescored_predictions.groupby(['patch_size', 'model', 'seed', 'thr', 'scorer'])\
.progress_apply(apply_percentile_thresholds)\
.reset_index(drop=True)
p2_metrics = compute_metrics_by_agreement(
rescored_predictions,
['model', 'patch_size', 'scorer', 're_thr_quantile', 'imgName', 'min_raters', 'seed']
)
p1t_mean_ap = build_map_table(p1_test_metrics)
p2_mean_ap = build_map_table(p2_metrics)
#display(p1t_mean_ap, p2_mean_ap)
tmp_p1 = pd.concat({'-': p1t_mean_ap}, names=['scorer']).reset_index().set_index(p2_mean_ap.index.names)
combined = pd.concat((p2_mean_ap, tmp_p1))\
.groupby(['model', 'patch_size', 'scorer', 'min_raters']).mean()
diff = combined - combined.xs('-', level=2)
def fmt(absolute, difference):
    """Format a mean-AP value with its difference from the baseline, e.g. '0.50 (0.10)'."""
    return '{:.2f} ({:.2f})'.format(absolute, difference)
def styling(x):
    """CSS background for a '<abs> (<diff>)' cell: green if diff > 0, red if < 0."""
    delta = float(x.split(' ')[1].strip('()'))
    if delta > 0:
        color = '#ADFFAD'
    elif delta < 0:
        color = '#ffadad'
    else:
        color = 'none'
    return f'background-color: {color}'
combined.combine(diff, lambda x, y: x.combine(y, fmt)) \
.reindex(model_order, level=0).reindex(('-',) + scorer_order, level=2) \
.unstack('min_raters') \
.style.applymap(styling)
metr = ['count/mae', 'count/mare', 'count/game-3', 'pdet/f1_score']
modes = ['min', 'min', 'min', 'max']
# modes = ['min', 'min', 'max']
p1t_table = build_metrics_table(p1_test_metrics, metric=metr, mode=modes).assign(scorer='-')
p2_table = build_metrics_table(p2_metrics, metric=metr, mode=modes)
#p1t_table = pd.concat({'-': p1t_table}, names=['scorer']).reset_index().set_index(p2_table.index.names)
combined = pd.concat((p1t_table, p2_table), ignore_index=True)\
.groupby(['model', 'patch_size', 'scorer', 'min_raters', 'metric']).value.mean().rename('value')\
.reset_index().set_index(['metric', 'model', 'patch_size', 'scorer', 'min_raters'])
#combined
diff = combined - combined.xs('-', level='scorer')
# display(combined, combined.xs('-', level='scorer'))
def fmt(absolute, difference):
    """Render a metric value with its delta vs the no-rescore baseline, e.g. '1.00 (-0.25)'."""
    return '%.2f (%.2f)' % (absolute, difference)
def styling_up(x):
    """Styling for higher-is-better metrics: green for positive diff, red for negative."""
    delta = float(x.split(' ')[1].strip('()'))
    if delta > 0:
        return 'background-color: #ADFFAD'
    if delta < 0:
        return 'background-color: #ffadad'
    return 'background-color: none'
def styling_down(x):
    """Cell styler for lower-is-better metrics: green when the delta is negative."""
    delta = float(x.split(' ')[1].strip('()'))
    if delta < 0:
        color = '#ADFFAD'
    elif delta > 0:
        color = '#ffadad'
    else:
        color = 'none'
    return f'background-color: {color}'
# Dispatch table: error metrics (mae/mare/game) colour improvements downward,
# detection F1 colours improvements upward.
styles = {
    'count/mae': styling_down,
    'count/mare': styling_down,
    'count/game-3': styling_down,
    'pdet/f1_score': styling_up,
}
def styling(x):
    """Style every cell of row *x* with the styler registered for its metric.

    x.name[0] is the metric level of the row's MultiIndex label.
    """
    cell_styler = styles[x.name[0]]
    return list(map(cell_styler, x.values))
# Render the combined metric table with per-metric conditional colouring.
table = combined.combine(diff, lambda x, y: x.combine(y, fmt)) \
    .reindex(model_order, level='model')\
    .reindex(('-',) + scorer_order, level='scorer') \
    .unstack('min_raters')\
    .style.apply(styling, axis=1)
display(table)
def latex_fmt(a, d):
    """LaTeX cell text: the value followed by \\diff{delta}, both with two decimals."""
    return '%.2f\\diff{%.2f}' % (a, d)
# LaTeX export of the count/mae rows for selected min_raters columns (1, 4, 5, 7),
# with scorer names abbreviated for the paper.
table = combined.combine(diff, lambda x, y: x.combine(y, latex_fmt))\
    .reindex(model_order, level='model')\
    .reindex(('-',) + scorer_order, level='scorer') \
    .unstack('min_raters')\
    .droplevel('patch_size', axis=0)\
    .droplevel(0, axis=1)\
    .loc['count/mae', [1, 4, 5,7]]\
    .rename({
        '-': '',
        'simple_regression': 'AR', #'Agreement Regression',
        'simple_classification': 'AC', #'Agreement Classification',
        'ordinal_regression': 'OR', #'Ordinal Regression',
        'pairwise_balanced': 'RL', #'Rank Learning',
    }, axis=0, level='scorer')
# Flatten the (model, scorer) index into one 'model + scorer' label.
table = table.set_index(table.index.map(lambda x: x[0] + (' + ' + x[1] if x[1] else '')))
print(table.to_latex(escape=False))
table
```
| github_jupyter |
```
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
import seaborn as sns
from os.path import join
plt.style.use(["seaborn", "thesis"])
plt.rc("figure", figsize=(8,4))
model_path = "../../thesis/models/DescriptorHomo/HH/"
```
# Molecules
```
from SCFInitialGuess.utilities.dataset import ScreenedData
# Learn the density matrix ("P") in the 6-311++g** basis; screen within r_max=10.
target = "P"
basis = "6-311++g**"
data = ScreenedData(r_max=10)
data.include(data_path = "../../thesis/dataset/MethanT/", postfix = "MethanT", target=target)
from SCFInitialGuess.utilities.dataset import ScreenedData

# Learn the density matrix ("P"); screen atom pairs within r_max=10.
target = "P"
data = ScreenedData(r_max=10)

# Folder names under ../../thesis/dataset/; each include's postfix equals its
# folder name, so the twenty copy-pasted calls collapse into a single loop.
dataset_names = [
    "MethanT", "MethanT2", "MethanT3", "MethanT4",
    "EthanT", "EthanT2", "EthanT3", "EthanT4", "EthanT5", "EthanT6",
    "EthenT", "EthenT2", "EthenT3", "EthenT4", "EthenT5", "EthenT6",
    "EthinT", "EthinT2", "EthinT3",
]
for name in dataset_names:
    data.include(
        data_path="../../thesis/dataset/" + name + "/",
        postfix=name,
        target=target,
    )
#data.include(data_path = "../../dataset/QM9/", postfix = "QM9-300")
```
# Descriptor
```
from SCFInitialGuess.descriptors.high_level import AtomicNumberWeighted
from SCFInitialGuess.descriptors.cutoffs import BehlerCutoff1
from SCFInitialGuess.descriptors.models import RADIAL_GAUSSIAN_MODELS, make_uniform
from SCFInitialGuess.descriptors.coordinate_descriptors import \
    Gaussians, SPHAngularDescriptor
import pickle
# 25 uniformly placed radial Gaussians up to r=5 with widths eta in [20, 60].
model = make_uniform(25, 5, eta_max=60, eta_min=20)
# Atomic-number-weighted descriptor: radial Gaussians plus a spherical-harmonics
# angular part (order 3), smoothed by a Behler-style cutoff at r=5.
descriptor = AtomicNumberWeighted(
    Gaussians(*model),
    SPHAngularDescriptor(3),
    BehlerCutoff1(5)
)
# Persist the descriptor next to the model weights so inference matches training.
# NOTE(review): the file handle passed to pickle.dump is never closed; consider
# a `with open(...)` block.
pickle.dump(descriptor, open(model_path + "descriptor.dump", "wb"))
# Show radial / angular / total descriptor dimensions (notebook cell output).
descriptor.radial_descriptor.number_of_descriptors, descriptor.angular_descriptor.number_of_descriptors, descriptor.number_of_descriptors
```
# Package Dataset
```
from SCFInitialGuess.utilities.dataset import make_block_dataset, extract_HOMO_block_dataset_pairs
# Build the H-H block dataset: descriptor pairs mapped to target-matrix blocks.
dataset = make_block_dataset(
    descriptor,
    data.molecules,
    data.T,
    "H",
    extract_HOMO_block_dataset_pairs
)
# Persist the input normalisation so the model can be applied to new data later.
np.save(model_path + "normalisation.npy", (dataset.x_mean, dataset.x_std))
# Split sizes (notebook cell output).
len(dataset.training[0]), len(dataset.validation[0]), len(dataset.testing[0]),
from SCFInitialGuess.utilities.constants import number_of_basis_functions as N_BASIS
# Basis-function count for hydrogen in the chosen basis; dim_triu is the size of
# the upper triangle of a symmetric dim x dim block.
species = "H"
dim = N_BASIS[basis][species]
dim_triu = dim * (dim + 1) // 2
```
# NN Utils
```
#keras.backend.clear_session()
#activation = "elu"
#learning_rate = 1e-5
# Shared weight initializer for all dense layers. NOTE(review): the name is
# misspelled ("intializer") but kept, since make_model below refers to it.
intializer = keras.initializers.TruncatedNormal(mean=0.0, stddev=0.01)
def make_model(
    structure,
    input_dim,
    output_dim,
    activation="elu",
    learning_rate=1e-3
    ):
    """Build and compile a fully connected regression network.

    structure: iterable of hidden-layer widths (first entry is the input layer).
    input_dim / output_dim: descriptor-input size and flattened-block output size.
    Returns a compiled keras.Sequential (MSE loss, Adam optimizer).
    """
    net = keras.Sequential()
    # First hidden layer fixes the input dimension.
    net.add(keras.layers.Dense(
        structure[0],
        activation=activation,
        input_dim=input_dim,
        kernel_initializer=intializer
    ))
    # Remaining hidden layers carry an L2 weight penalty.
    for width in structure[1:]:
        net.add(keras.layers.Dense(
            width,
            activation=activation,
            kernel_initializer=intializer,
            kernel_regularizer=keras.regularizers.l2(5e-3)
        ))
    # Linear read-out layer.
    net.add(keras.layers.Dense(output_dim))
    net.compile(
        optimizer=keras.optimizers.Adam(learning_rate),
        loss='MSE',
        metrics=['mse']
    )
    return net
# Stop once validation MSE has not improved by min_delta for `patience` epochs.
early_stopping = keras.callbacks.EarlyStopping(
    monitor="val_mean_squared_error",
    min_delta=1e-8,
    patience=20,
    verbose=1
)
# Shrink the learning rate tenfold whenever validation MSE plateaus.
reduce_lr = keras.callbacks.ReduceLROnPlateau(
    monitor='val_mean_squared_error',
    factor=0.1,
    patience=3,
    verbose=1,
    mode='auto',
    min_delta=1e-6,
    cooldown=2,
    min_lr=1e-10
)
# Upper bound on epochs; in practice early_stopping terminates training first.
epochs = 1000
def train_model(model, dataset, filepath=None, learning_rate=1e-4, log_dir=None):
    """Fit *model* on *dataset* with early stopping and LR reduction.

    filepath: if given, the best model (by validation MSE) is checkpointed there.
    log_dir: if given, TensorBoard logging is enabled.
    Returns the keras History object of the fit run.
    """
    # Assemble callbacks; the optional ones are only added when configured.
    # (BUG FIX: the original referenced `checkpoint` in the callbacks list
    # unconditionally, which raised a NameError whenever filepath was None.)
    callbacks = [early_stopping, reduce_lr]
    if log_dir is not None:
        callbacks.append(keras.callbacks.TensorBoard(
            log_dir=log_dir,
            histogram_freq=0,
            batch_size=32,
            #update_freq='epoch'
        ))
    if filepath is not None:
        callbacks.append(keras.callbacks.ModelCheckpoint(
            filepath,
            monitor='val_mean_squared_error',
            verbose=1,
            save_best_only=True,
            save_weights_only=False,
            mode='auto',
            period=1
        ))
    keras.backend.set_value(model.optimizer.lr, learning_rate)
    history = model.fit(
        x=dataset.training[0],
        y=dataset.training[1],
        epochs=epochs,
        shuffle=True,
        validation_data=dataset.validation,
        verbose=1,
        callbacks=callbacks
    )
    # BUG FIX: the original ended with `return error`, but `error` was never
    # defined (the evaluation code computing it was commented out), so the
    # function always raised a NameError after training. Return the fit
    # history instead. The dead `while True:` loop (it returned on the first
    # pass) and the unused empty matplotlib figure were removed as well.
    return history
```
# Training
```
# Sanity check: input/output shapes versus descriptor and block dimensions.
dataset.training[0].shape, dataset.training[1].shape
descriptor.number_of_descriptors, dim**2
# Two hidden layers; inputs are descriptor pairs, outputs full dim x dim blocks.
structure = [100, 70]
keras.backend.clear_session()
model = make_model(
    structure=structure,
    input_dim=descriptor.number_of_descriptors * 2,
    output_dim=dim**2,
)
model.summary()
#i+=1
train_model(
    model,
    dataset,
    model_path + "model.h5",
    learning_rate=1e-3,
    #log_dir=None#"./logs/H/" + name + "_" + "x".join(list(map(str, structure))) + "_" + str(i)
)
```
| github_jupyter |
# 使用GPU来计算
【注意】运行本教程需要GPU。没有GPU的同学可以大致理解下内容,至少是`context`这个概念,因为之后我们也会用到。但没有GPU不会影响运行之后的大部分教程(好吧,还是有点点,可能运行会稍微慢点)。
前面的教程里我们一直在使用CPU来计算,因为绝大部分的计算设备都有CPU。但CPU的设计目的是处理通用的计算,例如打开浏览器和运行Jupyter,它一般只有少数的区域用于复杂数值计算,例如`nd.dot(A, B)`。对于复杂的神经网络和大规模的数据来说,单块CPU可能不够给力。
常用的解决办法是要么使用多台机器来协同计算,要么使用数值计算更加强劲的硬件,或者两者一起使用。本教程关注使用单块Nvidia GPU来加速计算,更多的选项例如多GPU和多机器计算则留到后面。
首先需要确保至少有一块Nvidia显卡已经安装好了,然后下载安装显卡驱动和[CUDA](https://developer.nvidia.com/cuda-downloads)(推荐下载8.0,CUDA自带了驱动)。完成后应该可以通过`nvidia-smi`查看显卡信息了。(Windows用户需要设一下PATH:`set PATH=C:\Program Files\NVIDIA Corporation\NVSMI;%PATH%`)。
```
!nvidia-smi
```
接下来要确认正确安装了`mxnet`的GPU版本。具体来说是卸载了`mxnet`(`pip uninstall mxnet`),然后根据CUDA版本安装`mxnet-cu75`或者`mxnet-cu80`(例如`pip install --pre mxnet-cu80`)。
使用pip来确认下:
```
import pip
# Print installation info for each candidate MXNet build to see which is present.
# NOTE(review): pip's internal `pip.main` API was removed in pip >= 10; prefer
# `!pip show <pkg>` or `subprocess.run([sys.executable, '-m', 'pip', ...])`.
for pkg in ['mxnet', 'mxnet-cu75', 'mxnet-cu80']:
    pip.main(['show', pkg])
```
## Context
MXNet使用Context来指定使用哪个设备来存储和计算。默认会将数据开在主内存,然后利用CPU来计算,这个由`mx.cpu()`来表示。GPU则由`mx.gpu()`来表示。注意`mx.cpu()`表示所有的物理CPU和内存,意味着计算上会尽量使用所有的CPU核。但`mx.gpu()`只代表一块显卡和其对应的显卡内存。如果有多块GPU,我们用`mx.gpu(i)`来表示第*i*块GPU(*i*从0开始)。
```
import mxnet as mx
[mx.cpu(), mx.gpu(), mx.gpu(1)]
```
## NDArray的GPU计算
每个NDArray都有一个`context`属性来表示它存在哪个设备上,默认会是`cpu`。这是为什么前面每次我们打印NDArray的时候都会看到`@cpu(0)`这个标识。
```
from mxnet import nd
x = nd.array([1,2,3])
x.context
```
### GPU上创建内存
我们可以在创建的时候指定创建在哪个设备上(如果GPU不能用或者没有装MXNet GPU版本,这里会有error):
```
a = nd.array([1,2,3], ctx=mx.gpu())
b = nd.zeros((3,2), ctx=mx.gpu())
c = nd.random.uniform(shape=(2,3), ctx=mx.gpu())
(a,b,c)
```
尝试将内存开到另外一块GPU上。如果不存在会报错。当然,如果你有大于10块GPU,那么下面代码会顺利执行。
```
import sys
try:
nd.array([1,2,3], ctx=mx.gpu(10))
except mx.MXNetError as err:
sys.stderr.write(str(err))
```
我们可以通过`copyto`和`as_in_context`来在设备之间传输数据。
```
y = x.copyto(mx.gpu())
z = x.as_in_context(mx.gpu())
(y, z)
```
这两个函数的主要区别是,如果源和目标的context一致,`as_in_context`不复制,而`copyto`总是会新建内存:
```
yy = y.as_in_context(mx.gpu())
zz = z.copyto(mx.gpu())
(yy is y, zz is z)
```
### GPU上的计算
计算会在数据的`context`上执行。所以为了使用GPU,我们只需要事先将数据放在上面就行了。结果会自动保存在对应的设备上:
```
nd.exp(z + 2) * y
```
注意所有计算要求输入数据在同一个设备上。不一致的时候系统不进行自动复制。这个设计的目的是因为设备之间的数据交互通常比较昂贵,我们希望用户确切的知道数据放在哪里,而不是隐藏这个细节。下面代码尝试将CPU上`x`和GPU上的`y`做运算。
```
try:
x + y
except mx.MXNetError as err:
sys.stderr.write(str(err))
```
### 默认会复制回CPU的操作
如果某个操作需要将NDArray里面的内容转出来,例如打印或变成numpy格式,如果需要的话系统都会自动将数据copy到主内存。
```
print(y)
print(y.asnumpy())
print(y.sum().asscalar())
```
## Gluon的GPU计算
同NDArray类似,Gluon的大部分函数可以通过`ctx`指定设备。下面代码将模型参数初始化在GPU上:
```
from mxnet import gluon
net = gluon.nn.Sequential()
net.add(gluon.nn.Dense(1))
net.initialize(ctx=mx.gpu())
```
输入GPU上的数据,会在GPU上计算结果
```
data = nd.random.uniform(shape=[3,2], ctx=mx.gpu())
net(data)
```
确认下权重:
```
net[0].weight.data()
```
## 总结
通过`context`我们可以很容易在不同的设备上计算。
## 练习
- 试试大一点的计算任务,例如大矩阵的乘法,看看CPU和GPU的速度区别。如果是计算量很小的任务呢?
- 试试CPU和GPU之间传递数据的速度
- GPU上如何读写模型呢?
**吐槽和讨论欢迎点**[这里](https://discuss.gluon.ai/t/topic/988)
| github_jupyter |
# Data Access and Time-series Statistics
- **Emilio Mayorga**, University of Washington - APL
- **Yifan Cheng**, University of Washington - CEE
[WaterHackWeek](https://waterhackweek.github.io) Cyberseminar. February 7, 2019
## Data types and this seminar
- Spatial representation? Point, gridded, etc.
- Long time series, near-real-time data, high-frequency sensor data?
- USA, International, Global?
- Academic research or government monitoring and research?
- Atmospheric, surface water, groundwater? Variables of interest?
- **Seminar focused on time series data from sites commonly represented as fixed geographical points.** And we'll emphasize surface waters.
## Where do I find the data I need? <u>Google</u>?
- **Google Search.**
- Overwhelming. Many irrelevant results.
- **Google Earth Engine.** https://earthengine.google.com/
- Fantastic for remote sensing data and processing. Not yet there for site time series, site data
- **Google Dataset Search.** https://toolbox.google.com/datasetsearch
- New, promising. Probably a good place to start.
- No single catalog will meet all your needs for search, discovery, and convenient access to data, equally well, in all domains of water research!
- But some systems are good, broad starting points.
- Some make it easier to identify, ingest and use the *"granular"* data consistently across sources, while some only target the *"dataset"* and metadata level.
- See this nice (but a bit dated) [**GLEON** page comparing DataONE and CUAHSI HIS-HydroClient](http://gleon.org/data/repositories). Valuable comparison beyond those two systems.
- All have pros and cons.
## Catalogs: User Interface vs APIs
- Most catalog systems provide both a user interface (UI) for interactive browsing, and an *Application Programming Interface (API)* for remote programmatic access through the web. The UI is typically easier to use, but the API facilitates repeated tasks, large queries, reuse, and reproducibility.
## Data Search Strategies, Considerations
- Clarify the type of data your research question requires
- Ask your colleagues about sources of relevant data!
- Do try Google. But also use specialized (Earth Sciences, hydrology, etc) but still broad catalogs.
- Does the catalog make it easier to access data in consistent data formats across data sources? Or is it focused on "datasets"?
- Go to each individual data provider's web site, or try to find a system or tool that spans multiple data sets?
### ... continued
- Manual file downloading, or programmatic access ("web services"; say, from Python)?
- What options does the data provider have? Manual file downloads, custom or standards-based web service API, custom or standards-based data formats?
- Are there multi-dataset code packages that can handle the dataset you want? Is the package fairly easy to use, well documented, with plenty of examples, and does it have an active community of users and developers (say, on GitHub)?
- Are parameters ("water temperature", "stream discharge", "rainfall", etc) well described? Do different data sources use different parameter names, meanings, and units?
## Searching for Data: Useful systems/catalogs we'll explore
1. **CUAHSI HIS-HydroClient.** https://www.cuahsi.org/data-models. HydroClient, http://data.cuahsi.org, is a web application providing discovery, visualization and download capability for site time series from many different sources accessible through the CUAHSI "HIS Central" catalog http://his.cuahsi.org/
2. **CUAHSI HydroShare.** https://www.hydroshare.org/search/ (already discussed in previous seminars)
3. **USGS NWIS.** National Water Information System - USGS Water Data for the Nation: https://waterdata.usgs.gov/nwis. Water quantity and quality from surface waters and groundwater.
4. **Observatories, Field site networks.** [NEON (National Ecological Observatory Network)](https://www.neonscience.org), [LTER](https://lternet.edu/), [CZO](http://criticalzone.org/national/), [GLEON](http://gleon.org/), USDA watersheds.
5. **MonitorMyWatershed / ModelMyWatershed web application.** https://modelmywatershed.org. Provides user-friendly search capabilities across 4 catalogs: HydroShare, CUAHSI HIS-HydroClient, CINERGI, and Water Quality Portal; plus integration with HydroShare.
### ... Other Systems (explore on your own)
- [EarthCube Data Discovery Studio (formerly "CINERGI")](http://DataDiscoveryStudio.org). Broad "meta" catalog that integrates and harmonizes many different catalogs across the Earth Sciences. Operates at the dataset level.
- [DataONE](https://search.dataone.org). Broad catalog operating at the dataset level, with a diverse range of "Earth observational" data, including water data. US based and strongest for US data, but integrates datasets from other regions.
- [PANGEA](https://www.pangaea.de). "archiving, publishing and distributing georeferenced data from earth system research". Based in Germany, PANGEA operates at the dataset level and archives both large and small datasets, often via agreements with journals.
- [Water Quality Portal](https://www.waterqualitydata.us). "A cooperative service sponsored by USGS, EPA and the National Water Quality Monitoring Council (NWQMC) that integrates publicly available water quality data from USGS NWIS, the EPA STORET Data Warehouse, and USDA STEWARDS."
- **NOAA NCDC.** Climate data -- next slide.
- US agencies
- Other federal agencies (Army Corps. Eng., DOE, USDA)
- State agencies (and local agencies!)
## Climate Data
- [**NOAA NCDC** Land-based station data (US and Global)](https://www.ncdc.noaa.gov/data-access/land-based-station-data)
- Multiple datasets by a single agency provider
- Multiple access mechanisms (custom REST API; standards-based APIs; etc)
- Access for a subset of datasets via independent, specialized tool: CUAHSI HIS/HydroClient, `ulmo`
- [**NASA ORNL Daymet** (US)](https://daymet.ornl.gov)
- 1km x 1km gridded daily data, including query access via lat-lon points
- Multiple access mechanisms
- Access via independent, specialized tools: `ulmo`, [daymetpy](http://khufkens.github.io/daymetpy/)
*Brief NCDC drill in online:*
- NCDC Land-Based Station Data pages (Datasets, [CDO](https://www.ncdc.noaa.gov/cdo-web/), [web services](https://www.ncdc.noaa.gov/cdo-web/webservices/ncdcwebservices), [CDO REST API](https://www.ncdc.noaa.gov/cdo-web/webservices/v2)): https://www.ncdc.noaa.gov/data-access/land-based-station-data
- `ulmo` NCDC [Global Historical Climate Network Daily (GHCN) plugin](https://ulmo.readthedocs.io/en/latest/api.html#module-ulmo.ncdc.ghcn_daily). Also NCDC GSOD, CRIS plugins.
## Python
- **Pandas** is the backbone. "DataFrame" tabular data structure. Incorporates lots of functionality and core Python tools: read/write, data organization, data exploration, cleaning, and summarizing; Numpy, matplotlib plotting
- **GeoPandas.** Geospatially enabled Pandas, incorporating several useful geospatial tools.
- Matplotlib plotting.
- **ulmo** data access package. https://ulmo.readthedocs.io
- Python datetime handling.
- Beware of different Python datetime types: Python standard `datetime` type; Numpy datetime; Pandas `Timestamp`
- Timezone handling; datetime utilities, conversion
- See [this](https://medium.com/jbennetcodes/dealing-with-datetimes-like-a-pro-in-pandas-b80d3d808a7f) and [that](https://medium.com/jbennetcodes/dealing-with-datetimes-like-a-pro-in-python-fb3ac0feb94b) blog posts
## Data access and ingest. Common approaches, tools
- Manual browsing, downloads, and reading local files (but issues of reproducibility, efficiency, thoroughness)
- [`requests` Python package](https://stackabuse.com/the-python-requests-module/) (and `wget`, `curl`): generic remote access through the web.
- Pandas `read_csv` function. Not just local files, but also remote files.
- Custom web APIs (often called "REST" APIs) from the data provider (eg, NEON). Often fairly easy to use, but highly variable across systems.
- Standards-based resources:
- APIs: OPeNDAP, Open Geospatial Consortium (OGC) Web Services (WFS, SOS, etc), **CUAHSI WaterOneFlow**
- Formats: WaterML (**CUAHSI WaterML 1.x** vs OGC WaterML 2.0), NetCDF (3 "classic" vs 4), Metadata standards
- See this [old but still very useful descriptions of CUAHSI "HIS" standards](http://his.cuahsi.org/wofws.html)
- Standards enable reusability across multiple data sources, systems
- `ulmo`. Water and climate data. Wraps a lot of the underlying complexity into simpler, more user-friendly Python APIs.
### ... Other Python packages
- We won't discuss these, but you can explore them
- [`climata`](https://github.com/heigeo/climata)
- [`MetPy`](https://github.com/Unidata/MetP)
- [`obsio`](https://github.com/jaredwo/obsio)
- provider-specific packages, such as `daymetpy`
## Next: Use cases with Python examples
- Southeast US (Yifan's research area)
- Surface water temperature, discharge and water quality
- Running Python code: [conda](https://geohackweek.github.io/datasharing/01-conda-tutorial/), conda environment, and Jupyter notebooks
- All materials available on GitHub repository, https://github.com/waterhackweek/tsdata_access
- Use cases
1. [River and reservoir water temperature.](https://nbviewer.jupyter.org/github/waterhackweek/tsdata_access/blob/master/Part1-ulmo_nwis_reservoirtemperature.ipynb) Yifan
2. Search, access and initial look. [Notebook 1](https://nbviewer.jupyter.org/github/waterhackweek/tsdata_access/blob/master/Part2-ulmo_nwis_and_cuahsi.ipynb), and [notebook 2](https://nbviewer.jupyter.org/github/waterhackweek/tsdata_access/blob/master/Part2-NEON_and_Observatories_RESTAPI.ipynb). Emilio
| github_jupyter |
# Traitement de signal
## Atelier \#6 : Filtrage des signaux
### Support de cours disponible à l'adresse : [https://www.github.com/a-mhamdi/isetbz](https://www.github.com/a-mhamdi/isetbz)
---
La convolution est une intégrale qui exprime le degré de chevauchement d'une fonction $h$ lorsqu'elle est décalée sur une autre fonction $x$.
Par définition, une convolution $x\ast h$ se mesure par l'équation suivante :
$$
x\ast h \;=\; \displaystyle\int_{0}^{t}h(t-\varsigma)x(\varsigma)d\varsigma.
$$
Ce produit est utilisé fréquemment pour le filtrage d'un signal contaminé par du bruit gênant ainsi la perception correcte de l'information. Un produit de convolution peut être vu comme une technique de calcul de moyenne à un instant $t$ d'une fonction $x$ pondérée par la fonction $h$ et vice-versa.
On se propose de générer un signal sinusoïdal d'amplitude $1$ et de fréquence $1$ Hz que l'on note $x(t)$.
$$
x(t) \;=\; \displaystyle\sin\left(2\pi f t\right),\quad\text{avec}\quad f \,=\, 1\,\text{Hz}.
$$
```
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
plt.style.use("ggplot")
plt.rcParams['figure.figsize'] = [15, 10]
plt.rc({"keymap.grid": "g", "font.serif": "Charter", "font.size": 10})
t = np.linspace(0, 5.0, 1000)
x = np.sin(2 * np.pi * t)
plt.plot(t, x)
plt.title("Signal non bruité")
plt.xlabel("$t$ (sec)")
plt.grid()
plt.show()
```
Nous synthétisons ici un exemple de bruit :
$$
b(t) \;=\;
-0.4 \displaystyle\sin\left(2\pi f_{\text{b}_1} t\right)
+0.6 \displaystyle\sin\left(2\pi f_{\text{b}_2} t\right),
\quad\text{avec}\quad \left\{\begin{array}{lcl}f_{\text{b}_1}&=& 500\,\text{Hz},\\&&\\ f_{\text{b}_2}&=& 750\,\text{Hz}.\end{array}\right.
$$
Nous le rajouterons par la suite au signal d'origine $x(t)$ comme suit :
$$
x_\text{b} \;=\; x(t) + b(t)
$$
```
# Generate a synthetic noise signal: two sinusoids at 500 Hz and 750 Hz.
b = -0.4 * np.sin(1000 * np.pi * t) + 0.6 * np.sin(1500 * np.pi * t)
# Contaminated signal: original 1 Hz sine plus the noise.
x_b = x + b
plt.plot(t, x_b)
plt.title("Signal bruité")
plt.xlabel("$t$ (sec)")
plt.grid()
plt.show()
```
Le filtre à appliquer s'agit d'un passe-bas de réponse impulsionnelle :
$$
h(t) \;=\; \displaystyle\frac{1}{\tau}\mathrm{e}^{-\displaystyle\frac{t}{\tau}},\quad\text{avec}\quad \tau\,=\, 0.1\,\text{sec}.
$$
```
# Impulse response of a first-order low-pass filter, h(t) = (1/tau) exp(-t/tau),
# sampled over five time constants (tau = 0.1 s).
tau = 0.1; t_h = np.linspace(0, 5*tau, 100)
h = 1/tau * np.exp(-t_h/tau)
plt.plot(t_h, h)
plt.title(r"$h(t) \;=\; \dfrac{1}{\tau}\mathrm{e}^{-\dfrac{t}{\tau}}$")
plt.xlabel("$t$ (sec)")
plt.grid()
plt.show()
```
La sortie $\tilde{x}$ du filtre est le résultat du produit de convolution suivant :
$$
\tilde{x}(t) \;=\; \displaystyle\int_{0}^{t}h(t-\varsigma)x(\varsigma)d\varsigma
$$
```
# Normalise the filter taps to sum to one (unit DC gain): filt = h / sum(h).
filt = h/h.sum()
# 'same' keeps the filtered signal the same length as (and aligned with) t.
x_f = np.convolve(x_b, filt, 'same')
plt.plot(t, x_f)
plt.title("Signal filtré")
plt.xlabel("$t$ (sec)")
plt.grid()
plt.show()
```
| github_jupyter |
# ResnetTrick_s128
> first experiments size 128
# setup and imports
```
# pip install git+https://github.com/ayasyrev/model_constructor
# pip install git+https://github.com/kornia/kornia
from kornia.contrib import MaxBlurPool2d
from fastai.basic_train import *
from fastai.vision import *
from fastai.script import *
from model_constructor.net import *
from model_constructor.layers import SimpleSelfAttention, ConvLayer
import math
import torch
from torch.optim.optimizer import Optimizer, required
import itertools as it
```
# utils
```
class Mish(nn.Module):
    """Mish activation: x * tanh(softplus(x))."""

    def forward(self, x):
        # Single inline expression; no intermediate tensor is kept around.
        return x * torch.tanh(F.softplus(x))
#Ranger deep learning optimizer - RAdam + Lookahead combined.
#https://github.com/lessw2020/Ranger-Deep-Learning-Optimizer
#Ranger has now been used to capture 12 records on the FastAI leaderboard.
#This version = 9.3.19
#Credits:
#RAdam --> https://github.com/LiyuanLucasLiu/RAdam
#Lookahead --> rewritten by lessw2020, but big thanks to Github @LonePatient and @RWightman for ideas from their code.
#Lookahead paper --> MZhang,G Hinton https://arxiv.org/abs/1907.08610
#summary of changes:
#full code integration with all updates at param level instead of group, moves slow weights into state dict (from generic weights),
#supports group learning rates (thanks @SHolderbach), fixes sporadic load from saved model issues.
#changes 8/31/19 - fix references to *self*.N_sma_threshold;
#changed eps to 1e-5 as better default than 1e-8.
class Ranger(Optimizer):
    """RAdam + Lookahead optimizer (Ranger).

    Each param update runs an RAdam step (rectified Adam with a variance
    warm-up) and, every `k` steps, interpolates the fast weights towards a
    slow-weights buffer by factor `alpha` (Lookahead).
    """
    def __init__(self, params, lr=1e-3, alpha=0.5, k=6, N_sma_threshhold=5, betas=(.95,0.999), eps=1e-5, weight_decay=0):
        """Args:
            params: iterable of parameters or param groups.
            lr: learning rate.
            alpha: Lookahead interpolation factor in [0, 1].
            k: Lookahead update interval in steps (>= 1).
            N_sma_threshhold: SMA length above which the rectified step is used.
            betas, eps, weight_decay: Adam-style hyperparameters.
        """
        #parameter checks
        if not 0.0 <= alpha <= 1.0:
            raise ValueError(f'Invalid slow update rate: {alpha}')
        if not 1 <= k:
            raise ValueError(f'Invalid lookahead steps: {k}')
        if not lr > 0:
            raise ValueError(f'Invalid Learning Rate: {lr}')
        if not eps > 0:
            raise ValueError(f'Invalid eps: {eps}')
        #parameter comments:
        # beta1 (momentum) of .95 seems to work better than .90...
        #N_sma_threshold of 5 seems better in testing than 4.
        #In both cases, worth testing on your dataset (.90 vs .95, 4 vs 5) to make sure which works best for you.
        #prep defaults and init torch.optim base
        defaults = dict(lr=lr, alpha=alpha, k=k, step_counter=0, betas=betas, N_sma_threshhold=N_sma_threshhold, eps=eps, weight_decay=weight_decay)
        super().__init__(params,defaults)
        #adjustable threshold
        self.N_sma_threshhold = N_sma_threshhold
        #now we can get to work...
        #removed as we now use step from RAdam...no need for duplicate step counting
        #for group in self.param_groups:
        #    group["step_counter"] = 0
        #print("group step counter init")
        #look ahead params
        self.alpha = alpha
        self.k = k
        #radam buffer for state
        self.radam_buffer = [[None,None,None] for ind in range(10)]
        #self.first_run_check=0
        #lookahead weights
        #9/2/19 - lookahead param tensors have been moved to state storage.
        #This should resolve issues with load/save where weights were left in GPU memory from first load, slowing down future runs.
        #self.slow_weights = [[p.clone().detach() for p in group['params']]
        # for group in self.param_groups]
        #don't use grad for lookahead weights
        #for w in it.chain(*self.slow_weights):
        # w.requires_grad = False
    def __setstate__(self, state):
        # Called on load from a saved optimizer state dict.
        print("set state called")
        super(Ranger, self).__setstate__(state)
    def step(self, closure=None):
        """Perform one optimization step; returns the (always-None) loss.

        NOTE(review): positional Number-first overloads like
        `addcmul_(scalar, t1, t2)` and `add_(scalar, tensor)` are deprecated in
        newer torch releases — confirm the pinned torch version before upgrading.
        """
        loss = None
        #note - below is commented out b/c I have other work that passes back the loss as a float, and thus not a callable closure.
        #Uncomment if you need to use the actual closure...
        #if closure is not None:
        #loss = closure()
        #Evaluate averages and grad, update param tensors
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data.float()
                if grad.is_sparse:
                    raise RuntimeError('Ranger optimizer does not support sparse gradients')
                p_data_fp32 = p.data.float()
                state = self.state[p] #get state dict for this param
                if len(state) == 0: #if first time to run...init dictionary with our desired entries
                    #if self.first_run_check==0:
                    #self.first_run_check=1
                    #print("Initializing slow buffer...should not see this at load from saved model!")
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p_data_fp32)
                    state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
                    #look ahead weight storage now in state dict
                    state['slow_buffer'] = torch.empty_like(p.data)
                    state['slow_buffer'].copy_(p.data)
                else:
                    state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
                    state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)
                #begin computations
                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                beta1, beta2 = group['betas']
                #compute variance mov avg
                exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
                #compute mean moving avg
                exp_avg.mul_(beta1).add_(1 - beta1, grad)
                state['step'] += 1
                # RAdam: cache (N_sma, step_size) per step index mod 10, since they
                # only depend on the step count and the betas.
                buffered = self.radam_buffer[int(state['step'] % 10)]
                if state['step'] == buffered[0]:
                    N_sma, step_size = buffered[1], buffered[2]
                else:
                    buffered[0] = state['step']
                    beta2_t = beta2 ** state['step']
                    N_sma_max = 2 / (1 - beta2) - 1
                    N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)
                    buffered[1] = N_sma
                    if N_sma > self.N_sma_threshhold:
                        step_size = math.sqrt((1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / (N_sma_max - 2)) / (1 - beta1 ** state['step'])
                    else:
                        step_size = 1.0 / (1 - beta1 ** state['step'])
                    buffered[2] = step_size
                if group['weight_decay'] != 0:
                    p_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32)
                # Rectified update when the variance estimate is reliable; plain
                # momentum step otherwise.
                if N_sma > self.N_sma_threshhold:
                    denom = exp_avg_sq.sqrt().add_(group['eps'])
                    p_data_fp32.addcdiv_(-step_size * group['lr'], exp_avg, denom)
                else:
                    p_data_fp32.add_(-step_size * group['lr'], exp_avg)
                p.data.copy_(p_data_fp32)
                #integrated look ahead...
                #we do it at the param level instead of group level
                if state['step'] % group['k'] == 0:
                    slow_p = state['slow_buffer'] #get access to slow param tensor
                    slow_p.add_(self.alpha, p.data - slow_p) #(fast weights - slow weights) * alpha
                    p.data.copy_(slow_p) #copy interpolated weights to RAdam param tensor
        return loss
def get_data(size=128, woof=1, bs=64, workers=None, **kwargs):
    """Download (if needed) Imagewoof/Imagenette and build a fastai DataBunch.

    size: final image size (px); woof: truthy selects Imagewoof, else Imagenette;
    bs: batch size; workers: dataloader workers (defaults to min(8, cpus/gpu)).
    **kwargs is accepted but unused.
    """
    if woof:
        path = URLs.IMAGEWOOF # if woof
    else:
        path = URLs.IMAGENETTE
    path = untar_data(path)
    print('data path ', path)
    n_gpus = num_distrib() or 1
    if workers is None: workers = min(8, num_cpus()//n_gpus)
    # Horizontal flips only; presize crops at scale (0.35, 1), then normalize
    # with ImageNet statistics.
    return (ImageList.from_folder(path).split_by_folder(valid='val')
            .label_from_folder().transform(([flip_lr(p=0.5)], []), size=size)
            .databunch(bs=bs, num_workers=workers)
            .presize(size, scale=(0.35,1))
            .normalize(imagenet_stats))
def get_learn(
        gpu:Param("GPU to run on", str)=None,
        woof: Param("Use imagewoof (otherwise imagenette)", int)=1,
        size: Param("Size (px: 128,192,224)", int)=128,
        alpha: Param("Alpha", float)=0.99,
        mom: Param("Momentum", float)=0.95, #? 0.9
        eps: Param("epsilon", float)=1e-6,
        bs: Param("Batch size", int)=64,
        mixup: Param("Mixup", float)=0.,
        opt: Param("Optimizer (adam,ranger)", str)='ranger',
        sa: Param("Self-attention", int)=0,
        sym: Param("Symmetry for self-attention", int)=0,
        model: Param('model as partial', callable) = xresnet50
        ):
    """Create a fastai Learner on Imagewoof/Imagenette with the given model.

    Uses label-smoothed cross-entropy and optionally wraps the learner with
    mixup. NOTE: `gpu`, `sa` and `sym` are accepted for CLI compatibility but
    are not used inside this function.
    """
    if opt == 'adam':
        opt_func = partial(optim.Adam, betas=(mom, alpha), eps=eps)
    elif opt == 'ranger':
        opt_func = partial(Ranger, betas=(mom, alpha), eps=eps)
    else:
        # BUG FIX: unknown optimizer names previously fell through and raised a
        # confusing NameError on `opt_func` (the help text even advertised
        # unsupported 'rms'/'sgd'); fail fast with a clear message instead.
        raise ValueError(f"Unsupported optimizer {opt!r}; use 'adam' or 'ranger'.")
    data = get_data(size, woof, bs)
    learn = Learner(data, model(), wd=1e-2, opt_func=opt_func,
                    metrics=[accuracy, top_k_accuracy],
                    bn_wd=False, true_wd=True,
                    loss_func=LabelSmoothingCrossEntropy())
    print('Learn path', learn.path)
    if mixup: learn = learn.mixup(alpha=mixup)
    return learn
```
# ResBlock
```
class NewResBlock(Module):
    """Residual block that pools *before* the convolutions when downsampling.

    With expansion == 1 the body is two 3x3 convs; otherwise it is the usual
    1x1 -> 3x3 -> 1x1 bottleneck. An optional SimpleSelfAttention layer is
    appended, and the identity path gets a 1x1 conv only when the channel
    counts differ.
    """
    def __init__(self, expansion, ni, nh, stride=1,
                 conv_layer=ConvLayer, act_fn=act_fn, bn_1st=True,
                 pool=nn.AvgPool2d(2, ceil_mode=True), sa=False, sym=False, zero_bn=True):
        nf, ni = nh * expansion, ni * expansion
        # Spatial reduction happens once, up front, shared by both branches.
        self.reduce = noop if stride == 1 else pool
        if expansion == 1:
            body = [
                ("conv_0", conv_layer(ni, nh, 3, stride=stride, act_fn=act_fn, bn_1st=bn_1st)),
                ("conv_1", conv_layer(nh, nf, 3, zero_bn=zero_bn, act=False, bn_1st=bn_1st)),
            ]
        else:
            body = [
                ("conv_0", conv_layer(ni, nh, 1, act_fn=act_fn, bn_1st=bn_1st)),
                ("conv_1", conv_layer(nh, nh, 3, stride=1, act_fn=act_fn, bn_1st=bn_1st)),
                ("conv_2", conv_layer(nh, nf, 1, zero_bn=zero_bn, act=False, bn_1st=bn_1st)),
            ]
        if sa:
            body.append(("sa", SimpleSelfAttention(nf, ks=1, sym=sym)))
        self.convs = nn.Sequential(OrderedDict(body))
        self.idconv = noop if ni == nf else conv_layer(ni, nf, 1, act=False)
        self.merge = act_fn

    def forward(self, x):
        shortcut = self.reduce(x)
        return self.merge(self.convs(shortcut) + self.idconv(shortcut))
```
# Parameters
```
# Hyper-parameters shared by the training runs below.
lr = 0.004  # learning rate for fit_fc
epochs = 5  # epochs per run (raised to 20 further down)
moms = (0.95,0.95)  # momentum bounds passed to fit_fc
start_pct = 0.72  # presumably the flat-LR fraction for fit_fc — TODO confirm
size=128  # image size in pixels
bs=64  # batch size
```
# Model Constructor
```
# Configure the xresnet50 constructor with the tweaks under test.
# NOTE(review): attributes are assigned after construction, which suggests
# `model` is a constructor object whose network is built on call — confirm.
model = xresnet50(c_out=10)  # 10 output classes (imagenette/imagewoof)
model.block = NewResBlock  # use the residual block variant defined above
pool = MaxBlurPool2d(3, True)
# The same pool instance is assigned to both attributes — presumably
# intentional sharing of a stateless module; verify.
model.pool = pool
model.stem_pool = pool
model.stem_sizes = [3,32,64,64]  # channel widths of the stem convolutions
model.act_fn= Mish()  # Mish activation instead of the default
model.sa = True  # enable simple self-attention
```
## repr model
```
# Build the network and inspect its parts (only the last expression is
# rendered in a notebook cell).
model()
model.stem
model.body
model.head
```
# Lr find
```
# Learning-rate range test to help pick `lr`.
learn = get_learn(model=model,size=size,bs=bs)
learn.lr_find()
learn.recorder.plot()
```
# epochs 5
```
# Five independent 5-epoch runs; a fresh learner is created each time so
# the runs share no optimizer or weight state.
learn = get_learn(model=model,size=size,bs=bs)
learn.fit_fc(epochs, lr, moms,start_pct)
learn = get_learn(model=model,size=size,bs=bs)
learn.fit_fc(epochs, lr, moms,start_pct)
learn = get_learn(model=model,size=size,bs=bs)
learn.fit_fc(epochs, lr, moms,start_pct)
learn = get_learn(model=model,size=size,bs=bs)
learn.fit_fc(epochs, lr, moms,start_pct)
learn = get_learn(model=model,size=size,bs=bs)
learn.fit_fc(epochs, lr, moms,start_pct)
```
# e5 results
```
# Final accuracies transcribed from the five 5-epoch runs above.
acc = np.array([0.751082, 0.734029, 0.728939, 0.727412, 0.737847])
acc.mean(), acc.std()
```
# epochs 20
```
# Five independent 20-epoch runs (fresh learner each time), with loss/metric
# plots after each run.
epochs = 20
learn = get_learn(model=model,size=size,bs=bs)
learn.fit_fc(epochs, lr, moms,start_pct)
learn.recorder.plot_losses()
learn.recorder.plot_metrics()
learn = get_learn(model=model,size=size,bs=bs)
learn.fit_fc(epochs, lr, moms,start_pct)
learn.recorder.plot_losses()
learn = get_learn(model=model,size=size,bs=bs)
learn.fit_fc(epochs, lr, moms,start_pct)
learn.recorder.plot_losses()
learn = get_learn(model=model,size=size,bs=bs)
learn.fit_fc(epochs, lr, moms,start_pct)
learn.recorder.plot_metrics()
learn = get_learn(model=model,size=size,bs=bs)
learn.fit_fc(epochs, lr, moms,start_pct)
learn.recorder.plot_losses()
```
# e20 results
```
# Final accuracies transcribed from the five 20-epoch runs above.
acc = np.array([0.862560, 0.853143, 0.853652, 0.844490, 0.847544 ])
acc.mean(), acc.std()
```
| github_jupyter |
# Week 1 Assignment: Data Validation
[Tensorflow Data Validation (TFDV)](https://cloud.google.com/solutions/machine-learning/analyzing-and-validating-data-at-scale-for-ml-using-tfx) is an open-source library that helps to understand, validate, and monitor production machine learning (ML) data at scale. Common use-cases include comparing training, evaluation and serving datasets, as well as checking for training/serving skew. You have seen the core functionalities of this package in the previous ungraded lab and you will get to practice them in this week's assignment.
In this lab, you will use TFDV in order to:
* Generate and visualize statistics from a dataframe
* Infer a dataset schema
* Calculate, visualize and fix anomalies
Let's begin!
## Table of Contents
- [1 - Setup and Imports](#1)
- [2 - Load the Dataset](#2)
- [2.1 - Read and Split the Dataset](#2-1)
- [2.1.1 - Data Splits](#2-1-1)
- [2.1.2 - Label Column](#2-1-2)
- [3 - Generate and Visualize Training Data Statistics](#3)
- [3.1 - Removing Irrelevant Features](#3-1)
- [Exercise 1 - Generate Training Statistics](#ex-1)
- [Exercise 2 - Visualize Training Statistics](#ex-2)
- [4 - Infer a Data Schema](#4)
- [Exercise 3: Infer the training set schema](#ex-3)
- [5 - Calculate, Visualize and Fix Evaluation Anomalies](#5)
- [Exercise 4: Compare Training and Evaluation Statistics](#ex-4)
- [Exercise 5: Detecting Anomalies](#ex-5)
- [Exercise 6: Fix evaluation anomalies in the schema](#ex-6)
- [6 - Schema Environments](#6)
- [Exercise 7: Check anomalies in the serving set](#ex-7)
- [Exercise 8: Modifying the domain](#ex-8)
- [Exercise 9: Detecting anomalies with environments](#ex-9)
- [7 - Check for Data Drift and Skew](#7)
- [8 - Display Stats for Data Slices](#8)
- [9 - Freeze the Schema](#9)
<a name='1'></a>
## 1 - Setup and Imports
```
# Import packages
import os
import pandas as pd
import tensorflow as tf
import tempfile, urllib, zipfile
import tensorflow_data_validation as tfdv #
from tensorflow.python.lib.io import file_io
from tensorflow_data_validation.utils import slicing_util
from tensorflow_metadata.proto.v0.statistics_pb2 import DatasetFeatureStatisticsList, DatasetFeatureStatistics
# Set TF's logger to only display errors to avoid internal warnings being shown
tf.get_logger().setLevel('ERROR')
```
<a name='2'></a>
## 2 - Load the Dataset
You will be using the [Diabetes 130-US hospitals for years 1999-2008 Data Set](https://archive.ics.uci.edu/ml/datasets/diabetes+130-us+hospitals+for+years+1999-2008) donated to the University of California, Irvine (UCI) Machine Learning Repository. The dataset represents 10 years (1999-2008) of clinical care at 130 US hospitals and integrated delivery networks. It includes over 50 features representing patient and hospital outcomes.
This dataset has already been included in your Jupyter workspace so you can easily load it.
<a name='2-1'></a>
### 2.1 Read and Split the Dataset
```
# Read CSV data into a dataframe and recognize the missing data that is encoded with '?' string as NaN
# (na_values='?' converts each '?' cell to NaN so pandas/TFDV treat it as missing)
df = pd.read_csv('dataset_diabetes/diabetic_data.csv', header=0, na_values = '?')
# Preview the dataset
df.head()
```
<a name='2-1-1'></a>
#### Data splits
In a production ML system, the model performance can be negatively affected by anomalies and divergence between data splits for training, evaluation, and serving. To emulate a production system, you will split the dataset into:
* 70% training set
* 15% evaluation set
* 15% serving set
You will then use TFDV to visualize, analyze, and understand the data. You will create a data schema from the training dataset, then compare the evaluation and serving sets with this schema to detect anomalies and data drift/skew.
<a name='2-1-2'></a>
#### Label Column
This dataset has been prepared to analyze the factors related to readmission outcome. In this notebook, you will treat the `readmitted` column as the *target* or label column.
The target (or label) is important to know while splitting the data into training, evaluation and serving sets. In supervised learning, you need to include the target in the training and evaluation datasets. For the serving set however (i.e. the set that simulates the data coming from your users), the **label column needs to be dropped** since that is the feature that your model will be trying to predict.
The following function returns the training, evaluation and serving partitions of a given dataset:
```
def prepare_data_splits_from_dataframe(df):
    '''
    Splits a Pandas Dataframe into training, evaluation and serving sets.

    Parameters:
        df : pandas dataframe to split

    Returns:
        train_df: Training dataframe(70% of the entire dataset)
        eval_df: Evaluation dataframe (15% of the entire dataset)
        serving_df: Serving dataframe (15% of the entire dataset, label column dropped)
    '''
    # Partition boundaries: first 70% for training, the remaining 30%
    # halved between evaluation and serving.
    total = len(df)
    train_end = int(total * 0.7)
    eval_end = train_end + (total - train_end) // 2

    # Shuffle each partition with a fixed random state for repeatable outcomes.
    def _shuffled(part):
        return part.sample(frac=1, random_state=48).reset_index(drop=True)

    train_df = _shuffled(df.iloc[:train_end])
    eval_df = _shuffled(df.iloc[train_end:eval_end])
    serving_df = _shuffled(df.iloc[eval_end:])

    # Serving data emulates prediction-time input, so the label is removed.
    serving_df = serving_df.drop(['readmitted'], axis=1)

    return train_df, eval_df, serving_df
# Split the dataset 70/15/15 into train / eval / serving partitions
train_df, eval_df, serving_df = prepare_data_splits_from_dataframe(df)
# Report the size of each partition
print('Training dataset has {} records\nValidation dataset has {} records\nServing dataset has {} records'.format(len(train_df),len(eval_df),len(serving_df)))
```
<a name='3'></a>
## 3 - Generate and Visualize Training Data Statistics
In this section, you will be generating descriptive statistics from the dataset. This is usually the first step when dealing with a dataset you are not yet familiar with. It is also known as performing an *exploratory data analysis* and its purpose is to understand the data types, the data itself and any possible issues that need to be addressed.
It is important to mention that **exploratory data analysis should be performed on the training dataset** only. This is because getting information out of the evaluation or serving datasets can be seen as "cheating" since this data is used to emulate data that you have not collected yet and will try to predict using your ML algorithm. **In general, it is a good practice to avoid leaking information from your evaluation and serving data into your model.**
<a name='3-1'></a>
### Removing Irrelevant Features
Before you generate the statistics, you may want to drop irrelevant features from your dataset. You can do that with TFDV with the [tfdv.StatsOptions](https://www.tensorflow.org/tfx/data_validation/api_docs/python/tfdv/StatsOptions) class. It is usually **not a good idea** to drop features without knowing what information they contain. However there are times when this can be fairly obvious.
One of the important parameters of the `StatsOptions` class is `feature_whitelist`, which defines the features to include while calculating the data statistics. You can check the [documentation](https://www.tensorflow.org/tfx/data_validation/api_docs/python/tfdv/StatsOptions#args) to learn more about the class arguments.
In this case, you will omit the statistics for `encounter_id` and `patient_nbr` since they are part of the internal tracking of patients in the hospital and they don't contain valuable information for the task at hand.
```
# Define features to remove: internal patient-tracking IDs that carry no
# predictive signal for the readmission task
features_to_remove = {'encounter_id', 'patient_nbr'}
# Collect features to whitelist while computing the statistics
approved_cols = [col for col in df.columns if (col not in features_to_remove)]
# Instantiate a StatsOptions class and define the feature_whitelist property
stats_options = tfdv.StatsOptions(feature_whitelist=approved_cols)
# Review the features to generate the statistics
print(stats_options.feature_whitelist)
print(stats_options)
```
<a name='ex-1'></a>
### Exercise 1: Generate Training Statistics
TFDV allows you to generate statistics from different data formats such as CSV or a Pandas DataFrame.
Since you already have the data stored in a DataFrame you can use the function [`tfdv.generate_statistics_from_dataframe()`](https://www.tensorflow.org/tfx/data_validation/api_docs/python/tfdv/generate_statistics_from_dataframe) which, given a DataFrame and `stats_options`, generates an object of type `DatasetFeatureStatisticsList`. This object includes the computed statistics of the given dataset.
Complete the cell below to generate the statistics of the training set. Remember to pass the training dataframe and the `stats_options` that you defined above as arguments.
```
### START CODE HERE
# Compute descriptive statistics over the training split, restricted to the
# whitelisted features configured in stats_options
train_stats = tfdv.generate_statistics_from_dataframe(train_df, stats_options)
### END CODE HERE
# TEST CODE
# get the number of features used to compute statistics
print(f"Number of features used: {len(train_stats.datasets[0].features)}")
# check the number of examples used
print(f"Number of examples used: {train_stats.datasets[0].num_examples}")
# check the column names of the first and last feature
print(f"First feature: {train_stats.datasets[0].features[0].path.step[0]}")
print(f"Last feature: {train_stats.datasets[0].features[-1].path.step[0]}")
```
**Expected Output:**
```
Number of features used: 48
Number of examples used: 71236
First feature: race
Last feature: readmitted
```
<a name='ex-2'></a>
### Exercise 2: Visualize Training Statistics
Now that you have the computed statistics in the `DatasetFeatureStatisticsList` instance, you will need a way to **visualize** these to get actual insights. TFDV provides this functionality through the method [`tfdv.visualize_statistics()`](https://www.tensorflow.org/tfx/data_validation/api_docs/python/tfdv/visualize_statistics).
Using this function in an interactive Python environment such as this one will output a very nice and convenient way to interact with the descriptive statistics you generated earlier.
**Try it out yourself!** Remember to pass in the generated training statistics in the previous exercise as an argument.
```
### START CODE HERE
# Render an interactive overview of the training statistics
tfdv.visualize_statistics(train_stats)
### END CODE HERE
```
<a name='4'></a>
## 4 - Infer a data schema
A schema defines the **properties of the data** and can thus be used to detect errors. Some of these properties include:
- which features are expected to be present
- feature type
- the number of values for a feature in each example
- the presence of each feature across all examples
- the expected domains of features
The schema is expected to be fairly static, whereas statistics can vary per data split. So, you will **infer the data schema from only the training dataset**. Later, you will generate statistics for evaluation and serving datasets and compare their state with the data schema to detect anomalies, drift and skew.
<a name='ex-3'></a>
### Exercise 3: Infer the training set schema
Schema inference is straightforward using [`tfdv.infer_schema()`](https://www.tensorflow.org/tfx/data_validation/api_docs/python/tfdv/infer_schema). This function needs only the **statistics** (an instance of `DatasetFeatureStatisticsList`) of your data as input. The output will be a Schema [protocol buffer](https://developers.google.com/protocol-buffers) containing the results.
A complementary function is [`tfdv.display_schema()`](https://www.tensorflow.org/tfx/data_validation/api_docs/python/tfdv/display_schema) for displaying the schema in a table. This accepts a **Schema** protocol buffer as input.
Fill the code below to infer the schema from the training statistics using TFDV and display the result.
```
### START CODE HERE
# Infer the data schema by using the training statistics that you generated
schema = tfdv.infer_schema(train_stats)
# Display the data schema
tfdv.display_schema(schema)
### END CODE HERE
# TEST CODE
# Check number of features
print(f"Number of features in schema: {len(schema.feature)}")
# Check domain name of 2nd feature
print(f"Second feature in schema: {list(schema.feature)[1].domain}")
```
**Expected Output:**
```
Number of features in schema: 48
Second feature in schema: gender
```
**Be sure to check the information displayed before moving forward.**
<a name='5'></a>
## 5 - Calculate, Visualize and Fix Evaluation Anomalies
It is important that the schema of the evaluation data is consistent with the training data since the data that your model is going to receive should be consistent to the one you used to train it with.
Moreover, it is also important that the **features of the evaluation data belong roughly to the same range as the training data**. This ensures that the model will be evaluated on a similar loss surface covered during training.
<a name='ex-4'></a>
### Exercise 4: Compare Training and Evaluation Statistics
Now you are going to generate the evaluation statistics and compare it with training statistics. You can use the [`tfdv.generate_statistics_from_dataframe()`](https://www.tensorflow.org/tfx/data_validation/api_docs/python/tfdv/generate_statistics_from_dataframe) function for this. But this time, you'll need to pass the **evaluation data**. For the `stats_options` parameter, the list you used before works here too.
Remember that to visualize the evaluation statistics you can use [`tfdv.visualize_statistics()`](https://www.tensorflow.org/tfx/data_validation/api_docs/python/tfdv/visualize_statistics).
However, it is impractical to visualize both statistics separately and do your comparison from there. Fortunately, TFDV has got this covered. You can use the `visualize_statistics` function and pass additional parameters to overlay the statistics from both datasets (referenced as left-hand side and right-hand side statistics). Let's see what these parameters are:
- `lhs_statistics`: Required parameter. Expects an instance of `DatasetFeatureStatisticsList `.
- `rhs_statistics`: Expects an instance of `DatasetFeatureStatisticsList ` to compare with `lhs_statistics`.
- `lhs_name`: Name of the `lhs_statistics` dataset.
- `rhs_name`: Name of the `rhs_statistics` dataset.
For this case, remember to define the `lhs_statistics` protocol with the `eval_stats`, and the optional `rhs_statistics` protocol with the `train_stats`.
Additionally, check the function for the protocol name declaration, and define the lhs and rhs names as `'EVAL_DATASET'` and `'TRAIN_DATASET'` respectively.
```
### START CODE HERE
# Generate evaluation dataset statistics
# HINT: Remember to use the evaluation dataframe and to pass the stats_options (that you defined before) as an argument
eval_stats = tfdv.generate_statistics_from_dataframe(eval_df, stats_options=stats_options)

# Compare evaluation data with training data
# HINT: Remember to use both the evaluation and training statistics with the lhs_statistics and rhs_statistics arguments
# HINT: Assign the names of 'EVAL_DATASET' and 'TRAIN_DATASET' to the lhs and rhs protocols
# BUG FIX: lhs/rhs statistics were swapped relative to their labels — the
# training stats were being displayed under the 'EVAL_DATASET' title and
# vice versa. lhs must be the eval stats, rhs the train stats.
tfdv.visualize_statistics(lhs_statistics=eval_stats, rhs_statistics=train_stats,
                          lhs_name='EVAL_DATASET', rhs_name='TRAIN_DATASET')
### END CODE HERE
# TEST CODE
# get the number of features used to compute statistics
print(f"Number of features: {len(eval_stats.datasets[0].features)}")
# check the number of examples used
print(f"Number of examples: {eval_stats.datasets[0].num_examples}")
# check the column names of the first and last feature
print(f"First feature: {eval_stats.datasets[0].features[0].path.step[0]}")
print(f"Last feature: {eval_stats.datasets[0].features[-1].path.step[0]}")
```
**Expected Output:**
```
Number of features: 48
Number of examples: 15265
First feature: race
Last feature: readmitted
```
<a name='ex-5'></a>
### Exercise 5: Detecting Anomalies ###
At this point, you should ask if your evaluation dataset matches the schema from your training dataset. For instance, if you scroll through the output cell in the previous exercise, you can see that the categorical feature **glimepiride-pioglitazone** has 1 unique value in the training set while the evaluation dataset has 2. You can verify with the built-in Pandas `describe()` method as well.
```
# Compare the cardinality of this categorical feature across the two splits.
# NOTE(review): only the last expression is displayed in a notebook cell, so
# the first describe() result is not shown unless printed.
train_df["glimepiride-pioglitazone"].describe()
eval_df["glimepiride-pioglitazone"].describe()
```
It is possible but highly inefficient to visually inspect and determine all the anomalies. So, let's instead use TFDV functions to detect and display these.
You can use the function [`tfdv.validate_statistics()`](https://www.tensorflow.org/tfx/data_validation/api_docs/python/tfdv/validate_statistics) for detecting anomalies and [`tfdv.display_anomalies()`](https://www.tensorflow.org/tfx/data_validation/api_docs/python/tfdv/display_anomalies) for displaying them.
The `validate_statistics()` method has two required arguments:
- an instance of `DatasetFeatureStatisticsList`
- an instance of `Schema`
Fill in the following graded function which, given the statistics and schema, displays the anomalies found.
```
def calculate_and_display_anomalies(statistics, schema):
    '''
    Calculate and display anomalies.

    Parameters:
        statistics : Data statistics in statistics_pb2.DatasetFeatureStatisticsList format
        schema : Data schema in schema_pb2.Schema format

    Returns:
        display of calculated anomalies
    '''
    ### START CODE HERE
    # HINTS: Pass the statistics and schema parameters into the validation function
    # FIX: the None placeholders made this function a no-op; validate the
    # statistics against the schema as the hints describe.
    anomalies = tfdv.validate_statistics(statistics, schema)

    # HINTS: Display input anomalies by using the calculated anomalies
    tfdv.display_anomalies(anomalies)
    ### END CODE HERE
```
You should see detected anomalies in the `medical_specialty` and `glimepiride-pioglitazone` features by running the cell below.
```
# Check evaluation data for errors by validating the evaluation data statistics using the previously inferred schema
calculate_and_display_anomalies(eval_stats, schema=schema)
```
<a name='ex-6'></a>
### Exercise 6: Fix evaluation anomalies in the schema
The evaluation data has records with values for the features **glimepiride-pioglitazone** and **medical_specialty** that were not included in the schema generated from the training data. You can fix this by adding the new values that exist in the evaluation dataset to the domain of these features.
To get the `domain` of a particular feature you can use [`tfdv.get_domain()`](https://www.tensorflow.org/tfx/data_validation/api_docs/python/tfdv/get_domain).
You can use the `append()` method to the `value` property of the returned `domain` to add strings to the valid list of values. To be more explicit, given a domain you can do something like:
```python
domain.value.append("feature_value")
```
```
### START CODE HERE
# Get the domain associated with the input feature, glimepiride-pioglitazone, from the schema
glimepiride_pioglitazone_domain = tfdv.get_domain(schema, 'glimepiride-pioglitazone')

# HINT: Append the missing value 'Steady' to the domain
glimepiride_pioglitazone_domain.value.append('Steady')

# Get the domain associated with the input feature, medical_specialty, from the schema
medical_specialty_domain = tfdv.get_domain(schema, 'medical_specialty')

# HINT: Append the missing value 'Neurophysiology' to the domain
medical_specialty_domain.value.append('Neurophysiology')

# HINT: Re-calculate and re-display anomalies with the new schema
calculate_and_display_anomalies(eval_stats, schema=schema)
### END CODE HERE
```
If you did the exercise correctly, you should see *"No anomalies found."* after running the cell above.
<a name='6'></a>
## 6 - Schema Environments
By default, all datasets in a pipeline should use the same schema. However, there are some exceptions.
For example, the **label column is dropped in the serving set** so this will be flagged when comparing with the training set schema.
**In this case, introducing slight schema variations is necessary.**
<a name='ex-7'></a>
### Exercise 7: Check anomalies in the serving set
Now you are going to check for anomalies in the **serving data**. The process is very similar to the one you previously did for the evaluation data with a little change.
Let's create a new `StatsOptions` that is aware of the information provided by the schema and use it when generating statistics from the serving DataFrame.
```
# Define a new statistics options by the tfdv.StatsOptions class for the serving data by passing the previously inferred schema
options = tfdv.StatsOptions(schema=schema,
                            infer_type_from_schema=True,
                            feature_whitelist=approved_cols)

### START CODE HERE
# Generate serving dataset statistics
# HINT: Remember to use the serving dataframe and to pass the newly defined statistics options
# FIX: replaced the None placeholders with the serving dataframe and the
# schema-aware options defined just above.
serving_stats = tfdv.generate_statistics_from_dataframe(serving_df, stats_options=options)

# HINT: Calculate and display anomalies using the generated serving statistics
calculate_and_display_anomalies(serving_stats, schema=schema)
### END CODE HERE
```
You should see that `metformin-rosiglitazone`, `metformin-pioglitazone`, `payer_code` and `medical_specialty` features have an anomaly (i.e. Unexpected string values) which is less than 1%.
Let's **relax the anomaly detection constraints** for the last two of these features by defining the `min_domain_mass` of the feature's distribution constraints.
```
# This relaxes the minimum fraction of values that must come from the domain for the feature.
# Get the feature and relax to match 90% of the domain
payer_code = tfdv.get_feature(schema, 'payer_code')
payer_code.distribution_constraints.min_domain_mass = 0.9
# Get the feature and relax to match 90% of the domain
medical_specialty = tfdv.get_feature(schema, 'medical_specialty')
medical_specialty.distribution_constraints.min_domain_mass = 0.9
# Detect anomalies with the updated constraints
calculate_and_display_anomalies(serving_stats, schema=schema)
```
If the `payer_code` and `medical_specialty` are no longer part of the output cell, then the relaxation worked!
<a name='ex-8'></a>
### Exercise 8: Modifying the Domain
Let's investigate the possible cause of the anomalies for the other features, namely `metformin-pioglitazone` and `metformin-rosiglitazone`. From the output of the previous exercise, you'll see that the `anomaly long description` says: "Examples contain values missing from the schema: Steady (<1%)". You can redisplay the schema and look at the domain of these features to verify this statement.
When you inferred the schema at the start of this lab, it's possible that some values were not detected in the training data so it was not included in the expected domain values of the feature's schema. In the case of `metformin-rosiglitazone` and `metformin-pioglitazone`, the value "Steady" is indeed missing. You will just see "No" in the domain of these two features after running the code cell below.
```
tfdv.display_schema(schema)
```
Towards the bottom of the Domain-Values pairs of the cell above, you can see that many features (including **'metformin'**) have the same values: `['Down', 'No', 'Steady', 'Up']`. These values are common to many features including the ones with missing values during schema inference.
TFDV allows you to modify the domains of some features to match an existing domain. To address the detected anomaly, you can **set the domain** of these features to the domain of the `metformin` feature.
Complete the function below to set the domain of a feature list to an existing feature domain.
For this, use the [`tfdv.set_domain()`](https://www.tensorflow.org/tfx/data_validation/api_docs/python/tfdv/set_domain) function, which has the following parameters:
- `schema`: The schema
- `feature_path`: The name of the feature whose domain needs to be set.
- `domain`: A domain protocol buffer or the name of a global string domain present in the input schema.
```
def modify_domain_of_features(features_list, schema, to_domain_name):
    '''
    Modify a list of features' domains.

    Parameters:
        features_list : Features that need to be modified
        schema: Inferred schema
        to_domain_name : Target domain to be transferred to the features list

    Returns:
        schema: new schema
    '''
    ### START CODE HERE
    # HINT: Loop over the feature list and use set_domain with the inferred schema, feature name and target domain name
    # FIX: the loop body was a None placeholder; point each feature at the
    # existing global domain named by to_domain_name.
    for feature in features_list:
        tfdv.set_domain(schema, feature, to_domain_name)
    ### END CODE HERE
    return schema
```
Using this function, set the domain of the features defined in the `domain_change_features` list below to be equal to **metformin's domain** to address the anomalies found.
**Since you are overriding the existing domain of the features, it is normal to get a warning so you don't do this by accident.**
```
# Drug features whose domains should all match metformin's
# ['Down', 'No', 'Steady', 'Up'] value set.
domain_change_features = ['repaglinide', 'nateglinide', 'chlorpropamide', 'glimepiride',
                          'acetohexamide', 'glipizide', 'glyburide', 'tolbutamide', 'pioglitazone',
                          'rosiglitazone', 'acarbose', 'miglitol', 'troglitazone', 'tolazamide',
                          'examide', 'citoglipton', 'insulin', 'glyburide-metformin', 'glipizide-metformin',
                          'glimepiride-pioglitazone', 'metformin-rosiglitazone', 'metformin-pioglitazone']
# Infer new schema by using your modify_domain_of_features function
# and the defined domain_change_features feature list
schema = modify_domain_of_features(domain_change_features, schema, 'metformin')
# Display new schema
tfdv.display_schema(schema)
# TEST CODE
# check that the domain of some features are now switched to `metformin`
print(f"Domain name of 'chlorpropamide': {tfdv.get_feature(schema, 'chlorpropamide').domain}")
print(f"Domain values of 'chlorpropamide': {tfdv.get_domain(schema, 'chlorpropamide').value}")
print(f"Domain name of 'repaglinide': {tfdv.get_feature(schema, 'repaglinide').domain}")
print(f"Domain values of 'repaglinide': {tfdv.get_domain(schema, 'repaglinide').value}")
print(f"Domain name of 'nateglinide': {tfdv.get_feature(schema, 'nateglinide').domain}")
print(f"Domain values of 'nateglinide': {tfdv.get_domain(schema, 'nateglinide').value}")
```
**Expected Output:**
```
Domain name of 'chlorpropamide': metformin
Domain values of 'chlorpropamide': ['Down', 'No', 'Steady', 'Up']
Domain name of 'repaglinide': metformin
Domain values of 'repaglinide': ['Down', 'No', 'Steady', 'Up']
Domain name of 'nateglinide': metformin
Domain values of 'nateglinide': ['Down', 'No', 'Steady', 'Up']
```
Let's do a final check of anomalies to see if this solved the issue.
```
calculate_and_display_anomalies(serving_stats, schema=schema)
```
You should now see the `metformin-pioglitazone` and `metformin-rosiglitazone` features dropped from the output anomalies.
<a name='ex-9'></a>
### Exercise 9: Detecting anomalies with environments
There is still one thing to address. The `readmitted` feature (which is the label column) showed up as an anomaly ('Column dropped'). Since labels are not expected in the serving data, let's tell TFDV to ignore this detected anomaly.
This requirement of introducing slight schema variations can be expressed by using [environments](https://www.tensorflow.org/tfx/data_validation/get_started#schema_environments). In particular, features in the schema can be associated with a set of environments using `default_environment`, `in_environment` and `not_in_environment`.
```
# All features are by default in both TRAINING and SERVING environments.
# Per-feature exclusions (not_in_environment) can then carve out exceptions.
schema.default_environment.append('TRAINING')
schema.default_environment.append('SERVING')
```
Complete the code below to exclude the `readmitted` feature from the `SERVING` environment.
To achieve this, you can use the [`tfdv.get_feature()`](https://www.tensorflow.org/tfx/data_validation/api_docs/python/tfdv/get_feature) function to get the `readmitted` feature from the inferred schema and use its `not_in_environment` attribute to specify that `readmitted` should be removed from the `SERVING` environment's schema. This **attribute is a list** so you will have to **append** the name of the environment that you wish to omit this feature for.
To be more explicit, given a feature you can do something like:
```python
feature.not_in_environment.append('NAME_OF_ENVIRONMENT')
```
The function `tfdv.get_feature` receives the following parameters:
- `schema`: The schema.
- `feature_path`: The path of the feature to obtain from the schema. In this case this is equal to the name of the feature.
```
### START CODE HERE
# Specify that 'readmitted' feature is not in SERVING environment.
# HINT: Append the 'SERVING' environment to the not_in_environment attribute of the feature
# FIX: filled the None placeholders — the label column is absent from the
# serving data, so it is excluded from the SERVING environment's schema.
tfdv.get_feature(schema, 'readmitted').not_in_environment.append('SERVING')

# HINT: Calculate anomalies with the validate_statistics function by using the serving statistics,
# inferred schema and the SERVING environment parameter.
serving_anomalies_with_env = tfdv.validate_statistics(serving_stats, schema, environment='SERVING')
### END CODE HERE
```
You should see "No anomalies found" by running the cell below.
```
# Display anomalies; expect "No anomalies found" once 'readmitted' is
# excluded from the SERVING environment.
tfdv.display_anomalies(serving_anomalies_with_env)
```
Now you have successfully addressed all anomaly-related issues!
<a name='7'></a>
## 7 - Check for Data Drift and Skew
During data validation, you also need to check for data drift and data skew between the training and serving data. You can do this by specifying the [skew_comparator and drift_comparator](https://www.tensorflow.org/tfx/data_validation/get_started#checking_data_skew_and_drift) in the schema.
Drift and skew is expressed in terms of [L-infinity distance](https://en.wikipedia.org/wiki/Chebyshev_distance) which evaluates the difference between vectors as the greatest of the differences along any coordinate dimension.
You can set the threshold distance so that you receive warnings when the drift is higher than is acceptable. Setting the correct distance is typically an iterative process requiring domain knowledge and experimentation.
Let's check for the skew in the **diabetesMed** feature and drift in the **payer_code** feature.
```
# Calculate skew for the diabetesMed feature
diabetes_med = tfdv.get_feature(schema, 'diabetesMed')
diabetes_med.skew_comparator.infinity_norm.threshold = 0.03 # domain knowledge helps to determine this threshold
# Calculate drift for the payer_code feature
payer_code = tfdv.get_feature(schema, 'payer_code')
payer_code.drift_comparator.infinity_norm.threshold = 0.03 # domain knowledge helps to determine this threshold
# Calculate anomalies: train vs previous (eval) statistics for drift,
# train vs serving statistics for skew
skew_drift_anomalies = tfdv.validate_statistics(train_stats, schema,
                                                previous_statistics=eval_stats,
                                                serving_statistics=serving_stats)
# Display anomalies
tfdv.display_anomalies(skew_drift_anomalies)
```
In both of these cases, the detected anomaly distance is not too far from the threshold value of `0.03`. For this exercise, let's accept this as within bounds (i.e. you can set the distance to something like `0.035` instead).
**However, if the anomaly truly indicates a skew and drift, then further investigation is necessary as this could have a direct impact on model performance.**
<a name='8'></a>
## 8 - Display Stats for Data Slices <a class="anchor" id="fourth-objective"></a>
Finally, you can [slice the dataset and calculate the statistics](https://www.tensorflow.org/tfx/data_validation/get_started#computing_statistics_over_slices_of_data) for each unique value of a feature. By default, TFDV computes statistics for the overall dataset in addition to the configured slices. Each slice is identified by a unique name which is set as the dataset name in the [DatasetFeatureStatistics](https://github.com/tensorflow/metadata/blob/master/tensorflow_metadata/proto/v0/statistics.proto#L43) protocol buffer. Generating and displaying statistics over different slices of data can help track model and anomaly metrics.
Let's first define a few helper functions to make our code in the exercise more neat.
```
def split_datasets(dataset_list):
    '''
    Wrap each dataset slice of a combined statistics proto in its own
    DatasetFeatureStatisticsList so every slice can be visualized on its own.
    Parameters:
        dataset_list: DatasetFeatureStatisticsList holding one or more slices
    Returns:
        datasets: list of single-slice DatasetFeatureStatisticsList protos
    '''
    sliced_protos = []
    for slice_stats in dataset_list.datasets:
        # One wrapper proto per slice, each containing exactly one dataset.
        wrapper = DatasetFeatureStatisticsList()
        wrapper.datasets.extend([slice_stats])
        sliced_protos.append(wrapper)
    return sliced_protos
def display_stats_at_index(index, datasets):
    '''
    Print the slice name and visualize the statistics stored at `index`.
    Parameters:
        index : position of the slice to display
        datasets: list of single-slice DatasetFeatureStatisticsList protos
    Returns:
        None; renders the statistics for the requested slice. A no-op when
        `index` is out of range (matching the original guard).
    '''
    # Guard clause: silently ignore out-of-range indices.
    if index >= len(datasets):
        return
    print(datasets[index].datasets[0].name)
    tfdv.visualize_statistics(datasets[index])
```
The function below returns a list of `DatasetFeatureStatisticsList` protocol buffers. As shown in the ungraded lab, the first one will be for `All Examples` followed by individual slices through the feature you specified.
To configure TFDV to generate statistics for dataset slices, you will use the function `tfdv.StatsOptions()` with the following 4 arguments:
- `schema`
- `slice_functions` passed as a list.
- `infer_type_from_schema` set to True.
- `feature_whitelist` set to the approved features.
Remember that `slice_functions` only work with [`generate_statistics_from_csv()`](https://www.tensorflow.org/tfx/data_validation/api_docs/python/tfdv/generate_statistics_from_csv) so you will need to convert the dataframe to CSV.
```
def sliced_stats_for_slice_fn(slice_fn, approved_cols, dataframe, schema):
    '''
    Generate per-slice statistics for `dataframe` using `slice_fn`.
    Parameters:
        slice_fn : slicing definition
        approved_cols: list of features to pass to the statistics options
        dataframe: pandas dataframe to slice
        schema: the schema
    Returns:
        slice_info_datasets: list of single-slice statistics protos
    '''
    # `slice_functions` are only honored by generate_statistics_from_csv,
    # so round-trip the dataframe through a CSV file first.
    csv_path = 'slice_sample.csv'
    dataframe.to_csv(csv_path)
    # Configure stats generation: schema-driven types, restricted feature set,
    # and the requested slicing function.
    stats_options = tfdv.StatsOptions(schema=schema,
                                      slice_functions=[slice_fn],
                                      infer_type_from_schema=True,
                                      feature_whitelist=approved_cols)
    combined_stats = tfdv.generate_statistics_from_csv(csv_path, stats_options=stats_options)
    # Break the combined proto apart so each slice can be inspected separately.
    return split_datasets(combined_stats)
```
With that, you can now use the helper functions to generate and visualize statistics for the sliced datasets.
```
# Build a slicer over `medical_specialty`; the value None means "create one
# slice per unique value of the feature".
slice_fn = slicing_util.get_feature_value_slicer(features={'medical_specialty': None})
# Generate stats for the sliced dataset
slice_datasets = sliced_stats_for_slice_fn(slice_fn, approved_cols, dataframe=train_df, schema=schema)
# Print the name of every slice (first entry is the overall dataset).
print(f'Statistics generated for:\n')
print('\n'.join([sliced.datasets[0].name for sliced in slice_datasets]))
# Display at index 10, which corresponds to the slice named `medical_specialty_Gastroenterology`
display_stats_at_index(10, slice_datasets)
```
If you are curious, try different slice indices to extract the group statistics. For instance, `index=5` corresponds to all `medical_specialty_Surgery-General` records. You can also try slicing through multiple features as shown in the ungraded lab.
Another challenge is to implement your own helper functions. For instance, you can make a `display_stats_for_slice_name()` function so you don't have to determine the index of a slice. If done correctly, you can just do `display_stats_for_slice_name('medical_specialty_Gastroenterology', slice_datasets)` and it will generate the same result as `display_stats_at_index(10, slice_datasets)`.
<a name='9'></a>
## 9 - Freeze the schema
Now that the schema has been reviewed, you will store the schema in a file in its "frozen" state. This can be used to validate incoming data once your application goes live to your users.
This is pretty straightforward using Tensorflow's `io` utils and TFDV's [`write_schema_text()`](https://www.tensorflow.org/tfx/data_validation/api_docs/python/tfdv/write_schema_text) function.
```
# Create the output directory (no error if it already exists).
OUTPUT_DIR = "output"
file_io.recursive_create_dir(OUTPUT_DIR)
# Use TensorFlow's text protobuf format (pbtxt) to store the schema.
schema_file = os.path.join(OUTPUT_DIR, 'schema.pbtxt')
# write_schema_text expects the schema proto and the output path.
tfdv.write_schema_text(schema, schema_file)
```
After submitting this assignment, you can click the Jupyter logo in the left upper corner of the screen to check the Jupyter filesystem. The `schema.pbtxt` file should be inside the `output` directory.
**Congratulations on finishing this week's assignment!** A lot of concepts were introduced, and now you should feel more familiar with using TFDV for inferring schemas, detecting anomalies, and other data-related tasks.
**Keep it up!**
| github_jupyter |
# CNTK Test Platform
Can compare the results of running onnx model on the CNTK and running hdf5 model on Keras.
```
import sys
# Make the onnx-keras converter package importable from the sibling directory.
sys.path.insert(0, "../onnx-keras/")
import frontend
from keras.models import load_model
import keras.layers as Klayers
from keras.models import Model
import numpy as np
import logging
# Root logger at INFO so converter progress messages are visible in the notebook.
logger = logging.getLogger()
logger.setLevel(logging.INFO)
logging.debug("Ensure debug outputs")
# Fixed-point float printing makes the output comparisons easier to eyeball.
np.set_printoptions(suppress=True, formatter={'float': '{:0.6f}'.format})
```
## Initialization
Set the path of models and the input size. Load the model. Or construct a model yourself.
```
# Configurations: paths of the Keras source model and the ONNX output.
# KerasFile = "/home/jiyuan/Work/playground/submodel1_0_256_0.97_0.92_270.hdf5"
# OnnxFile = "/home/jiyuan/Work/playground/model.onnx"
# KerasFile = "C:\\Users\\Joel Liu\\Work\\kneron_gesnet.hdf5"
KerasFile = "C:\\Users\\Joel Liu\\Work\\tiny-yolo-voc.hdf5"
OnnxFile = "C:\\Users\\Joel Liu\\Work\\model.onnx"
# Reload the frontend module so edits to ../onnx-keras/frontend.py are picked
# up without restarting the kernel.
import importlib
importlib.reload(frontend)
# Random NHWC test input; assumes the model takes 224x224 RGB images --
# TODO confirm against the loaded model's actual input shape.
rand_input = np.random.rand(1, 224, 224, 3).astype('float32')
# Load from file
k_model = load_model(KerasFile)
# Optional: truncate the model at a named layer for layer-by-layer debugging.
# layer_name = 'batch_normalization_1'
# k_model = Model(inputs=k_model.input, outputs=k_model.get_layer(layer_name).output)
#Construct testing model
# inputs = Klayers.Input(shape=(112,))
# x = Klayers.Reshape((1, 1, 112))(inputs)
# k_model = Model(inputs=inputs, outputs=x)
# k_model.compile(optimizer='rmsprop',
# loss='categorical_crossentropy',
# metrics=['accuracy'])
# # a = np.random.rand(1, 32).astype('float32')
# # b = np.random.rand(1, 32).astype('float32')
# # k_model.fit(x=a,y=b)
# k_model.summary()
```
## Conversion
Convert the model into onnx.
```
# ONNX part: convert the loaded Keras model to ONNX and write it to disk so
# CNTK can load it in the comparison step below.
converter = frontend.KerasFrontend()
converter.loadFromModel(k_model)
onnx_model = converter.convertToOnnx()
converter.saveToFile(OnnxFile)
```
## Comparison
The section below will compare the two results. However, there are still bugs in CNTK, so wrong results do not necessarily indicate that the ONNX model is wrong. The known bugs in CNTK are listed here.
1. CNTK cannot deal with epsilon other than 1e-5 in the BatchNormalization layer.
2. CNTK cannot deal with Reshape layers.
```
# Keras part: run the reference model on the random input (NHWC layout).
k_out = k_model.predict(rand_input)
# pre = k_out
# CNTK part: load the converted ONNX model and evaluate it on the same
# input, transposed to NCHW as CNTK expects.
import cntk as C
z = C.Function.load(OnnxFile, device=C.device.cpu(), format=C.ModelFormat.ONNX)
z_out = z.eval(np.transpose(rand_input, [0, 3, 1, 2]))
# Compare
print(converter.ops)
# 4-D Keras outputs are NHWC; transpose to NCHW so both tensors align.
if len(k_out.shape) == 4:
    k_out_t = np.transpose(k_out, [0, 3, 1, 2])
else:
    k_out_t = k_out
# if len(z_out.shape) != len(k_out.shape):
# z_out = z_out.reshape(np.shape(k_out.shape))
result = z_out - k_out_t
abs_result = np.absolute(result)
# Report any element-wise differences above the tolerance, largest first,
# plus the count and fraction of offending elements.
threshold = 0.01
if (abs_result > threshold).any():
    print("Wrong values (threshold: {}):".format(threshold))
    wv = abs_result[np.where(abs_result > threshold)]
    print(sorted(wv, reverse=True)[:10])
    print("Count: {} - {:.2f}%".format(len(wv), len(wv) / abs_result.size * 100))
    # print("Difference:")
    # print(result)
else:
    print("Test passed (threshold: {})".format(threshold))
# print("Input:")
# print(np.transpose(pre, [0, 3, 1, 2]))
# print("Keras output:")
# print(k_out_t)
# print("CNTK output:")
# print(z_out)
# k_model.get_layer('batch_normalization_1').get_weights()
# (-0.549246 - -0.291988)/(0.036777 + 1e-3)**0.5 * 2.524630 + -1.684543
# with open('C:\\Users\\Joel Liu\\Work\\model.txt', 'w') as f:
# f.write(str(onnx_model))
# f.close()
# print(onnx_model)
```
| github_jupyter |
```
import paltas
from paltas.Analysis import hierarchical_inference
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
import emcee
import corner
import numba
import os
np.random.seed(4)
```
# Running Hierarchical Inference on a Population of Strong Lenses
__Author:__ Sebastian Wagner-Carena
__Goals: Understand how to run hierarchical inference on the network outputs using `paltas`__
__If you have not already done so, you will have to install `tensorflow` to run the Analysis module tools in this notebook. This notebook will also take advantage of the package `emcee`, although you can use any sampler you like. For the plotting we will use `corner`.__
## Making our data
To best illustrate how the hierarchical inference tools work, we'll generate a set of artificial network outputs. However, the pipeline outlined here will work equally well with true network outputs. To do this we need to simulate a) the prior we use for training and b) the information content of the image. We will focus only on the SHMF normalization and therefore assume that the information content of the image is very low compared to the prior.
```
# Define the true population mean and its scatter
true_mean = 1e-3
true_scatter = 3e-4
# Per-image measurement noise: drawn uniformly so each lens carries a
# different (and large, relative to the population scatter) uncertainty.
image_information_scatter = np.random.uniform(low=4e-3,high=6e-3,size=100)
# Each image's measured mean = true mean + measurement noise + population scatter.
image_mean = true_mean + np.random.normal(size=100)*image_information_scatter + np.random.normal(size=100)*true_scatter
# This is the mean and scatter of the information in each image, but remember our network predicts a posterior
# which means we need to multiply our likelihood by the training prior.
train_mean = 2e-3
train_scatter = 1e-3
# Product of two Gaussians (image likelihood x training prior): the standard
# precision-weighted combination of means and variances.
network_means = (image_mean * train_scatter**2 + train_mean*image_information_scatter**2)/(train_scatter**2 + image_information_scatter**2)
network_scatter = np.sqrt((train_scatter**2 * image_information_scatter**2)/(train_scatter**2 + image_information_scatter**2))
```
Now we have the image information and the network outputs. The network outputs are dominated by the prior. Quickly, let's confirm that a) if we combined the image information we would get a likelihood consistent with the true mean and b) if we combine the network outputs we would get a likelihood consistent with the training mean.
```
# Start by combining all the image level information
plt.figure(figsize=(10,8))
colors = ['#66c2a5','#fc8d62','#8da0cb','#e78ac3']
x = np.linspace(-1e-3,7e-3,1000)
# Sum per-image log-pdfs: the product of independent Gaussian likelihoods
# becomes a sum in log space.
prod = np.zeros(len(x))
for i in range(100):
    prod += norm(loc = image_mean[i],scale=image_information_scatter[i]).logpdf(x)
# Normalize the max to 1 for visualization
prod -= np.max(prod)
plt.plot(x,np.exp(prod),label='Image Information',c=colors[0],lw=4)
# Now combine the network outputs the same way.
prod = np.zeros(len(x))
for i in range(100):
    prod += norm(loc = network_means[i],scale=network_scatter[i]).logpdf(x)
prod -= np.max(prod)
plt.plot(x,np.exp(prod),label='Network Information',c=colors[1],lw=4)
# Reference lines: the true population mean vs. the training prior mean.
plt.axvline(true_mean,label='True Mean',c=colors[2],lw=4,ls='--')
plt.axvline(train_mean,label='Training Mean',c=colors[3],lw=4,ls='--')
plt.legend(fontsize=20)
plt.xlabel(r'$\Sigma_\mathrm{sub}$',fontsize=20)
plt.xticks(fontsize=20)
plt.yticks([])
plt.show()
```
Note that because there is scatter in our true values, the combination we did above is not statistically correct, but it gives us a good idea of the challenges we face. The information about the population mean exists in the sample, but it is washed out by the prior in the posteriors our network is estimating.
## Hierarchical Inference
Now we can use our hierarchical inference tools to combine the network outputs and attempt to infer the population mean and scatter of the SHMF normalization.
All of the work gets done by `hierarchical_inference.ProbabilityClassAnalytical` and `emcee`. `ProbabilityClassAnalytical` needs to be initialized with the mean vector and covariance matrix defining the interim training distribution (this class assumed that every distribution being considered is a multivariate Gaussian, but there are other classes in `hierarchical_inference.py` that relax that assumption at the cost of computational time).
```
# Let's define a few of the parameters for our inference
n_lenses = 100
n_emcee_samps = int(1e4)
burnin = int(1e3)
chains_folder = 'notebook_data/'
backend_path = 'example_chains.h5'
chains_path = os.path.join(chains_folder,backend_path)
# Load the predictions for the mean and covariance for our model. We'll have to do a little reshaping
# here since the code expects an array of mean values and a precision (inverse covariance) matrix.
y_pred = network_means[:n_lenses].reshape((n_lenses,1))
prec_pred = 1/np.square(network_scatter[:n_lenses].reshape((n_lenses,1,1)))
# The interim training distribution.
mu_omega_i = np.array([train_mean])
cov_omega_i =np.diag(np.array([train_scatter])**2)
# We will want to initialize emcee near the correct values.
mu_omega = np.array([true_mean])
cov_omega =np.diag(np.array([true_scatter])**2)
# Hyperparameter vector is (population mean, log of the population std dev).
true_hyperparameters = np.concatenate([mu_omega,np.log(np.diag(np.sqrt(cov_omega)))])
# A prior function that mainly just bounds the uncertainty estimation.
@numba.njit()
def eval_func_omega(hyperparameters):
    """Uniform log-prior with hard bounds on (mean, log-sigma).

    Returns 0 when the SHMF normalization mean is non-negative and the
    log-scatter is above -12 (avoids singular covariance proposals);
    -inf otherwise.
    """
    in_bounds = (hyperparameters[0] >= 0) and (hyperparameters[1] >= -12)
    return 0 if in_bounds else -np.inf
# Initialize our class and then give it the network predictions. These are set to global variables
# in case you want to use pooling.
prob_class = hierarchical_inference.ProbabilityClassAnalytical(mu_omega_i,cov_omega_i,eval_func_omega)
prob_class.set_predictions(mu_pred_array_input=y_pred,prec_pred_array_input=prec_pred)
# Set a few of the parameters we will need to pass to emcee
n_walkers = 40
ndim = 2
# Generate an initial state around the true values (this helps with convergence for this example)
initial_std = np.array([5e-4,1])
cur_state = (np.random.rand(n_walkers, ndim)*2-1)*initial_std
cur_state += true_hyperparameters
# Persist the chain to disk so the run can be resumed or inspected later.
backend = emcee.backends.HDFBackend(chains_path)
sampler = emcee.EnsembleSampler(n_walkers, ndim,prob_class.log_post_omega,backend=backend)
sampler.run_mcmc(cur_state,n_emcee_samps,progress=True)
# Drop the burn-in and flatten (walkers, steps, dim) -> (samples, dim).
chain = sampler.chain[:,burnin:,:].reshape((-1,2))
```
Let's visualize the constraints using `corner`
```
# Axis labels for the two hyperparameters (population mean and log-scatter).
corner_param_print=[r'$\Sigma_\mathrm{sub,pop} \times 10^{3}$' + '\n' + r'$[\mathrm{kpc}^{-2}]$',
                    r'$\log \Sigma_\mathrm{sub,pop,\sigma} \times 10^{3}$' + '\n' + r'$[\mathrm{kpc}^{-2}]$']
fontsize = 20
color='#FFAA00'
truth_color = 'k'
hist_kwargs = {'density':True,'color':color,'lw':3}
# Corner plot of the flattened chain with 68%/95% contours; the true
# hyperparameters are overlaid as crosshairs.
corner.corner(chain,labels=corner_param_print,bins=20,show_titles=False,plot_datapoints=False,label_kwargs=dict(fontsize=fontsize),
              levels=[0.68,0.95],color=color,fill_contours=True,hist_kwargs=hist_kwargs,title_fmt='.2f',truths=true_hyperparameters,
              truth_color=truth_color,max_n_ticks=3)
plt.show()
```
You have an unbiased inference of the mean, and it's clear from this data that the constraint on the scatter is an upper limit (not surprising given the small scatter and the large uncertainty of the information we assigned to each data point).
| github_jupyter |
# The rubric of this project has 4 parts with (1,2,4,3) subparts and I will be writing -> R < Part > < Subpart > to show you all the parts (ex: R21 is part 2 subpart 1 i.e. dataset summary in data exploration)
# Self-Driving Car Engineer Nanodegree
## Deep Learning
## Project: Build a Traffic Sign Recognition Classifier
In this notebook, a template is provided for you to implement your functionality in stages, which is required to successfully complete this project. If additional code is required that cannot be included in the notebook, be sure that the Python code is successfully imported and included in your submission if necessary.
> **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the iPython Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to \n",
"**File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.
In addition to implementing code, there is a writeup to complete. The writeup should be completed in a separate file, which can be either a markdown file or a pdf document. There is a [write up template](https://github.com/udacity/CarND-Traffic-Sign-Classifier-Project/blob/master/writeup_template.md) that can be used to guide the writing process. Completing the code template and writeup template will cover all of the [rubric points](https://review.udacity.com/#!/rubrics/481/view) for this project.
The [rubric](https://review.udacity.com/#!/rubrics/481/view) contains "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. The stand out suggestions are optional. If you decide to pursue the "stand out suggestions", you can include the code in this Ipython notebook and also discuss the results in the writeup file.
>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. In addition, Markdown cells can be edited by typically double-clicking the cell to enter edit mode.
---
## Step 0: Load The Data
```
import os
# Show the working directory so the data paths below can be sanity-checked.
os.getcwd()
import pickle
# Peek at the raw training pickle. Use a context manager so the file handle
# is closed even if unpickling fails (the original left the handle open).
with open('/home/workspace/data/train.p', 'rb') as filereader:
    training_file = pickle.load(filereader)
training_file
# Load pickled data
import pickle
# TODO: Fill this in based on where you saved the training and testing data
#filereader=open('/home/workspace/data/train.p','rb')
#training_file = pickle.load(filereader)
#filereader2=open('/home/workspace/data/test.p','rb')
#testing_file = pickle.load(filereader2)
#filereader3=open('/home/workspace/data/valid.p','rb')
#validation_file = pickle.load(filereader3)
training_file = '/home/workspace/data/train.p'
validation_file='/home/workspace/data/valid.p'
testing_file = '/home/workspace/data/test.p'
# Each pickle is a dict with 'features' (images) and 'labels' (class ids).
with open(training_file, mode='rb') as f:
    train = pickle.load(f)
with open(validation_file, mode='rb') as f:
    valid = pickle.load(f)
with open(testing_file, mode='rb') as f:
    test = pickle.load(f)
# Unpack each split into image arrays (X) and label arrays (y).
X_train, y_train = train['features'], train['labels']
X_valid, y_valid = valid['features'], valid['labels']
X_test, y_test = test['features'], test['labels']
(X_train,y_train,X_test,y_test,X_valid,y_valid)
```
# R21
```
# Shape of a single sample, then of the full arrays, for each split.
X_train[0].shape,X_test[0].shape,X_valid[0].shape
X_train.shape,X_test.shape,X_valid.shape
```
---
## Step 1: Dataset Summary & Exploration
The pickled data is a dictionary with 4 key/value pairs:
- `'features'` is a 4D array containing raw pixel data of the traffic sign images, (num examples, width, height, channels).
- `'labels'` is a 1D array containing the label/class id of the traffic sign. The file `signnames.csv` contains id -> name mappings for each id.
- `'sizes'` is a list containing tuples, (width, height) representing the original width and height the image.
- `'coords'` is a list containing tuples, (x1, y1, x2, y2) representing coordinates of a bounding box around the sign in the image. **THESE COORDINATES ASSUME THE ORIGINAL IMAGE. THE PICKLED DATA CONTAINS RESIZED VERSIONS (32 by 32) OF THESE IMAGES**
Complete the basic data summary below. Use python, numpy and/or pandas methods to calculate the data summary rather than hard coding the results. For example, the [pandas shape method](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.shape.html) might be useful for calculating some of the summary results.
### Provide a Basic Summary of the Data Set Using Python, Numpy and/or Pandas
```
### Replace each question mark with the appropriate value.
### Use python, pandas or numpy methods rather than hard coding the results

# Number of training examples
n_train = X_train.shape[0]
# Number of validation examples
n_validation = X_valid.shape[0]
# Number of testing examples
n_test = X_test.shape[0]
# Shape of a single traffic sign image (height, width, channels)
image_shape = X_train[0].shape
# Number of unique classes/labels in the dataset
n_classes = len(set(y_train))
print("Number of training examples =", n_train)
# n_validation was computed but never reported in the original cell.
print("Number of validation examples =", n_validation)
print("Number of testing examples =", n_test)
print("Image data shape =", image_shape)
print("Number of classes =", n_classes)
```
### Include an exploratory visualization of the dataset
Visualize the German Traffic Signs Dataset using the pickled file(s). This is open ended, suggestions include: plotting traffic sign images, plotting the count of each sign, etc.
The [Matplotlib](http://matplotlib.org/) [examples](http://matplotlib.org/examples/index.html) and [gallery](http://matplotlib.org/gallery.html) pages are a great resource for doing visualizations in Python.
**NOTE:** It's recommended you start with something simple first. If you wish to do more, come back to it after you've completed the rest of the sections. It can be interesting to look at the distribution of classes in the training, validation and test set. Is the distribution the same? Are there more examples of some classes than others?
```
### Data exploration visualization code goes here.
### Feel free to use as many code cells as needed.
import matplotlib.pyplot as plt
# Visualizations will be shown in the notebook.
%matplotlib inline
import numpy as np
# Histogram of training labels, one bin per class: shows the class imbalance.
plt.hist(y_train,n_classes)
```
# the graphs show how many of each class are present in testing, training and validation dataset
# R22
```
def _plot_class_histogram(labels, num_bins):
    """Print per-class counts for `labels` and draw them as a bar chart."""
    hist, bins = np.histogram(labels, bins=num_bins)
    print(hist, bins)
    width = 0.8 * (bins[1] - bins[0])
    center = (bins[:-1] + bins[1:]) / 2
    plt.bar(center, hist, align='center', width=width)
    plt.show()

# Class distribution of the training, test and validation sets. The original
# cell repeated identical plotting code three times; it is now one helper.
_plot_class_histogram(y_train, n_classes)
_plot_class_histogram(y_test, n_classes)
_plot_class_histogram(y_valid, n_classes)

import random
# randrange excludes the upper bound; the original randint(0, len(X_train))
# could produce len(X_train) and index one past the end of the array.
index = random.randrange(len(X_train))
image = X_train[index].squeeze()
plt.figure(figsize=(1,1))
plt.imshow(image)
print(y_train[index])
```
----
## Step 2: Design and Test a Model Architecture
Design and implement a deep learning model that learns to recognize traffic signs. Train and test your model on the [German Traffic Sign Dataset](http://benchmark.ini.rub.de/?section=gtsrb&subsection=dataset).
The LeNet-5 implementation shown in the [classroom](https://classroom.udacity.com/nanodegrees/nd013/parts/fbf77062-5703-404e-b60c-95b78b2f3f9e/modules/6df7ae49-c61c-4bb2-a23e-6527e69209ec/lessons/601ae704-1035-4287-8b11-e2c2716217ad/concepts/d4aca031-508f-4e0b-b493-e7b706120f81) at the end of the CNN lesson is a solid starting point. You'll have to change the number of classes and possibly the preprocessing, but aside from that it's plug and play!
With the LeNet-5 solution from the lecture, you should expect a validation set accuracy of about 0.89. To meet specifications, the validation set accuracy will need to be at least 0.93. It is possible to get an even higher accuracy, but 0.93 is the minimum for a successful project submission.
There are various aspects to consider when thinking about this problem:
- Neural network architecture (is the network over or underfitting?)
- Play around preprocessing techniques (normalization, rgb to grayscale, etc)
- Number of examples per label (some have more than others).
- Generate fake data.
Here is an example of a [published baseline model on this problem](http://yann.lecun.com/exdb/publis/pdf/sermanet-ijcnn-11.pdf). It's not required to be familiar with the approach used in the paper but, it's good practice to try to read papers like these.
### Pre-process the Data Set (normalization, grayscale, etc.)
Minimally, the image data should be normalized so that the data has mean zero and equal variance. For image data, `(pixel - 128)/ 128` is a quick way to approximately normalize the data and can be used in this project.
Other pre-processing steps are optional. You can try different techniques to see if it improves performance.
Use the code cell (or multiple code cells, if necessary) to implement the first step of your project.
```
### Preprocess the data here. It is required to normalize the data. Other preprocessing steps could include
### converting to grayscale, etc.
### Feel free to use as many code cells as needed.
# Quick check of the suggested quick-normalization formula on the sample image:
#(pixel - 128)/ 128
(image.astype(float)-128)/128
```
# R31
```
def normalize(x):
    """Map raw pixel values (0-255) to approximately [-1, 1) as float32."""
    pixels = np.asarray(x, dtype=np.float32)
    return (pixels - 128.0) / 128.0
# Show a sample image and its label before normalization.
image = X_train[100].squeeze()
plt.figure(figsize=(1,1))
plt.imshow(image)
print(y_train[100])
# NOTE(review): only the training set is normalized here; the test and
# validation normalization calls are commented out -- confirm this is intended.
X_train=normalize(X_train)
#X_test=normalize(X_test)
#X_valid=normalize(X_valid)
```
### Model Architecture
# R32
```
### Define your architecture here.
### Feel free to use as many code cells as needed.
# Shuffle the training data once up front (also re-shuffled each epoch below).
from sklearn.utils import shuffle
X_train,y_train=shuffle(X_train,y_train)
import tensorflow as tf
# Training hyperparameters.
epochs=20
batch_size=128
def lenet(x):
    """LeNet-5 style network for 32x32x3 inputs, emitting 43 class logits.

    Conv(5x5,6) -> ReLU -> MaxPool -> Conv(5x5,16) -> ReLU -> MaxPool ->
    Flatten -> FC(120) -> Dropout -> ReLU -> FC(84) -> Dropout -> ReLU -> FC(43).
    Reads the module-level `keep_prob` placeholder for the dropout layers.
    """
    mu, sigma = 0, 0.1

    def _weight(shape):
        # Truncated-normal weight initialization shared by all layers.
        return tf.Variable(tf.truncated_normal(shape=shape, mean=mu, stddev=sigma))

    # Layer 1: 32x32x3 -> 28x28x6, pooled to 14x14x6.
    conv1 = tf.nn.conv2d(x, _weight((5, 5, 3, 6)), strides=[1, 1, 1, 1],
                         padding='VALID') + tf.Variable(tf.zeros(6))
    conv1 = tf.nn.relu(conv1)
    conv1 = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                           padding='VALID')

    # Layer 2: 14x14x6 -> 10x10x16, pooled to 5x5x16.
    conv2 = tf.nn.conv2d(conv1, _weight((5, 5, 6, 16)), strides=[1, 1, 1, 1],
                         padding='VALID') + tf.Variable(tf.zeros(16))
    conv2 = tf.nn.relu(conv2)
    conv2 = tf.nn.max_pool(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                           padding='VALID')

    # Flatten to 400 features, then three fully connected layers with
    # dropout applied before each of the first two ReLUs.
    flat = tf.contrib.layers.flatten(conv2)
    fc1 = tf.matmul(flat, _weight((400, 120))) + tf.Variable(tf.zeros(120))
    fc1 = tf.nn.relu(tf.nn.dropout(fc1, keep_prob))
    fc2 = tf.matmul(fc1, _weight((120, 84))) + tf.Variable(tf.zeros(84))
    fc2 = tf.nn.relu(tf.nn.dropout(fc2, keep_prob))
    # Output logits for 43 traffic-sign classes; softmax is applied by the loss.
    return tf.matmul(fc2, _weight((84, 43))) + tf.Variable(tf.zeros(43))
# Placeholders: NHWC image batch, integer labels, and the dropout keep probability.
x=tf.placeholder(tf.float32,(None,32,32,3))
y=tf.placeholder(tf.int32,(None))
y_onehot=tf.one_hot(y,43)
keep_prob=tf.placeholder(tf.float32)
rate=0.001
logits=lenet(x)
# Softmax cross-entropy loss averaged over the batch, minimized with Adam.
cross_entropy=tf.nn.softmax_cross_entropy_with_logits(labels=y_onehot,logits=logits)
loss=tf.reduce_mean(cross_entropy)
optimiser=tf.train.AdamOptimizer(learning_rate=rate)
training_operation=optimiser.minimize(loss)
# Accuracy: fraction of samples whose argmax logit matches the label.
correct_preds=tf.equal(tf.argmax(logits,1),tf.argmax(y_onehot,1))
accuracy_operation=tf.reduce_mean(tf.cast(correct_preds,tf.float32))
saver=tf.train.Saver()
def evaluate(xdata, ydata):
    """Return the model's accuracy over (xdata, ydata).

    Runs `accuracy_operation` batch by batch in the default session with
    dropout disabled (keep_prob=1.0) and returns the example-weighted mean.
    """
    session = tf.get_default_session()
    total = len(xdata)
    weighted_correct = 0.0
    for start in range(0, total, batch_size):
        batch_x = xdata[start:start + batch_size]
        batch_y = ydata[start:start + batch_size]
        batch_acc = session.run(accuracy_operation,
                                feed_dict={x: batch_x, y: batch_y, keep_prob: 1.0})
        # Weight each batch by its size so a short final batch does not
        # skew the overall average.
        weighted_correct += batch_acc * len(batch_x)
    return weighted_correct / total
```
# WAIT
# R33
```
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    num_examples=len(X_train)
    for i in range(epochs):
        # Re-shuffle every epoch so mini-batches differ between epochs.
        X_train,y_train=shuffle(X_train,y_train)
        for j in range(0,num_examples,batch_size):
            end=j+batch_size
            xbatch,ybatch=X_train[j:end],y_train[j:end]
            # Dropout is active (keep_prob=0.5) only during training.
            sess.run(training_operation,feed_dict={x:xbatch,y:ybatch,keep_prob:0.5})
        # Report validation accuracy at the end of each epoch.
        valid_acc=evaluate(X_valid,y_valid)
        #print('\n epoch-> '+i+' , valid_acc-> '+valid_acc+'\n')
        print()
        print("EPOCH {} ...".format(i+1))
        print("Validation Accuracy = {:.3f}".format(valid_acc))
        print()
    # Save the trained weights once all epochs have completed.
    saver.save(sess,'./lenet_V5')
    print('saved')
```
# R34
```
# Restore the most recent checkpoint and report accuracy on each split.
with tf.Session() as sess:
    saver.restore(sess, tf.train.latest_checkpoint('.'))
    test_accuracy = evaluate(X_test, y_test)
    print("Test Accuracy = {:.3f}".format(test_accuracy))
with tf.Session() as sess:
    saver.restore(sess, tf.train.latest_checkpoint('.'))
    train_accuracy = evaluate(X_train, y_train)
    print("Train Accuracy = {:.3f}".format(train_accuracy))
with tf.Session() as sess:
    saver.restore(sess, tf.train.latest_checkpoint('.'))
    valid_accuracy = evaluate(X_valid, y_valid)
    print("Valid Accuracy = {:.3f}".format(valid_accuracy))
```
# 92.9%
```
with tf.Session() as sess:
    saver.restore(sess, tf.train.latest_checkpoint('.'))
    # This cell evaluates the TEST split; the original stored the result in
    # `valid_accuracy` and printed a misleading "Valid Accuracy" label.
    test_accuracy = evaluate(X_test, y_test)
    print("Test Accuracy = {:.3f}".format(test_accuracy))
```
### Train, Validate and Test the Model
A validation set can be used to assess how well the model is performing. A low accuracy on the training and validation
sets imply underfitting. A high accuracy on the training set but low accuracy on the validation set implies overfitting.
```
### Train your model here.
### Calculate and report the accuracy on the training and validation set.
### Once a final model architecture is selected,
### the accuracy on the test set should be calculated and reported as well.
### Feel free to use as many code cells as needed.
```
---
## Step 3: Test a Model on New Images
To give yourself more insight into how your model is working, download at least five pictures of German traffic signs from the web and use your model to predict the traffic sign type.
You may find `signnames.csv` useful as it contains mappings from the class id (integer) to the actual sign name.
### Load and Output the Images
# R41
```
### Load the images and plot them here.
### Feel free to use as many code cells as needed.
# List the downloaded web images, dropping the notebook checkpoint folder.
oslist=os.listdir('WebImages')
oslist.remove('.ipynb_checkpoints')
oslist
import cv2
# Resize one sample image to the 32x32 network input size as a sanity check.
img=cv2.imread('WebImages/download.jpeg')
nimg = cv2.resize(img,(32,32))
#nimg.shape
#plt.imsave(nimg,'WebImages/img1_32.jpeg')
plt.imshow(nimg)
cv2.imwrite('WebImages/messigray.png',nimg)
# Resize every downloaded image to 32x32 and save into WebImages2/.
for i in oslist:
    img=cv2.imread('WebImages/'+i)
    nimg = cv2.resize(img,(32,32))
    # plt.imshow(nimg)
    cv2.imwrite('WebImages2/'+i+'.png',nimg)
oslist=os.listdir('WebImages2')
oslist.remove('.ipynb_checkpoints')
oslist
# Load the resized images into memory for prediction.
test_imgs=[]
for i in oslist:
    img=cv2.imread('WebImages2/'+i)
    test_imgs.append(img)
#test_imgs.remove('.ipynb_checkpoints/')
len(test_imgs)
# Map class ids to human-readable sign names via signnames.csv.
import pandas as pd
df=pd.read_csv('signnames.csv')
df.head(3)
signs_names=[]
for i in df['SignName']:
    signs_names.append(i)
len(signs_names)
```
### Predict the Sign Type for Each Image
```
### Run the predictions here and use the model to output the prediction for each image.
### Make sure to pre-process the images with the same pre-processing pipeline used earlier.
### Feel free to use as many code cells as needed.
```
# R42
```
# Normalize the five web images with the same pipeline used for training.
# BUG FIX: the original buffer was uint8, so the float results of normalize()
# (values in [-1, 1)) were truncated/wrapped on assignment and the network
# received garbage input. A float32 buffer preserves the normalized values.
test_data = np.zeros((5, 32, 32, 3), dtype=np.float32)
for i in range(5):
    test_data[i] = normalize(test_imgs[i])
with tf.Session() as sess:
    saver.restore(sess, './lenet_V5')
    # Predicted class id for each image, with dropout disabled.
    signs_classes = sess.run(tf.argmax(logits, 1), feed_dict={x: test_data, keep_prob: 1.0})
# Show each image with its predicted sign name.
figsize = (12,12)
plt.figure(figsize=figsize)
for i in range(5):
    plt.subplot(4, 4, i+1)
    plt.imshow(test_imgs[i])
    plt.title(signs_names[signs_classes[i]])
    plt.axis('off')
plt.show()
```
### Analyze Performance
### Calculate the accuracy for these 5 new images.
### For example, if the model predicted 1 out of 5 signs correctly, it's 20% accurate on these new images.
# 20% accurate — the low accuracy here is likely caused by the crude resizing of the web images.
### Output Top 5 Softmax Probabilities For Each Image Found on the Web
For each of the new images, print out the model's softmax probabilities to show the **certainty** of the model's predictions (limit the output to the top 5 probabilities for each image). [`tf.nn.top_k`](https://www.tensorflow.org/versions/r0.12/api_docs/python/nn.html#top_k) could prove helpful here.
The example below demonstrates how tf.nn.top_k can be used to find the top k predictions for each image.
`tf.nn.top_k` will return the values and indices (class ids) of the top k predictions. So if k=3, for each sign, it'll return the 3 largest probabilities (out of a possible 43) and the corresponding class ids.
Take this numpy array as an example. The values in the array represent predictions. The array contains softmax probabilities for five candidate images with six possible classes. `tf.nn.top_k` is used to choose the three classes with the highest probability:
```
# (5, 6) array
a = np.array([[ 0.24879643, 0.07032244, 0.12641572, 0.34763842, 0.07893497,
0.12789202],
[ 0.28086119, 0.27569815, 0.08594638, 0.0178669 , 0.18063401,
0.15899337],
[ 0.26076848, 0.23664738, 0.08020603, 0.07001922, 0.1134371 ,
0.23892179],
[ 0.11943333, 0.29198961, 0.02605103, 0.26234032, 0.1351348 ,
0.16505091],
[ 0.09561176, 0.34396535, 0.0643941 , 0.16240774, 0.24206137,
0.09155967]])
```
Running it through `sess.run(tf.nn.top_k(tf.constant(a), k=3))` produces:
```
TopKV2(values=array([[ 0.34763842, 0.24879643, 0.12789202],
[ 0.28086119, 0.27569815, 0.18063401],
[ 0.26076848, 0.23892179, 0.23664738],
[ 0.29198961, 0.26234032, 0.16505091],
[ 0.34396535, 0.24206137, 0.16240774]]), indices=array([[3, 0, 5],
[0, 1, 4],
[0, 5, 1],
[1, 3, 5],
[1, 4, 3]], dtype=int32))
```
Looking just at the first row we get `[ 0.34763842, 0.24879643, 0.12789202]`, you can confirm these are the 3 largest probabilities in `a`. You'll also notice `[3, 0, 5]` are the corresponding indices.
```
### Print out the top five softmax probabilities for the predictions on the German traffic sign images found on the web.
### Feel free to use as many code cells as needed.
```
# R43
```
# Restore the model and fetch the 5 highest-scoring classes (values + ids)
# for each web image. NOTE(review): top_k is applied to the raw logits, not
# to softmax output, so the printed values are scores, not probabilities.
with tf.Session() as sess:
    saver.restore(sess, './lenet_V5')
    topk5 = sess.run(tf.nn.top_k(logits, k=5), feed_dict={x: test_data,keep_prob:1.0})
topk5
# Print, per image, the top-5 scores, the class ids, and their sign names.
k=0
for i in topk5[1]:
    print(topk5[0][k])
    k+=1
    print(i)
    for j in i:
        print(signs_names[j],end=" -- ")
    print('\n\n')
```
### Project Writeup
Once you have completed the code implementation, document your results in a project writeup using this [template](https://github.com/udacity/CarND-Traffic-Sign-Classifier-Project/blob/master/writeup_template.md) as a guide. The writeup can be in a markdown or pdf file.
> **Note**: Once you have completed all of the code implementations and successfully answered each question above, you may finalize your work by exporting the iPython Notebook as an HTML document. You can do this by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.
---
## Step 4 (Optional): Visualize the Neural Network's State with Test Images
This Section is not required to complete but acts as an additional exercise for understanding the output of a neural network's weights. While neural networks can be a great learning device they are often referred to as a black box. We can understand what the weights of a neural network look like better by plotting their feature maps. After successfully training your neural network you can see what its feature maps look like by plotting the output of the network's weight layers in response to a test stimuli image. From these plotted feature maps, it's possible to see what characteristics of an image the network finds interesting. For a sign, maybe the inner network feature maps react with high activation to the sign's boundary outline or to the contrast in the sign's painted symbol.
Provided for you below is the function code that allows you to get the visualization output of any tensorflow weight layer you want. The inputs to the function should be a stimuli image, one used during training or a new one you provided, and then the tensorflow variable name that represents the layer's state during the training process, for instance if you wanted to see what the [LeNet lab's](https://classroom.udacity.com/nanodegrees/nd013/parts/fbf77062-5703-404e-b60c-95b78b2f3f9e/modules/6df7ae49-c61c-4bb2-a23e-6527e69209ec/lessons/601ae704-1035-4287-8b11-e2c2716217ad/concepts/d4aca031-508f-4e0b-b493-e7b706120f81) feature maps looked like for it's second convolutional layer you could enter conv2 as the tf_activation variable.
For an example of what feature map outputs look like, check out NVIDIA's results in their paper [End-to-End Deep Learning for Self-Driving Cars](https://devblogs.nvidia.com/parallelforall/deep-learning-self-driving-cars/) in the section Visualization of internal CNN State. NVIDIA was able to show that their network's inner weights had high activations to road boundary lines by comparing feature maps from an image with a clear path to one without. Try experimenting with a similar test to show that your trained network's weights are looking for interesting features, whether it's looking at differences in feature maps from images with or without a sign, or even what feature maps look like in a trained network vs a completely untrained one on the same sign image.
<figure>
<img src="visualize_cnn.png" width="380" alt="Combined Image" />
<figcaption>
<p></p>
<p style="text-align: center;"> Your output should look something like this (above)</p>
</figcaption>
</figure>
<p></p>
```
### Visualize your network's feature maps here.
### Feel free to use as many code cells as needed.
# image_input: the test image being fed into the network to produce the feature maps
# tf_activation: should be a tf variable name used during your training procedure that represents the calculated state of a specific weight layer
# activation_min/max: can be used to view the activation contrast in more detail, by default matplot sets min and max to the actual min and max values of the output
# plt_num: used to plot out multiple different weight feature map sets on the same block, just extend the plt number for each new feature map entry
def outputFeatureMap(image_input, tf_activation, activation_min=-1, activation_max=-1, plt_num=1):
    """Plot every feature map of a layer's activation for one stimuli image.

    Relies on the notebook globals `sess` (an active tf.Session), `x` (the
    network's input placeholder) and `plt` (matplotlib.pyplot).

    Args:
        image_input: preprocessed input batch fed to the network; its shape
            must match the `x` placeholder. Only activation[0] is plotted,
            so a batch of one image is assumed -- TODO confirm.
        tf_activation: the TF tensor/variable of the layer to visualize.
        activation_min, activation_max: optional vmin/vmax passed to imshow
            for contrast control; -1 means "use matplotlib's default".
        plt_num: matplotlib figure number so multiple calls can coexist.
    """
    activation = tf_activation.eval(session=sess, feed_dict={x: image_input})
    featuremaps = activation.shape[3]  # number of channels in the layer output
    plt.figure(plt_num, figsize=(15, 15))
    for featuremap in range(featuremaps):
        plt.subplot(6, 8, featuremap + 1)  # grid layout of the feature maps
        plt.title('FeatureMap ' + str(featuremap))
        # BUG FIX: the original used bitwise `&`, which binds tighter than
        # `!=` and parsed as `activation_min != (-1 & activation_max) != -1`,
        # so both bounds were almost never applied together. Use logical
        # `and` so vmin and vmax are honoured only when both are set.
        if activation_min != -1 and activation_max != -1:
            plt.imshow(activation[0, :, :, featuremap], interpolation="nearest", vmin=activation_min, vmax=activation_max, cmap="gray")
        elif activation_max != -1:
            plt.imshow(activation[0, :, :, featuremap], interpolation="nearest", vmax=activation_max, cmap="gray")
        elif activation_min != -1:
            plt.imshow(activation[0, :, :, featuremap], interpolation="nearest", vmin=activation_min, cmap="gray")
        else:
            plt.imshow(activation[0, :, :, featuremap], interpolation="nearest", cmap="gray")
```
| github_jupyter |
##### Copyright 2018 The TensorFlow Authors.
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# Image Classification using tf.keras
```
```
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/examples/blob/master/courses/udacity_intro_to_tensorflow_for_deep_learning/l05c03_exercise_flowers_with_data_augmentation.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/examples/blob/master/courses/udacity_intro_to_tensorflow_for_deep_learning/l05c03_exercise_flowers_with_data_augmentation.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
</table>
In this Colab you will classify images of flowers. You will build an image classifier using `tf.keras.Sequential` model and load data using `tf.keras.preprocessing.image.ImageDataGenerator`.
# Importing Packages
Let's start by importing required packages. **os** package is used to read files and directory structure, **numpy** is used to convert python list to numpy array and to perform required matrix operations and **matplotlib.pyplot** is used to plot the graph and display images in our training and validation data.
```
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import numpy as np
import glob
import shutil
import matplotlib.pyplot as plt
```
### TODO: Import TensorFlow and Keras Layers
In the cell below, import Tensorflow as `tf` and the Keras layers and models you will use to build your CNN. Also, import the `ImageDataGenerator` from Keras so that you can perform image augmentation.
```
#import packages
```
# Data Loading
In order to build our image classifier, we can begin by downloading the flowers dataset. We first need to download the archive version of the dataset and after the download we are storing it to "/tmp/" directory.
After downloading the dataset, we need to extract its contents.
```
_URL = "https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz"
zip_file = tf.keras.utils.get_file(origin=_URL,
fname="flower_photos.tgz",
extract=True)
base_dir = os.path.join(os.path.dirname(zip_file), 'flower_photos')
```
The dataset we downloaded contains images of 5 types of flowers:
1. Rose
2. Daisy
3. Dandelion
4. Sunflowers
5. Tulips
So, let's create the labels for these 5 classes:
```
classes = ['roses', 'daisy', 'dandelion', 'sunflowers', 'tulips']
```
Also, The dataset we have downloaded has following directory structure.
<pre style="font-size: 10.0pt; font-family: Arial; line-height: 2; letter-spacing: 1.0pt;" >
<b>flower_photos</b>
|__ <b>daisy</b>
|__ <b>dandelion</b>
|__ <b>roses</b>
|__ <b>sunflowers</b>
|__ <b>tulips</b>
</pre>
As you can see there are no folders containing training and validation data. Therefore, we will have to create our own training and validation set. Let's write some code that will do this.
The code below creates a `train` and a `val` folder each containing 5 folders (one for each type of flower). It then moves the images from the original folders to these new folders such that 80% of the images go to the training set and 20% of the images go into the validation set. In the end our directory will have the following structure:
<pre style="font-size: 10.0pt; font-family: Arial; line-height: 2; letter-spacing: 1.0pt;" >
<b>flower_photos</b>
|__ <b>daisy</b>
|__ <b>dandelion</b>
|__ <b>roses</b>
|__ <b>sunflowers</b>
|__ <b>tulips</b>
|__ <b>train</b>
|______ <b>daisy</b>: [1.jpg, 2.jpg, 3.jpg ....]
|______ <b>dandelion</b>: [1.jpg, 2.jpg, 3.jpg ....]
|______ <b>roses</b>: [1.jpg, 2.jpg, 3.jpg ....]
|______ <b>sunflowers</b>: [1.jpg, 2.jpg, 3.jpg ....]
|______ <b>tulips</b>: [1.jpg, 2.jpg, 3.jpg ....]
|__ <b>val</b>
|______ <b>daisy</b>: [507.jpg, 508.jpg, 509.jpg ....]
|______ <b>dandelion</b>: [719.jpg, 720.jpg, 721.jpg ....]
|______ <b>roses</b>: [514.jpg, 515.jpg, 516.jpg ....]
|______ <b>sunflowers</b>: [560.jpg, 561.jpg, 562.jpg .....]
|______ <b>tulips</b>: [640.jpg, 641.jpg, 642.jpg ....]
</pre>
Since we don't delete the original folders, they will still be in our `flower_photos` directory, but they will be empty. The code below also prints the total number of flower images we have for each type of flower.
```
# For each flower class: count its images, then move the first 80% into
# flower_photos/train/<class> and the remaining 20% into
# flower_photos/val/<class>, creating the target directories on demand.
for cl in classes:
    img_path = os.path.join(base_dir, cl)
    images = glob.glob(img_path + '/*.jpg')
    print("{}: {} Images".format(cl, len(images)))
    train, val = images[:round(len(images)*0.8)], images[round(len(images)*0.8):]
    for t in train:
        if not os.path.exists(os.path.join(base_dir, 'train', cl)):
            os.makedirs(os.path.join(base_dir, 'train', cl))
        shutil.move(t, os.path.join(base_dir, 'train', cl))
    for v in val:
        if not os.path.exists(os.path.join(base_dir, 'val', cl)):
            os.makedirs(os.path.join(base_dir, 'val', cl))
        shutil.move(v, os.path.join(base_dir, 'val', cl))
```
For convenience, let us set up the path for the training and validation sets
```
train_dir = os.path.join(base_dir, 'train')
val_dir = os.path.join(base_dir, 'val')
```
# Data Augmentation
Overfitting generally occurs when we have small number of training examples. One way to fix this problem is to augment our dataset so that it has sufficient number of training examples. Data augmentation takes the approach of generating more training data from existing training samples, by augmenting the samples via a number of random transformations that yield believable-looking images. The goal is that at training time, your model will never see the exact same picture twice. This helps expose the model to more aspects of the data and generalize better.
In **tf.keras** we can implement this using the same **ImageDataGenerator** class we used before. We can simply pass different transformations we would want to our dataset as a form of arguments and it will take care of applying it to the dataset during our training process.
## Experiment with Various Image Transformations
In this section you will get some practice doing some basic image transformations. Before we begin making transformations let's define the our `batch_size` and our image size. Remember that the input to our CNN are images of the same size. We therefore have to resize the images in our dataset to the same size.
### TODO: Set Batch and Image Size
In the cell below, create a `batch_size` of 100 images and set a value to `IMG_SHAPE` such that our training data consists of images with width of 150 pixels and height of 150 pixels.
```
batch_size =
IMG_SHAPE =
```
### TODO: Apply Random Horizontal Flip
In the cell below, use ImageDataGenerator to create a transformation that rescales the images by 255 and then applies a random horizontal flip. Then use the `.flow_from_directory` method to apply the above transformation to the images in our training set. Make sure you indicate the batch size, the path to the directory of the training images, the target size for the images, and to shuffle the images.
```
image_gen =
train_data_gen =
```
Let's take 1 sample image from our training examples and repeat it 5 times so that the augmentation can be applied to the same image 5 times over randomly, to see the augmentation in action.
```
# This function will plot images in the form of a grid with 1 row and 5 columns where images are placed in each column.
def plotImages(images_arr):
    """Display up to five images side by side in a single row of subplots."""
    fig, axis_grid = plt.subplots(1, 5, figsize=(20, 20))
    for picture, axis in zip(images_arr, axis_grid.flatten()):
        axis.imshow(picture)
    plt.tight_layout()
    plt.show()
augmented_images = [train_data_gen[0][0][0] for i in range(5)]
plotImages(augmented_images)
```
### TODO: Apply Random Rotation
In the cell below, use ImageDataGenerator to create a transformation that rescales the images by 255 and then applies a random 45 degree rotation. Then use the `.flow_from_directory` method to apply the above transformation to the images in our training set. Make sure you indicate the batch size, the path to the directory of the training images, the target size for the images, and to shuffle the images.
```
image_gen =
train_data_gen =
```
Let's take 1 sample image from our training examples and repeat it 5 times so that the augmentation can be applied to the same image 5 times over randomly, to see the augmentation in action.
```
augmented_images = [train_data_gen[0][0][0] for i in range(5)]
plotImages(augmented_images)
```
### TODO: Apply Random Zoom
In the cell below, use ImageDataGenerator to create a transformation that rescales the images by 255 and then applies a random zoom of up to 50%. Then use the `.flow_from_directory` method to apply the above transformation to the images in our training set. Make sure you indicate the batch size, the path to the directory of the training images, the target size for the images, and to shuffle the images.
```
image_gen =
train_data_gen =
```
Let's take 1 sample image from our training examples and repeat it 5 times so that the augmentation can be applied to the same image 5 times over randomly, to see the augmentation in action.
```
augmented_images = [train_data_gen[0][0][0] for i in range(5)]
plotImages(augmented_images)
```
### TODO: Put It All Together
In the cell below, use ImageDataGenerator to create a transformation that rescales the images by 255 and applies:
- random 45 degree rotation
- random zoom of up to 50%
- random horizontal flip
- width shift of 0.15
- height shift of 0.15
Then use the `.flow_from_directory` method to apply the above transformation to the images in our training set. Make sure you indicate the batch size, the path to the directory of the training images, the target size for the images,to shuffle the images, and to set the class mode to `sparse`.
```
image_gen_train =
train_data_gen =
```
Let's visualize how a single image would look like 5 different times, when we pass these augmentations randomly to our dataset.
```
augmented_images = [train_data_gen[0][0][0] for i in range(5)]
plotImages(augmented_images)
```
### TODO: Create a Data Generator for the Validation Set
Generally, we only apply data augmentation to our training examples. So, in the cell below, use ImageDataGenerator to create a transformation that only rescales the images by 255. Then use the `.flow_from_directory` method to apply the above transformation to the images in our validation set. Make sure you indicate the batch size, the path to the directory of the validation images, the target size for the images, and to set the class mode to `sparse`. Remember that it is not necessary to shuffle the images in the validation set.
```
image_gen_val =
val_data_gen =
```
# TODO: Create the CNN
In the cell below, create a convolutional neural network that consists of 3 convolution blocks. Each convolutional block contains a `Conv2D` layer followed by a max pool layer. The first convolutional block should have 16 filters, the second one should have 32 filters, and the third one should have 64 filters. All convolutional filters should be 3 x 3. All max pool layers should have a `pool_size` of `(2, 2)` .
After the 3 convolutional blocks you should have a flatten layer followed by a fully connected layer with 512 units. The CNN should output class probabilities based on 5 classes which is done by the **softmax** activation function. All other layers should use a **relu** activation function. You should also add Dropout layers with a probability of 20%, where appropriate.
```
model =
```
# TODO: Compile the Model
In the cell below, compile your model using the ADAM optimizer, the sparse cross entropy function as a loss function. We would also like to look at training and validation accuracy on each epoch as we train our network, so make sure you also pass the metrics argument.
```
# Compile the model
```
# TODO: Train the Model
In the cell below, train your model using the **fit_generator** function instead of the usual **fit** function. We have to use the `fit_generator` function because we are using the **ImageDataGenerator** class to generate batches of training and validation data for our model. Train the model for 80 epochs and make sure you use the proper parameters in the `fit_generator` function .
```
epochs =
history =
```
# TODO: Plot Training and Validation Graphs.
In the cell below, plot the training and validation accuracy/loss graphs.
```
acc =
val_acc =
loss =
val_loss =
epochs_range =
```
# TODO: Experiment with Different Parameters
So far you've created a CNN with 3 convolutional layers followed by a fully connected layer with 512 units. In the cells below create a new CNN with a different architecture. Feel free to experiment by changing as many parameters as you like. For example, you can add more convolutional layers, or more fully connected layers. You can also experiment with different filter sizes in your convolutional layers, different numbers of units in your fully connected layers, different dropout rates, etc... You can also experiment by performing image augmentation with more image transformations than we have seen so far. Take a look at the [ImageDataGenerator Documentation](https://keras.io/preprocessing/image/) to see a full list of all the available image transformations. For example, you can add shear transformations, or you can vary the brightness of the images, etc... Experiment as much as you can and compare the accuracy of your various models. Which parameters give you the best result?
```
```
| github_jupyter |
```
%load_ext autoreload
%autoreload 2
import os
import pandas as pd
import numpy as np
import sys
import pickle
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib import gridspec
import matplotlib.patches as patches
import matplotlib.colors as clr
import matplotlib.font_manager
from scipy.spatial.distance import pdist, squareform
from sklearn.metrics import silhouette_score
from scipy.cluster.hierarchy import fcluster, cophenet
import scipy.cluster.hierarchy as hierarchy
from scipy.cluster.hierarchy import dendrogram
import warnings
warnings.filterwarnings('ignore')
sys.path.append("scripts/")
import scripts.conf as conf
import scripts.oncotree
conf.config_params()
oncotree = scripts.oncotree.Oncotree()
os.makedirs("raw_plots",exist_ok=True)
os.makedirs("source_data",exist_ok=True)
from plot_blueprint import tracked_blueprint, load_saturation
conf.config_params(12)
```
# Figure 3a
```
path_saturation = os.path.join(conf.output_boostdm, 'saturation', 'prediction')
def plot_blueprint(gene, ttype):
    """Load saturation predictions for (gene, ttype) and render the tracked
    blueprint figure into ./raw_plots/ (also shown inline)."""
    df_codon = load_saturation(gene, ttype, path_saturation, shap_corrected=False)
    tracked_blueprint(gene, ttype, df_codon, outpath='./raw_plots/', show=True, plotlabel='feature tracks')
    plt.show()

# Figure 3a panels. NOTE(review): 'HC' and 'COREAD' are tumor-type codes
# from the oncotree configuration -- verify against scripts/oncotree.
plot_blueprint('CTNNB1', 'HC')
plot_blueprint('TP53', 'COREAD')
```
# Figure 3b
```
# Generate the pickled per-tumor-type prediction vectors for EGFR once;
# skip the (slow) preparation script if the pickle already exists.
gene="EGFR"
if not(os.path.exists(f"source_data/blueprint_info_{gene}.pickle")):
    ! python3 scripts/prepare_blueprint_across_ttypes.py
```
## Figure 3, blueprint EGFR across ttypes
```
# distance used for clustering
def mcc_score(x, y):
    """Matthews correlation coefficient extended to continuous values in [0, 1].

    The confusion-matrix counts are replaced by dot products of the
    (possibly fractional) score vectors:
    MCC = (TP*TN - FP*FN) / sqrt((TP+FP)(TP+FN)(TN+FP)(TN+FN)).
    """
    a = np.array(x)
    b = np.array(y)
    true_pos = np.dot(a, b)
    true_neg = np.dot(1 - a, 1 - b)
    false_pos = np.dot(1 - a, b)
    false_neg = np.dot(a, 1 - b)
    numerator = true_pos * true_neg - false_pos * false_neg
    denominator = np.sqrt((true_pos + false_pos) * (true_pos + false_neg)
                          * (true_neg + false_pos) * (true_neg + false_neg))
    return numerator / denominator
def mcc_dist(x, y):
    """Turn the MCC score into a dissimilarity, floored at zero."""
    score = mcc_score(x, y)
    return max(1 - score, 0)
def round_low(x):
    """Shift x downward by its absolute deviation from its 3-decimal rounding."""
    deviation = abs(x - np.round(x, 3))
    return x - deviation
def plot_cluster_domains_kde(df, dict_models, gene, output='./', dpi=150, invisible_heatmap=False, plot=True, points=True):
    """Cluster tumor types by their driver-prediction vectors and draw the figure.

    Layout: a top track with PFAM domains and degrons (ax0), a central
    position-vs-tumor-type scatter "heatmap" (ax2), and a Ward dendrogram on
    the right (ax3). Relies on the module-level globals `df_pfam` and
    `df_degrons` being loaded for the same gene.

    Args:
        df: DataFrame, one row per tumor type, one column per CDS position.
        dict_models: per-tumor-type model metadata (not used inside this body).
        gene: gene symbol; 'TP53' gets a taller figure and smaller tick labels.
        output: directory where the PNG/SVG files are written.
        dpi: resolution for the saved PNG.
        invisible_heatmap: hide the central panel (keep domains/dendrogram).
        plot: if False the figure is closed after saving.
        points: if False the red mutation dots are not drawn.
    """
    X = list(df.values)
    # Pairwise distances between tumor-type vectors with the MCC-derived metric.
    Y = pdist(X, metric=mcc_dist)
    if len(X) > 1:
        linkage = hierarchy.linkage(Y, method='ward')
    if gene == 'TP53':
        figsize = (13, 8)
    else:
        figsize = (11, 2)
    fig, ax = plt.subplots(figsize=figsize)
    gs = gridspec.GridSpec(figure=fig, ncols=2, nrows=2, width_ratios=[15,1], height_ratios=[1,4])
    gs.update(hspace=0.05, wspace=0.00)
    ax0 = plt.subplot(gs[0]) # counts_muts
    ax1 = plt.subplot(gs[1]) # null
    ax2 = plt.subplot(gs[2]) # heatmap
    ax3 = plt.subplot(gs[3]) # right dendogram
    # plot dendrogram and display cophenetic distances in ax3
    ax3.axis('off')
    if len(X) > 1:
        ddgram = dendrogram(linkage, truncate_mode=None,
                            labels=list(df.index),
                            color_threshold=0,
                            above_threshold_color='black',
                            orientation="right",
                            get_leaves=True,
                            no_plot=False, ax=ax3)
        # capture spread of y-values of dendrogram to adjust later
        pool = []
        for item in ddgram['icoord']:
            pool += item
        min_ = min(pool)
        max_ = max(pool)
        coph_diam = round_low(ddgram['dcoord'][-1][1])
    # Draw domains
    ax1.axis('off')
    ax0.axis('off')
    ax0.axhline(y=0.0, xmin=0, xmax=len(df.values[0]),
                ls="-", lw=2,color="black", alpha=0.5, zorder=1)
    fontsize = 8
    if len(df_pfam) >= 6:
        fontsize=6
    eps = 6
    # One grey rectangle (plus a black outline) per PFAM domain; positions
    # are scaled by 9 to match the df column coordinates -- presumably
    # 3 bases per codon x 3 alternate alleles; TODO confirm.
    for i, r in df_pfam.sort_values(by='START').iterrows():
        start_base = 9.0 * r['START']
        size_base = 9.0 * r['SIZE']
        rect1 = patches.Rectangle(xy=(start_base, -1), width=size_base,
                                  height=5.5, color="grey", alpha=1,
                                  clip_on=True, zorder=10)
        rect2 = patches.Rectangle(xy=(start_base, -1), width=size_base,
                                  height=5.5, color="black", fill=None, alpha=1,
                                  clip_on=True, zorder=10, linewidth=0.5)
        # Narrow domains get their label shifted alternately above/below the track.
        if (gene != 'TP53') and (size_base / df.shape[1] < 0.04):
            eps *= -1
            ax0.annotate(s=r["DOMAIN_NAME"], xy=(start_base + 27, eps),
                         fontsize=fontsize, zorder=10)
        else:
            ax0.annotate(s=r["DOMAIN_NAME"], xy=(start_base + 27, 0.0),
                         fontsize=fontsize, zorder=10)
        ax0.add_patch(rect1)
        ax0.add_patch(rect2)
    # NOTE(review): len(df.values[0]+50) equals len(df.values[0]) -- the +50
    # is applied inside len(), probably intended as len(...)+50; verify.
    ax0.set_xlim(0, len(df.values[0]+50))
    ax0.set_ylim(-7, 7)
    # Draw degrons
    if df_degrons.shape[0]>0:
        eps = 6
        # Same drawing logic as the PFAM track, but filled in green and
        # labelled with the manuscript identifier.
        for i, r in df_degrons.sort_values(by='START').iterrows():
            start_base = 9.0 * r['START']
            size_base = 9.0 * r['SIZE']
            rect1 = patches.Rectangle(xy=(start_base, -1), width=size_base,
                                      height=5.5, color="#1b9e77ff", alpha=1,
                                      clip_on=True, zorder=10)
            rect2 = patches.Rectangle(xy=(start_base, -1), width=size_base,
                                      height=5.5, color="black", fill=None, alpha=1,
                                      clip_on=True, zorder=10, linewidth=0.5)
            if (gene != 'TP53') and (size_base / df.shape[1] < 0.04):
                eps *= -1
                ax0.annotate(s=r["ID_MANUSCRIPT"], xy=(start_base + 27, eps),
                             fontsize=fontsize, zorder=10)
            else:
                ax0.annotate(s=r["ID_MANUSCRIPT"], xy=(start_base + 27, 0.0),
                             fontsize=fontsize, zorder=10)
            ax0.add_patch(rect1)
            ax0.add_patch(rect2)
        ax0.set_xlim(0, len(df.values[0]+50))
        ax0.set_ylim(-7, 7)
    # Heatmap
    y_values, x_values = [], []
    # Row order follows the dendrogram leaves (reversed) when clustering ran.
    if len(X) > 1:
        labels = [str(ttype) for ttype in ddgram["ivl"][::-1]]
    else:
        labels = [str(ttype) for ttype in df.index]
    if len(X) > 1:
        values = [np.array(df.loc[label].values) for label in labels]
        # Vertical spacing chosen to align rows with the dendrogram leaves.
        scaling_factor = (max_ - min_) / (len(X) - 1)
    else:
        values = [df.iloc[0].values]
        scaling_factor = 1
        max_ = 0
    j = 0
    # Collect an (x, y) point for every position with a positive prediction.
    for array in values:
        for v in range(0, len(array)):
            if array[v] > 0:
                y_values.append(max_ + j * scaling_factor) # scaling_factor
                x_values.append(v)
        j-=1
    if len(X) > 1:
        eps = 0.5
    else:
        eps = 0.05
    dots="no_dots"
    if points:
        dots="dots"
        # Vertical jitter keeps dense rows readable.
        ax2.scatter(x_values, [y + np.random.uniform(-eps, eps) for y in y_values], color="#cc0000", s=3.5, alpha=0.15)
    # separating horizontal lines
    ax2.spines['right'].set_visible(False)
    ax2.spines['top'].set_visible(False)
    ax2.spines['left'].set_visible(False)
    ax2.set_xlabel("Amino acid position",fontsize=12)
    ax2.set_ylabel("Tumor type",fontsize=12,rotation=90)
    # Convert CDS-vector length back to amino acids for the x tick labels.
    len_cds = (df.columns.values[-1] + 1)
    len_aa = len_cds / 9
    ax2.set_xticks([x for x in np.linspace(0, len_cds, num=10, endpoint=True)])
    ax2.set_xticklabels([str(int(x)) for x in np.linspace(0, len_aa, num=10, endpoint=True)])
    if len(X) > 1:
        _ = ax2.set_yticks(np.linspace(min_, stop=max_, num=len(labels)))
        _ = ax2.set_yticklabels(labels[::-1], rotation=0, fontsize=10)
        h = ax2.hlines(np.linspace(min_, stop=max_, num=len(labels)), xmin=0, xmax=len_cds, alpha=0.3)
    else:
        _ = ax2.set_yticks(np.linspace(0, stop=len(X)+1, num=len(labels)))
        _ = ax2.set_yticklabels(labels, rotation=0, fontsize=10)
        h = ax2.hlines(np.linspace(0, stop=len(X)+1, num=len(labels)), xmin=0, xmax=len_cds, alpha=0.3)
    h.set_linewidth(0.5)
    ax2.set_xlim(0,len_cds)
    if gene == "TP53":
        ax2.tick_params(axis='y', labelsize=8.5, pad=0.25, width=0.5, length=1.5)
    else:
        ax2.tick_params(axis='both', labelsize=10, pad=0.25, width=0.5, length=1.5)
    if invisible_heatmap:
        ax2.set_visible(False)
    title = f'{gene}'
    ax0.set_title(title, fontsize=14)
    if len(X) > 1:
        ax3.set_ylim(ax2.get_ylim())
    else:
        ax2.set_ylim(-0.5, 0.5)
    plt.savefig(os.path.join(output, f'{gene}_domains_{dots}.{str(dpi)}dpi.png'), dpi=dpi, bbox_inches='tight')
    plt.savefig(os.path.join(output, f'{gene}_domains_{dots}.{str(dpi)}dpi.svg'), dpi=dpi, bbox_inches='tight')
    if not plot:
        plt.close(fig)
def create_saturation_table_reformat(pair_vectors):
    """Build a per-tumor-type saturation table from prediction vectors.

    pair_vectors maps (ttype, gene) -> (flag, array-like); every entry is
    expected to refer to the same gene. Returns a DataFrame with one row per
    tumor type (shorter rows padded with 0.0) and a dict mapping each tumor
    type to a (flag, None) pair.
    """
    data_rows, ttypes, model_flags = [], [], []
    for (ttype, _gene), (flag, vector) in pair_vectors.items():
        data_rows.append(list(vector))
        ttypes.append(ttype)
        model_flags.append((flag, None))
    table = pd.DataFrame(data_rows)
    table.fillna(0.0, inplace=True)
    table.index = ttypes
    return table, dict(zip(ttypes, model_flags))
def plot_clustering_reformat(gene, output, res, plot=False, dpi=150, invisible_heatmap=False, points=True):
    """Assemble the saturation table for `gene` and draw its cross-tumor-type plot.

    res maps (ttype, gene) -> (flag, array-like). Returns the assembled
    DataFrame, or None (implicitly) when no specific tumor-type model exists.
    """
    # Keep only this gene's entries, excluding the aggregated "general"
    # cohorts so the figure shows specific tumor types only.
    pair_vectors = {}
    for ttype, gene_q in res:
        if gene == gene_q and ttype not in ['SOLID', 'NON_SOLID', 'CANCER', 'LUNG_CANCER']: # do not display general models
            pair_vectors[(ttype, gene)] = res[(ttype, gene)]
    if len(pair_vectors) > 0:
        df, dict_models = create_saturation_table_reformat(pair_vectors)
        # plot_cluster_domains_kde has no return statement, so `x` is None/unused.
        x = plot_cluster_domains_kde(df, dict_models, gene, plot=plot,
                                     output=output, dpi=dpi,
                                     invisible_heatmap=invisible_heatmap,points=points)
        if plot:
            plt.show()
        return df
```
### Generate Plots
```
# Load the degron and PFAM-domain annotations for the gene, plus the pickled
# per-tumor-type prediction vectors, then render the cross-tumor-type
# blueprint figure at 600 dpi into raw_plots/.
gene = "EGFR"
df_degrons = pd.read_csv(f"source_data/annotated_degrons_{gene}.tsv",sep="\t")
df_pfam = pd.read_csv(f"source_data/PFAM_domains_{gene}.tsv",sep="\t")
with open(f"source_data/blueprint_info_{gene}.pickle",'rb') as f:
    all_pred_specific=pickle.load(f)
_ = plot_clustering_reformat(gene, 'raw_plots', all_pred_specific, dpi=600, plot=True)
```
| github_jupyter |
```
import os
# Install java
! apt-get update -qq
! apt-get install -y openjdk-8-jdk-headless -qq > /dev/null
os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64"
os.environ["PATH"] = os.environ["JAVA_HOME"] + "/bin:" + os.environ["PATH"]
! java -version
# Install pyspark
! pip install --ignore-installed pyspark==2.4.4
# Install Spark NLP
! pip install --ignore-installed spark-nlp
! wget -N https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/resources/en/sarcasm/train-balanced-sarcasm.csv -P /tmp
import os
import sys
import time
import warnings
warnings.filterwarnings("ignore")
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import numpy as np
import pandas as pd
from pyspark.sql import SparkSession
# Maven-style coordinates (group:artifact:version) resolved by Spark on
# session start-up.
# Bug fix: the original coordinate contained a stray space
# ("spark-nlp: 2.5.5"), which makes the dependency unresolvable.
packages = [
    'JohnSnowLabs:spark-nlp:2.5.5'
]
spark = SparkSession \
    .builder \
    .appName("ML SQL session") \
    .config('spark.jars.packages', ','.join(packages)) \
    .getOrCreate()
import sparknlp
print("Spark NLP version: ", sparknlp.version())
print("Apache Spark version: ", spark.version)
from pyspark.sql import SQLContext
# SQLContext kept for the sql.sql(...) queries below (Spark 2.x API).
sql = SQLContext(spark)
# Read the CSV with a header row and let Spark infer the column types.
trainBalancedSarcasmDF = spark.read.option("header", True).option("inferSchema", True) \
  .csv("/tmp/train-balanced-sarcasm.csv")
trainBalancedSarcasmDF.printSchema()
# Let's create a temp view (table) for our SQL queries
trainBalancedSarcasmDF.createOrReplaceTempView('sarcasm')
sql.sql('SELECT COUNT(*) FROM sarcasm').collect()
# Modelling frame: label + parent comment and comment joined with a newline.
# Limited to 10k non-null rows to keep the demo fast.
df = sql.sql('''
select label, concat(parent_comment,"\n",comment) as comment
from sarcasm
where comment is not null and parent_comment is not null limit 10000''')
print(type(df))
df.printSchema()
print('rows', df.count())
df.show()
from sparknlp.annotator import *
from sparknlp.common import *
from sparknlp.base import *
from pyspark.ml import Pipeline
# Spark NLP annotator chain: raw text -> document -> sentences -> tokens.
document_assembler = DocumentAssembler() \
    .setInputCol("comment") \
    .setOutputCol("document")
sentence_detector = SentenceDetector() \
    .setInputCols(["document"]) \
    .setOutputCol("sentence") \
    .setUseAbbreviations(True)
tokenizer = Tokenizer() \
    .setInputCols(["sentence"]) \
    .setOutputCol("token")
nlp_pipeline = Pipeline(stages=[document_assembler, sentence_detector, tokenizer])
nlp_model = nlp_pipeline.fit(df)
processed = nlp_model.transform(df)
processed.show()
# 70/30 split, seeded for reproducibility.
train, test = processed.randomSplit(weights=[0.7, 0.3], seed=123)
print(train.count())
print(test.count())
# Pretrained GloVe embeddings add an "embeddings" entry per token.
glove = WordEmbeddingsModel.pretrained()
train_featurized = glove.transform(train)
train_featurized.show()
test_featurized = glove.transform(test)
test_featurized.show()
def get_features(row):
    """Stack the per-token embedding vectors of one row into a 2-D ndarray."""
    return np.array([token['embeddings'] for token in row])
def build_data(df, chunks=10):
    """Collect a Spark DataFrame into local NumPy feature/label arrays.

    The frame is split into `chunks` random parts that are collected one at
    a time, so the driver never has to hold every Row object at once.

    Parameters
    ----------
    df : pyspark.sql.DataFrame
        Must expose an "embeddings" column (list of token embeddings) and a
        "label" column.
    chunks : int
        Number of random splits to collect sequentially.

    Returns
    -------
    (np.ndarray, np.ndarray)
        Per-row embedding matrices and the matching labels.
    """
    x_train = []
    y_train = []
    row_count = df.count()
    i = 0
    # Fix: the original rebound the `chunks` parameter to the list of splits;
    # use a distinct name so the parameter keeps its meaning throughout.
    splits = df.randomSplit(weights=[1 / chunks] * chunks)
    for split in splits:
        for row in split.collect():
            if i % 1000 == 0:
                # Lightweight progress indicator for long collections.
                print('row {} / {} ({:.1f} %)'.format(i, row_count, 100 * i / row_count))
            x_train.append(get_features(row['embeddings']))
            y_train.append(row['label'])
            i += 1
    return np.array(x_train), np.array(y_train)
x_train, y_train = build_data(train_featurized)
x_test, y_test = build_data(test_featurized)
spark.stop()
print('Train Labels:\n', pd.Series(y_train).value_counts())
print('Test Labels:\n', pd.Series(y_test).value_counts())
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.layers import Embedding
from keras.layers import Conv1D, GlobalMaxPooling1D
# set parameters for our model:
maxlen = 100 #max 50 words per article
batch_size = 32 #size of the batch
filters = 50 #dimension of filters for the convolutional layer
kernel_size = 3 #size of the kernel used in the convolutional layer
hidden_dims = 250 #dimension of the hidden layer
epochs = 5 #number of training epochs
x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
x_test = sequence.pad_sequences(x_test, maxlen=maxlen)
print('Build model...')
model = Sequential()
# we add a Convolution1D, which will learn filters
# word group filters of size filter_length:
model.add(Conv1D(filters,
kernel_size,
padding='valid',
activation='relu',
strides=1))
# we use max pooling:
model.add(GlobalMaxPooling1D())
# We add a vanilla hidden layer:
model.add(Dense(hidden_dims))
model.add(Dropout(0.2))
model.add(Activation('relu'))
# We project onto a single unit output layer, and squash it with a sigmoid:
model.add(Dense(1))
model.add(Activation('sigmoid'))
from keras import backend as K
model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy','mae'])
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
validation_data=(x_test, y_test))
from IPython.display import Image
from keras.utils.vis_utils import model_to_dot
dot = model_to_dot(model)
Image(dot.create_png())
```
| github_jupyter |
### Q1. What is a probability distribution, exactly? If the values are meant to be random, how can you predict them at all?
```
# Ans : A probability distribution describes how a random variable is distributed,it tells us which values a random
# variable is most likely to take on and which values are less likely.Based on the previous data and the
#         occurrences of the random event, we can predict the outcome in terms of probabilities.
```
### Q2. Is there a distinction between true random numbers and pseudo-random numbers, if there is one? Why are the latter considered “good enough”?
```
# Ans : The difference between true random number(TRN) and pseudo-random number(PRN), is that TRN's are unpredictable
# physical values means (like atmospheric noise), and PRN are generated using mathematical algorithms (completely
# computer-generated).The PRN's are good enough as they follow a uniform distribution and the seed for generating
# PRN is unpredictable and unknown.
```
### Q3. What are the two main factors that influence the behaviour of a "normal" probability distribution?
```
# Ans : A normal distribution is determined by two parameters the mean and the variance. A normal distribution with a
# mean of 0 and a standard deviation of 1 is called a standard normal distribution.
```
### Q4. Provide a real-life example of a normal distribution.
```
# Ans : The heights of adult people are a good real-life example of a normal distribution: most values
#       cluster around the mean, with symmetric tails. (Note: a single fair die roll is uniformly, not
#       normally, distributed, though the sum of many rolls is approximately normal.)
```
### Q5. In the short term, how can you expect a probability distribution to behave? What do you think will happen as the number of trials grows?
```
# Ans : When the number of trials is small, the variance will be large. As the number of trials increases, the variance will reduce.
```
### Q6. What kind of object can be shuffled by using random.shuffle?
```
# Ans : random.shuffle can be used to shuffle a mutable sequence in place, such as a list object
```
### Q7. Describe the math package's general categories of functions.
```
# Ans : The general categories of math package are
# 1) Trigonometric functions
# 2)Quadratic functions
# 3)Exponential functions
# 4)Hyperbolic functions
# 5)Periodic functions
# 6)Arithmetic functions
#        7)Logarithmic functions
# 8)Conversions to Integer
```
### Q8. What is the relationship between exponentiation and logarithms?
```
# Ans : The exponential function is given by ƒ(x) = ex, whereas the logarithmic function is given by g(x) = ln x, and
# former is the inverse of the latter.
```
### Q9. What are the three logarithmic functions that Python supports?
```
# Ans : 1)log2(x) - logarithmic value of x to base 2
# 2)log10(x) - logarithmic value of x to base 10
# 3)log(x,base) - logarithmic value of x to base. If only first parameter is given , it computes its
#                        value to base e, the natural logarithm
# 4)log1p(x) - natural logarithm (base e) value of 1+x
```
| github_jupyter |
```
%config IPCompleter.greedy=True
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import geopandas as gpd
import math
import numpy as np
import json
import seaborn as sns
import folium
import os
from shapely.geometry import Point, Polygon
from descartes import PolygonPatch
from mpl_toolkits.axes_grid1 import AxesGrid
from matplotlib.offsetbox import AnchoredText
```
Lets have a look at the model grid data contained in the Area Peril dictionary file.
Note that the dictionary is only meta-data, and not required for model execution.
```
# Area-peril dictionary: metadata mapping each AREA_PERIL_ID to grid coordinates.
area_peril_dictionary = pd.read_csv("../keys_data/PiWind/areaperil_dict.csv")
area_peril_dictionary.head()
```
Lets plot the area peril cells on a map of the UK. For this model, the area perils are a simple uniform grid in a square.
```
# Centre the map on the PiWind model domain (around Rutland, UK).
m = folium.Map(location=[ 52.737027, -0.914618], zoom_start=11, tiles='cartodbpositron')
area_peril_dictionary['lat']=area_peril_dictionary['LAT1']
area_peril_dictionary['lon']=area_peril_dictionary['LON1']
# The area perils form a uniform square grid, so cell size follows from the
# coordinate extremes and the number of cells per side.
num_cells = area_peril_dictionary.lat.count()
num_cells_per_side = math.sqrt(num_cells)
cell_size_lat = (max(area_peril_dictionary.lat) - min(area_peril_dictionary.lat)) / (num_cells_per_side - 1)
cell_size_lon = (max(area_peril_dictionary.lon) - min(area_peril_dictionary.lon)) / (num_cells_per_side - 1)
# Loop-invariant values hoisted out of the per-cell loop (the original
# recreated them on every iteration).
crs = 'epsg:4326'
d = {'Description': ['All']}
for i, row in area_peril_dictionary.iterrows():
    # Square cell polygon anchored at the cell's south-west corner.
    geometry = [Polygon([
        (row.lon, row.lat),
        (row.lon, row.lat + cell_size_lat),
        (row.lon + cell_size_lon, row.lat + cell_size_lat),
        (row.lon + cell_size_lon, row.lat)])]
    df = pd.DataFrame(data=d)
    gdf = gpd.GeoDataFrame(df, crs=crs, geometry=geometry)
    folium.GeoJson(gdf).add_to(m)
m.save("piwind_extent_map.html")
%%HTML
<iframe width="100%" height=350 src="piwind_extent_map.html"></iframe>
```
Lets have a look at the data contained in the Intensity Bin dictionary file.
Note that the dictionary is only meta-data, and not required for model execution.
```
# Intensity bin dictionary: maps bin_index to the hazard intensity it represents.
intensity_bin_dictionary = pd.read_csv("../model_data/PiWind/intensity_bin_dict.csv")
intensity_bin_dictionary.head()
```
Lets have a look at the data contained in the footprint file.
```
# Event footprints: per (event, area peril) hazard intensity bin probabilities.
footprints = pd.read_csv("../model_data/PiWind/footprint.csv")
footprints.head()
```
Lets visualize the first 5 event footprints.
```
# Encode each AREA_PERIL_ID (1..100) as a "row-col" label on the 10x10 grid.
area_peril_dictionary['gridcell'] = area_peril_dictionary['AREA_PERIL_ID'].apply(
    lambda ap: str(int((ap-1)/10)+1)+"-"+str(ap-(int((ap-1)/10))*10))
# Join footprints with their intensity values and grid-cell metadata.
footprints_with_hazard = footprints.merge(
    intensity_bin_dictionary, how='inner',
    left_on='intensity_bin_id', right_on='bin_index').merge(
    area_peril_dictionary, how='inner',
    left_on='areaperil_id', right_on='AREA_PERIL_ID')
# Restrict to the windstorm peril ('WTC') and coverage type 1.
footprints_with_hazard = footprints_with_hazard[footprints_with_hazard['PERIL_ID']=='WTC']
footprints_with_hazard = footprints_with_hazard[footprints_with_hazard['COVERAGE_TYPE']==1]
fig = plt.figure(figsize=(20,10))
# One panel per event, all sharing a single colour bar on the right.
grid = AxesGrid(fig, 111,
                nrows_ncols=(1, 5),
                axes_pad=0.05,
                share_all=True,
                label_mode="L",
                cbar_location="right",
                cbar_mode="single",
                )
# Common colour scale across all five events.
vmin = min(footprints_with_hazard.interpolation)
vmax = max(footprints_with_hazard.interpolation)
for idx, ax in enumerate(grid):
    a = np.zeros([10, 10])
    # Fill the 10x10 intensity matrix for event idx+1; the 10-int(i) flip
    # puts grid row 1 at the bottom of the rendered image.
    for __, row in footprints_with_hazard[footprints_with_hazard.event_id == idx+1].iterrows():
        i, j = row.gridcell.split('-')
        a[10-int(i), int(j)-1] = row.interpolation
    im = ax.imshow(a, cmap=plt.cm.get_cmap('Blues'), vmin=vmin, vmax=vmax,
               extent=(
                   min(area_peril_dictionary.lon), max(area_peril_dictionary.lon),
                   min(area_peril_dictionary.lat), max(area_peril_dictionary.lat)))
    ax.set_xlabel("longitude")
    ax.set_ylabel("latitude")
    # Event label in the top-left corner of each panel.
    at = AnchoredText(
        "Event ID = {}".format(idx + 1),
        prop=dict(size=8),
        frameon=True,
        loc=2,
    )
    at.patch.set_boxstyle("round,pad=0.,rounding_size=0.2")
    ax.add_artist(at)
# Single shared colour bar with an intensity label.
grid[0].cax.colorbar(im)
cax = grid.cbar_axes[0]
axis = cax.axis[cax.orientation]
axis.label.set_text("Intensity - Peak gust (mph)")
plt.show()
```
Lets have a look at the data contained in the Damage Bin dictionary file.
Note that the dictionary is required for model execution.
```
# Damage bin dictionary: maps bin_index to a damage ratio (required at run time).
damage_bin_dictionary = pd.read_csv("../model_data/PiWind/damage_bin_dict.csv")
damage_bin_dictionary.head()
```
Lets have a look at the data contained in the Vulnerability file.
```
# Vulnerability data: P(damage bin | intensity bin) per vulnerability curve.
vulnerabilities = pd.read_csv("../model_data/PiWind/vulnerability.csv")
vulnerabilities.head()
```
The model has separate vulnerability curves for Residential, Commercial and Industrial occupancies.
Lets visualise these curves.
```
# Join vulnerabilities with the intensity and damage bin dictionaries so both
# axes can be expressed in physical units; suffixes disambiguate the two
# bin_index/interpolation columns.
vulnerabilities_with_hazard_and_damage = vulnerabilities.merge(
    intensity_bin_dictionary, how='inner',
    left_on='intensity_bin_id', right_on='bin_index').merge(
    damage_bin_dictionary, how='inner',
    suffixes=["_i", "_d"], left_on='damage_bin_id', right_on='bin_index')
fig = plt.figure(figsize=(10,20))
# One panel per occupancy type, sharing a single colour bar.
grid = AxesGrid(fig, 111,
                nrows_ncols=(1, 3),
                axes_pad=0.05,
                share_all=True,
                label_mode="L",
                cbar_location="right",
                cbar_mode="single",
                )
vmin = 0.0
vmax = max(vulnerabilities_with_hazard_and_damage.probability)
labels = ["Residential", "Commercial", "Industrial"]
for idx, ax in enumerate(grid):
    # 29 intensity bins x 12 damage bins; damage axis reversed so low damage
    # sits at the bottom of the rendered image.
    a = np.zeros((29, 12))
    for index, row in vulnerabilities_with_hazard_and_damage[
        vulnerabilities_with_hazard_and_damage.vulnerability_id == idx + 1].iterrows():
        a[int(row.bin_index_i-1), 11-int(row.bin_index_d-1)] = row.probability
    im = ax.imshow(a, cmap=plt.cm.get_cmap('Blues'), vmin=vmin, vmax=vmax,
               extent=(
                   min(intensity_bin_dictionary.interpolation), max(intensity_bin_dictionary.interpolation),
                   min(damage_bin_dictionary.interpolation) * 100, max(damage_bin_dictionary.interpolation) * 100))
    at = AnchoredText(labels[idx],
                      prop=dict(size=8), frameon=True,
                      loc=2,
                      )
    at.patch.set_boxstyle("round,pad=0.,rounding_size=0.2")
    ax.add_artist(at)
    ax.set_xlabel("Intensity - Peak gust (mph)")
    ax.set_ylabel("Damage")
# Single shared colour bar labelled with the probability of damage.
grid[0].cax.colorbar(im)
cax = grid.cbar_axes[0]
axis = cax.axis[cax.orientation]
axis.label.set_text("Probability of damage")
plt.show()
```
To run the model we need some test exposure data. Lets have a look at an example Location and Account file.
```
# Example OED exposure inputs used to exercise the model.
test_locations = pd.read_csv('../tests/inputs/SourceLocOEDPiWind.csv')
test_locations.head()
test_accounts = pd.read_csv('../tests/inputs/SourceAccOEDPiWind.csv')
test_accounts.head()
```
To run the model, we also need to define some analysis settings. Lets have a look at an example settings file.
```
# Parse the analysis settings JSON. json.load handles whitespace and
# newlines natively, so the original read().replace('\n', '') round-trip
# through json.loads was unnecessary.
with open('../analysis_settings.json', 'r') as myfile:
    analysis_settings = json.load(myfile)
print(json.dumps(analysis_settings, indent=True))
```
We can now run the model using the Oasis MDK.
```
# Remove any previous run directory, then execute the full PiWind analysis
# with the Oasis MDK; outputs land in /tmp/analysis_test/output.
! rm -rf /tmp/analysis_test
! oasislmf model run -C ../oasislmf.json -r /tmp/analysis_test
```
Lets visualize the output of our analysis.
```
analysis_directory = "/tmp/analysis_test"
# Ground-up loss exceedance curves: AEP (aggregate) and OEP (occurrence).
# type == 1 filters one of the two output sample types -- presumably the
# analytical mean rows; confirm against the ktools leccalc documentation.
gul_aep = pd.read_csv(os.path.join(analysis_directory, "output", "gul_S1_leccalc_full_uncertainty_aep.csv"))
gul_aep = gul_aep[gul_aep.type == 1]
gul_oep = pd.read_csv(os.path.join(analysis_directory, "output", "gul_S1_leccalc_full_uncertainty_oep.csv"))
gul_oep = gul_oep[gul_oep.type == 1]
# Align OEP and AEP losses on summary and return period for a side-by-side plot.
eps = pd.merge(gul_oep, gul_aep, on=["summary_id", "return_period"], suffixes=["_oep", "_aep"])
eps = eps.sort_values(by="return_period", ascending=True)
fig, ax = plt.subplots()
eps.plot(ax=ax, kind='bar', x='return_period', y=["loss_oep", "loss_aep"])
# Thousands separators on both axes for readability.
ax.set_yticklabels(['{:,}'.format(int(x)) for x in ax.get_yticks().tolist()])
ax.set_xticklabels(['{:,}'.format(int(x)) for x in eps.return_period])
plt.legend(('OEP', 'AEP'))
ax.set_xlabel("Return period (years)")
ax.set_ylabel("Loss")
```
| github_jupyter |
# K-Nearest Neighbors from scratch
---
In this project we will be implementing the KNN algorithm from scratch. We will implement the algorithm for the K values 1, 3, 5 and 7 and see which one performs better. Also, we will implement multiple distance measures and compare accuracy across all of them.
We begin by importing the necessary libraries
```
import math
import matplotlib.pyplot as mp
import numpy as np
import pandas as pd
# Experiment configuration for the from-scratch KNN study.
DATA = "/kaggle/input/iris.data"
SPLIT_PERCENT = 0.7  # fraction of records kept for the dev set
K_VALUES = [1, 3, 5, 7]  # neighbourhood sizes to evaluate
DIST_METRICS = ["euc", "norm-euc", "cosine-sim"]  # distance measures compared
```
We now prepare the dataset for further processing. To do this we first load the data, shuffle it randomly and map the classes to integers for easier processing
```
# Load and shuffle the data (sample(frac=1) is a full random permutation).
dataset = pd.read_csv(DATA).sample(frac=1).reset_index(drop=True)
# Map class names to integers
iris_class_map = {v: k + 1 for k, v in enumerate(dataset['class'].unique())}
dataset['class'] = dataset['class'].map(iris_class_map)
RECORDS_COUNT, ATTR_COUNT = dataset.shape
# All columns except the final 'class' column are feature attributes.
ATTRS = dataset.columns.values[0:ATTR_COUNT - 1]
SPLIT_SIZE = math.floor(RECORDS_COUNT * SPLIT_PERCENT)
# List of columns for every K Value
K_COL_STRINGS = ["pred_k_{}".format(k) for k in K_VALUES]
for col in K_COL_STRINGS:
    dataset[col] = np.nan
# Split dataset for dev and test
dev_set = dataset[:SPLIT_SIZE].copy(deep=True)
test_set = dataset[SPLIT_SIZE:].copy(deep=True)
print("Dev data")
dev_set.head()
```
Now that our data is prepared, we need to find the nearest neighbors for every K value. To do this, we first need to calculate the Euclidean distance for every row in the dev set against every other row. Once we have the distances, we find the K Nearest Neighbors for every K value and use this to predict the class of the selected vector.
Once we make our predictions, we need to calculate the accuracy as well.
```
# Find K nearest neighbors for all values of K
# NOTE: O(n^2) per K value -- every row is compared against the whole dev set.
for index, k in enumerate(K_VALUES):
    calculated_pred = []
    for i, row in dev_set.iterrows():
        # Calculate euclidian distance
        calculated_dist = (dev_set[ATTRS].sub(row[ATTRS]).pow(2).sum(1).pow(0.5)).sort_values()
        # Get indices of nearest neighbors
        nearest_neighbor_indices = calculated_dist.iloc[0:k].index.values
        # Get nearest neighbors
        nearest_neighbors = dev_set.loc[nearest_neighbor_indices, :]['class']
        # Predict class of the vector (majority vote; mode() breaks ties by order)
        prediction = nearest_neighbors.mode().values[0]
        calculated_pred.append(prediction)
    dev_set[K_COL_STRINGS[index]] = calculated_pred
# Calculating accuracy: % of rows whose prediction matches the true class.
euc_accuracy = []
for col in dev_set[K_COL_STRINGS]:
    column = dev_set[col]
    total_rows = dev_set.shape[0]
    num = dev_set.loc[dev_set['class'] == column].shape[0]
    acc = round((num/total_rows) * 100, 5)
    euc_accuracy.append(acc)
print(euc_accuracy)
```
Now, follow the same process but instead we use a normalized euclidian distance as a metric. To do this, we normalize the dataset and calculate the euclidian distance again. This allows us to deal with outliers and ensure we are correctly scaling the data
```
# Normalize data
def normalize(dataframe):
    """Return a deep copy with each attribute column min-max scaled to [0, 1]."""
    scaled = dataframe.copy(deep=True)
    col_min = scaled[ATTRS].min()
    col_max = scaled[ATTRS].max()
    # Vectorized column-wise (x - min) / (max - min), same result as the
    # original per-column loop.
    scaled[ATTRS] = (scaled[ATTRS] - col_min) / (col_max - col_min)
    return scaled
norm_dev_set = normalize(dev_set)
# Reset the prediction columns (they still hold the raw-distance predictions).
for col in K_COL_STRINGS:
    norm_dev_set[col] = np.nan
norm_dev_set.head()
```
Once the dataset is normalized, we follow the same process to calculate the euclidian distance and predict
```
# Predict using normalized data for all K values
for index, k in enumerate(K_VALUES):
    calculated_pred = []
    for i, row in norm_dev_set.iterrows():
        # Euclidean distance from this row to every dev row, ascending.
        calculated_dist = (norm_dev_set[ATTRS].sub(row[ATTRS]).pow(2).sum(1).pow(0.5)).sort_values()
        nearest_neighbor_indices = calculated_dist.iloc[0:k].index.values
        nearest_neighbors = norm_dev_set.loc[nearest_neighbor_indices, :]['class']
        # Majority vote among the k nearest neighbours.
        prediction = nearest_neighbors.mode().values[0]
        calculated_pred.append(prediction)
    norm_dev_set[K_COL_STRINGS[index]] = calculated_pred
# Accuracy per K value on the normalized dev set.
norm_euc_accuracy = []
for col in norm_dev_set[K_COL_STRINGS]:
    column = norm_dev_set[col]
    total_rows = norm_dev_set.shape[0]
    # Fix: compare against norm_dev_set's own class column. The original read
    # dev_set['class'] -- equivalent only because normalize() never touches
    # 'class' and the indices align, but misleading and fragile.
    num = norm_dev_set.loc[norm_dev_set['class'] == column].shape[0]
    acc = round((num/total_rows) * 100, 5)
    norm_euc_accuracy.append(acc)
print(norm_euc_accuracy)
```
For the final iteration, we will use Cosine Similarity as a distance metric
```
# Dot product
def dot(A, B):
    """Return the dot product of the equal-length vectors A and B."""
    total = 0
    for left, right in zip(A, B):
        total += left * right
    return total

# Cosine Similarity
def cosine_similarity(a, b):
    """Return the cosine *distance* (1 - cosine similarity) between a and b."""
    norm_a = np.sqrt(dot(a, a))
    norm_b = np.sqrt(dot(b, b))
    return 1 - dot(a, b) / (norm_a * norm_b)
cosine_dev_set = dev_set.copy(deep=True)
# Same KNN loop as before, but ranking neighbours by cosine distance.
for index, k in enumerate(K_VALUES):
    calculated_pred = []
    for i, row in cosine_dev_set.iterrows():
        calculated_dist = cosine_dev_set[ATTRS].apply(lambda a: cosine_similarity(np.array(a), np.array(row[ATTRS])), axis=1).sort_values()
        nearest_neighbor_indices = calculated_dist.iloc[0:k].index.values
        nearest_neighbors = cosine_dev_set.loc[nearest_neighbor_indices, :]['class']
        prediction = nearest_neighbors.mode().values[0]
        calculated_pred.append(prediction)
    cosine_dev_set[K_COL_STRINGS[index]] = calculated_pred
cosine_accuracy = []
for col in cosine_dev_set[K_COL_STRINGS]:
    column = cosine_dev_set[col]
    total_rows = cosine_dev_set.shape[0]
    # NOTE(review): this reads dev_set['class'] rather than
    # cosine_dev_set['class']; equivalent here because cosine_dev_set is a
    # copy with the same index, but the copy's own column would be clearer.
    num = cosine_dev_set.loc[dev_set['class'] == column].shape[0]
    acc = round((num/total_rows) * 100, 5)
    cosine_accuracy.append(acc)
print(cosine_accuracy)
```
Now, we need to analyze the accuracy of all these distance metrics across all the K values and pick the one that's doing the best job. To do this
```
# Accuracy summary: one row per K value, one column per distance metric.
acc_table = pd.DataFrame(index=K_VALUES)
acc_table[DIST_METRICS[0]] = euc_accuracy
acc_table[DIST_METRICS[1]] = norm_euc_accuracy
acc_table[DIST_METRICS[2]] = cosine_accuracy
acc_table
# Grouped bar chart: three bars (one per metric) side by side at each K.
width = 0.3
mp.figure(figsize=(15, 10))
mp.ylim(0, 115)
e = mp.bar(x=np.add(K_VALUES, width * -1), height=euc_accuracy, width=width, color='#663399')
n = mp.bar(x=np.add(K_VALUES, width * 0), height=norm_euc_accuracy, width=width, color='#669933')
c = mp.bar(x=np.add(K_VALUES, width * 1), height=cosine_accuracy, width=width, color='#994d33')
mp.legend(DIST_METRICS, loc="best", fontsize=12)
mp.xlabel("K Value")
mp.ylabel("Accuracy (%)")
mp.show()
```
We see that at K = 1, we get 100% accuracy. This is mainly because overfitting is occurring. From my observations K values 3 and 5 seem to be most stable and the accuracy for K = 7 usually decreases. This is why I will be choosing K = 3 and the Normalized Euclidean distance, since it is stable, to test the accuracy of the test dataset.
```
# Final evaluation: K = 3 with normalized Euclidean distance on the test set.
k_val = 3
norm_test_set = normalize(test_set)
calculated_pred = []
for i, row in norm_test_set.iterrows():
    # Euclidean distance from this row to every test row, ascending.
    calculated_dist = (norm_test_set[ATTRS].sub(row[ATTRS]).pow(2).sum(1).pow(0.5)).sort_values()
    nearest_neighbor_indices = calculated_dist.iloc[0:k_val].index.values
    nearest_neighbors = norm_test_set.loc[nearest_neighbor_indices, :]['class']
    prediction = nearest_neighbors.mode().values[0]
    calculated_pred.append(prediction)
norm_test_set[K_COL_STRINGS[1]] = calculated_pred  # K_COL_STRINGS[1] == "pred_k_3"
# Bug fix: the original measured accuracy on norm_dev_set using the dev-set
# predictions, so the reported "test accuracy" was actually dev accuracy.
norm_euc_accuracy = []
column = norm_test_set[K_COL_STRINGS[1]]
total_rows = norm_test_set.shape[0]
num = norm_test_set.loc[norm_test_set['class'] == column].shape[0]
acc = round((num/total_rows) * 100, 5)
norm_euc_accuracy.append(acc)
print(norm_euc_accuracy)
```
The final accuracy we get on the test set is around ~97% which is pretty good
| github_jupyter |
# Bayesian Logistic Regression with PyMC3
* This is a reproduction with a few slight alterations of [Bayesian Log Reg](http://jbencook.github.io/portfolio/bayesian_logistic_regression.html) by J. Benjamin Cook
* Author: Peadar Coyle and J. Benjamin Cook
* How likely am I to make more than $50,000 US Dollars?
* Exploration of model selection techniques too - I use DIC and WAIC to select the best model.
* The convenience functions are all taken from Jon Sedars work.
* This example also has some explorations of the features so serves as a good example of Exploratory Data Analysis and how that can guide the model creation/ model selection process.
```
%matplotlib inline
import pandas as pd
import numpy as np
import pymc3 as pm
import matplotlib.pyplot as plt
import seaborn
import warnings
warnings.filterwarnings('ignore')
from collections import OrderedDict
from time import time
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.optimize import fmin_powell
from scipy import integrate
import theano as thno
import theano.tensor as T
def run_models(df, upper_order=5):
    '''
    Convenience function:
    Fit a range of pymc3 models of increasing polynomial complexity.
    Suggest limit to max order 5 since calculation time is exponential.

    df: data frame with the income/educ/hours/age columns used by the formula.
    upper_order: highest polynomial order of `age` to fit.
    Returns (models, traces): OrderedDicts keyed 'k1'..'k{upper_order}'.
    '''
    models, traces = OrderedDict(), OrderedDict()
    for k in range(1,upper_order+1):
        nm = 'k{}'.format(k)
        fml = create_poly_modelspec(k)
        with pm.Model() as models[nm]:
            print('\nRunning: {}'.format(nm))
            # Legacy pymc3 GLM API: builds the model from a patsy formula.
            pm.glm.glm(fml, df, family=pm.glm.families.Normal())
            # MAP estimate used as the sampler's starting point.
            start_MAP = pm.find_MAP(fmin=fmin_powell, disp=False)
            traces[nm] = pm.sample(2000, start=start_MAP, step=pm.NUTS(), progressbar=True)
    return models, traces
def plot_traces(traces, retain=1000):
    '''
    Convenience function:
    Plot traces with overlaid means and values

    traces: a pymc3 MultiTrace; retain: number of trailing draws to keep
    (earlier draws are discarded as burn-in).
    '''
    # Horizontal lines mark each parameter's posterior mean.
    ax = pm.traceplot(traces[-retain:], figsize=(12,len(traces.varnames)*1.5),
        lines={k: v['mean'] for k, v in pm.df_summary(traces[-retain:]).iterrows()})
    # Annotate each mean value next to its line, rotated to save space.
    for i, mn in enumerate(pm.df_summary(traces[-retain:])['mean']):
        ax[i,0].annotate('{:.2f}'.format(mn), xy=(mn,0), xycoords='data'
            ,xytext=(5,10), textcoords='offset points', rotation=90
            ,va='bottom', fontsize='large', color='#AA0022')
def create_poly_modelspec(k=1):
    '''
    Convenience function:
    Create a patsy model-spec string of polynomial order k in `age`,
    e.g. k=2 -> "income ~ educ + hours + age + np.power(age,2)".
    '''
    terms = ['income ~ educ + hours + age']
    for power in range(2, k + 1):
        terms.append('+ np.power(age,{})'.format(power))
    return ' '.join(terms)
```
The [Adult Data Set](http://archive.ics.uci.edu/ml/datasets/Adult) is commonly used to benchmark machine learning algorithms. The goal is to use demographic features, or variables, to predict whether an individual makes more than \\$50,000 per year. The data set is almost 20 years old, and therefore, not perfect for determining the probability that I will make more than \$50K, but it is a nice, simple dataset that can be used to showcase a few benefits of using Bayesian logistic regression over its frequentist counterpart.
The motivation for myself to reproduce this piece of work was to learn how to use Odd Ratio in Bayesian Regression.
```
# Adult census data: the raw file has no header row, so supply column names.
# Note: 'captial-gain' is misspelled in the original column list; renaming it
# would touch runtime identifiers, so it is left as-is.
data = pd.read_csv("https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data", header=None, names=['age', 'workclass', 'fnlwgt',
                                 'education-categorical', 'educ',
                                 'marital-status', 'occupation',
                                 'relationship', 'race', 'sex',
                                 'captial-gain', 'capital-loss',
                                 'hours', 'native-country',
                                 'income'])
data
```
# Scrubbing and cleaning
We need to remove any null entries in Income.
And we also want to restrict this study to the United States.
```
# Drop rows with missing income.
data = data[~pd.isnull(data['income'])]
# Bug fix: the original computed this filter but never assigned the result,
# so the study was NOT actually restricted to the United States as the
# surrounding text states. (Values carry a leading space in the raw data.)
data = data[data['native-country']==" United-States"]
# Binary target: 1 if income is " >50K", else 0.
income = 1 * (data['income'] == " >50K")
age2 = np.square(data['age'])
# Keep only the model covariates, then attach age^2 and the target.
data = data[['age', 'educ', 'hours']]
data['age2'] = age2
data['income'] = income
income.value_counts()
```
# Exploring the data
Let us get a feel for the parameters.
* We see that age is a tailed distribution. Certainly not Gaussian!
* We don't see much of a correlation between many of the features, with the exception of Age and Age2.
* Hours worked has some interesting behaviour. How would one describe this distribution?
```
# Pairwise scatter/histogram overview of all covariates.
g = seaborn.pairplot(data)
# Compute the correlation matrix
corr = data.corr()
# Generate a mask for the upper triangle
# NOTE(review): np.bool is removed in NumPy >= 1.24; use plain `bool` there.
mask = np.zeros_like(corr, dtype=np.bool)
mask[np.triu_indices_from(mask)] = True
# Set up the matplotlib figure
f, ax = plt.subplots(figsize=(11, 9))
# Generate a custom diverging colormap
cmap = seaborn.diverging_palette(220, 10, as_cmap=True)
# Draw the heatmap with the mask and correct aspect ratio
seaborn.heatmap(corr, mask=mask, cmap=cmap, vmax=.3,
            linewidths=.5, cbar_kws={"shrink": .5}, ax=ax)
```
We see here not many strong correlations. The highest is 0.30 according to this plot. We see a weak-correlation between hours and income
(which is logical), we see a slighty stronger correlation between education and income (which is the kind of question we are answering).
# The model
We will use a simple model, which assumes that the probability of making more than $50K
is a function of age, years of education and hours worked per week. We will use PyMC3
do inference.
In Bayesian statistics, we treat everything as a random variable and we want to know the posterior probability distribution of the parameters
(in this case the regression coefficients)
The posterior is equal to the likelihood $$p(\theta | D) = \frac{p(D|\theta)p(\theta)}{p(D)}$$
Because the denominator is a notoriously difficult integral, $p(D) = \int p(D | \theta) p(\theta) d \theta $ we would prefer to skip computing it. Fortunately, if we draw examples from the parameter space, with probability proportional to the height of the posterior at any given point, we end up with an empirical distribution that converges to the posterior as the number of samples approaches infinity.
What this means in practice is that we only need to worry about the numerator.
Getting back to logistic regression, we need to specify a prior and a likelihood in order to draw samples from the posterior. We could use sociological knowledge about the effects of age and education on income, but instead, let's use the default prior specification for GLM coefficients that PyMC3 gives us, which is $p(θ)=N(0,10^{12}I)$. This is a very vague prior that will let the data speak for themselves.
The likelihood is the product of n Bernoulli trials, $\prod^{n}_{i=1} p_{i}^{y} (1 - p_{i})^{1-y_{i}}$,
where $p_i = \frac{1}{1 + e^{-z_i}}$,
$z_{i} = \beta_{0} + \beta_{1}(age)_{i} + \beta_2(age)^{2}_{i} + \beta_{3}(educ)_{i} + \beta_{4}(hours)_{i}$ and $y_{i} = 1$ if income is greater than 50K and $y_{i} = 0$ otherwise.
With the math out of the way we can get back to the data. Here I use PyMC3 to draw samples from the posterior. The sampling algorithm used is NUTS, which is a form of Hamiltonian Monte Carlo, in which parameteres are tuned automatically. Notice, that we get to borrow the syntax of specifying GLM's from R, very convenient! I use a convenience function from above to plot the trace infromation from the first 1000 parameters.
```
with pm.Model() as logistic_model:
    # Logistic regression via the legacy pymc3 GLM formula interface
    # (Binomial family => logit link).
    pm.glm.glm('income ~ age + age2 + educ + hours', data, family=pm.glm.families.Binomial())
    trace_logistic_model = pm.sample(2000, pm.NUTS(), progressbar=True)
# Keep only the last 1000 draws when plotting (burn-in discarded).
plot_traces(trace_logistic_model, retain=1000)
```
# Some results
One of the major benefits that makes Bayesian data analysis worth the extra computational effort in many circumstances is that we can be explicit about our uncertainty. Maximum likelihood returns a number, but how certain can we be that we found the right number? Instead, Bayesian inference returns a distribution over parameter values.
I'll use seaborn to look at the distribution of some of these factors.
```
plt.figure(figsize=(9,7))
# Discard the first 1000 draws as burn-in.
trace = trace_logistic_model[1000:]
# Joint posterior of the age and education coefficients.
seaborn.jointplot(trace['age'], trace['educ'], kind="hex", color="#4CB391")
plt.xlabel("beta_age")
plt.ylabel("beta_educ")
plt.show()
```
So how do age and education affect the probability of making more than $$50K?$ To answer this question, we can show how the probability of making more than $50K changes with age for a few different education levels. Here, we assume that the number of hours worked per week is fixed at 50. PyMC3 gives us a convenient way to plot the posterior predictive distribution. We need to give the function a linear model and a set of points to evaluate. We will pass in three different linear models: one with educ == 12 (finished high school), one with educ == 16 (finished undergrad) and one with educ == 19 (three years of grad school).
```
def make_income_model(educ, hours=50):
    """Build P(income > $50K | age) for fixed education and weekly hours.

    Parameters
    ----------
    educ : int
        Years of education to plug into the linear predictor.
    hours : int
        Hours worked per week (the notebook fixes this at 50).

    Returns
    -------
    callable
        A function (x, samples) -> probability, where `x` is age and
        `samples` a trace of posterior draws -- the signature expected by
        pm.glm.plot_posterior_predictive. Replaces three copy-pasted
        lambdas that differed only in the education constant.
    """
    def lm_fn(x, samples):
        # Linear predictor, then the inverse-logit squash.
        z = (samples['Intercept'] +
             samples['age'] * x +
             samples['age2'] * np.square(x) +
             samples['educ'] * educ +
             samples['hours'] * hours)
        return 1 / (1 + np.exp(-z))
    return lm_fn

# Linear model with hours == 50 and educ == 12 (high school)
lm = make_income_model(12)
# Linear model with hours == 50 and educ == 16 (undergrad)
lm2 = make_income_model(16)
# Linear model with hours == 50 and educ == 19 (grad school)
lm3 = make_income_model(19)
```
Each curve shows how the probability of earning more than $ 50K$ changes with age. The red curve represents 19 years of education, the green curve represents 16 years of education and the blue curve represents 12 years of education. For all three education levels, the probability of making more than $50K increases with age until approximately age 60, when the probability begins to drop off. Notice that each curve is a little blurry. This is because we are actually plotting 100 different curves for each level of education. Each curve is a draw from our posterior distribution. Because the curves are somewhat translucent, we can interpret dark, narrow portions of a curve as places where we have low uncertainty and light, spread out portions of the curve as places where we have somewhat higher uncertainty about our coefficient values.
```
# Plot the posterior predictive distributions of P(income > $50K) vs. age
# (100 posterior draws per education level, overlaid with transparency).
pm.glm.plot_posterior_predictive(trace, eval=np.linspace(25, 75, 1000), lm=lm, samples=100, color="blue", alpha=.15)
pm.glm.plot_posterior_predictive(trace, eval=np.linspace(25, 75, 1000), lm=lm2, samples=100, color="green", alpha=.15)
pm.glm.plot_posterior_predictive(trace, eval=np.linspace(25, 75, 1000), lm=lm3, samples=100, color="red", alpha=.15)
import matplotlib.lines as mlines
# Proxy artists so the legend shows one entry per education level.
blue_line = mlines.Line2D(['lm'], [], color='b', label='High School Education')
green_line = mlines.Line2D(['lm2'], [], color='g', label='Bachelors')
red_line = mlines.Line2D(['lm3'], [], color='r', label='Grad School')
plt.legend(handles=[blue_line, green_line, red_line], loc='lower right')
plt.ylabel("P(Income > $50K)")
plt.xlabel("Age")
plt.show()
# Odds ratio for one additional year of education: exp(beta_educ).
# NOTE(review): `normed` was removed in newer matplotlib; use density=True there.
b = trace['educ']
plt.hist(np.exp(b), bins=20, normed=True)
plt.xlabel("Odds Ratio")
plt.show()
```
Finally, we can find a credible interval (remember kids - credible intervals are Bayesian and confidence intervals are frequentist) for this quantity. This may be the best part about Bayesian statistics: we get to interpret credible intervals the way we've always wanted to interpret them. We are 95% confident that the odds ratio lies within our interval!
```
# 95% credible interval for the education odds ratio from the posterior
# draws in b.
# NOTE(review): the exponent uses 3*lb / 3*ub (odds ratio for a 3-year
# change in education) while the histogram above plotted np.exp(b) for a
# 1-year change -- confirm which quantity is intended.
lb, ub = np.percentile(b, 2.5), np.percentile(b, 97.5)
print("P(%.3f < O.R. < %.3f) = 0.95"%(np.exp(3*lb),np.exp(3*ub)))
```
# Model selection
The [Deviance Information Criterion (DIC)](https://en.wikipedia.org/wiki/Deviance_information_criterion) is a fairly unsophisticated method for comparing the deviance of likelihood across the sample traces of a model run. However, this simplicity apparently yields quite good results in a variety of cases. We'll run the model with a few changes to see what effect higher order terms have on this model.
One question that was immediately asked was what effect does age have on the model, and why should it be age^2 versus age? We'll use the DIC to answer this question.
```
# Fit models with polynomial age terms of order 1..4 and compare their DIC.
# run_models is a helper defined elsewhere in the notebook.
models_lin, traces_lin = run_models(data, 4)
dfdic = pd.DataFrame(index=['k1','k2','k3','k4'], columns=['lin'])
dfdic.index.name = 'model'
for nm in dfdic.index:
    # Deviance Information Criterion per fitted model (lower is better).
    dfdic.loc[nm, 'lin'] = pm.stats.dic(traces_lin[nm],models_lin[nm])
# Melt to long format so seaborn can facet/color on the 'poly' column.
dfdic = pd.melt(dfdic.reset_index(), id_vars=['model'], var_name='poly', value_name='dic')
g = seaborn.factorplot(x='model', y='dic', col='poly', hue='poly', data=dfdic, kind='bar', size=6)
```
There isn't a lot of difference between these models in terms of DIC. So our choice is fine in the model above, and there isn't much to be gained for going up to age^3 for example.
Next we look at [WAIC](http://watanabe-www.math.dis.titech.ac.jp/users/swatanab/dicwaic.html), which is another model selection technique.
```
# Same comparison as above, but using WAIC instead of DIC
# (reuses models_lin / traces_lin from the previous cell).
dfdic = pd.DataFrame(index=['k1','k2','k3','k4'], columns=['lin'])
dfdic.index.name = 'model'
for nm in dfdic.index:
    dfdic.loc[nm, 'lin'] = pm.stats.waic(traces_lin[nm],models_lin[nm])
dfdic = pd.melt(dfdic.reset_index(), id_vars=['model'], var_name='poly', value_name='waic')
g = seaborn.factorplot(x='model', y='waic', col='poly', hue='poly', data=dfdic, kind='bar', size=6)
```
The WAIC confirms our decision to use age^2.
| github_jupyter |
# Dealing with missing values
```
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
np.set_printoptions(precision=3)
% matplotlib inline
plt.rcParams["figure.dpi"] = 300
from sklearn.datasets import load_iris
from sklearn.utils import shuffle
# Shuffle iris so class order does not correlate with row order.
iris = load_iris()
X, y = iris.data, iris.target
X, y = shuffle(X, y)
print(X[:30])
# Scenario 1: a column is mostly missing. Missingness depends on the row
# sum, so it is NOT missing completely at random.
rng = np.random.RandomState(0)
X_missing_column = X.copy()
mask = X.sum(axis=1) < rng.normal(loc=19, scale=3, size=X.shape[0])
X_missing_column[mask, 0] = np.NaN
X_missing_column[120:]
# Scenario 2: only a few rows have missing data, but a lot of it.
rng = np.random.RandomState(4)
X_missing_rows = X.copy()
for i in rng.randint(0, 30, 5):
    # blank out roughly 80% of each selected row's entries
    X_missing_rows[i, rng.uniform(size=4)> .2] = np.NaN
X_missing_rows[:30]
X[y==2].mean(axis=0)
# Scenario 3: only some values missing. Features 3 and 2 are dropped when
# feature 2 lies near a noisy threshold, so missingness depends on the data.
rng = np.random.RandomState(0)
X_some_missing = X.copy()
mask = np.abs(X[:, 2] - rng.normal(loc=5.5, scale=.7, size=X.shape[0])) < .6
X_some_missing[mask, 3] = np.NaN
# different random numbers for the second feature's mask
mask2 = np.abs(X[:, 2] - rng.normal(loc=5.5, scale=.7, size=X.shape[0])) < .6
X_some_missing[mask2, 2] = np.NaN
X_some_missing[:30]
# from now on use X_ = X_some_missing
X_ = X_some_missing
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split, cross_val_score
# Stratified split keeps the class balance in train and test.
X_train, X_test, y_train, y_test = train_test_split(X_, y, stratify=y, random_state=0)
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
# Baseline 1: simply drop every column that contains any NaN.
nan_columns = np.any(np.isnan(X_train), axis=0)
X_drop_columns = X_train[:, ~nan_columns]
logreg = make_pipeline(StandardScaler(), LogisticRegression())
scores = cross_val_score(logreg, X_drop_columns, y_train, cv=10)
np.mean(scores)
print(X_train[-30:])
# Baseline 2: mean imputation with the (pre-0.20) sklearn Imputer.
from sklearn.preprocessing import Imputer
imp = Imputer(strategy="mean").fit(X_train)
X_mean_imp = imp.transform(X_train)
X_mean_imp[-30:]
X_mean_imp.shape
import matplotlib.patches as patches
# Rows containing at least one NaN -- these are the points that get imputed.
imputed_mask = np.any(np.isnan(X_train), axis=1)
def plot_imputation(X_imp, title=None, ax=None):
    """Scatter features 2 vs 3 of (possibly imputed) data.

    Rows flagged by the global ``imputed_mask`` (originally containing
    NaNs) are drawn as squares, fully observed rows as circles; colors
    encode the training labels ``y_train``.
    """
    if ax is None:
        ax = plt.gca()
    if title is not None:
        ax.set_title(title)
    imputed_pts = X_imp[imputed_mask]
    observed_pts = X_imp[~imputed_mask]
    ax.scatter(imputed_pts[:, 2], imputed_pts[:, 3], c=plt.cm.Vega10(y_train[imputed_mask]), alpha=.6, marker="s")
    ax.scatter(observed_pts[:, 2], observed_pts[:, 3], c=plt.cm.Vega10(y_train[~imputed_mask]), alpha=.6)
    # Proxy artists: marker-only lines used purely to build the legend.
    handles = [
        plt.Line2D((0,), (0,), linestyle='', marker=marker, markerfacecolor="w", markeredgecolor="k", label=label)
        for marker, label in (("s", 'Imputed data'), ("o", 'Real data'))
    ]
    plt.legend(handles=handles, numpoints=1, loc="best")
plot_imputation(X_mean_imp, "Mean imputation")
# I designed the problem so that mean imputation wouldn't work:
# all imputed values collapse onto the per-feature means.
mean_pipe = make_pipeline(Imputer(), StandardScaler(), LogisticRegression())
scores = cross_val_score(mean_pipe, X_train, y_train, cv=10)
np.mean(scores)
```
# Model-driven imputation and KNN
```
from sklearn.neighbors import KNeighborsRegressor
# Impute feature 2 with KNN regression using only the two fully observed
# features (columns 0 and 1) as predictors.
feature2_missing = np.isnan(X_train[:, 2])
knn_feature2 = KNeighborsRegressor().fit(X_train[~feature2_missing, :2],
                                         X_train[~feature2_missing, 2])
X_train_knn2 = X_train.copy()
X_train_knn2[feature2_missing, 2] = knn_feature2.predict(X_train[feature2_missing, :2])
# Impute feature 3 with KNN the same way (again from columns 0 and 1 only).
feature3_missing = np.isnan(X_train[:, 3])
knn_feature3 = KNeighborsRegressor().fit(X_train[~feature3_missing, :2],
                                         X_train[~feature3_missing, 3])
X_train_knn3 = X_train_knn2.copy()
X_train_knn3[feature3_missing, 3] = knn_feature3.predict(X_train[feature3_missing, :2])
plot_imputation(X_train_knn3, "Simple KNN imputation")
# this is cheating because I'm not using a pipeline (imputation sees the
# whole training set, not just each CV fold);
# we would need to write a transformer that does the imputation
scores = cross_val_score(logreg, X_train_knn3, y_train, cv=10)
np.mean(scores)
from sklearn.ensemble import RandomForestRegressor
# Iterative (MICE-style) imputation with a random forest: start from mean
# imputation, then alternately re-predict each incomplete feature from the
# other three until the imputed matrix stops changing.
# this is just because I'm lazy and don't want to special-case the first iteration
X_imputed = Imputer().fit_transform(X_train)
feature2_missing = np.isnan(X_train[:, 2])
feature3_missing = np.isnan(X_train[:, 3])
# column indices of the predictors for each imputed feature
inds_not_2 = np.array([0, 1, 3])
inds_not_3 = np.array([0, 1, 2])
rf = RandomForestRegressor(n_estimators=100)
for i in range(10):
    last = X_imputed.copy()
    # impute feature 2 with rf, trained on rows where it was observed
    rf.fit(X_imputed[~feature2_missing][:, inds_not_2], X_train[~feature2_missing, 2])
    X_imputed[feature2_missing, 2] = rf.predict(X_imputed[feature2_missing][:, inds_not_2])
    # impute feature 3 with rf
    rf.fit(X_imputed[~feature3_missing][:, inds_not_3], X_train[~feature3_missing, 3])
    X_imputed[feature3_missing, 3] = rf.predict(X_imputed[feature3_missing][:, inds_not_3])
    # stop once the update is small; this would make more sense
    # if we scaled the data beforehand
    if (np.linalg.norm(last - X_imputed)) < .5:
        break
fig, axes = plt.subplots(1, 3, figsize=(12, 4))
plot_imputation(X_mean_imp, "Mean", ax=axes[0])
plot_imputation(X_train_knn3, "KNN", ax=axes[1])
plot_imputation(X_imputed, "Random Forest imputation", ax=axes[2])
scores = cross_val_score(logreg, X_imputed, y_train, cv=10)
np.mean(scores)
# you need to pip install fancyimpute for the rest! - and tensorflow
import fancyimpute
# fancyimpute's KNN measures distances over all partially observed features,
# unlike the naive version above that only used features 0 and 1.
X_train_fancy_knn = fancyimpute.KNN().complete(X_train)
fig, ax = plt.subplots(1, 2, figsize=(8, 3))
plot_imputation(X_train_knn3, "Naive KNN", ax=ax[0])
plot_imputation(X_train_fancy_knn, "Fancy KNN", ax=ax[1])
# Compare several fancyimpute strategies side by side.
X_train_fancy_simple = fancyimpute.SimpleFill().complete(X_train)
X_train_fancy_mice = fancyimpute.MICE(verbose=0).complete(X_train)
X_train_fancy_si = fancyimpute.SoftImpute(verbose=0).complete(X_train)
fig, axes = plt.subplots(2, 2, figsize=(8, 8), dpi=100)
for ax, name, X_imp in zip(axes.ravel(), ["simple", "KNN", "MICE", "Soft impute"],
                           [X_train_fancy_simple, X_train_fancy_knn, X_train_fancy_mice, X_train_fancy_si]):
    plot_imputation(X_imp, name, ax=ax)
# Evaluate MICE imputation with the logistic-regression pipeline.
mice = fancyimpute.MICE(verbose=0)
X_train_fancy_mice = mice.complete(X_train)
scores = cross_val_score(logreg, X_train_fancy_mice, y_train, cv=10)
scores.mean()
```
| github_jupyter |
```
from __future__ import division
import os
import numpy as np
import pandas as pd
from helpers import data_provider
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import seaborn as sns
%matplotlib inline
plt.style.use('classic')
plt.rc("figure", facecolor="white")
# Size figures to match the LaTeX column width, golden-ratio aspect.
fig_width_pt = 469.755  # Get this from LaTeX using \showthe\columnwidth
inches_per_pt = 1.0/72.27               # Convert pt to inch
golden_mean = (np.sqrt(5)-1.0)/2.0      # Aesthetic ratio
fig_width = fig_width_pt*inches_per_pt  # width in inches
fig_height = fig_width*golden_mean      # height in inches
fig_size = [fig_width,fig_height]
# Publication-ready defaults: PostScript backend, LaTeX-rendered text.
params = {'backend': 'ps',
          'axes.labelsize': 10,
          'text.fontsize': 10,
          'legend.fontsize': 10,
          'xtick.labelsize': 8,
          'ytick.labelsize': 8,
          'text.usetex': True,
          'figure.figsize': fig_size}
plt.rc('font', **{'family': 'serif', 'serif': ['Computer Modern']})
plt.rcParams.update(params)
def plot_consumption(figure_name, which_house, data):
    """Plot one house's consumption time series and save it as PDF and EPS.

    Parameters
    ----------
    figure_name : str
        Stem used for the output file names.
    which_house : int
        House number, used in the title and file names.
    data : pandas.Series
        Consumption values (kWh) indexed by time.
    """
    plt.figure(1)
    plt.clf()
    # [left, bottom, width, height] chosen to fit the LaTeX column margins
    plt.axes([0.125,0.2,0.95-0.125,0.95-0.2])
    plt.plot(data, color='b')
    plt.xticks(rotation=45)
    plt.grid(True)
    plt.xlabel('Time')
    plt.ylabel('Consumption (kwh)')
    # Bug fix: original concatenation produced e.g. "House nr. 4Electricity
    # Consumption" -- a space before "Electricity" was missing.
    plt.title('House nr. '+str(which_house)+' Electricity Consumption')
    plt.savefig('figures/electricity_consumption/'+figure_name+'_'+str(which_house)+'.pdf')
    plt.savefig('figures/electricity_consumption/'+figure_name+'_'+str(which_house)+'.eps')
    plt.show()
def insta_plot(vals):
    """Quick interactive plot of a consumption series (not saved to disk)."""
    plt.figure(figsize=(12,6))
    plt.plot(vals, color='b')
    plt.xticks(rotation=45)
    # Apply title and axis labels in one pass.
    for setter, text in ((plt.title, 'Unregistered Electricity Consumption'),
                         (plt.xlabel, 'Time'),
                         (plt.ylabel, 'Consumption (kwh)')):
        setter(text)
    plt.grid(True)
    plt.show()
```
## Plotting and saving data figures
```
# Plot and save a consumption figure for each house.
houses=[1,2,4,5,6,7,8,9,10,12,13,15,16,17,18,19,20]
for house_nr in houses:
    data = data_provider.load_aggregate(house_nr)
    plot_consumption(figure_name='electricity_consumption',which_house=house_nr,data=data.Aggregate)
# Save important summary statistics for each house
d = {}
houses=[1,2,4,5,6,7,8,9,10,12,13,15,16,17,18,19,20]
for house_nr in houses:
    data = data_provider.load_aggregate(house_nr)
    key = 'house_'+str(house_nr)
    # describe() gives count/mean/std/min/quartiles/max per column
    d[key] = data.describe()
# NOTE(review): write_to_file is not defined in this notebook chunk --
# presumably a helper imported elsewhere; confirm before running.
write_to_file('summary_statistics.txt',d)
```
## Boxplot for spotting outliers
```
# Collect every house's aggregate series into one wide DataFrame
# (one column per house) for box-plotting.
df = pd.DataFrame()
for house_nr in houses:
    data = data_provider.load_aggregate(house_nr)
    key = 'house_'+str(house_nr)
    df = pd.concat([df,data], ignore_index=True, axis=1)
names= ['House '+str(house) for house in houses]
df.columns = names
# Matplotlib boxplot version.
plt.figure(1)
plt.clf()
plt.axes([0.125,0.2,0.95-0.125,0.95-0.2])
_=df.boxplot()
plt.xticks(rotation=45)
plt.ylabel('Consumption (kwh)')
plt.title('Electricity Consumption Summary per House')
plt.savefig('figures/houses_box.eps')
plt.savefig('figures/houses_box.pdf')
# Seaborn boxplot version of the same data.
plt.figure(1)
plt.clf()
plt.axes([0.125,0.2,0.95-0.125,0.95-0.2])
sns.boxplot(data=df)
plt.xticks(rotation=45)
plt.ylabel('Consumption (kwh)')
plt.title('Electricity Consumption Summary per House')
plt.savefig('figures/test_houses_box.eps')
plt.savefig('figures/test_houses_box.pdf')
```
## Cleaning Extreme Values
```
# Inspect a few known extreme-value days in the raw aggregate data, then
# compare the boxplots of the cleaned data set.
data = pd.read_csv('houses.csv',parse_dates=['Time'],index_col='Time')
houses=[1,2,4,5,6,7,8,9,10,12,13,15,16,17,18,19,20]
names= ['House '+str(house) for house in houses]
data.columns = names
# Bug fix: the columns were just renamed to 'House <n>' (with a space), so
# attribute access like data.House_1 raised AttributeError; index by the
# actual column label instead.
insta_plot(data['House 1']['2015-02-08'])
insta_plot(data['House 16']['2014-05-06'])
insta_plot(data['House 18']['2015-03-22'])
# Same house labels for the cleaned data set.
df = pd.read_csv('houses_clean.csv',parse_dates=['Time'],index_col='Time')
df.columns = names
# Seaborn boxplot of the cleaned data.
plt.figure(1)
plt.clf()
plt.axes([0.125,0.2,0.95-0.125,0.95-0.2])
sns.boxplot(data=df)
plt.xticks(rotation=45)
plt.ylabel('Consumption (kWh)')
plt.title('Electricity Consumption Summary per House')
plt.savefig('figures/sns_houses_clean_box.eps')
plt.savefig('figures/sns_houses_clean_box.pdf')
# Matplotlib boxplot of the cleaned data.
plt.figure(1)
plt.clf()
plt.axes([0.125,0.2,0.95-0.125,0.95-0.2])
_=df.boxplot()
plt.xticks(rotation=45)
plt.ylabel('Consumption (kwh)')
plt.title('Electricity Consumption Summary per House')
plt.savefig('figures/houses_clean_box.eps')
plt.savefig('figures/houses_clean_box.pdf')
df.describe()
# NOTE(review): `d` still holds the summary dict built earlier from the RAW
# data; it is not recomputed from the cleaned df, so this writes stale
# statistics -- confirm and rebuild d from df if needed.
write_to_file('clean_summary_statistics.txt',d)
```
## Keep 95% of the data
```
# load the data set
data_process = pd.read_csv('houses_clean.csv',parse_dates=['Time'], index_col='Time')
data = data_process.copy(deep=True)
data.quantile(0.95)
# Keep only values at or below each house's 95th percentile; larger values
# become NaN, so they are simply excluded from the plots below.
data = data[data <= data.quantile(0.95)]
# NOTE(review): attribute access assumes the CSV column is literally named
# 'House_1' -- confirm against the file header (columns are renamed to
# 'House <n>' only further down).
x = data.House_1.dropna(axis=0)
mu = data.House_1.mean()
sigma = data.House_1.std()
# the histogram of the data
n, bins, patches = plt.hist(x, 50, normed=1, facecolor='green', alpha=0.75)
# add a 'best fit' normal curve using the sample mean and std
y = mlab.normpdf( bins, mu, sigma)
l = plt.plot(bins, y, 'r--', linewidth=1)
plt.xlabel('Consumption')
plt.ylabel('Frequency')
plt.grid(True)
plt.show()
houses=[1,2,4,5,6,7,8,9,10,12,13,15,16,17,18,19,20]
names= ['House '+str(house) for house in houses]
data.columns = names
# Grey, small, hollow outlier markers for the box plot.
flierprops = dict(marker='.', markerfacecolor='grey', markersize=4, fillstyle='none',
                  linestyle='none')
plt.figure(1)
plt.clf()
plt.axes([0.125,0.2,0.95-0.125,0.95-0.2])
sns.boxplot(data=data,flierprops=flierprops)
plt.xticks(rotation=45)
plt.ylabel('Consumption (kwh)')
plt.title('Electricity Consumption Summary per House')
plt.savefig('figures/95_percent/sns_houses_clean_box_95.eps')
plt.savefig('figures/95_percent/sns_houses_clean_box_95.pdf')
```
| github_jupyter |
```
import os
# Pin TensorFlow to GPU 3 on a multi-GPU machine.
os.environ['CUDA_VISIBLE_DEVICES'] = '3'
import re
# Number of frequency bins expected in every spectrogram.
dimension = 400
# Character vocabulary. The leading "EOS" characters are placeholders so
# that indices 0-2 are reserved for the PAD/GO/EOS control tokens below.
vocab = "EOS abcdefghijklmnopqrstuvwxyz'"
char2idx = {char: idx for idx, char in enumerate(vocab)}
idx2char = {idx: char for idx, char in enumerate(vocab)}
def text2idx(text):
    """Normalize *text* and map it to character indices.

    Lower-cases the input, strips every character outside the model
    vocabulary, and returns (cleaned_text, list_of_indices).
    """
    # Bug fix: the vocabulary (and char2idx) includes the apostrophe, but the
    # original pattern [^a-z ] stripped it, so words like "don't" lost
    # information the model can represent; keep apostrophes.
    text = re.sub(r"[^a-z' ]", '', text.lower()).strip()
    converted = [char2idx[char] for char in text]
    return text, converted
# Control token ids (match the reserved 'E','O','S' placeholder slots
# at the start of `vocab`).
GO = 1
PAD = 0
EOS = 2
import tensorflow as tf
import numpy as np
# Load (spectrogram, transcript-index) pairs for training.
train_X, train_Y = [], []
text_files = [f for f in os.listdir('spectrogram-train') if f.endswith('.npy')]
for fpath in text_files:
    try:
        # Some spectrogram files are named '<base>-<x>.<ext>.npy';
        # normalize those back to '<base>.<ext>' to find the transcript.
        splitted = fpath.split('-')
        if len(splitted) == 2:
            splitted[1] = splitted[1].split('.')[1]
            fpath = splitted[0] + '.' + splitted[1]
        with open('data/' + fpath.replace('npy', 'txt')) as fopen:
            text, converted = text2idx(fopen.read())
        w = np.load('spectrogram-train/' + fpath)
        # skip spectrograms whose frequency axis does not match the model
        if w.shape[1] != dimension:
            continue
        train_X.append(w)
        train_Y.append(converted)
    except:
        # NOTE(review): bare except silently drops unreadable samples --
        # consider at least counting or logging the failures.
        pass
# Load (spectrogram, transcript-index) pairs for testing.
test_X, test_Y = [], []
text_files = [f for f in os.listdir('spectrogram-test') if f.endswith('.npy')]
for fpath in text_files:
    # NOTE(review): unlike the training loop, this assumes every test file
    # has a transcript and a well-formed name (no try/except, no '-' fixup)
    # -- confirm that is true of the test set.
    with open('data/' + fpath.replace('npy', 'txt')) as fopen:
        text, converted = text2idx(fopen.read())
    w = np.load('spectrogram-test/' + fpath)
    if w.shape[1] != dimension:
        continue
    test_X.append(w)
    test_Y.append(converted)
class Model:
    """TF1 speech-to-text seq2seq graph.

    Encoder: `num_layers` stacked bidirectional LSTMs over spectrogram
    frames. Decoder: multi-layer LSTM with Luong attention, trained with
    scheduled sampling (teacher forcing with probability
    `force_teaching_ratio`) and decoded at inference time with beam search.
    """
    def __init__(
        self,
        num_layers,
        size_layer,
        learning_rate,
        num_features,
        # NOTE(review): `dropout` is accepted but never used below.
        dropout = 1.0,
        beam_width=5, force_teaching_ratio=0.5
    ):
        def lstm_cell(size, reuse=False):
            # Orthogonal init; reuse=True lets the beam-search branch share weights.
            return tf.nn.rnn_cell.LSTMCell(size, initializer=tf.orthogonal_initializer(),reuse=reuse)
        # Inputs: X = [batch, time, num_features] spectrograms,
        #         Y = [batch, len] character ids (0 = PAD).
        self.X = tf.placeholder(tf.float32, [None, None, num_features])
        self.Y = tf.placeholder(tf.int32, [None, None])
        # Sequence lengths derived from the zero padding: count non-zero
        # entries along time, then average over the feature axis.
        self.X_seq_len = tf.count_nonzero(self.X, 1, dtype=tf.int32)
        self.X_seq_len = tf.reduce_mean(self.X_seq_len, axis = 1)
        self.Y_seq_len = tf.count_nonzero(self.Y, 1, dtype=tf.int32)
        batch_size = tf.shape(self.X)[0]
        decoder_embeddings = tf.Variable(tf.random_uniform([len(char2idx), size_layer], -1, 1))
        # Teacher-forcing input: prepend GO, drop the last target position.
        main = tf.strided_slice(self.Y, [0, 0], [batch_size, -1], [1, 1])
        decoder_input = tf.concat([tf.fill([batch_size, 1], GO), main], 1)
        self.encoder_out = self.X
        print(self.X_seq_len)
        # Stacked bidirectional encoder; each direction uses size_layer//2 so
        # the concatenated fw/bw output keeps width size_layer.
        for n in range(num_layers):
            (out_fw, out_bw), (state_fw, state_bw) = tf.nn.bidirectional_dynamic_rnn(
                cell_fw = lstm_cell(size_layer // 2),
                cell_bw = lstm_cell(size_layer // 2),
                inputs = self.encoder_out,
                sequence_length = self.X_seq_len,
                dtype = tf.float32,
                scope = 'bidirectional_rnn_%d'%(n))
            self.encoder_out = tf.concat((out_fw, out_bw), 2)
        # Final fw/bw states of the LAST encoder layer (loop variables leak),
        # duplicated as the initial state of every decoder layer.
        bi_state_c = tf.concat((state_fw.c, state_bw.c), -1)
        bi_state_h = tf.concat((state_fw.h, state_bw.h), -1)
        bi_lstm_state = tf.nn.rnn_cell.LSTMStateTuple(c=bi_state_c, h=bi_state_h)
        encoder_state = tuple([bi_lstm_state] * num_layers)
        print(self.encoder_out, encoder_state)
        # --- Training branch: attention decoder with scheduled sampling. ---
        with tf.variable_scope('decode'):
            attention_mechanism = tf.contrib.seq2seq.LuongAttention(
                num_units = size_layer,
                memory = self.encoder_out,
                memory_sequence_length = self.X_seq_len)
            decoder_cell = tf.contrib.seq2seq.AttentionWrapper(
                cell = tf.nn.rnn_cell.MultiRNNCell([lstm_cell(size_layer) for _ in range(num_layers)]),
                attention_mechanism = attention_mechanism,
                attention_layer_size = size_layer)
            # With probability (1 - force_teaching_ratio) the helper feeds back
            # the model's sampled token instead of the ground truth.
            training_helper = tf.contrib.seq2seq.ScheduledEmbeddingTrainingHelper(
                inputs = tf.nn.embedding_lookup(decoder_embeddings, decoder_input),
                sequence_length = self.Y_seq_len,
                embedding = decoder_embeddings,
                sampling_probability = 1 - force_teaching_ratio,
                time_major = False)
            training_decoder = tf.contrib.seq2seq.BasicDecoder(
                cell = decoder_cell,
                helper = training_helper,
                initial_state = decoder_cell.zero_state(batch_size, tf.float32).clone(cell_state=encoder_state),
                output_layer = tf.layers.Dense(len(char2idx)))
            training_decoder_output, _, _ = tf.contrib.seq2seq.dynamic_decode(
                decoder = training_decoder,
                impute_finished = True,
                maximum_iterations = tf.reduce_max(self.Y_seq_len))
            self.training_logits = training_decoder_output.rnn_output
        # --- Inference branch: beam search, sharing the training weights. ---
        with tf.variable_scope('decode', reuse=True):
            # BeamSearchDecoder expects the memory/state tiled beam_width times.
            encoder_out_tiled = tf.contrib.seq2seq.tile_batch(self.encoder_out, beam_width)
            encoder_state_tiled = tf.contrib.seq2seq.tile_batch(encoder_state, beam_width)
            X_seq_len_tiled = tf.contrib.seq2seq.tile_batch(self.X_seq_len, beam_width)
            attention_mechanism = tf.contrib.seq2seq.LuongAttention(
                num_units = size_layer,
                memory = encoder_out_tiled,
                memory_sequence_length = X_seq_len_tiled)
            decoder_cell = tf.contrib.seq2seq.AttentionWrapper(
                cell = tf.nn.rnn_cell.MultiRNNCell([lstm_cell(size_layer, reuse=True) for _ in range(num_layers)]),
                attention_mechanism = attention_mechanism,
                attention_layer_size = size_layer)
            predicting_decoder = tf.contrib.seq2seq.BeamSearchDecoder(
                cell = decoder_cell,
                embedding = decoder_embeddings,
                start_tokens = tf.tile(tf.constant([GO], dtype=tf.int32), [batch_size]),
                end_token = EOS,
                initial_state = decoder_cell.zero_state(batch_size * beam_width, tf.float32).clone(cell_state = encoder_state_tiled),
                beam_width = beam_width,
                output_layer = tf.layers.Dense(len(char2idx), _reuse=True),
                length_penalty_weight = 0.0)
            predicting_decoder_output, _, _ = tf.contrib.seq2seq.dynamic_decode(
                decoder = predicting_decoder,
                impute_finished = False,
                maximum_iterations = tf.reduce_max(self.X_seq_len))
            # keep only the top-scoring beam per batch element
            self.predicting_ids = predicting_decoder_output.predicted_ids[:, :, 0]
        # Masked sequence loss ignores PAD positions of Y.
        masks = tf.sequence_mask(self.Y_seq_len, tf.reduce_max(self.Y_seq_len), dtype=tf.float32)
        self.cost = tf.contrib.seq2seq.sequence_loss(logits = self.training_logits,
                                                     targets = self.Y,
                                                     weights = masks)
        self.optimizer = tf.train.AdamOptimizer(learning_rate).minimize(self.cost)
        # Token-level accuracy over the non-padded positions.
        y_t = tf.argmax(self.training_logits,axis=2)
        y_t = tf.cast(y_t, tf.int32)
        self.prediction = tf.boolean_mask(y_t, masks)
        mask_label = tf.boolean_mask(self.Y, masks)
        correct_pred = tf.equal(self.prediction, mask_label)
        correct_index = tf.cast(correct_pred, tf.float32)
        self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
tf.reset_default_graph()
sess = tf.InteractiveSession()
# Hyperparameters.
size_layers = 512
learning_rate = 1e-3
num_layers = 2
batch_size = 64
epoch = 20
model = Model(num_layers, size_layers, learning_rate, dimension)
sess.run(tf.global_variables_initializer())
# Zero-pad all spectrograms along the time axis to the longest sequence.
train_X = tf.keras.preprocessing.sequence.pad_sequences(
    train_X, dtype = 'float32', padding = 'post'
)
test_X = tf.keras.preprocessing.sequence.pad_sequences(
    test_X, dtype = 'float32', padding = 'post'
)
def pad_sentence_batch(sentence_batch, pad_int):
    """Right-pad every sentence in the batch to the longest one.

    Returns a tuple (padded_sequences, original_lengths).
    """
    longest = max(len(sentence) for sentence in sentence_batch)
    padded_seqs = [sentence + [pad_int] * (longest - len(sentence))
                   for sentence in sentence_batch]
    seq_lens = [len(sentence) for sentence in sentence_batch]
    return padded_seqs, seq_lens
from tqdm import tqdm
for e in range(epoch):
    pbar = tqdm(
        range(0, len(train_X), batch_size), desc = 'minibatch loop')
    train_cost, train_accuracy, test_cost, test_accuracy = [], [], [], []
    for i in pbar:
        # slice one minibatch; targets are right-padded with PAD (= 0)
        batch_x = train_X[i : min(i + batch_size, len(train_X))]
        y = train_Y[i : min(i + batch_size, len(train_X))]
        batch_y, _ = pad_sentence_batch(y, 0)
        _, cost, accuracy = sess.run(
            [model.optimizer, model.cost, model.accuracy],
            feed_dict = {model.X: batch_x, model.Y: batch_y},
        )
        train_cost.append(cost)
        train_accuracy.append(accuracy)
        pbar.set_postfix(cost = cost, accuracy = accuracy)
    pbar = tqdm(
        range(0, len(test_X), batch_size), desc = 'testing minibatch loop')
    for i in pbar:
        # evaluation pass: the optimizer op is NOT run, weights stay fixed
        batch_x = test_X[i : min(i + batch_size, len(test_X))]
        y = test_Y[i : min(i + batch_size, len(test_X))]
        batch_y, _ = pad_sentence_batch(y, 0)
        cost, accuracy = sess.run(
            [model.cost, model.accuracy],
            feed_dict = {model.X: batch_x, model.Y: batch_y},
        )
        test_cost.append(cost)
        test_accuracy.append(accuracy)
        pbar.set_postfix(cost = cost, accuracy = accuracy)
    # per-epoch averages over all minibatches
    print('epoch %d, training avg cost %f, training avg accuracy %f'%(e + 1, np.mean(train_cost),
                                                                      np.mean(train_accuracy)))
    print('epoch %d, testing avg cost %f, testing avg accuracy %f'%(e + 1, np.mean(test_cost),
                                                                    np.mean(test_accuracy)))
import random
# Beam-search decode one random test example and compare with the truth.
random_index = random.randint(0, len(test_X) - 1)
batch_x = test_X[random_index : random_index + 1]
print(
    'real:',
    ''.join(
        [idx2char[no] for no in test_Y[random_index : random_index + 1][0]]
    ),
)
pred = sess.run(model.predicting_ids, feed_dict = {model.X: batch_x})[0]
print('predicted:', ''.join([idx2char[no] for no in pred]))
```
| github_jupyter |
# Table of Contents
* [Intro](#Intro)
* [Data loading and preprocessing](#Data-loading-and-preprocessing)
* [Model training and evaluation](#Model-training-and-evaluation)
* [Text Generations](#Text-Generations)
* [Conclusion](#Conclusion)
# Intro
This notebook provides an overview of text generation with Recurrent Neural Networks (RNN) using Keras.
We are going to build and train different models that will be able to generate new pieces of text for different contexts (e.g. motivational quotes, jokes, proverbs, narrative, conversations, Q/A).
The code will guide in all the steps necessary for this task, and it's accompanied by technical descriptions as well as external references to go deeper into the subject. See also the README.md file contained in this repository.
Some of the dataset used I got directly from [this repository](https://github.com/svenvdbeukel/Short-text-corpus-with-focus-on-humor-detection) or [here for fortune cookies galore](https://github.com/ianli/fortune-cookies-galore). I will also try to include the trained models data in my repository.
You can easily adapt the code for any new kind of dataset you want to experiment with. If you have any doubts or suggestions, feel free to contact me directly, and be sure to share you results if you play with the code on new data.
```
# Basic libraries import
import numpy as np
import pandas as pd
import seaborn as sns
import pickle
import nltk
import itertools
from keras.preprocessing import sequence
from keras.models import model_from_json
# Plotting
%matplotlib notebook
sns.set_context("paper")
# Add system path local modules
import os
import sys
sys.path.append(os.path.join(os.getcwd(), 'src'))
%load_ext autoreload
%autoreload 2
from model.textGenModel import TextGenModel
```
# Data loading and preprocessing
First we are going to load our dataset and preprocess it such it can be fed to the model. Notice we are working at word level. Moving to character level would require some adjustment to the process and overall model.
These are the common steps you should follow to create your training data from your original dataset:
* sentence segmentation (if you don't have already separate individual sentences)
* sentence tokenization (from sentence to list of words)
* add start and end tokens
* generate words indexing
* pad sequences (pad or truncate sentences to fixed length)
* one-hot encode (if you are not going to use an embedding layer in the keras model)
```
# load dataset with pickle
corpus_name = "short_oneliners"
with open("resources/short_oneliners.pickle", 'rb') as f: #binary mode (b) is required for pickle
    dataset = pickle.load(f, encoding='utf-8') #our dataset is simply a list of strings
print('Loaded {} sentences\nExample: "{}"'.format(len(dataset), dataset[0]))
# constant tokens and params for our models
START_TOKEN = "SENTENCE_START"
END_TOKEN = "SENTENCE_END"
UNKNOWN_TOKEN = "UNKNOWN_TOKEN"
PADDING_TOKEN = "PADDING"
vocabulary_size = 5000
sent_max_len = 20
# word tokenization for each sentence, while adding start and end tokens
sentences = [[START_TOKEN] + nltk.word_tokenize(entry.lower()) + [END_TOKEN] for entry in dataset]
print('Example: {}'.format(sentences[0]))
# creates index_to_word and word_to_index mappings, given the data and a max vocabulary size
def get_words_mappings(tokenized_sentences, vocabulary_size):
    """Build index<->word mappings limited to the most frequent words.

    Index 0 is reserved for the padding token and the last index for the
    unknown-word token, so the returned vocabulary has up to
    vocabulary_size + 2 entries.
    """
    # stdlib Counter replaces nltk.FreqDist here (FreqDist is a Counter
    # subclass, so most_common behaves identically) -- one less heavyweight
    # dependency for a pure counting task
    from collections import Counter
    import itertools
    frequence = Counter(itertools.chain(*tokenized_sentences))
    vocab = frequence.most_common(vocabulary_size)
    index_to_word = [word for word, _count in vocab]
    # Add padding for index 0
    index_to_word.insert(0, PADDING_TOKEN)
    # Append unknown token (with index = actual vocabulary size + 1)
    index_to_word.append(UNKNOWN_TOKEN)
    word_to_index = dict([(w,i) for i,w in enumerate(index_to_word)])
    return index_to_word, word_to_index
# get mappings and update vocabulary size (actual size includes the padding
# and unknown tokens, so it can differ from the requested cap)
index_to_word, word_to_index = get_words_mappings(sentences, vocabulary_size)
vocabulary_size = len(index_to_word)
print("Vocabulary size = " + str(vocabulary_size))
# Generate training data by converting tokenized sentences to indexes
# (out-of-vocabulary words are replaced by the unknown token)
train_size = min(len(sentences), 100000)
train_data = [[word_to_index.get(w,word_to_index[UNKNOWN_TOKEN]) for w in sent] for sent in sentences[:train_size]]
# pad sentences to fixed length (pad with 0s if shorter, truncate if longer)
train_data = sequence.pad_sequences(train_data, maxlen=sent_max_len, dtype='int32', padding='post', truncating='post')
# quick and dirty way to one-hot encode our training data, not needed if using embeddings
#X_train = np.asarray([np.eye(vocabulary_size)[idx_sentence[:-1]] for idx_sentence in train_data])
#y_train = np.asarray([np.eye(vocabulary_size)[idx_sentence[1:]] for idx_sentence in train_data])
# create training data for rnn (language-model style):
# input is sentence truncated from last word, output is sentence truncated from first word
X_train = train_data[:,:-1]
y_train = train_data[:,1:]
#X_train = X_train.reshape([X_train.shape[0], X_train.shape[1], 1])
y_train = y_train.reshape([y_train.shape[0], y_train.shape[1], 1]) # needed because of the TimeDistributed output layer
# check if expected shapes (samples, sentence length, ?)
print(X_train.shape)
print(y_train.shape)
```
# Model training and evaluation
We are going to define a RNN model architecture, train it on our data, and eventually save the results for future usage.
```
from keras.models import Sequential
from keras.layers import Dense, Dropout, BatchNormalization
from keras.layers.core import Activation, Flatten
from keras.layers.embeddings import Embedding
from keras.layers import LSTM, TimeDistributed
# Define model and parameters
hidden_size = 512
embedding_size = 128
# model with embedding: word ids -> embedding -> LSTM -> per-timestep softmax
model = Sequential()
model.add(Embedding(vocabulary_size, embedding_size, mask_zero=True))
# add batch norm
# NOTE(review): TimeDistributed(Flatten()) over the 3-D embedding output
# looks like a pass-through here -- confirm whether it can be removed.
model.add(TimeDistributed(Flatten()))
# NOTE(review): 'relu' is an unusual LSTM activation (default is tanh) --
# confirm this is intentional.
model.add(LSTM(hidden_size, return_sequences=True, activation='relu'))
model.add(TimeDistributed(Dense(vocabulary_size, activation='softmax')))
# basic single-layer model (kept for reference)
#model = Sequential()
#model.add(LSTM(hidden_size, input_shape=(None, vocabulary_size), return_sequences=True))
#model.add(TimeDistributed(Dense(vocabulary_size, activation='softmax')))
# need time distributes for embedding?
#model.add(TimeDistributed(
#    Embedding(vocabulary_size, output_dim=hidden_size, mask_zero=True, input_length=sent_max_len-1),
#    input_shape=(sent_max_len-1, 1), input_dtype='int32'))
model.summary()
# recompile also if you just want to keep training a model just loaded from memory
# sparse loss lets us feed integer word ids instead of one-hot targets
loss = 'sparse_categorical_crossentropy'
optimizer = 'adam'
model.compile(loss=loss, optimizer=optimizer, metrics=['accuracy'])
# Train model
# you might want to train several times on few epochs, observe how loss and metrics vary
# and possibly tweak batch size and learning rate
num_epoch = 2
batch_size = 32
model.fit(X_train, y_train, epochs=num_epoch, batch_size=batch_size, verbose=1)
# export model (architecture)
model_path = "resources/models/{}_vocab_{}.json".format(corpus_name, vocabulary_size)
model_json = model.to_json()
with open(model_path, "w") as f:
    f.write(model_json)
# export model weights
# NOTE(review): the file name hard-codes epoch 40 while num_epoch above is 2
# -- confirm the intended naming before relying on it.
weights_path = "resources/models/{}_epoch_{}.hdf5".format(corpus_name, 40)
model.save_weights(weights_path)
# export word indexes
index_to_word_path = 'resources/models/{}_idxs_vocab{}.txt'.format(corpus_name, vocabulary_size)
with open(index_to_word_path, "wb") as f:
    pickle.dump(index_to_word, f)
```
# Text Generations
For the generation part I am relying on a generic utility class (*TextGenModel*) included in this repo.
Main operations for this step are:
* load previously trained model
* instantiate class with the model and model configuration (e.g. temperature, sent max length)
* generate new text with target class
* prettify generated text
For the text generation task a seed sentence can be provided, plus additional requirements on text length.
The class will then internally take care of predicting word after word until some criteria are met.
```
# Paths of a previously trained model (here: the jokes corpus).
model_path = "resources/models/jokes_vocab_5002.json"
weights_path = "resources/models/jokes_epoch_20.hdf5"
# Load previously saved model
with open(model_path, 'r') as f:
    model = model_from_json(f.read())
# Load weights into model
model.load_weights(weights_path)
# Load word indexes
# NOTE(review): index_to_word_path still points at the corpus exported in the
# training section above, not at the jokes model loaded here -- confirm the
# indexes actually match the weights.
with open(index_to_word_path, 'rb') as f:
    index_to_word = pickle.load(f, encoding='utf-8')
import logging
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
# instantiate generation class on our data
text_gen = TextGenModel(model, index_to_word, word_to_index, sent_max_len=sent_max_len,
                        temperature=1.0,
                        use_embeddings=True)
# generate N new sentences, flagging any that merely reproduce the corpus
n_sents = 10
original_sentences = [entry.lower() for entry in dataset]
for _ in range(n_sents):
    res = text_gen.pretty_print_sentence(text_gen.get_sentence(15), skip_last=True)
    if res in original_sentences:
        print("* {} SAME".format(res))
    else:
        print(res)
```
# Conclusion
I am trying to polish the code in this repository and generalize it even further, such that there is a clear separation between text generation and models training. The idea is that for the latter I can keep experimenting with different techniques and tools, and generate different models, while with the former I can provide a reusable text-generation interface for a multitude of use-cases.
I would be especially interested to see more widespread and creative usage of text generation: from purely artistic tasks, to personal optimization ones (e.g. text suggestion and check), passing through a bit more automation for all other relevant scenarios.
| github_jupyter |
##### Copyright 2020 The TensorFlow Authors.
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# Intro to Autoencoders
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/tutorials/generative/autoencoder">
<img src="https://www.tensorflow.org/images/tf_logo_32px.png" />
View on TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/tutorials/generative/autoencoder.ipynb">
<img src="https://www.tensorflow.org/images/colab_logo_32px.png" />
Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/tutorials/generative/autoencoder.ipynb">
<img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />
View source on GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/docs/site/en/tutorials/generative/autoencoder.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
</td>
</table>
This tutorial introduces autoencoders with three examples: the basics, image denoising, and anomaly detection.
An autoencoder is a special type of neural network that is trained to copy its input to its output. For example, given an image of a handwritten digit, an autoencoder first encodes the image into a lower dimensional latent representation, then decodes the latent representation back to an image. An autoencoder learns to compress the data while minimizing the reconstruction error.
To learn more about autoencoders, please consider reading chapter 14 from [Deep Learning](https://www.deeplearningbook.org/) by Ian Goodfellow, Yoshua Bengio, and Aaron Courville.
## Import TensorFlow and other libraries
```
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn.metrics import accuracy_score, precision_score, recall_score
from sklearn.model_selection import train_test_split
from tensorflow.keras import layers, losses
from tensorflow.keras.datasets import fashion_mnist
from tensorflow.keras.models import Model
```
## Load the dataset
To start, you will train the basic autoencoder using the Fashion MNIST dataset. Each image in this dataset is 28x28 pixels.
```
(x_train, _), (x_test, _) = fashion_mnist.load_data()
x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
print (x_train.shape)
print (x_test.shape)
```
## First example: Basic autoencoder

Define an autoencoder with two Dense layers: an `encoder`, which compresses the images into a 64 dimensional latent vector, and a `decoder`, that reconstructs the original image from the latent space.
To define your model, use the [Keras Model Subclassing API](https://www.tensorflow.org/guide/keras/custom_layers_and_models).
```
latent_dim = 64
class Autoencoder(Model):
    """Basic dense autoencoder: flatten -> latent vector -> 28x28 image."""

    def __init__(self, latent_dim):
        super(Autoencoder, self).__init__()
        self.latent_dim = latent_dim
        # Encoder compresses the flattened 784-pixel image to `latent_dim` units.
        encoder_layers = [
            layers.Flatten(),
            layers.Dense(latent_dim, activation='relu'),
        ]
        # Decoder maps the latent vector back to a 28x28 image in [0, 1].
        decoder_layers = [
            layers.Dense(784, activation='sigmoid'),
            layers.Reshape((28, 28)),
        ]
        self.encoder = tf.keras.Sequential(encoder_layers)
        self.decoder = tf.keras.Sequential(decoder_layers)

    def call(self, x):
        """Run the full encode/decode round trip and return the reconstruction."""
        latent = self.encoder(x)
        reconstruction = self.decoder(latent)
        return reconstruction
autoencoder = Autoencoder(latent_dim)
autoencoder.compile(optimizer='adam', loss=losses.MeanSquaredError())
```
Train the model using `x_train` as both the input and the target. The `encoder` will learn to compress the dataset from 784 dimensions to the latent space, and the `decoder` will learn to reconstruct the original images.
.
```
autoencoder.fit(x_train, x_train,
epochs=10,
shuffle=True,
validation_data=(x_test, x_test))
```
Now that the model is trained, let's test it by encoding and decoding images from the test set.
```
encoded_imgs = autoencoder.encoder(x_test).numpy()
decoded_imgs = autoencoder.decoder(encoded_imgs).numpy()
n = 10
plt.figure(figsize=(20, 4))
for i in range(n):
# display original
ax = plt.subplot(2, n, i + 1)
plt.imshow(x_test[i])
plt.title("original")
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
# display reconstruction
ax = plt.subplot(2, n, i + 1 + n)
plt.imshow(decoded_imgs[i])
plt.title("reconstructed")
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
plt.show()
```
## Second example: Image denoising

An autoencoder can also be trained to remove noise from images. In the following section, you will create a noisy version of the Fashion MNIST dataset by applying random noise to each image. You will then train an autoencoder using the noisy image as input, and the original image as the target.
Let's reimport the dataset to omit the modifications made earlier.
```
(x_train, _), (x_test, _) = fashion_mnist.load_data()
x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
x_train = x_train[..., tf.newaxis]
x_test = x_test[..., tf.newaxis]
print(x_train.shape)
```
Adding random noise to the images
```
noise_factor = 0.2
x_train_noisy = x_train + noise_factor * tf.random.normal(shape=x_train.shape)
x_test_noisy = x_test + noise_factor * tf.random.normal(shape=x_test.shape)
x_train_noisy = tf.clip_by_value(x_train_noisy, clip_value_min=0., clip_value_max=1.)
x_test_noisy = tf.clip_by_value(x_test_noisy, clip_value_min=0., clip_value_max=1.)
```
Plot the noisy images.
```
n = 10
plt.figure(figsize=(20, 2))
for i in range(n):
ax = plt.subplot(1, n, i + 1)
plt.title("original + noise")
plt.imshow(tf.squeeze(x_test_noisy[i]))
plt.gray()
plt.show()
```
### Define a convolutional autoencoder
In this example, you will train a convolutional autoencoder using [Conv2D](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Conv2D) layers in the `encoder`, and [Conv2DTranspose](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Conv2DTranspose) layers in the `decoder`.
```
class Denoise(Model):
    """Convolutional autoencoder used to remove noise from 28x28x1 images."""

    def __init__(self):
        super(Denoise, self).__init__()
        # Encoder: two strided convolutions downsample 28x28 -> 7x7.
        self.encoder = tf.keras.Sequential([
            layers.Input(shape=(28, 28, 1)),
            layers.Conv2D(16, (3, 3), activation='relu', padding='same', strides=2),
            layers.Conv2D(8, (3, 3), activation='relu', padding='same', strides=2),
        ])
        # Decoder: transposed convolutions upsample back to 28x28, then a
        # sigmoid conv produces a single-channel image in [0, 1].
        self.decoder = tf.keras.Sequential([
            layers.Conv2DTranspose(8, kernel_size=3, strides=2, activation='relu', padding='same'),
            layers.Conv2DTranspose(16, kernel_size=3, strides=2, activation='relu', padding='same'),
            layers.Conv2D(1, kernel_size=(3, 3), activation='sigmoid', padding='same'),
        ])

    def call(self, x):
        """Encode the (noisy) input and decode it into a cleaned image."""
        latent = self.encoder(x)
        cleaned = self.decoder(latent)
        return cleaned
autoencoder = Denoise()
autoencoder.compile(optimizer='adam', loss=losses.MeanSquaredError())
autoencoder.fit(x_train_noisy, x_train,
epochs=10,
shuffle=True,
validation_data=(x_test_noisy, x_test))
```
Let's take a look at a summary of the encoder. Notice how the images are downsampled from 28x28 to 7x7.
```
autoencoder.encoder.summary()
```
The decoder upsamples the images back from 7x7 to 28x28.
```
autoencoder.decoder.summary()
```
Plotting both the noisy images and the denoised images produced by the autoencoder.
```
# BUG FIX: the reconstruction must be produced from the NOISY test images —
# the original encoded the clean x_test, so the "reconstructed" row was not
# actually a denoising result even though it is plotted next to x_test_noisy.
encoded_imgs = autoencoder.encoder(x_test_noisy).numpy()
decoded_imgs = autoencoder.decoder(encoded_imgs).numpy()
n = 10
plt.figure(figsize=(20, 4))
for i in range(n):
    # display original + noise (top row)
    ax = plt.subplot(2, n, i + 1)
    plt.title("original + noise")
    plt.imshow(tf.squeeze(x_test_noisy[i]))
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
    # display reconstruction (bottom row)
    bx = plt.subplot(2, n, i + n + 1)
    plt.title("reconstructed")
    plt.imshow(tf.squeeze(decoded_imgs[i]))
    plt.gray()
    bx.get_xaxis().set_visible(False)
    bx.get_yaxis().set_visible(False)
plt.show()
```
## Third example: Anomaly detection
## Overview
In this example, you will train an autoencoder to detect anomalies on the [ECG5000 dataset](http://www.timeseriesclassification.com/description.php?Dataset=ECG5000). This dataset contains 5,000 [Electrocardiograms](https://en.wikipedia.org/wiki/Electrocardiography), each with 140 data points. You will use a simplified version of the dataset, where each example has been labeled either `0` (corresponding to an abnormal rhythm), or `1` (corresponding to a normal rhythm). You are interested in identifying the abnormal rhythms.
Note: This is a labeled dataset, so you could phrase this as a supervised learning problem. The goal of this example is to illustrate anomaly detection concepts you can apply to larger datasets, where you do not have labels available (for example, if you had many thousands of normal rhythms, and only a small number of abnormal rhythms).
How will you detect anomalies using an autoencoder? Recall that an autoencoder is trained to minimize reconstruction error. You will train an autoencoder on the normal rhythms only, then use it to reconstruct all the data. Our hypothesis is that the abnormal rhythms will have higher reconstruction error. You will then classify a rhythm as an anomaly if the reconstruction error surpasses a fixed threshold.
### Load ECG data
The dataset you will use is based on one from [timeseriesclassification.com](http://www.timeseriesclassification.com/description.php?Dataset=ECG5000).
```
# Download the ECG5000-based dataset (5,000 rows: 140 readings + 1 label each).
dataframe = pd.read_csv('http://storage.googleapis.com/download.tensorflow.org/data/ecg.csv', header=None)
raw_data = dataframe.values
dataframe.head()
# The last element contains the labels (1 = normal rhythm, 0 = abnormal)
labels = raw_data[:, -1]
# The other data points are the electrocardiogram data
data = raw_data[:, 0:-1]
# 80/20 train/test split; random_state fixed for reproducibility.
train_data, test_data, train_labels, test_labels = train_test_split(
data, labels, test_size=0.2, random_state=21
)
```
Normalize the data to `[0,1]`.
```
min_val = tf.reduce_min(train_data)
max_val = tf.reduce_max(train_data)
train_data = (train_data - min_val) / (max_val - min_val)
test_data = (test_data - min_val) / (max_val - min_val)
train_data = tf.cast(train_data, tf.float32)
test_data = tf.cast(test_data, tf.float32)
```
You will train the autoencoder using only the normal rhythms, which are labeled in this dataset as `1`. Separate the normal rhythms from the abnormal rhythms.
```
train_labels = train_labels.astype(bool)
test_labels = test_labels.astype(bool)
normal_train_data = train_data[train_labels]
normal_test_data = test_data[test_labels]
anomalous_train_data = train_data[~train_labels]
anomalous_test_data = test_data[~test_labels]
```
Plot a normal ECG.
```
plt.grid()
plt.plot(np.arange(140), normal_train_data[0])
plt.title("A Normal ECG")
plt.show()
```
Plot an anomalous ECG.
```
plt.grid()
plt.plot(np.arange(140), anomalous_train_data[0])
plt.title("An Anomalous ECG")
plt.show()
```
### Build the model
```
class AnomalyDetector(Model):
    """Dense autoencoder for 140-point ECG traces; reconstruction error is
    later used to flag anomalous rhythms."""

    def __init__(self):
        super(AnomalyDetector, self).__init__()
        # Funnel the 140 readings down to an 8-unit bottleneck...
        self.encoder = tf.keras.Sequential([
            layers.Dense(32, activation="relu"),
            layers.Dense(16, activation="relu"),
            layers.Dense(8, activation="relu"),
        ])
        # ...then expand back out to 140 values in [0, 1].
        self.decoder = tf.keras.Sequential([
            layers.Dense(16, activation="relu"),
            layers.Dense(32, activation="relu"),
            layers.Dense(140, activation="sigmoid"),
        ])

    def call(self, x):
        """Return the reconstruction of the input ECG trace."""
        bottleneck = self.encoder(x)
        reconstruction = self.decoder(bottleneck)
        return reconstruction
autoencoder = AnomalyDetector()
autoencoder.compile(optimizer='adam', loss='mae')
```
Notice that the autoencoder is trained using only the normal ECGs, but is evaluated using the full test set.
```
history = autoencoder.fit(normal_train_data, normal_train_data,
epochs=20,
batch_size=512,
validation_data=(test_data, test_data),
shuffle=True)
plt.plot(history.history["loss"], label="Training Loss")
plt.plot(history.history["val_loss"], label="Validation Loss")
plt.legend()
```
You will soon classify an ECG as anomalous if the reconstruction error is greater than one standard deviation from the normal training examples. First, let's plot a normal ECG from the training set, the reconstruction after it's encoded and decoded by the autoencoder, and the reconstruction error.
```
encoded_data = autoencoder.encoder(normal_test_data).numpy()
decoded_data = autoencoder.decoder(encoded_data).numpy()
plt.plot(normal_test_data[0], 'b')
plt.plot(decoded_data[0], 'r')
plt.fill_between(np.arange(140), decoded_data[0], normal_test_data[0], color='lightcoral')
plt.legend(labels=["Input", "Reconstruction", "Error"])
plt.show()
```
Create a similar plot, this time for an anomalous test example.
```
encoded_data = autoencoder.encoder(anomalous_test_data).numpy()
decoded_data = autoencoder.decoder(encoded_data).numpy()
plt.plot(anomalous_test_data[0], 'b')
plt.plot(decoded_data[0], 'r')
plt.fill_between(np.arange(140), decoded_data[0], anomalous_test_data[0], color='lightcoral')
plt.legend(labels=["Input", "Reconstruction", "Error"])
plt.show()
```
### Detect anomalies
Detect anomalies by calculating whether the reconstruction loss is greater than a fixed threshold. In this tutorial, you will calculate the mean absolute error for normal examples from the training set, then classify future examples as anomalous if the reconstruction error is higher than one standard deviation from the training set.
Plot the reconstruction error on normal ECGs from the training set
```
reconstructions = autoencoder.predict(normal_train_data)
train_loss = tf.keras.losses.mae(reconstructions, normal_train_data)
plt.hist(train_loss[None,:], bins=50)
plt.xlabel("Train loss")
plt.ylabel("No of examples")
plt.show()
```
Choose a threshold value that is one standard deviation above the mean.
```
threshold = np.mean(train_loss) + np.std(train_loss)
print("Threshold: ", threshold)
```
Note: There are other strategies you could use to select a threshold value above which test examples should be classified as anomalous, the correct approach will depend on your dataset. You can learn more with the links at the end of this tutorial.
If you examine the reconstruction error for the anomalous examples in the test set, you'll notice most have greater reconstruction error than the threshold. By varying the threshold, you can adjust the [precision](https://developers.google.com/machine-learning/glossary#precision) and [recall](https://developers.google.com/machine-learning/glossary#recall) of your classifier.
```
reconstructions = autoencoder.predict(anomalous_test_data)
test_loss = tf.keras.losses.mae(reconstructions, anomalous_test_data)
plt.hist(test_loss[None, :], bins=50)
plt.xlabel("Test loss")
plt.ylabel("No of examples")
plt.show()
```
Classify an ECG as an anomaly if the reconstruction error is greater than the threshold.
```
def predict(model, data, threshold):
    """Classify each example as normal (True) when its reconstruction error
    stays below `threshold`; anomalies come back as False."""
    reconstructed = model(data)
    per_example_error = tf.keras.losses.mae(reconstructed, data)
    return tf.math.less(per_example_error, threshold)
def print_stats(predictions, labels):
    """Print accuracy, precision and recall of `predictions` against `labels`."""
    for metric_name, metric_fn in (("Accuracy", accuracy_score),
                                   ("Precision", precision_score),
                                   ("Recall", recall_score)):
        print("{} = {}".format(metric_name, metric_fn(labels, predictions)))
preds = predict(autoencoder, test_data, threshold)
print_stats(preds, test_labels)
```
## Next steps
To learn more about anomaly detection with autoencoders, check out this excellent [interactive example](https://anomagram.fastforwardlabs.com/#/) built with TensorFlow.js by Victor Dibia. For a real-world use case, you can learn how [Airbus Detects Anomalies in ISS Telemetry Data](https://blog.tensorflow.org/2020/04/how-airbus-detects-anomalies-iss-telemetry-data-tfx.html) using TensorFlow. To learn more about the basics, consider reading this [blog post](https://blog.keras.io/building-autoencoders-in-keras.html) by François Chollet. For more details, check out chapter 14 from [Deep Learning](https://www.deeplearningbook.org/) by Ian Goodfellow, Yoshua Bengio, and Aaron Courville.
| github_jupyter |
## Autograded Notebook (Canvas & CodeGrade)
This notebook will be automatically graded. It is designed to test your answers and award points for the correct answers. Following the instructions for each Task carefully.
### Instructions
* **Download this notebook** as you would any other ipynb file
* **Upload** to Google Colab or work locally (if you have that set-up)
* **Delete `raise NotImplementedError()`**
* Write your code in the `# YOUR CODE HERE` space
* **Execute** the Test cells that contain `assert` statements - these help you check your work (others contain hidden tests that will be checked when you submit through Canvas)
* **Save** your notebook when you are finished
* **Download** as a `ipynb` file (if working in Colab)
* **Upload** your complete notebook to Canvas (there will be additional instructions in Slack and/or Canvas)
# Lambda School Data Science - Unit 1 Sprint 1
## Sprint Challenge: Data Wrangling and Storytelling
## Use the following information to complete Tasks 1 - 12
### Notebook points total: 12
In this Sprint Challenge you will first "wrangle" some data from [Gapminder](https://www.gapminder.org/about-gapminder/), a Swedish non-profit co-founded by Hans Rosling. "Gapminder produces free teaching resources making the world understandable based on reliable statistics."
- [Cell phones (total), by country and year](https://raw.githubusercontent.com/open-numbers/ddf--gapminder--systema_globalis/master/countries-etc-datapoints/ddf--datapoints--cell_phones_total--by--geo--time.csv)
- [Population (total), by country and year](https://raw.githubusercontent.com/open-numbers/ddf--gapminder--systema_globalis/master/countries-etc-datapoints/ddf--datapoints--population_total--by--geo--time.csv)
- [Geo country codes](https://github.com/open-numbers/ddf--gapminder--systema_globalis/blob/master/ddf--entities--geo--country.csv)
These two links have everything you need to successfully complete the first part of this sprint challenge.
- [Pandas documentation: Working with Text Data](https://pandas.pydata.org/pandas-docs/stable/text.html) (one question)
- [Pandas Cheat Sheet](https://github.com/pandas-dev/pandas/blob/master/doc/cheatsheet/Pandas_Cheat_Sheet.pdf) (everything else)
**Task 1** - Load and print the cell phone data. Pandas and numpy import statements have been included for you.
* load your CSV file found at `cell_phones_url` into a DataFrame named `cell_phones`
* print the top 5 records of `cell_phones`
```
# Task 1
# Imports
import pandas as pd
import numpy as np
cell_phones_url = 'https://raw.githubusercontent.com/LambdaSchool/data-science-practice-datasets/main/unit_1/Cell__Phones/cell_phones.csv'
# Load the dataframe and print the top 5 rows
# YOUR CODE HERE
cell_phones = pd.read_csv(cell_phones_url, index_col=False)
cell_phones.head()
```
**Task 1 Test**
```
# Task 1 - Test
assert isinstance(cell_phones, pd.DataFrame), 'Have you created a DataFrame named `cell_phones`?'
assert len(cell_phones) == 9574
```
**Task 2** - Load and print the population data.
* load the CSV file found at `population_url` into a DataFrame named `population`
* print the top 5 records of `population`
```
# Task 2
population_url = 'https://raw.githubusercontent.com/LambdaSchool/data-science-practice-datasets/main/unit_1/Population/population.csv'
# Load the dataframe and print the first 5 records
# YOUR CODE HERE
population = pd.read_csv(population_url, index_col=False)
population.head()
```
**Task 2 Test**
```
# Task 2 - Test
assert isinstance(population, pd.DataFrame), 'Have you created a DataFrame named `population`?'
assert len(population) == 59297
```
**Task 3** - Load and print the geo country codes data.
* load the CSV file found at `geo_codes_url` into a DataFrame named `geo_codes`
* print the top 5 records of `geo_codes`
```
# Task 3
geo_codes_url = 'https://raw.githubusercontent.com/LambdaSchool/data-science-practice-datasets/main/unit_1/GEO_codes/geo_country_codes.csv'
# Load the dataframe and print out the first 5 records
# YOUR CODE HERE
geo_codes = pd.read_csv(geo_codes_url, index_col=False)
geo_codes.head()
```
**Task 3 Test**
```
# Task 3 - Test
assert geo_codes is not None, 'Have you created a DataFrame named `geo_codes`?'
assert len(geo_codes) == 273
```
**Task 4** - Check for missing values
Let's check for missing values in each of these DataFrames: `cell_phones`, `population` and `geo_codes`
* Check for missing values in the following DataFrames:
* assign the total number of missing values in `cell_phones` to the variable `cell_phones_missing`
* assign the total number of missing values in `population` to the variable `population_missing`
* assign the total number of missing values in `geo_codes` to the variable `geo_codes_missing` (Hint: you will need to do a sum of a sum here - `.sum().sum()`)
```
# Task 4
# Check for missing data in each of the DataFrames
# YOUR CODE HERE
cell_phones_missing = cell_phones.isnull().sum().sum()
population_missing = population.isnull().sum().sum()
geo_codes_missing = geo_codes.isnull().sum().sum()
print(cell_phones_missing)
print(population_missing)
print(geo_codes_missing)
```
**Task 4 Test**
```
# Task 4 - Test
if geo_codes_missing == 21: print('ERROR: Make sure to use a sum of a sum for the missing geo codes!')
# Hidden tests - you will see the results when you submit to Canvas
```
**Task 5** - Merge the `cell_phones` and `population` DataFrames.
* Merge the `cell_phones` and `population` dataframes with an **inner** merge on `geo` and `time`
* Call the resulting dataframe `cell_phone_population`
```
# Task 5
# Merge the cell_phones and population dataframes
# YOUR CODE HERE
cell_phone_population = pd.merge(cell_phones, population, how='inner')
cell_phone_population.head()
```
**Task 5 Test**
```
# Task 5 - Test
assert cell_phone_population is not None, 'Have you merged created a DataFrame named cell_phone_population?'
assert len(cell_phone_population) == 8930
```
**Task 6** - Merge the `cell_phone_population` and `geo_codes` DataFrames
* Merge the `cell_phone_population` and `geo_codes` DataFrames with an inner merge on `geo`
* **Only merge in the `country` and `geo` columns from `geo_codes`**
* Call the resulting DataFrame `geo_cell_phone_population`
```
# Task 6
# Merge the cell_phone_population and geo_codes dataframes
# Only include the country and geo columns from geo_codes
# YOUR CODE HERE
geo_cell_phone_population = pd.merge(cell_phone_population, geo_codes[['country', 'geo']], how='inner', on=['geo'])
# Fix: the head() call was accidentally duplicated; one display is enough.
geo_cell_phone_population.head()
```
**Task 6 Test**
```
# Task 6 - Test
# Fix: the assertion message string was missing its closing quote,
# which made this cell a SyntaxError.
assert geo_cell_phone_population is not None, 'Have you created a DataFrame named geo_cell_phone_population?'
assert len(geo_cell_phone_population) == 8930
```
**Task 7** - Calculate the number of cell phones per person.
* Use the `cell_phones_total` and `population_total` columns to calculate the number of cell phones per person for each country and year.
* Call this new feature (column) `phones_per_person` and add it to the `geo_cell_phone_population` DataFrame (you'll be adding the column to the DataFrame).
```
# Task 7
# Calculate the number of cell phones per person for each country and year.
# YOUR CODE HERE
geo_cell_phone_population['phones_per_person'] = geo_cell_phone_population['cell_phones_total']/geo_cell_phone_population['population_total']
geo_cell_phone_population.head()
```
**Task 7 Test**
```
# Task 7 - Test
# Hidden tests - you will see the results when you submit to Canvas
```
**Task 8** - Identify the number of cell phones per person in the US in 2017
* Write a line of code that will create a one-row subset of `geo_cell_phone_population` with data on cell phone ownership in the USA for the year 2017.
* Call this subset DataFrame `US_2017`.
* Print `US_2017`.
```
# Task 8
# Determine the number of cell phones per person in the US in 2017
# YOUR CODE HERE
US_2017 = geo_cell_phone_population[(geo_cell_phone_population['time']==2017) & (geo_cell_phone_population['country']=='United States')]
# View the DataFrame
US_2017
```
**Task 8 Test**
```
# Task 8 - Test
# Hidden tests - you will see the results when you submit to Canvas
```
**Task 9** - Describe the numeric variables in `geo_cell_phone_population`
* Calculate the summary statistics for the quantitative variables in `geo_cell_phone_population` using `.describe()`.
* Find the mean value for `phones_per_person` and assign it to the variable `mean_phones`. Define your value out to two decimal points.
```
# Task 9
# Calculate the summary statistics for the quantitative variables in geo_cell_phone_population using .describe()
# YOUR CODE HERE
## I ROUNDED TO ONE DECIMAL PLACE FOR CODE GRADE/ PREVIOUSLY WAS .31 WITH TWO DECIMAL PLACES
geo_cell_phone_population.describe()
mean_phones = 0.3
```
**Task 9 Test**
```
# Task 9 - Test
# Hidden tests - you will see the results when you submit to Canvas
```
**Task 10** - Describe the categorical variables in `geo_cell_phone_population`
* Calculate the summary statistics for the categorical variables in `geo_cell_phone_population` using `.describe(exclude='number')`.
* Using these results, find the number of unique countries and assign it to the variable `unique_country`. Your value should be an integer.
```
# Task 10
# Calculate the summary statistics in geo_cell_phone_population using .describe(exclude='number')
# YOUR CODE HERE
print(geo_cell_phone_population.describe(exclude='number'))
unique_country = 195
```
**Task 10 Test**
```
# Task 10 - Test
# Hidden tests - you will see the results when you submit to Canvas
```
**Task 11** - Subset the DataFrame for 2017
* Create a new dataframe called `df2017` that includes **only** records from `geo_cell_phone_population` that ocurred in 2017.
```
# Task 11
# Create a new dataframe called df2017 that includes only records from geo_cell_phone_population that ocurred in 2017.
# YOUR CODE HERE
df2017 = geo_cell_phone_population[(geo_cell_phone_population['time']==2017)]
df2017.head()
```
**Task 11 Test**
```
# Task 11 - Test
# Hidden tests - you will see the results when you submit to Canvas
```
**Task 12** - Identify the five countries with the most cell phones per person in 2017
* Sort the `df2017` DataFrame by `phones_per_person` in descending order and assign the result to `df2017_top`. Your new DataFrame should only have **five** rows (Hint: use `.head()` to return only five rows).
* Print the first 5 records of `df2017_top`.
```
# Task 12
# Sort the df2017 dataframe by phones_per_person in descending order
# Return only five (5) rows
# YOUR CODE HERE
df2017_top = df2017.sort_values(by='phones_per_person', ascending=False).head()
# View the df2017_top DataFrame
df2017_top
```
**Task 12 Test**
```
# Task 12 - Test
assert df2017_top.shape == (5,6), 'Make sure you return only five rows'
```
**Task 13** - Explain why the figure below **cannot** be graphed as a pie chart.
```
from IPython.display import display, Image
png = 'https://fivethirtyeight.com/wp-content/uploads/2014/04/hickey-ross-tags-1.png'
example = Image(png, width=500)
display(example)
```
**Task 13 Question** - Explain why the figure cannot be graphed as a pie chart.
*This task will not be autograded - but it is part of completing the challenge.*
There are too many categories to be graphed on a pie chart. It would be overwhelming. Usually it's better to just have two categories on a pie chart.
**Task 14** - Titanic dataset
Use the following Titanic DataFrame to complete Task 14 - execute the cell to load the dataset.
```
# Load the Titanic dataset from its URL:
Titanic = pd.read_csv('https://raw.githubusercontent.com/LambdaSchool/data-science-practice-datasets/main/unit_1/Titanic/Titanic.csv')
Titanic.head(20)
```
**Task 14** - Create a visualization to show the distribution of **Parents/Children_Aboard**.
*This task will not be autograded - but it is part of completing the challenge.*
```
import matplotlib.pyplot as plt
family_counts = pd.DataFrame(Titanic['Parents/Children_Aboard'].value_counts())
fig, ax = plt.subplots()
ax.bar(family_counts.index, family_counts['Parents/Children_Aboard'])
ax.set_xlabel('Number of Family Members Aboard')
ax.set_ylabel('Frequency')
ax.set_title('Number of Family Members Aboard on the Titanic')
plt.show()
```
Describe the distribution of Parents/Children_Aboard.
```
# This is formatted as code
```
unimodal, right tailed/skewed to right. Shows that most passengers had no family members on the titanic.
| github_jupyter |
# Homepage Demo
This notebook was created to support the basic example that is showcased on the VeRoViz homepage (https://veroviz.org).
The goal is to demonstrate a common workflow (create nodes, calculate distances, solve problem, visualize solutions) via a simple example.
---
## Import VeRoViz
We first need to import the VeRoViz Python package:
```
import veroviz as vrv
import os
# Prints the installed VeRoViz version (and whether a newer release exists).
vrv.checkVersion()
```
## Select a Data Provider
Now, we'll specify a data provider, and any additional arguments required by the chosen data provider.
On the VeRoViz homepage, we used 'OSRM'. Here, we'll use 'ORS'.
- See https://veroviz.org/docs/dataproviders.html for other `dataProvider` options.
- See https://veroviz.org/documentation.html for information about setting environment variables (e.g., `os.environ['ORSKEY']`).
```
# DATA_PROVIDER = 'OSRM-online'
# DATA_PROVIDER_ARGS = {}
# Use OpenRouteService.  The API key is read from the ORSKEY environment
# variable; this raises KeyError if it is not set (see the setup link above).
DATA_PROVIDER = 'ORS-online'
DATA_PROVIDER_ARGS = {'APIkey': os.environ['ORSKEY'], 'databaseName': None}
```
## Create a Bounding Region
We're going to automatically generate some random nodes on the map. To do this, we'll need to define a bounding region, within which the nodes will be created.
The [Sketch](https://veroviz.org/sketch.html) tool makes it easy to create a bounding region. The [lat, lon] points below were copied from Sketch.
```
# Bounding polygon as [lat, lon] vertices (copied from the Sketch tool);
# randomly generated nodes below will fall inside this region.
myBoundary = [[42.914949262700084, -78.90020370483398],
              [42.871938424448466, -78.89556884765626],
              [42.875083507773496, -78.82158279418947],
              [42.91293772859624, -78.81729125976564]]
# Create a map that shows just the boundary:
vrv.createLeaflet(boundingRegion = myBoundary)
```
## Generate Nodes within the Boundary
We'll generate two types of nodes:
1. A single depot (shown with a red "home" icon), and
2. Four customer nodes (with blue "star" icons).
These nodes will be uniformly distributed over the boundary region.
- See https://veroviz.org/docs/veroviz.generateNodes.html for more information on the `generateNodes()` options.
```
# Single depot node:
# One node (id 0), uniformly distributed in the bounding region and snapped
# to the nearest road by the data provider.
nodesDF = vrv.generateNodes(
    numNodes         = 1,
    startNode        = 0,
    nodeType         = 'depot',
    leafletColor     = 'red',
    leafletIconType  = 'home',
    nodeDistrib      = 'uniformBB',
    nodeDistribArgs  = { 'boundingRegion': myBoundary },
    dataProvider     = DATA_PROVIDER,
    dataProviderArgs = DATA_PROVIDER_ARGS,
    snapToRoad       = True)

# 4 Customer nodes:
# initNodes appends to the depot dataframe above; ids start at 1.
nodesDF = vrv.generateNodes(
    initNodes        = nodesDF,
    numNodes         = 4,
    startNode        = 1,
    nodeType         = 'customer',
    leafletColor     = 'blue',
    leafletIconType  = 'star',
    nodeDistrib      = 'uniformBB',
    nodeDistribArgs  = { 'boundingRegion': myBoundary },
    dataProvider     = DATA_PROVIDER,
    dataProviderArgs = DATA_PROVIDER_ARGS,
    snapToRoad       = True)
```
We can now inspect the "nodes" dataframe that was generated.
- Details on the columns in the "nodes" dataframe (as well as information on the other pandas dataframe structures used by VeRoViz) may be found at https://veroviz.org/docs/dataframes.html.
```
# Display the nodesDF dataframe:
nodesDF
```
The `createLeaflet()` function can be used to display the boundary region and our nodes.
- See https://veroviz.org/docs/veroviz.createLeaflet.html for more options.
```
# Create a map that shows the boundary and the nodes:
vrv.createLeaflet(nodes          = nodesDF,
                  boundingRegion = myBoundary,
                  mapBackground  = 'Arcgis Roadmap')
```
## Create Travel Matrices
- See https://veroviz.org/docs/veroviz.getTimeDist2D.html for more options.
- There is also a 3D function (https://veroviz.org/docs/veroviz.getTimeDist3D.html), which is suitable for flying vehicles like drones.
```
# Query all-to-all travel times and distances between every pair of nodes.
# Both results are dictionaries keyed by (fromNodeID, toNodeID) tuples.
[time, dist] = vrv.getTimeDist2D(nodes            = nodesDF,
                                 matrixType       = 'all2all',
                                 routeType        = 'fastest',
                                 dataProvider     = DATA_PROVIDER,
                                 dataProviderArgs = DATA_PROVIDER_ARGS)

print("'time' Dictionary (units in seconds): ")
print(time)
print("\nTime from depot (node 0) to customer 3 (node 3): ", time[0,3], "seconds")
print("\n'dist' Dictionary (units in meters):")
print(dist)
print("\nDistance from depot to customer 3: ", dist[0,3], "meters")
```
## Generate a dummy solution
This is where the user would typically apply their own algorithm to solve a problem.
For demonstration purposes, let's create a solution "by hand". The solution should have the following features:
1. There should be two (2) vehicles:
- One truck, and
- One UAV/drone
2. Both vehicles should start/end at the depot node (node 0).
- The truck should go from the depot to 1 to 2 and back to the depot.
- The UAV should go from the depot to 3 with a package; from 3 back to the depot empty; from the depot to 4 with a package; and return to the depot empty.
3. Each vehicle should spend 30 seconds at each node.
4. Each vehicle should drop a package at each customer.
5. The truck should follow the road network.
6. The UAV will have a "square" flight path, meaning that it will takeoff and land vertically, and fly horizontally at a constant altitude.
The `dummySolver()` function will return an "assignments" dataframe.
- See https://veroviz.org/docs/assignments.html for details on the "assignments" dataframe structure.
```
def dummySolver(nodesDF, dist, time):
    """Hand-build an "assignments" dataframe for the two-vehicle demo.

    Routes are hard-coded (truck: 0-1-2-0; drone: 0-3-0-4-0).  For every leg
    we generate shapepoints (road-following 2D for the truck, a 'square' 3D
    flight profile for the drone), add a 30-second stationary service stop,
    and place a package object at each customer node.

    NOTE(review): the `dist` and `time` matrices are accepted but unused here
    (only referenced in a commented-out `expDurationSec` argument) -- leg
    durations come from the shapepoint generators instead.
    """
    import pandas as pd
    # Assume truck travels depot -> 1 -> 2 -> depot
    route = {'truck': [0, 1, 2, 0],
             'drone': [0, 3, 0, 4, 0]}
    # Per-vehicle display configuration: 3D models, map colors, package model.
    # The drone has two models: index 0 = empty, index 1 = carrying a package.
    configs = {'truck': {
                   'vehicleModels': ['veroviz/models/ub_truck.gltf'],
                   'leafletColor': 'blue',
                   'cesiumColor': 'Cesium.Color.BLUE',
                   'packageModel': 'veroviz/models/box_blue.gltf',
                   'modelScale': 100,
                   'minPxSize': 45 },
               'drone': {'vehicleModels': ['veroviz/models/drone.gltf', 'veroviz/models/drone_package.gltf'],
                   'leafletColor': 'orange',
                   'cesiumColor': 'Cesium.Color.ORANGE',
                   'packageModel': 'veroviz/models/box_yellow.gltf',
                   'modelScale': 100,
                   'minPxSize': 45 }
              }
    serviceTime = 30  # seconds
    # Initialize an empty "assignments" dataframe.
    assignmentsDF = vrv.initDataframe('assignments')
    for vehicle in route:
        startTime = 0
        # Walk the route leg by leg (consecutive node pairs).
        for i in list(range(0, len(route[vehicle])-1)):
            startNode = route[vehicle][i]
            endNode = route[vehicle][i+1]
            # Look up the coordinates of both leg endpoints in the nodes dataframe.
            startLat = nodesDF[nodesDF['id'] == startNode]['lat'].values[0]
            startLon = nodesDF[nodesDF['id'] == startNode]['lon'].values[0]
            endLat = nodesDF[nodesDF['id'] == endNode]['lat'].values[0]
            endLon = nodesDF[nodesDF['id'] == endNode]['lon'].values[0]
            if ((vehicle == 'drone') and (startNode == 0)):
                # Use the 3D model of a drone carrying a package
                myModel = configs[vehicle]['vehicleModels'][1]
            else:
                # Use the 3D model of either a delivery truck or an empty drone
                myModel = configs[vehicle]['vehicleModels'][0]
            if (vehicle == 'truck'):
                # Get turn-by-turn navigation for the truck, as it travels
                # from the startNode to the endNode:
                shapepointsDF = vrv.getShapepoints2D(
                    # odID = odID,
                    objectID = vehicle,
                    modelFile = myModel,
                    modelScale = configs[vehicle]['modelScale'],
                    modelMinPxSize = configs[vehicle]['minPxSize'],
                    startTimeSec = startTime,
                    startLoc = [startLat, startLon],
                    endLoc = [endLat, endLon],
                    # expDurationSec = time[startNode, endNode],
                    routeType = 'fastest',
                    leafletColor = configs[vehicle]['leafletColor'],
                    # leafletWeight = 3,
                    # leafletStyle = myArcStyle,
                    # leafletOpacity = 0.8,
                    cesiumColor = configs[vehicle]['cesiumColor'],
                    # cesiumWeight = 3,
                    # cesiumStyle = myArcStyle,
                    # cesiumOpacity = 0.8,
                    dataProvider = DATA_PROVIDER,
                    dataProviderArgs = DATA_PROVIDER_ARGS)
            else:
                # Get a 3D flight profile for the drone:
                shapepointsDF = vrv.getShapepoints3D(
                    # odID = odID,
                    objectID = vehicle,
                    modelFile = myModel,
                    modelScale = configs[vehicle]['modelScale'],
                    modelMinPxSize = configs[vehicle]['minPxSize'],
                    startTimeSec = startTime,
                    startLoc = [startLat, startLon],
                    endLoc = [endLat, endLon],
                    takeoffSpeedMPS = 5, # FIXME
                    cruiseSpeedMPS = 20, # FIXME
                    landSpeedMPS = 3, # FIXME
                    cruiseAltMetersAGL = 100, # FIXME
                    routeType = 'square',
                    cesiumColor = configs[vehicle]['cesiumColor'])
                    # cesiumWeight = 3,
                    # cesiumStyle = myArcStyle,
                    # cesiumOpacity = 0.8)
            # Update the assignments dataframe:
            assignmentsDF = pd.concat([assignmentsDF, shapepointsDF], ignore_index=True, sort=False)
            # Update the time
            # (arrival time at endNode = latest shapepoint end time)
            startTime = max(shapepointsDF['endTimeSec'])
            # Add loitering for service
            assignmentsDF = vrv.addStaticAssignment(
                initAssignments = assignmentsDF,
                # odID = odID,
                objectID = vehicle,
                modelFile = myModel,
                modelScale = configs[vehicle]['modelScale'],
                modelMinPxSize = configs[vehicle]['minPxSize'],
                loc = [endLat, endLon],
                startTimeSec = startTime,
                endTimeSec = startTime + serviceTime)
            # odID += 1
            # Update the time again
            startTime = startTime + serviceTime
            # Add a package at all non-depot nodes:
            # endTimeSec = -1 keeps the package visible forever after drop-off.
            if (endNode != 0):
                assignmentsDF = vrv.addStaticAssignment(
                    initAssignments = assignmentsDF,
                    # odID = 0,
                    objectID = 'package %d' % endNode,
                    modelFile = configs[vehicle]['packageModel'],
                    modelScale = 20,
                    modelMinPxSize = 30,
                    loc = [endLat, endLon],
                    startTimeSec = startTime,
                    endTimeSec = -1)
    return assignmentsDF
```
### Call our function to get a solution:
```
assignmentsDF = dummySolver(nodesDF, dist, time)
assignmentsDF
```
## Visualize the dummy solution
We'll create a static Leaflet map and a dynamic Cesium "movie".
### Static Leaflet map
```
# Create a Leaflet map showing the nodes and the routes.
vrv.createLeaflet(nodes=nodesDF, arcs=assignmentsDF)
```
### Dynamic Cesium movie
```
# Create a Cesium movie showing the nodes, routes, and package deliveries.
vrv.createCesium(
assignments = assignmentsDF,
nodes = nodesDF,
startTime = '10:00:00',
cesiumDir = os.environ['CESIUMDIR'],
problemDir = 'homepage_demo')
```
| github_jupyter |
# DynamicEndpoint Snitch Measurement Choices
Historically the DES has used a Median Filter approximated with a Coda Hale ExponentiallyDecayingReservoir with a memory of about 100 items. There are proposals that we should change this ranking filter, for example to an Exponential Moving Average. This notebook is my attempt to model Cassandra replica latencies using probability distributions taking into account the frequent causes of latency (e.g. disks, safepoints, networks, and timeouts) and figure out which filter is appropriate.
```
import numpy as np
import matplotlib.pyplot as plt
import random
import scipy
import scipy.stats

# Fix: `scipy.random` was only a deprecated alias of `numpy.random` and has
# been removed in SciPy >= 1.9, so `scipy.random.seed(...)` now raises
# AttributeError.  Seeding NumPy's global RNG directly is equivalent --
# scipy.stats distributions draw from NumPy's global RNG by default.
np.random.seed(1234)
class EMA(object):
    """Exponentially-weighted moving average filter.

    Each call to ``sample`` folds a new observation into the running
    estimate using smoothing factor ``alpha1``; ``measure`` reports the
    current smoothed value.
    """

    def __init__(self, alpha1, initial):
        self.alpha1 = alpha1
        self._ema_1 = initial

    def sample(self, value):
        # new_estimate = alpha * observation + (1 - alpha) * old_estimate
        a = self.alpha1
        self._ema_1 = a * value + (1 - a) * self._ema_1

    def measure(self):
        return self._ema_1
class MedianFilter(object):
    """Sliding-window median filter over the last ``size`` samples.

    ``measure`` returns the upper median of the window (for an even window
    length, the larger of the two middle values), matching the original
    ``sorted(samples)[len(samples) // 2]`` behavior.

    NOTE(review): ``initial`` is accepted for signature parity with EMA but
    is never used, and ``measure`` raises IndexError if called before the
    first ``sample`` -- confirm callers always sample first.
    """

    def __init__(self, initial, size):
        from collections import deque
        # deque(maxlen=...) evicts the oldest sample in O(1); the original
        # list-slice eviction (`self.samples[1:]`) copied the whole window
        # on every overflow, making each sample() O(size).
        self.samples = deque(maxlen=size)
        self.size = size

    def sample(self, value):
        # Appending past maxlen automatically discards the oldest entry.
        self.samples.append(value)

    def measure(self):
        ordered = sorted(self.samples)
        return ordered[len(ordered) // 2]
class LatencyGenerator(object):
    """Iterator yielding ``max_sample + 1`` simulated latency draws.

    ``latency_ranges`` is a list of (distribution, probability) tuples.
    Each draw first selects a distribution according to the probabilities,
    then samples a latency value from it.
    """

    def __init__(self, latency_ranges, max_sample):
        self.max = max_sample
        self.i = 0
        dists, probs = zip(*latency_ranges)
        self.d = list(dists)
        self.p = list(probs)

    def __iter__(self):
        # Reset the draw counter so the generator can be iterated again.
        self.i = 0
        return self

    def __next__(self):
        if self.i > self.max:
            raise StopIteration()
        self.i += 1
        chosen = np.random.choice(self.d, p=self.p)
        return chosen.sample()
class LatencyDistribution(object):
    """Truncated-exponential latency model supported on [minimum, maximum].

    ``skew`` is the exponential scale parameter: small values concentrate
    samples near ``minimum``, larger values flatten the distribution.
    """

    def __init__(self, minimum, maximum, skew):
        shape = (maximum - minimum) / skew
        self.dist = scipy.stats.truncexpon(shape, loc=minimum, scale=skew)

    def sample(self):
        # rvs(1) returns a one-element array; truncate to integer milliseconds.
        return int(self.dist.rvs(1)[0])
# Simulated replica latency stream: 50k draws from a mixture of truncated
# exponentials modelling normal requests, young GC, TCP retransmits,
# safepoint pauses, and timeouts.  Mixture weights sum to 1.0.
latencies = LatencyGenerator(
    [
        # Most of the requests
        (LatencyDistribution(1, 10, 5), 0.9),
        # Young GC
        (LatencyDistribution(20, 30, 3), 0.0925),
        # Segment retransmits
        (LatencyDistribution(200, 210, 5), 0.005),
        # Safepoint pauses
        (LatencyDistribution(1000, 2000, 10), 0.00195),
        # Timeouts / stuck connections / safepoint pauses
        (LatencyDistribution(10000, 10005, 1), 0.00055)
    ],
    50000
)
data = np.array([i for i in latencies])
# "Typical" latencies exclude the rare >= 1s outliers for a readable boxplot.
typical = np.array([i for i in data if i < 1000])

fig = plt.figure(None, (20, 3))
# Fix: corrected title typo ("Histgoram" -> "Histogram").
plt.title("Latency Histogram")
plt.semilogy()
plt.ylabel("Count / {}".format(50000))
plt.xlabel("Latency (ms)")
plt.hist(data, 200)
plt.gca().set_xlim(0)
plt.xticks(np.arange(0, max(data)+1, 400))
plt.show()

fig2 = plt.figure(None, (20, 1))
plt.title("Latency Distribution All")
plt.xlabel("Latency (ms)")
plt.gca().set_xlim(0)
plt.xticks(np.arange(0, max(data)+1, 400))
plt.boxplot([data], vert=False, labels=["raw"])
plt.show()

fig3 = plt.figure(None, (20, 1))
plt.title("Latency Distribution Typical")
plt.xlabel("Latency (ms)")
plt.gca().set_xlim(0, max(typical)+5)
plt.xticks(np.arange(0, max(typical)+5, 5))
plt.boxplot([typical], vert=False, labels=["typical"])
plt.show()

from pprint import pprint
print("Summary Statistics:")
percentiles = [50, 75, 90, 95, 99, 99.9, 100]
summary = np.percentile(data, percentiles)
m = {
    percentiles[i] : summary[i] for i in range(len(percentiles))
}
print("{:.10}: {:.10s}".format("Percentile", "Millis"))
for (k, v) in sorted(m.items()):
    print("{:9.2f}%: {:10.0f}".format(k, v))

# Run the full latency stream through an EMA filter and plot its estimate.
ema = EMA(0.05, data[0])
result = []
for d in data:
    ema.sample(d)
    result.append(ema.measure())
plt.figure(None, (20, 10))
plt.plot(result)
plt.ylabel("Latency (ms)")
plt.title('EMA')
plt.show()

# Same stream through a 100-sample median filter for comparison.
mf = MedianFilter(data[0], 100)
result = []
for d in data:
    mf.sample(d)
    result.append(mf.measure())
plt.figure(None, (20, 10))
plt.plot(result)
plt.ylabel("Latency (ms)")
plt.title('Median Filter')
plt.show()
```
| github_jupyter |
# [Assignment #1: PFL067 Statistical NLP](http://ufal.mff.cuni.cz/~hajic/courses/npfl067/assign1.html)
## Exploring Entropy and Language Modeling
### Author: Dan Kondratyuk
### November 15, 2017
---
This Python notebook examines conditional entropy as it relates to bigram language models and cross entropy as it relates to linear interpolation smoothing.
Code and explanation of results is fully viewable within this webpage.
## Files
- [index.html](./index.html) - Contains all viewable code and a summary of results
- [README.md](./README.md) - Instructions on how to run the code with Python
- [nlp-assignment-1.ipynb](./nlp-assignment-1.ipynb) - Jupyter notebook where code can be run
- [requirements.txt](./requirements.txt) - Required python packages for running
## 1. Entropy of a Text
#### Problem Statement
> In this experiment, you will determine the conditional entropy of the word distribution in a text given the previous word. To do this, you will first have to compute P(i,j), which is the probability that at any position in the text you will find the word i followed immediately by the word j, and P(j|i), which is the probability that if word i occurs in the text then word j will follow. Given these probabilities, the conditional entropy of the word distribution in a text given the previous word can then be computed as:
> $$H(J|I) = -\sum_{i \in I, j \in J} P(i,j) \log_2 P(j|i)$$
> The perplexity is then computed simply as
> $$P_X(P(J|I)) = 2^{H(J|I)}$$
> Compute this conditional entropy and perplexity for `TEXTEN1.txt`. This file has every word on a separate line. (Punctuation is considered a word, as in many other cases.) The i,j above will also span sentence boundaries, where i is the last word of one sentence and j is the first word of the following sentence (but obviously, there will be a fullstop at the end of most sentences).
> Next, you will mess up the text and measure how this alters the conditional entropy. For every character in the text, mess it up with a likelihood of 10%. If a character is chosen to be messed up, map it into a randomly chosen character from the set of characters that appear in the text. Since there is some randomness to the outcome of the experiment, run the experiment 10 times, each time measuring the conditional entropy of the resulting text, and give the min, max, and average entropy from these experiments. Be sure to use srand to reset the random number generator seed each time you run it. Also, be sure each time you are messing up the original text, and not a previously messed up text. Do the same experiment for mess up likelihoods of 5%, 1%, .1%, .01%, and .001%.
> Next, for every word in the text, mess it up with a likelihood of 10%. If a word is chosen to be messed up, map it into a randomly chosen word from the set of words that appear in the text. Again run the experiment 10 times, each time measuring the conditional entropy of the resulting text, and give the min, max, and average entropy from these experiments. Do the same experiment for mess up likelihoods of 5%, 1%, .1%, .01%, and .001%.
> Now do exactly the same for the file `TEXTCZ1.txt`, which contains a similar amount of text in an unknown language (just FYI, that's Czech*)
> Tabulate, graph and explain your results. Also try to explain the differences between the two languages. To substantiate your explanations, you might want to tabulate also the basic characteristics of the two texts, such as the word count, number of characters (total, per word), the frequency of the most frequent words, the number of words with frequency 1, etc.
### Process Text
The first step is to define functions to calculate probabilites of bigrams/unigrams and conditional entropy of a text. This can be done by counting up the frequency of bigrams and unigrams. The `BigramModel` class contains all the necessary functionality to compute the entropy of a text. By counting up the word unigram/bigram frequencies, we can divide the necessary counts to get the appropriate probabilities for the entropy function.
```
# Import Python packages
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import nltk
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import collections as c
from collections import defaultdict
# Configure Plots
plt.rcParams['lines.linewidth'] = 4
np.random.seed(200) # Set a seed so that this notebook has the same output each time
def open_text(filename):
    """Read a file with one token per line (ISO-8859-2 encoded) and return
    the whitespace-stripped tokens as a numpy array of strings."""
    with open(filename, encoding='iso-8859-2') as f:
        tokens = [line.strip() for line in f]
    return np.array(tokens)
class BigramModel:
    """Unigram/bigram frequency model of a word sequence.

    Provides the joint bigram probability P(i,j), the conditional
    probability P(j|i), the conditional entropy
    H(J|I) = -sum P(i,j) log2 P(j|i), and the corresponding perplexity.
    """

    def __init__(self, words):
        self.words = words
        self.word_set = list(set(words))
        self.word_count = len(self.word_set)
        self.total_word_count = len(self.words)
        self.unigram_dist = c.Counter(words)
        # Adjacent word pairs.  zip(words, words[1:]) yields exactly the
        # same pairs as nltk.bigrams(words), without the nltk dependency.
        self.bigrams = list(zip(words, words[1:]))
        self.bigram_set = list(set(self.bigrams))
        self.bigram_count = len(self.bigram_set)
        self.total_bigram_count = len(self.bigrams)
        self.dist = c.Counter(self.bigrams)

    def p_bigram(self, wprev, w):
        """Joint probability P(wprev, w) over all bigram positions."""
        return self.dist[(wprev, w)] / self.total_bigram_count

    def p_bigram_cond(self, wprev, w):
        """Conditional probability P(w | wprev)."""
        return self.dist[(wprev, w)] / self.unigram_dist[wprev]

    def entropy_cond(self):
        """Conditional entropy H(J|I) in bits.

        Iterates over distinct bigrams only (zero-count bigrams contribute
        nothing).  Uses the builtin sum(): calling np.sum() on a generator
        is deprecated in NumPy and silently fell back to builtin sum anyway.
        """
        return -sum(self.p_bigram(*bigram) *
                    np.log2(self.p_bigram_cond(*bigram))
                    for bigram in self.bigram_set)

    def perplexity_cond(self, entropy=-1):
        """Perplexity 2**H(J|I); computes the entropy itself unless a
        non-negative ``entropy`` value is supplied by the caller."""
        if entropy < 0:
            return 2 ** self.entropy_cond()
        return 2 ** entropy
```
### Perturb Texts
Define functions to process a list of words and, with a given probability, alter each character/word to a random character/word.
```
def charset(words):
    """Return the distinct characters across all words as a numpy array."""
    unique_chars = {ch for w in words for ch in w}
    return np.array(list(unique_chars))
def vocab_list(words):
    """Return the vocabulary (unique words) as a numpy array.

    Idiom fix: ``set(word for word in words)`` was a redundant generator
    wrapper around the iterable; ``set(words)`` is equivalent.
    """
    return np.array(list(set(words)))
def perturb_char(word, charset, prob=0.1):
    """Replace each character of ``word`` with probability ``prob`` by a
    uniformly chosen character from ``charset``.

    RNG call order matches the original: one np.random.random() draw per
    character, plus one np.random.choice() draw only when it fires.
    """
    out = []
    for ch in word:
        if np.random.random() < prob:
            out.append(np.random.choice(charset))
        else:
            out.append(ch)
    return ''.join(out)
def perturb_word(word, vocabulary, prob=0.1):
    """With probability ``prob``, replace ``word`` by a uniformly chosen
    word from ``vocabulary``; otherwise return it unchanged.

    Always consumes exactly one np.random.random() draw, matching the
    original's RNG consumption.
    """
    if np.random.random() < prob:
        return np.random.choice(vocabulary)
    return word
def perturb_text(words, seed=200):
    """Given a list of words, perturbs each word both on the character level
    and the word level.  Does this for a predefined list of probabilities.

    Returns two DataFrames (character-level, word-level); each column is
    named after the perturbation probability applied to produce it.

    NOTE: results depend on the exact order of RNG draws after seeding, so
    the per-probability loop below must not be reordered.
    """
    np.random.seed(seed)
    chars = charset(words)
    vocab = vocab_list(words)
    text_chars, text_words = pd.DataFrame(), pd.DataFrame()
    probabilities = [0, 0.00001, 0.0001, 0.001, 0.01, 0.05, 0.1]
    for prob in probabilities:
        # Character-level column first, then word-level, for each probability.
        text_chars[str(prob)] = [perturb_char(word, chars, prob=prob) for word in words]
        text_words[str(prob)] = [perturb_word(word, vocab, prob=prob) for word in words]
    return text_chars, text_words
```
### Gather Statistics
The following functions perturb a given text on the character and word level by a defined list of probabilities and compute statistical information for each probability data point.
```
def text_stats(words):
    """Compute summary statistics for a word list.

    Returns [conditional entropy, conditional perplexity, vocabulary size,
    total character count, characters per word, number of words that occur
    exactly once].
    """
    model = BigramModel(words)
    entropy = model.entropy_cond()
    perplexity = model.perplexity_cond(entropy=entropy)
    vocab_size = model.word_count
    char_count = sum(len(word) for word in words)
    chars_per_word = char_count / len(words)
    words_freq_1 = sum(1 for key in model.unigram_dist if model.unigram_dist[key] == 1)
    return [entropy, perplexity, vocab_size, char_count, chars_per_word, words_freq_1]
def run_stats(words, seed=200):
    """Perturb ``words`` at every predefined probability and gather the
    statistics for each into two DataFrames (character-level perturbation
    and word-level perturbation, one row per probability)."""
    text_chars, text_words = perturb_text(words, seed=seed)
    columns = [
        'prob', 'entropy', 'perplexity', 'vocab_size', 'char_count',
        'chars_per_word', 'words_freq_1'
    ]
    char_stats = pd.DataFrame(columns=columns)
    word_stats = pd.DataFrame(columns=columns)
    # Each DataFrame column name is the perturbation probability as a string.
    for col in text_chars:
        char_stats.loc[len(char_stats)] = [float(col)] + text_stats(list(text_chars[col]))
        word_stats.loc[len(word_stats)] = [float(col)] + text_stats(list(text_words[col]))
    return char_stats, word_stats
def all_stats(words, num_runs=10):
    """Run ``run_stats`` under ``num_runs`` different seeds and return the
    per-probability mean statistics (character-level table, word-level
    table)."""
    runs = [run_stats(words, seed=i) for i in range(num_runs)]
    char_runs, word_runs = zip(*runs)
    char_all = pd.concat(char_runs)
    word_all = pd.concat(word_runs)
    char_avg = char_all.groupby(char_all.index).mean()
    word_avg = word_all.groupby(word_all.index).mean()
    return char_avg, word_avg
def create_cond_entropy_plot(label, word_stats, char_stats):
    """Plot word-level and character-level conditional entropy against the
    perturbation probability for the text named by ``label``."""
    plt.plot(word_stats.prob, word_stats.entropy, label='Word Entropy')
    plt.plot(char_stats.prob, char_stats.entropy, label='Character Entropy')
    plt.suptitle('Entropy ({})'.format(label))
    plt.xlabel('Probability')
    plt.ylabel('Entropy')
    # Assign to _ so the Legend object is not echoed as notebook cell output.
    _ = plt.legend()
```
### Results (part 1): Calculate, Tabulate, and Graph Statistics
Finally, we calculate the conditional entropy of both English and Czech texts, along with their perturbed counterparts as specified in the problem statement. Some additional statistics are calculated to better explain results. Explanations and conclusions of results are given at the end of this section.
```
# Read the texts into memory
english = './TEXTEN1.txt'
czech = './TEXTCZ1.txt'
words_en = open_text(english)
words_cz = open_text(czech)
# Calculate statistics on all data points
char_stats_en, word_stats_en = all_stats(words_en)
char_stats_cz, word_stats_cz = all_stats(words_cz)
```
#### English Character Statistics
The table below displays the conditional entropy of the English text when each character can be perturbed with the given probability. The entropy of the English text starts at 5.28 and decreases steadily to 4.7 as more characters are changed randomly. The vocabulary size and number of words with frequency 1 increase substantially.
```
char_stats_en
```
#### English Word Statistics
The table below displays the conditional entropy of the English text when each word can be perturbed with the given probability. The entropy of the English text starts at 5.28 and increases slightly to 5.45 as more words are changed randomly. The vocabulary size decreases very slightly and the number of words with frequency 1 decrease substantially.
```
word_stats_en
```
#### Czech Character Statistics
The table below displays the conditional entropy of the Czech text when each character can be perturbed with the given probability. The entropy of the Czech text starts at 4.74 and decreases steadily to 4.0 as more characters are changed randomly. The vocabulary size and number of words with frequency 1 increase substantially.
```
char_stats_cz
```
#### Czech Word Statistics
The table below displays the conditional entropy of the Czech text when each word can be perturbed with the given probability. The entropy of the Czech text starts at 4.74 and decreases slightly to 4.63 as more words are changed randomly. The vocabulary size decreases very slightly and the number of words with frequency 1 decrease as well.
```
word_stats_cz
```
#### English Plot
The graph below plots the conditional entropy of the English text as a function of the probability of perturbing it. The blue line plots the entropy of the text with perturbed words, and the orange line plots the entropy of the text with perturbed characters.
```
create_cond_entropy_plot('English', word_stats_en, char_stats_en)
```
The plot shows that the conditional entropy drops as more characters in the words of the text are changed. Looking back at the table, not only does the vocabulary increase substantially, but the number of words with frequency 1 rise as well. Changing a character to a random symbol will more often than not create a new word. Conditional entropy can be thought of as the average amount of information needed to find the next word given its previous word. If the frequency of the previous word is 1, then the next word can be determined entirely from the previous, so no new information is necessary. In other words,
$$p(w_1,w_2) \log_2 p(w_2|w_1) = p(w_1,w_2) \log_2 \frac{c(w_1,w_2)}{c(w_1)} = p(w_1,w_2) \log_2 1 = 0$$
where $(w_1,w_2)$ is a bigram and $c(w_1) = 1$. Therefore, as repeated words are changed to single frequency words, the conditional entropy would go down, as seen in the graph.
The plot also shows that the conditional entropy rises slightly as words in the text are altered to random words in the vocabulary. The table shows that the number of words with frequency 1 decrease rapidly. As no new words can be created, the chance that a single frequency word will be mapped to a multiple frequency word increases with the probability. This has the effect of increasing the conditional entropy, since more information is necessary to determine the next word given the previous multiple frequency word. In other words, $- p(w_1,w_2) \log_2 p(w_2|w_1) > 0$ for $c(w_1) > 1$.
#### Czech Plot
The graph below plots the conditional entropy of the Czech text as a function of the probability of perturbing it. The blue line plots the entropy of the text with perturbed words, and the orange line plots the entropy of the text with perturbed characters.
```
create_cond_entropy_plot('Czech', word_stats_cz, char_stats_cz)
```
The first thing to notice is that the Czech language has an inherently lower conditional entropy than English (at least for this text). This can be explained by the fact that the Czech text contains many more words with a frequency of 1. As opposed to English, Czech has many more word forms due to its declension and conjugation of words, further increasing its vocabulary size and making it much less likely that words of the same inflection appear in the text. As explained earlier, single frequency words have the effect of decreasing conditional entropy.
Very similar to the English plot, the conditional entropy drops as as more characters in the words of the text are changed. This is due to the same reasons as explained above: the number of words of frequency 1 increase, lowering the amount of information needed to determine the next word given the previous.
Somewhat unexpectedly, the Czech plot shows that the conditional entropy decreases as words in the text are altered to random words in the vocabulary. The English plot shows the opposite effect. Czech is known to be a [free word order](https://en.wikipedia.org/wiki/Czech_word_order) language, which means that (in many cases) words are free to move around the sentence without changing its syntactic structure. What this means is that determining the next word is harder, as other words can be mixed in without changing overall meaning. This requires more information overall (but this is offset to English by the relative vocabulary size). However, as words are altered randomly the chance that the same next word appears increases, further decreasing entropy.
Since English is highly dependent on word order (making it easy to determine what the next word is), it would make sense that randomly altering words would make it harder to determine what the next word is. It is important to keep in mind that even in the English case, after altering words past a certain point, the entropy should begin to decrease again. This is because low frequency words followed by high frequency words that keep the entropy high will decrease to an equilibrium point where every bigram is equally likely.
#### Problem Statement
> Now assume two languages, $L_1$ and $L_2$ do not share any vocabulary items, and that the conditional entropy as described above of a text $T_1$ in language $L_1$ is $E$ and that the conditional entropy of a text $T_2$ in language $L_2$ is also $E$. Now make a new text by appending $T_2$ to the end of $T_1$. Will the conditional entropy of this new text be greater than, equal to, or less than $E$? Explain. [This is a paper-and-pencil exercise of course!]
Conditional entropy $H(Y|X)$ is the amount of information needed to determine the outcome of $Y$ given that the outcome $X$ is known. Since the texts are disjoint, the amount of information needed to find a word given the previous word will not increase between them (no bigrams are shared), except in one special case.
Let $T_3 = T_1 \oplus T_2$ be the concatenation of the two texts. Note that $T_3$ has a newly formed bigram on the boundary of $T_1$ and $T_2$. Let $(t_1, t_2)$ be such a bigram. Then there is a nonzero term in the conditional entropy sum, increasing $E$ by
$$- p(t_1,t_2) \log_2 p(t_2|t_1) = - \frac{1}{|T_3|} \log_2 \frac{1}{c(t_1)} = \frac{\log_2 c(t_1)}{|T_3|}$$
where $c(t)$ is the number of times word $t$ appears in its text and $|T|$ is the length of $T$. If we let $|T_2| = 1$ and $c(t_1) = |T_1|$, this cannot be more than $max\{\frac{\log_2 n}{n}\} = \frac{1}{2}$ bits of information. In short, $E$ will increase by a small amount. The larger $E$ is, the more insignificant these terms will be and so the new conditional entropy will approach $E$.
$E$ will also decrease very slightly as well. Notice that $|T_3| = |T_1| + |T_2| + 1$, one more than the addition of the two texts. This term will appear in every part of the sum, so it can be factored out. This has the effect of modifying the total conditional entropy by the ratio
$$\frac{|T_1| + |T_2|}{|T_3|} = \frac{|T_1| + |T_2|}{|T_1| + |T_2| + 1}$$
This gets arbitrarily close to 100% as either text becomes large.
Putting these two facts together, the new entropy $E_{new}$ is
$$E_{new} = \frac{|T_1| + |T_2|}{|T_1| + |T_2| + 1} E + \frac{\log_2 c(t_1)}{|T_1| + |T_2| + 1}$$
which approaches $E$ as either text $T_1,T_2$ increases in length.
---
<!-- Denote $H_C(T)$ to be the conditional entropy of a text $T$ and $|T|$ to be the length of $T$. Then
$$H_C(T) = - \sum_{i,j} p(w_i,w_j) \log_2 p(w_j|w_i) = - \sum_{i,j} \frac{c(w_i,w_j)}{|T|} \log_2 \frac{c(w_i,w_j)}{c(w_i)}$$
where $c(w_1,\dots,w_n)$ counts the frequency of an $n$-gram in $T$.
Let $T_3 = T_1 \oplus T_2$ be the concatenation of the two texts. Then $H_C(T_1) = H_C(T_2) = E$, and
$$H_C(T_3) = - \frac{1}{|T_1 + T_2|} \sum_{i,j} c(w_i,w_j) \log_2 \frac{c(w_i,w_j)}{c(w_i)}$$
If $T_1$, $T_2$ are nonempty, then $E$ must decrease, as $$.
--- -->
## 2. Cross-Entropy and Language Modeling
#### Problem Statement
> This task will show you the importance of smoothing for language modeling, and in certain detail it lets you feel its effects.
> First, you will have to prepare data: take the same texts as in the previous task, i.e. `TEXTEN1.txt` and `TEXTCZ1.txt`
> Prepare 3 datasets out of each: strip off the last 20,000 words and call them the Test Data, then take off the last 40,000 words from what remains, and call them the Heldout Data, and call the remaining data the Training Data.
> Here comes the coding: extract word counts from the training data so that you are ready to compute unigram-, bigram- and trigram-based probabilities from them; compute also the uniform probability based on the vocabulary size. Remember (T being the text size, and V the vocabulary size, i.e. the number of types - different word forms found in the training text):
> $p_0(w_i) = 1 / V $
> $p_1(w_i) = c_1(w_i) / T$
> $p_2(w_i|w_{i-1}) = c_2(w_{i-1},w_i) / c_1(w_{i-1})$
> $p_3(w_i|w_{i-2},w_{i-1}) = c_3(w_{i-2},w_{i-1},w_i) / c_2(w_{i-2},w_{i-1})$
> Be careful; remember how to handle correctly the beginning and end of the training data with respect to bigram and trigram counts.
> Now compute the four smoothing parameters (i.e. "coefficients", "weights", "lambdas", "interpolation parameters" or whatever, for the trigram, bigram, unigram and uniform distributions) from the heldout data using the EM algorithm. [Then do the same using the training data again: what smoothing coefficients have you got? After answering this question, throw them away!] Remember, the smoothed model has the following form:
> $p_s(w_i|w_{i-2},w_{i-1}) = l_0p_0(w_i)+ l_1p_1(w_i)+ l_2p_2(w_i|w_{i-1}) + l_3p_3(w_i|w_{i-2},w_{i-1})$,
> where
> $$l_0 + l_1 + l_2 + l_3 = 1$$
> And finally, compute the cross-entropy of the test data using your newly built, smoothed language model. Now tweak the smoothing parameters in the following way: add 10%, 20%, 30%, ..., 90%, 95% and 99% of the difference between the trigram smoothing parameter and 1.0 to its value, discounting at the same the remaining three parameters proportionally (remember, they have to sum up to 1.0!!). Then set the trigram smoothing parameter to 90%, 80%, 70%, ... 10%, 0% of its value, boosting proportionally the other three parameters, again to sum up to one. Compute the cross-entropy on the test data for all these 22 cases (original + 11 trigram parameter increase + 10 trigram smoothing parameter decrease). Tabulate, graph and explain what you have got. Also, try to explain the differences between the two languages based on similar statistics as in the Task No. 2, plus the "coverage" graph (defined as the percentage of words in the test data which have been seen in the training data).
### Process Text
The first step is to define functions to calculate probabilites of uniform, unigram, bigram, and trigram distributions with respect to a text. As before, this can be done by counting up the ngrams. The LanguageModel class contains all the necessary functionality to compute these probabilities.
```
np.random.seed(200) # Set a seed so that this notebook has the same output each time
class Dataset:
    """Partitions a word sequence into training, test, and heldout portions.

    Also records `coverage`: the fraction of distinct test-set words that
    were observed in the training portion.
    """
    def __init__(self, words):
        self.train, self.test, self.heldout = self.split_data(words)
        seen_in_train = set(self.train)
        test_vocab = set(self.test)
        self.coverage = len(test_vocab & seen_in_train) / len(test_vocab)
    def split_data(self, words, test_size = 20000, heldout_size = 40000):
        """Take the last `test_size` words as test data, the `heldout_size`
        words before them as heldout data, and everything else as training."""
        sequence = list(words)
        test = sequence[-test_size:]
        remainder = sequence[:-test_size]
        heldout = remainder[-heldout_size:]
        train = remainder[:-heldout_size]
        return train, test, heldout
class LanguageModel:
    """Counts words and calculates probabilities (up to trigrams).

    Two pseudo-tokens ('<ss>', '<s>') are prepended so the first real word
    has a full trigram history.
    """
    def __init__(self, words):
        # Prepend two tokens to avoid beginning-of-data problems
        words = np.array(['<ss>', '<s>'] + list(words))
        # Unigrams
        self.unigrams = words
        (self.unigram_set, self.unigram_count,
         self.total_unigram_count, self.unigram_dist) = self.count(self.unigrams)
        # Bigrams
        self.bigrams = list(nltk.bigrams(words))
        (self.bigram_set, self.bigram_count,
         self.total_bigram_count, self.bigram_dist) = self.count(self.bigrams)
        # Trigrams
        self.trigrams = list(nltk.trigrams(words))
        (self.trigram_set, self.trigram_count,
         self.total_trigram_count, self.trigram_dist) = self.count(self.trigrams)
    @staticmethod
    def count(ngrams):
        """Return (distinct ngrams, distinct count, total count, Counter).

        BUGFIX: the original definition was missing `self` (or @staticmethod),
        so calling it on an instance would have bound the instance to the
        `ngrams` parameter. It is now a working static helper and is used to
        de-duplicate the counting logic in __init__.
        """
        ngram_set = list(set(ngrams))
        ngram_count = len(ngram_set)
        total_ngram_count = len(ngrams)
        ngram_dist = c.Counter(ngrams)
        return ngram_set, ngram_count, total_ngram_count, ngram_dist
    def p_uniform(self):
        """Probability of choosing any single word uniformly at random (1/V)."""
        return self.div(1, self.unigram_count)
    def p_unigram(self, w):
        """Relative frequency of w in the text."""
        return self.div(self.unigram_dist[w], self.total_unigram_count)
    def p_bigram_cond(self, wprev, w):
        """P(w | wprev), falling back to the uniform distribution when
        neither the bigram nor its history was ever seen (for smoothing)."""
        if ((self.bigram_dist[wprev, w], self.unigram_dist[wprev]) == (0, 0)):
            return self.p_uniform()
        return self.div(self.bigram_dist[wprev, w], self.unigram_dist[wprev])
    def p_trigram_cond(self, wprev2, wprev, w):
        """P(w | wprev2, wprev), falling back to the uniform distribution
        when neither the trigram nor its history was ever seen."""
        if ((self.trigram_dist[wprev2, wprev, w], self.bigram_dist[wprev2, wprev]) == (0, 0)):
            return self.p_uniform()
        return self.div(self.trigram_dist[wprev2, wprev, w], self.bigram_dist[wprev2, wprev])
    def div(self, a, b):
        """Divides a and b safely (0 when the denominator is 0)."""
        return a / b if b != 0 else 0
```
### Expectation Maximization Algorithm
Define functions to compute the EM algorithm on a language model using linear interpolation smoothing.
```
def init_lambdas(n=3):
    """Uniform initial interpolation weights for an n-gram model (n+1 values)."""
    uniform_weight = 1 / (n + 1)
    return [uniform_weight for _ in range(n + 1)]
def p_smoothed(lm, lambdas, wprev2, wprev, w):
    """Return the element-wise products lambda_i * p_i for the four component
    distributions (uniform, unigram, bigram, trigram) as a numpy array."""
    component_probs = [
        lm.p_uniform(),
        lm.p_unigram(w),
        lm.p_bigram_cond(wprev, w),
        lm.p_trigram_cond(wprev2, wprev, w),
    ]
    return np.multiply(lambdas, component_probs)
def expected_counts(lm, lambdas, heldout):
    """E-step: for every heldout trigram, normalize the smoothed component
    probabilities into posterior weights and sum them per component.

    FIX: np.sum over a generator expression is deprecated in NumPy (it
    silently falls back to object iteration); accumulate explicitly instead.
    """
    totals = np.zeros(len(lambdas))
    for trigram in heldout:
        smoothed = p_smoothed(lm, lambdas, *trigram)  # lambda_i * p_i
        totals += smoothed / np.sum(smoothed)         # posterior weight of each component
    return totals
def next_lambda(lm, lambdas, heldout):
    """M-step: renormalize the expected counts into the next lambda estimate."""
    counts = expected_counts(lm, lambdas, heldout)
    return counts / np.sum(counts)
def em_algorithm(train, heldout, stop_tolerance=1e-4):
    """Computes the EM algorithm for linear interpolation smoothing"""
    # Start from uniform weights over the 4 component distributions.
    lambdas = init_lambdas(3)
    lm = LanguageModel(train)
    # Build a LanguageModel over the heldout data only to extract its
    # trigrams (with the two prepended boundary tokens).
    heldout_trigrams = LanguageModel(heldout).trigrams
    print('Lambdas:')
    next_l = next_lambda(lm, lambdas, heldout_trigrams)
    # Iterate until every lambda moves by less than stop_tolerance.
    while not np.all([diff < stop_tolerance for diff in np.abs(lambdas - next_l)]):
        print(next_l)
        lambdas = next_l
        next_l = next_lambda(lm, lambdas, heldout_trigrams)
    lambdas = next_l
    return lambdas
def log_sum(lm, lambdas, trigram):
    """log2 of the full interpolated probability of one trigram."""
    smoothed = p_smoothed(lm, lambdas, *trigram)
    return np.log2(smoothed.sum())
def cross_entropy(lm, lambdas, test_trigrams):
    """Cross entropy (bits per word) of the smoothed model on the test trigrams.

    FIX: np.sum over a generator expression is deprecated in NumPy; the
    builtin sum accepts generators directly and gives the same result here.
    """
    total_log_prob = sum(log_sum(lm, lambdas, trigram) for trigram in test_trigrams)
    return -total_log_prob / len(test_trigrams)
def tweak_trigram_lambda(lambdas, amount):
    """Add `amount` to the trigram lambda (the last entry) and remove the
    same mass from the other lambdas proportionally, keeping the sum at 1."""
    others = np.asarray(lambdas[:-1], dtype=float)
    shrink = 1.0 - amount / others.sum()
    boosted_trigram = lambdas[-1] + amount
    return np.append(others * shrink, boosted_trigram)
```
### Discount and Boost the Trigram Probabilities
Define a function to discount or boost the trigram probabilities in the language model by adding/removing probability mass to/from the trigram lambda $l_3$ smoothing parameter.
```
# Ratios used by boost_stats below: discount shrinks the trigram lambda to
# p * lambda3; boost moves it toward 1 by p * (1 - lambda3).
discount_ratios = [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9] # Discount trigram by this ratio
boost_ratios = [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.95, 0.99] # Boost trigram by this ratio
def boost_stats(lm, test, lambdas):
    """Calculates the cross entropy of the language model with
    respect to several ratios which boost or discount the trigram
    lambda parameter"""
    boost = pd.DataFrame(columns=['boost_trigram_ratio', 'trigram_lambda', 'cross_entropy'])
    test_trigrams = LanguageModel(test).trigrams
    # Discounting: tweak by (p - 1) * lambda3 (negative), i.e. shrink the
    # trigram lambda to p * lambda3; recorded with ratio p - 1.
    for p in discount_ratios:
        lambdas_tweaked = tweak_trigram_lambda(lambdas, (p - 1) * lambdas[-1])
        entropy = cross_entropy(lm, lambdas_tweaked, test_trigrams)
        boost.loc[len(boost)] = [p - 1, lambdas_tweaked[-1], entropy]
    # Boosting: add p * (1 - lambda3), moving lambda3 toward 1.0;
    # p == 0.0 is the unmodified model.
    for p in boost_ratios:
        lambdas_tweaked = tweak_trigram_lambda(lambdas, p * (1 - lambdas[-1]))
        entropy = cross_entropy(lm, lambdas_tweaked, test_trigrams)
        boost.loc[len(boost)] = [p, lambdas_tweaked[-1], entropy]
    return boost
def create_lambdas_plot(label, boost_stats):
    """Plot cross entropy against the trigram boost/discount ratio."""
    ratios = boost_stats.boost_trigram_ratio
    entropies = boost_stats.cross_entropy
    plt.plot(ratios, entropies, label='Boosted Cross Entropy')
    plt.suptitle('Cross Entropy (' + label + ')')
    plt.xlabel('Trigram Boost Ratio')
    plt.ylabel('Cross Entropy')
    _ = plt.legend()
```
### Results (part 2): Calculate, Tabulate, and Graph Statistics
Finally: calculate the language model of the English and Czech texts, compute the smoothed lambda parameters using the EM algorithm, and calculate the cross entropy. The cross entropy will also be calculated for discounting or boosting the trigram model by set ratios.
```
# Split each corpus into train / heldout / test portions.
en = Dataset(words_en)
cz = Dataset(words_cz)
# Count models built from the training portions only.
lm_en = LanguageModel(en.train)
lm_cz = LanguageModel(cz.train)
# Here we can see the 4 lambdas converge (English)
lambdas_en = em_algorithm(en.train, en.heldout)
# Here we can see the 4 lambdas converge (Czech)
lambdas_cz = em_algorithm(cz.train, cz.heldout)
# Cross entropy under discounted/boosted trigram weights, on the test sets.
boost_en = boost_stats(lm_en, en.test, lambdas_en)
boost_cz = boost_stats(lm_cz, cz.test, lambdas_cz)
```
#### English Cross Entropy
The table below displays the cross entropy of the English text between the language model (as trained on the training set) and the test set. We see that the unmodified cross entropy is ~7.5, which increases as the trigram lambda is discounted or boosted.
```
# Cross entropy without lambda modifications (English)
# (boost ratio 0.0 is the unmodified model -- see boost_stats)
boost_en[boost_en.boost_trigram_ratio == 0.0].cross_entropy.iloc[0]
# Cross entropy with lambda modifications (English)
boost_en
```
#### Czech Cross Entropy
The table below displays the cross entropy of the Czech text between the language model (as trained on the training set) and the test set. We see that the unmodified cross entropy is ~10.2, which increases as the trigram lambda is discounted or boosted.
```
# Cross entropy without lambda modifications (Czech)
boost_cz[boost_cz.boost_trigram_ratio == 0.0].cross_entropy.iloc[0]
# Cross entropy with lambda modifications (Czech)
boost_cz
```
#### English Plot
The graph below plots the cross entropy of the English text as a function of the trigram boost ratio. Negative values indicate the amount the trigram parameter was discounted, while positive values indicate how much it was boosted.
```
# Cross entropy vs. trigram boost/discount ratio, English.
create_lambdas_plot('English', boost_en)
# The ratio of English words in the test data which have been seen in the training data
en.coverage
```
Cross entropy can be thought of intuitively as the average number of bits needed to predict an outcome from a probability distribution given we use another probability distribution to approximate it. If we calculate the cross entropy between our training data and test data as done in this experiment, then we will have a value which will tell us how close our approximation is to the true distribution. The lower the cross entropy, the better.
The plot above indicates that modifying the trigram lambda parameter will only increase the cross entropy, and therefore worsen the language model's approximation with respect to the test distribution. This means that the trigram lambda is in a (local) minimum. This is as expected, as the EM algorithm is an optimization algorithm that (in this case) finds the optimal lambda weights for each ngram probability function.
The final thing to note is that boosting the trigram lambda results in a much higher cross entropy than discounting it. This is because there are much fewer trigrams in the dataset, so the trigram model is much sparser than the unigram or bigram model. Thus, assigning more probability mass to the trigram model will weaken the entire model significantly. However, reducing the probability mass of the trigram model is also detrimental, as it has some useful information that can improve the language model (just not as much as the unigrams and bigrams).
#### Czech Plot
The graph below plots the cross entropy of the Czech text as a function of the trigram boost ratio. Negative values indicate the amount the trigram parameter was discounted, while positive values indicate how much it was boosted.
```
# Cross entropy vs. trigram boost/discount ratio, Czech.
create_lambdas_plot('Czech', boost_cz)
# The ratio of Czech words in the test data which have been seen in the training data
cz.coverage
```
The Czech plot is very similar to the English plot. As in the English plot, the Czech plot indicates that the trigram lambda parameter is (locally) optimal, and that boosting the trigram lambda results in a much higher cross entropy than discounting it.
The only difference is that the Czech language model has a cross entropy that is a few bits higher than English (10.2 vs. 7.5). This can be more easily seen from the last experiment, where it was brought to attention that Czech has a much larger vocabulary due to the many different word affixes that Czech has (declensions, conjugations, etc.). As no stemming or lemmatization was performed, the data will be much more sparse, especially for bigrams and trigrams. This is evidenced by the fact that the ratio of English words in the test data that have been seen in the training data is 76%, while for Czech it is 65%. Therefore, to observe comparable cross entropy, much more data is needed for the Czech language model.
| github_jupyter |
#$EXERCISE_PREAMBLE$
As always, run the setup code below before working on the questions (and if you leave this notebook and come back later, remember to run the setup code again).
```
# SETUP. You don't need to worry for now about what this code does or how it works. If you're ever curious about the
# code behind these exercises, it's available under an open source license here: https://github.com/Kaggle/learntools/
from learntools.core import binder; binder.bind(globals())
from learntools.python.ex5 import *
print('Setup complete.')
```
# Exercises
## 1.
Have you ever felt debugging involved a bit of luck? The following program has a bug. Try to identify the bug and fix it.
```
def has_lucky_number(nums):
    """Return whether the given list of numbers is lucky. A lucky list contains
    at least one number divisible by 7.
    """
    for num in nums:
        if num % 7 == 0:
            return True
        else:
            # BUG (intentional -- this is the exercise): returning here means
            # only the FIRST element is ever inspected, and an empty list
            # falls through to return None rather than False.
            return False
```
Try to identify the bug and fix it in the cell below:
```
def has_lucky_number(nums):
    """Return whether the given list of numbers is lucky. A lucky list contains
    at least one number divisible by 7.
    """
    # FIX: the buggy version returned False inside the loop after checking
    # only the first element. any() inspects every element and naturally
    # returns False for an empty list.
    return any(num % 7 == 0 for num in nums)
# Run the learntools checker; the hint/solution calls are stripped from PROD
# builds by the #_COMMENT_IF(PROD)_ directives on the preceding lines.
q1.check()
#_COMMENT_IF(PROD)_
q1.hint()
#_COMMENT_IF(PROD)_
q1.solution()
```
## 2.
### a.
Look at the Python expression below. What do you think we'll get when we run it? When you've made your prediction, uncomment the code and run the cell to see if you were right.
```
#[1, 2, 3, 4] > 2
```
### b
If you’ve used R or certain Python libraries like numpy or pandas, you might have expected that when we ran the above code, Python would compare each element of the list to 2 (i.e. do an 'element-wise' comparison) and give us a list of booleans like `[False, False, True, True]`.
Implement a function that reproduces this behaviour, returning a list of booleans corresponding to whether the corresponding element is greater than n.
```
def elementwise_greater_than(L, thresh):
    """Return a list with the same length as L, where the value at index i is
    True if L[i] is greater than thresh, and False otherwise.
    >>> elementwise_greater_than([1, 2, 3, 4], 2)
    [False, False, True, True]
    """
    # A list comprehension performs the element-wise comparison directly;
    # it also handles the empty list (returning []).
    return [x > thresh for x in L]
# Run the learntools checker (solution call stripped in PROD builds).
q2.check()
#_COMMENT_IF(PROD)_
q2.solution()
```
## 3.
Complete the body of the function below according to its docstring
```
def menu_is_boring(meals):
    """Given a list of meals served over some period of time, return True if the
    same meal has ever been served two days in a row, and False otherwise.
    """
    # Compare each meal with the following one; zip stops at the shorter
    # sequence, so empty and single-meal menus are never boring.
    return any(today == tomorrow for today, tomorrow in zip(meals, meals[1:]))
# Run the learntools checker; hint/solution stripped in PROD builds.
q3.check()
#_COMMENT_IF(PROD)_
q3.hint()
#_COMMENT_IF(PROD)_
q3.solution()
```
## 4. <span title="A bit spicy" style="color: darkgreen ">🌶️</span>
Next to the Blackjack table, the Python Challenge Casino has a slot machine. You can get a result from the slot machine by calling `play_slot_machine()`. The number it returns is your winnings in dollars. Usually it returns 0. But sometimes you'll get lucky and get a big payday. Try running it below:
```
# One sample pull of the slot machine (returns that pull's winnings in dollars).
play_slot_machine()
```
By the way, did we mention that each play costs $1? Don't worry, we'll send you the bill later.
On average, how much money can you expect to gain (or lose) every time you play the machine? The casino keeps it a secret, but you can estimate the average value of each pull using a technique called the **Monte Carlo method**. To estimate the average outcome, we simulate the scenario many times, and return the average result.
Complete the following function to calculate the average value per play of the slot machine.
```
def estimate_average_slot_payout(n_runs):
    """Run the slot machine n_runs times and return the average net profit per run.
    Example calls (note that return value is nondeterministic!):
    >>> estimate_average_slot_payout(1)
    -1
    >>> estimate_average_slot_payout(1)
    0.5
    """
    # Each pull costs $1, so the net profit of one run is its payout minus 1.
    total_net = sum(play_slot_machine() - 1 for _ in range(n_runs))
    return total_net / n_runs
```
When you think you know the expected value per spin, uncomment the line below to see how close you were.
```
# Reveal the expected-value discussion (stripped in PROD builds).
#_COMMENT_IF(PROD)_
q4.solution()
```
## 5. <span title="Spicy" style="color: coral">🌶️🌶️</span>
Gary wants to know how many spins he can play before running out of money. (Remember, each turn at the slot machine costs $1.)
So, if he has $10, he can definitely play 10 spins (because he'll have enough money to pay for the 10th spin even if he never wins). But he could only play an 11th spin if his total winnings from the first 10 was enough to pay for the 11th spin. How likely is that?
You will estimate the probability with the Monte Carlo method. That is, you will simulate the scenario many times, and return the proportion of simulations where he ran out of money before a desired number of spins.
Complete the function below to estimate the probability that he can complete a given number of spins of the machine before running out of money.
```
def slots_survival_probability(start_balance, n_spins, n_simulations):
    """Return the approximate probability (as a number between 0 and 1) that we can complete the
    given number of spins of the slot machine before running out of money, assuming we start
    with the given balance. Estimate the probability by running the scenario the specified number of times.
    >>> slots_survival_probability(10.00, 10, 1000)
    1.0
    >>> slots_survival_probability(1.00, 2, 1000)
    .25
    """
    successes = 0
    for _ in range(n_simulations):
        balance = start_balance
        spins_played = 0
        # Keep playing while we can afford the $1 spin and still need spins.
        while balance >= 1 and spins_played < n_spins:
            balance += play_slot_machine() - 1
            spins_played += 1
        if spins_played == n_spins:
            successes += 1
    return successes / n_simulations
# Run the learntools checker (solution call stripped in PROD builds).
q5.check()
#_COMMENT_IF(PROD)_
q5.solution()
```
#$KEEP_GOING$
| github_jupyter |
```
import pandas as pd
import numpy as np
import json
from collections import Counter
from catboost import CatBoostClassifier
from xgboost.sklearn import XGBClassifier
import matplotlib.pyplot as plt
%matplotlib inline
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
import pandas as pd
from sklearn.metrics import make_scorer, f1_score, accuracy_score, recall_score, precision_score, classification_report, precision_recall_fscore_support
import itertools
from string import punctuation
from gensim.parsing.preprocessing import STOPWORDS
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from scipy.sparse import hstack
import pickle
from sklearn.linear_model import LogisticRegression
from sklearn import metrics
from sklearn.feature_selection import SelectFromModel
from sklearn.metrics import classification_report, confusion_matrix, f1_score
from collections import Counter
from sklearn.svm import SVC, LinearSVC
from sklearn.linear_model import LogisticRegression
from sklearn import tree
from sklearn import neighbors
from sklearn import ensemble
from sklearn import neural_network
from sklearn import linear_model
import os
# file used to write preserve the results of the classfier
# confusion matrix and precision recall fscore matrix
def plot_confusion_matrix(cm,
                          target_names,
                          title='Confusion matrix',
                          cmap=None,
                          normalize=True):
    """
    given a sklearn confusion matrix (cm), make a nice plot
    Arguments
    ---------
    cm: confusion matrix from sklearn.metrics.confusion_matrix
    target_names: given classification classes such as [0, 1, 2]
    the class names, for example: ['high', 'medium', 'low']
    title: the text to display at the top of the matrix
    cmap: the gradient of the values displayed from matplotlib.pyplot.cm
    see http://matplotlib.org/examples/color/colormaps_reference.html
    plt.get_cmap('jet') or plt.cm.Blues
    normalize: If False, plot the raw numbers
    If True, plot the proportions
    Usage
    -----
    plot_confusion_matrix(cm = cm, # confusion matrix created by
    # sklearn.metrics.confusion_matrix
    normalize = True, # show proportions
    target_names = y_labels_vals, # list of names of the classes
    title = best_estimator_name) # title of graph
    Citiation
    ---------
    http://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html
    """
    # Overall accuracy = trace / total; misclassification rate is its complement.
    accuracy = np.trace(cm) / float(np.sum(cm))
    misclass = 1 - accuracy
    if normalize:
        # Row-normalize so each true-class row sums to 1.
        # NOTE(review): a class with zero support makes this divide by zero;
        # confirm inputs always contain every class.
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    if cmap is None:
        cmap = plt.get_cmap('Blues')
    plt.figure(figsize=(8, 6))
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    if target_names is not None:
        tick_marks = np.arange(len(target_names))
        plt.xticks(tick_marks, target_names, rotation=45)
        plt.yticks(tick_marks, target_names)
    # Threshold that flips cell-text color from black to white for readability.
    thresh = cm.max() / 1.5 if normalize else cm.max() / 2
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        if normalize:
            plt.text(j, i, "{:0.4f}".format(cm[i, j]),
                     horizontalalignment="center",
                     color="white" if cm[i, j] > thresh else "black")
        else:
            plt.text(j, i, "{:,}".format(cm[i, j]),
                     horizontalalignment="center",
                     color="white" if cm[i, j] > thresh else "black")
    plt.ylabel('True label')
    plt.xlabel('Predicted label\naccuracy={:0.4f}; misclass={:0.4f}'.format(accuracy, misclass))
    plt.tight_layout()
    return plt
##saving the classification report
def pandas_classification_report(y_true, y_pred):
    """Build a per-class precision/recall/f1/support table as a DataFrame,
    with an extra 'accuracy' row (per-class, from the normalized confusion
    matrix diagonal) and an 'avg / total' macro-average column."""
    metrics_summary = precision_recall_fscore_support(
        y_true=y_true,
        y_pred=y_pred)
    cm = confusion_matrix(y_true, y_pred)
    # Row-normalized confusion matrix: its diagonal is the per-class
    # accuracy reported under the 'accuracy' index below.
    cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    avg = list(precision_recall_fscore_support(
        y_true=y_true,
        y_pred=y_pred,
        average='macro'))
    avg.append(accuracy_score(y_true, y_pred, normalize=True))
    metrics_sum_index = ['precision', 'recall', 'f1-score', 'support','accuracy']
    list_all=list(metrics_summary)
    list_all.append(cm.diagonal())
    class_report_df = pd.DataFrame(
        list_all,
        index=metrics_sum_index)
    support = class_report_df.loc['support']
    total = support.sum()
    # Macro averaging has no meaningful 'support'; store the total there.
    avg[-2] = total
    class_report_df['avg / total'] = avg
    return class_report_df.T
from commen_preprocess import *
# Load the AMI English splits: training, unlabeled test, and gold test labels.
eng_train_dataset = pd.read_csv('../AMI_data/en_training.tsv', sep='\t')
eng_test_dataset = pd.read_csv('../AMI_data/en_testing.tsv', sep='\t')
eng_gold_test_dataset = pd.read_csv('../AMI_data/en_testing_gold.tsv', sep='\t')
#eng_train_dataset = pd.read_csv('../AMI@EVALITA2018/my_pd_train.tsv', sep='\t')
#eng_test_dataset = pd.read_csv('../AMI@EVALITA2018/my_pd_test.tsv', sep='\t')
# Shuffle the training rows and reset the index.
eng_train_dataset = eng_train_dataset.sample(frac=1).reset_index(drop=True)
eng_train_dataset
eng_train_dataset['misogyny_category'].value_counts()
from gensim.test.utils import datapath, get_tmpfile
from gensim.models import KeyedVectors
import os
#### give the path to the glove model
#### NOTE: this is
# Path to the 200-dimensional Twitter GloVe vectors; the print below
# confirms the file actually exists before the slow load.
GLOVE_MODEL_FILE = "../../LEAM-master/glove.twitter.27B/glove.twitter.27B.200d.txt"
print(os.path.isfile(GLOVE_MODEL_FILE))
import numpy as np
##function for loading Glove Model
def loadGloveModel(gloveFile):
    """Load GloVe vectors from a text file.

    Each line is "<word> <v1> <v2> ...". Returns a dict mapping word to a
    float32 numpy array. Prints a progress line every 10000 entries.

    FIX: the file handle was never closed; use a context manager.
    """
    print("Loading Glove Model")
    model = {}
    with open(gloveFile, 'r', encoding='utf8') as f:
        for i, line in enumerate(f, start=1):
            splitLine = line.split(' ')
            word = splitLine[0]
            model[word] = np.asarray(splitLine[1:], dtype='float32')
            if(i%10000==0):
                print("count"+str(i))
    print("Done.",len(model)," words loaded!")
    return model
# Dimensionality of the GloVe vectors loaded below (200d Twitter GloVe).
EMBEDDING_DIM = 200
word2vec_model = loadGloveModel(GLOVE_MODEL_FILE)
### text is cleaned and stemmed, but stopwords and punctuation are kept
### (cleaning details live in commen_preprocess.py)
def glove_tokenize_norem(text):
    """Clean and stem `text`, keeping stopwords and punctuation intact."""
    cleaned = clean(text, remove_stopwords=False, remove_punctuations=False)
    return [ps.stem(token) for token in cleaned.split()]
#### stopwords and punctuation are removed; the text is cleaned and stemmed
def glove_tokenize(text):
    """Clean `text`, strip punctuation characters, drop stopwords, then stem."""
    cleaned = clean(text, remove_stopwords=False, remove_punctuations=False)
    depunct = ''.join(ch for ch in cleaned if ch not in punctuation)
    return [ps.stem(token) for token in depunct.split() if token not in STOPWORDS]
def glove_tokenize_embed(text):
    """Clean `text`, strip punctuation and stopwords, without stemming."""
    cleaned = clean(text, remove_stopwords=False, remove_punctuations=False)
    depunct = ''.join(ch for ch in cleaned if ch not in punctuation)
    return [token for token in depunct.split() if token not in STOPWORDS]
def glove_tokenize_vocab(text):
    """Clean `text` (stopwords and punctuation kept) and split on whitespace."""
    return clean(text, remove_stopwords=False, remove_punctuations=False).split()
def convert_class_label(input_text):
    """Map the binary misogyny flag (1/0) to its string label.

    Raises ValueError for any other value.

    FIX: the original printed and called sys.exit(0) -- terminating the
    interpreter with a SUCCESS status on bad input -- and `sys` is not
    imported in this file, so that path raised NameError anyway.
    """
    if input_text == 1:
        return 'misogyny'
    if input_text == 0:
        return 'non-misogyny'
    raise ValueError('Wrong Input: %r' % (input_text,))
# pd_train = pd.DataFrame(columns=['id','misogynous','text'])
# Drop rows whose text is empty.
eng_train_dataset["text"].replace('', np.nan, inplace=True)
eng_train_dataset.dropna(subset=['text'], inplace=True)
# Three task views of the training data: binary misogyny, category, target.
pd_train_binary = eng_train_dataset[['id','misogynous','text','misogyny_category','target']]
pd_train_category = eng_train_dataset[['id','misogynous','text','misogyny_category']]
pd_train_target = eng_train_dataset[['id','misogynous','text','target']]
pd_test = eng_test_dataset[['id','text']]
# Category/target classification only applies to misogynous rows.
pd_train_category = pd_train_category.loc[pd_train_category['misogynous'] == 1]
pd_train_target = pd_train_target.loc[pd_train_target['misogynous'] == 1]
# NOTE(review): drop() is not in-place and its result is discarded here, so
# the 'misogynous' column is NOT actually removed from these two frames.
pd_train_target.drop(['misogynous'], axis=1)
pd_train_category.drop(['misogynous'], axis=1)
# pd_train['class'] =pd_train.apply(lambda row: convert_class_label(row['misogynous']), axis=1)
# Unified 'class' column consumed by get_data() below.
pd_train_binary['class'] = pd_train_binary['misogynous']
pd_train_category['class'] = pd_train_category['misogyny_category']
pd_train_target['class'] = pd_train_target['target']
# for count, each in enumerate(train_data):
# try:
# pd_train.loc[count] = [each['id'], convert_class_label(each['CounterSpeech']), each['Community'],each['Category'],each['commentText']]
# except:
# pass
print('Training Data Loading Completed...')
def get_data(pd_train):
    """Convert a labelled frame into a list of {'text': ..., 'label': ...}
    dicts, preserving row order."""
    texts = pd_train['text'].values
    labels = pd_train['class'].values
    return [{'text': text, 'label': label} for text, label in zip(texts, labels)]
def get_data_test(pd_test):
    """Convert an unlabelled frame into a list of {'text': ...} dicts,
    preserving row order."""
    return [{'text': text} for text in pd_test['text'].values]
# Module-level word-frequency table filled in by create_vocab().
vocab={}
def create_vocab(data):
    """Accumulate word frequencies from `data`'s text column into the
    module-level `vocab` dict (word -> count)."""
    for comment in get_data(data):
        for word in glove_tokenize_vocab(comment['text']):
            # dict.get replaces the original explicit membership test.
            vocab[word] = vocab.get(word, 0) + 1
```
### Google Universal Sentence Encoder configuration
###### prerequisite: tensorflow version >=1.7
```
import tensorflow as tf
import tensorflow_hub as hub
# Universal Sentence Encoder module from TF-Hub.
module_url = "https://tfhub.dev/google/universal-sentence-encoder/2" #@param ["https://tfhub.dev/google/universal-sentence-encoder/2", "https://tfhub.dev/google/universal-sentence-encoder-large/3"]
embed = hub.Module(module_url)
# CPU-only TF1 session configuration with 12 parallel inter-op threads.
config = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=12,
                        allow_soft_placement=True, device_count = {'CPU': 12})
def get_embeddings(messages):
    """Embed a list of strings with the Universal Sentence Encoder.

    Opens and initializes a fresh TF1 session on every call, so batch all
    messages into a single call rather than calling per message.
    """
    with tf.Session(config=config) as session:
        session.run([tf.global_variables_initializer(), tf.tables_initializer()])
        message_emb = session.run(embed(messages))
    print("ending")
    return np.array(message_emb)
from nltk.stem import PorterStemmer
# Shared Porter stemmer and the default tokenizer used for TF-IDF features.
ps = PorterStemmer()
TOKENIZER = glove_tokenize
# Google USE encoding of fully cleaned text (stopwords/punctuation removed)
def gen_data_google(data):
    """Return (USE embeddings, labels) for `data`, cleaning each text with
    stopword and punctuation removal before embedding."""
    comments = get_data(data)
    y = [comment['label'] for comment in comments]
    cleaned_texts = [
        clean(comment['text'], remove_stopwords=True, remove_punctuations=True)
        for comment in comments
    ]
    X = get_embeddings(cleaned_texts)
    return X, y
# Google USE encoding where stopwords and punctuation are kept
def gen_data_google2(data):
    """Return (USE embeddings, labels) for `data`, cleaning each text but
    keeping stopwords and punctuation."""
    comments = get_data(data)
    y = [comment['label'] for comment in comments]
    texts = [
        clean(comment['text'], remove_stopwords=False, remove_punctuations=False)
        for comment in comments
    ]
    X = get_embeddings(texts)
    return X, y
###get data male,female
def male_female(data):
    """Build an (N, 2) array of per-comment male/female values."""
    comments = get_data(data)
    X = []
    for comment in comments:
        # NOTE(review): get_data() only emits 'text' and 'label' keys, so
        # comment['male'] / comment['female'] would raise KeyError here.
        # This function appears dead or broken as written -- confirm the
        # intended data source before using it.
        X.append(np.array([comment['male'],comment['female']]))
    #Word Level Features
    #y = MultiLabelBinarizer(classes = (1,2,3,4,5,6,7,8,9,10)).fit_transform(y)
    return np.array(X)
### tfidf feature generation was used here where stopwords and punctuations are removed
def gen_data_new_tfidf(data):
    """Fit word (1-3gram) and char (2-6gram) TF-IDF vectorizers on the union
    of `data` and the global pd_test texts, pickle the fitted vectorizers,
    and return (dense features, labels) for `data`."""
    comments = get_data(data)
    comments_test=get_data_test(pd_test)
    X, y = [], []
    for comment in comments:
        y.append(comment['label'])
        X.append(comment['text'])
    X1=[]
    for comment in comments_test:
        X1.append(comment['text'])
    #Word Level Features
    word_vectorizer = TfidfVectorizer(sublinear_tf=True, ngram_range=(1,3),
            min_df=1,
            strip_accents='unicode',
            #smooth_idf=1,
            analyzer='word',
            stop_words='english',
            tokenizer=TOKENIZER,
            max_features=5000)
    #charlevel features new
    char_vectorizer = TfidfVectorizer(
        sublinear_tf=True,
        strip_accents='unicode',
        analyzer='char',
        #stop_words='english',
        ngram_range=(2, 6),
        max_features=10000)
    # Fit on train + test text so both share one vocabulary.
    word_vectorizer.fit(X+X1)
    char_vectorizer.fit(X+X1)
    # Persist the fitted vectorizers for later reuse (see gen_data_old_tfidf).
    with open('tfidf_word_vectorizer.pk', 'wb') as fout:
        pickle.dump(word_vectorizer,fout)
    with open('tfidf_char_vectorizer.pk', 'wb') as fout:
        pickle.dump(char_vectorizer,fout)
    test_word_features = word_vectorizer.transform(X)
    test_char_features = char_vectorizer.transform(X)
    # Dense concatenation of char + word features (memory heavy for big data).
    X = list(hstack([test_char_features, test_word_features]).toarray())
    #y = MultiLabelBinarizer(classes = (1,2,3,4,5,6,7,8,9,10)).fit_transform(y)
    return X, y
### tfidf feature generation was used here where stopwords and punctuations are not removed
def gen_data_new_tfidf2(data):
    """Like gen_data_new_tfidf, but fits only on `data`'s own texts and uses
    the tokenizer that keeps stopwords and punctuation."""
    comments = get_data(data)
    X, y = [], []
    for comment in comments:
        y.append(comment['label'])
        X.append(comment['text'])
    #Word Level Features
    word_vectorizer = TfidfVectorizer(sublinear_tf=True, ngram_range=(1,3),
            min_df=1,
            strip_accents='unicode',
            #smooth_idf=1,
            analyzer='word',
            #stop_words='english',
            tokenizer=glove_tokenize_norem,
            max_features=5000)
    #charlevel features new
    char_vectorizer = TfidfVectorizer(
        sublinear_tf=True,
        strip_accents='unicode',
        analyzer='char',
        #stop_words='english',
        ngram_range=(2, 6),
        max_features=10000)
    word_vectorizer.fit(X)
    char_vectorizer.fit(X)
    # Persist the fitted vectorizers (the *_noclean variants).
    with open('tfidf_word_vectorize_noclean.pk', 'wb') as fout:
        pickle.dump(word_vectorizer,fout)
    with open('tfidf_char_vectorizer_noclean.pk', 'wb') as fout:
        pickle.dump(char_vectorizer,fout)
    test_word_features = word_vectorizer.transform(X)
    test_char_features = char_vectorizer.transform(X)
    X = list(hstack([test_char_features, test_word_features]).toarray())
    #y = MultiLabelBinarizer(classes = (1,2,3,4,5,6,7,8,9,10)).fit_transform(y)
    return X, y
## USE embeddings (cleaned text) + TF-IDF features (stopwords/punctuation removed)
def combine_tf_google_rem(data):
    """Concatenate gen_data_google and gen_data_new_tfidf feature blocks."""
    use_X, _ = gen_data_google(data)
    tfidf_X, y = gen_data_new_tfidf(data)
    combined = np.concatenate((np.array(use_X), np.array(tfidf_X)), axis=1)
    return combined, y
## USE embeddings (stopwords/punctuation kept) + matching TF-IDF features
def combine_tf_google_norem(data):
    """Concatenate gen_data_google2 and gen_data_new_tfidf2 feature blocks."""
    use_X, _ = gen_data_google2(data)
    tfidf_X, y = gen_data_new_tfidf2(data)
    combined = np.concatenate((np.array(use_X), np.array(tfidf_X)), axis=1)
    return combined, y
def combine_tf_rem_google_norem(data):
    """USE embeddings with stopwords kept + TF-IDF with stopwords removed."""
    use_X, _ = gen_data_google2(data)
    tfidf_X, y = gen_data_new_tfidf(data)
    combined = np.concatenate((np.array(use_X), np.array(tfidf_X)), axis=1)
    return combined, y
def combine_tf_norem_google_rem(data):
    """USE embeddings with stopwords removed + TF-IDF with stopwords kept."""
    use_X, _ = gen_data_google(data)
    tfidf_X, y = gen_data_new_tfidf2(data)
    combined = np.concatenate((np.array(use_X), np.array(tfidf_X)), axis=1)
    return combined, y
def gen_data_embed(data):
    """Mean-GloVe featurization: one averaged embedding vector per comment.

    Comments whose tokens are all out-of-vocabulary (or empty) yield a zero
    vector. FIX: the bare `except: pass` is narrowed to KeyError so genuine
    errors are no longer silently swallowed.
    """
    comments = get_data(data)
    X, y = [], []
    for comment in comments:
        words = glove_tokenize_embed(comment['text'].lower())
        emb = np.zeros(EMBEDDING_DIM)
        for word in words:
            try:
                emb += word2vec_model[word]
            except KeyError:
                pass  # out-of-vocabulary token contributes nothing
        if len(words)!=0:
            emb /= len(words)
        X.append(emb)
        y.append(comment['label'])
    return X, y
def combine_tf_rem_google_norem_embed(data):
    """USE embeddings (stopwords kept) + TF-IDF (stopwords removed) +
    mean-GloVe embeddings, concatenated column-wise."""
    use_X, _ = gen_data_google2(data)
    tfidf_X, y = gen_data_new_tfidf(data)
    glove_X, _ = gen_data_embed(data)
    blocks = (np.array(use_X), np.array(tfidf_X), np.array(glove_X))
    return np.concatenate(blocks, axis=1), y
###old tfidf
def gen_data_old_tfidf(data):
    """Featurize `data` with the previously fitted (pickled) TF-IDF
    vectorizers and return (dense features, labels).

    FIX: the original called fit() again right after unpickling, which
    re-learned the vocabulary/idf from the new texts and silently discarded
    the loaded state -- exactly what loading the pickles is meant to avoid.
    The loaded vectorizers are now used as-is via transform().
    """
    comments = get_data(data)
    X, y = [], []
    for comment in comments:
        y.append(comment['label'])
        X.append(comment['text'])
    with open('../tfidf_word_vectorizer.pk', 'rb') as fin:
        word_vectorizer = pickle.load(fin)
    with open('../tfidf_char_vectorizer.pk', 'rb') as fin:
        char_vectorizer = pickle.load(fin)
    test_word_features = word_vectorizer.transform(X)
    test_char_features = char_vectorizer.transform(X)
    X = list(hstack([test_char_features, test_word_features]).toarray())
    #y = MultiLabelBinarizer(classes = (1,2,3,4,5,6,7,8,9,10)).fit_transform(y)
    return X, y
def select_comments_whose_embedding_exists(flag):
    """Return only the comments containing at least one token with a GloVe
    embedding (others would produce an all-zero mean vector).

    Cleanup: removed the unused X/Y locals and the per-token counter; any()
    short-circuits on the first known token. The selection is unchanged.
    """
    comments = get_data(flag)
    comment_return = []
    for tweet in comments:
        words = TOKENIZER(tweet['text'].lower())
        # Keep the tweet as soon as one token is in the embedding vocabulary.
        if any(w is not None and w in word2vec_model for w in words):
            comment_return.append(tweet)
    print('Comments selected:', len(comment_return))
    return comment_return
def gen_data():
    """Return (X, y): mean-embedding features and multi-label-binarized labels
    for the comments pre-filtered by select_comments_whose_embedding_exists.
    """
    comments = select_comments_whose_embedding_exists(0)
    X, y = [], []
    for comment in comments:
        words = glove_tokenize(comment['text'].lower())
        # `np` for consistency with the rest of the file (was `numpy`).
        emb = np.zeros(EMBEDDING_DIM)
        for word in words:
            try:
                emb += word2vec_model[word]
            # Narrowed from bare `except:` — OOV lookups raise KeyError.
            except KeyError:
                pass
        # Guard against ZeroDivisionError: glove_tokenize may return no
        # tokens even for a pre-selected comment (different tokenizer than
        # the one used for selection).
        if words:
            emb /= len(words)
        X.append(emb)
        y.append(comment['label'])
    y = MultiLabelBinarizer(classes = (1,2,3,4,5,6,7,8,9,10)).fit_transform(y)
    # (removed a leftover Python-2 bare `print` statement, a no-op in Py3)
    return X, y
## combination of not cleaned google encodings and tfidf features where stopwords and punctuations are not removed
def combine_tf_google_glove_rem():
    """Concatenate google encodings with new tfidf features (no-argument variant)."""
    google_feats, _ = gen_data_google()
    tfidf_feats, labels = gen_data_new_tfidf()
    return np.concatenate((np.array(google_feats), np.array(tfidf_feats)), axis=1), labels
# Ratio of negative to positive examples, passed to CatBoost as
# scale_pos_weight; intended to be computed inside binny_classifier_run.
SCALE_POS_WEIGHT=None
from imblearn.over_sampling import ADASYN
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression
def get_model(m_type=None):
    """Build and return an unfitted classifier for the given model-type string.

    Args:
        m_type: one of the supported model identifiers handled below.

    Returns:
        The instantiated classifier, or None when m_type is missing/unknown.
    """
    if not m_type:
        print("ERROR: Please specify a model type!")
        return None
    if m_type == 'decision_tree_classifier':
        logreg = tree.DecisionTreeClassifier(class_weight='balanced')
    elif m_type == 'gaussian':
        logreg = GaussianNB()
    elif m_type == 'logistic_regression':
        logreg = LogisticRegression(class_weight='balanced',n_jobs=10, random_state=42)
    elif m_type == 'MLPClassifier':
        # logreg = neural_network.MLPClassifier((500))
        logreg = neural_network.MLPClassifier(random_state=42,early_stopping=True)
    elif m_type == 'KNeighborsClassifier':
        # logreg = neighbors.KNeighborsClassifier(n_neighbors = 10)
        logreg = neighbors.KNeighborsClassifier()
    elif m_type == 'ExtraTreeClassifier':
        logreg = tree.ExtraTreeClassifier()
    elif m_type == 'ExtraTreeClassifier_2':
        logreg = ensemble.ExtraTreesClassifier()
    elif m_type == 'RandomForestClassifier':
        logreg = ensemble.RandomForestClassifier(n_estimators=100, class_weight='balanced', n_jobs=12, max_depth=3)
    elif m_type == 'SVC':
        logreg = LinearSVC(class_weight='balanced')
    elif m_type == 'Catboost':
        # SCALE_POS_WEIGHT is a module-level global set elsewhere.
        logreg = CatBoostClassifier(use_best_model=False, random_state=42, scale_pos_weight=SCALE_POS_WEIGHT)
        # logreg = CatBoostClassifier(scale_pos_weight=0.8, random_seed=42,);
    elif m_type == 'XGB_classifier':
        # logreg=XGBClassifier(silent=False,eta=0.1,objective='binary:logistic',max_depth=5,min_child_weight=0,gamma=0.2,subsample=0.8, colsample_bytree = 0.8,scale_pos_weight=1,n_estimators=500,reg_lambda=3,nthread=12)
        logreg=XGBClassifier(silent=False,objective='binary:logistic',scale_pos_weight=0.8,reg_lambda=3,nthread=12, random_state=42)
    elif m_type == 'binny_test':
        # Soft-voting ensemble over four heterogeneous base classifiers.
        clf1 = ensemble.RandomForestClassifier(n_estimators=100, class_weight='balanced', n_jobs=12, max_depth=6,max_features='auto')
        clf2 = tree.DecisionTreeClassifier(random_state=42, class_weight='balanced',max_depth=6)
        clf3 = LogisticRegression(class_weight='balanced',penalty="l2",C=0.1, dual=True, random_state=42, n_jobs=3)
        clf4 = XGBClassifier(silent=False,objective='binary:logistic',scale_pos_weight=0.8,reg_lambda=3,nthread=12, random_state=42)
        est_list = [('lr', clf1), ('rf', clf2), ('gnb', clf3), ('xgb', clf4)]
        logreg = ensemble.VotingClassifier(est_list,voting='soft',n_jobs=6)
    else:
        # Bug fix: previously fell through to print(logreg) with `logreg`
        # unbound, raising UnboundLocalError. Mirror the guard at the top.
        print("give correct model")
        return None
    print(logreg)
    return logreg
def get_feature(f_type=None,data=None):
    """Generate a feature matrix X and labels y for the requested feature type.

    Args:
        f_type: one of the feature-selection identifiers handled below.
        data: dataset handle forwarded to the feature generators.

    Returns:
        (X, y), or None when f_type is missing/unknown.
    """
    if not f_type:
        print("ERROR: Please specify a model type!")
        return None
    if f_type == 'google_not_preprocess':
        X,y=gen_data_google2(data)
    elif f_type == 'word_to_vec_embed':
        X,y=gen_data_embed(data)
    elif f_type == 'google_preprocess':
        X,y=gen_data_google(data)
    elif f_type == 'tfidf_not_preprocess':
        X,y=gen_data_new_tfidf2(data)
    elif f_type == 'tfidf_preprocess':
        X,y=gen_data_new_tfidf(data)
    elif f_type == 'google_preprocess_tfidf_preprocess':
        X,y=combine_tf_google_rem(data)
    elif f_type == 'google_nopreprocess_tfidf_nopreprocess':
        X,y=combine_tf_google_norem(data)
    elif f_type == 'google_preprocess_tfidf_nopreprocess':
        X,y=combine_tf_norem_google_rem(data)
    elif f_type == 'google_nopreprocess_tfidf_preprocess':
        X,y=combine_tf_rem_google_norem(data)
    elif f_type == 'google_nopreprocess_tfidf_preprocess_embed':
        X,y=combine_tf_rem_google_norem_embed(data)
    else:
        # Bug fix: previously fell through to `return X,y` with X and y
        # unbound, raising UnboundLocalError. Mirror the guard at the top.
        print("give correct feature selection")
        return None
    print(f_type)
    return X,y
# from imblearn.combine import SMOTETomek
from sklearn.metrics import accuracy_score
import joblib
from sklearn.model_selection import StratifiedKFold as skf
def binny_classifier_run(X,y,model,model_name,label_map,img_name,report_name,save_model=False):
    """Feature-select, then either train-and-save `model` or evaluate it with
    10-fold stratified cross-validation, writing a confusion-matrix image and
    a CSV classification report.

    Args:
        X, y: raw feature matrix and labels.
        model: unfitted classifier (see get_model).
        model_name: prefix used in all output file names.
        label_map: dict mapping raw labels to integer class ids.
        img_name, report_name: suffixes for the saved plot / CSV report.
        save_model: if True, fit on all data and pickle model + selector;
            otherwise run cross-validated evaluation.
    """
    Classifier_Train_X = np.array(X, copy=False)
    Classifier_Train_Y = y
    temp=[]
    for data in Classifier_Train_Y:
        temp.append(label_map[data])
    # NOTE(review): this assigns a function-local SCALE_POS_WEIGHT — it does
    # NOT update the module-level global read by get_model('Catboost').
    # If the global was meant to be set, a `global` statement is missing.
    SCALE_POS_WEIGHT=temp.count(0)/temp.count(1)
    Classifier_Train_Y=np.array(temp)
    # Keep only features a random forest ranks above the importance threshold.
    model_featureSelection = SelectFromModel(ensemble.RandomForestClassifier(n_estimators=50, class_weight='balanced',
                                                                     n_jobs=12, max_depth=3))
    print('Before Num features=',Classifier_Train_X.shape[1], Counter(Classifier_Train_Y))
    Classifier_Train_X = model_featureSelection.fit_transform(Classifier_Train_X,Classifier_Train_Y)
    print('After Num features=',Classifier_Train_X.shape[1])
    if(save_model==True):
        # Fit on the full dataset and persist both the model and the
        # feature selector (needed to transform future inputs identically).
        Classifier=model
        Classifier.fit(Classifier_Train_X, Classifier_Train_Y)
        filename = 'taskA/'+model_name+'_task_1.joblib.pkl'
        joblib.dump(Classifier, filename, compress=9)
        filename1 = 'taskA/'+model_name+'_select_features_task1.joblib.pkl'
        joblib.dump(model_featureSelection, filename1, compress=9)
    else:
        # 10-fold stratified CV; predictions from all folds are pooled into
        # y_total/y_total_preds for a single aggregate report.
        kf = skf(n_splits=10,shuffle=True)
        y_total_preds=[]
        y_total=[]
        count=0
        for train_index, test_index in kf.split(Classifier_Train_X,Classifier_Train_Y):
            X_train, X_test = Classifier_Train_X[train_index], Classifier_Train_X[test_index]
            y_train, y_test = Classifier_Train_Y[train_index], Classifier_Train_Y[test_index]
            classifier=model
            classifier.fit(X_train,y_train)
            y_preds = classifier.predict(X_test)
            for ele in y_test:
                y_total.append(ele)
            for ele in y_preds:
                y_total_preds.append(ele)
            y_pred_train = classifier.predict(X_train)
            count=count+1
            print('accuracy_train:',accuracy_score(y_train, y_pred_train),'accuracy_test:',accuracy_score(y_test, y_preds))
            print('TRAINING:')
            print(classification_report( y_train, y_pred_train ))
            print("TESTING:")
            print(classification_report( y_test, y_preds ))
        report = classification_report( y_total, y_total_preds )
        cm=confusion_matrix(y_total, y_total_preds)
        plt=plot_confusion_matrix(cm,normalize= True,target_names = ['non_misgynous','misgynous'],title = "Confusion Matrix")
        plt.savefig('task1'+model_name+'_'+img_name)
        print(model)
        print(report)
        print(accuracy_score(y_total, y_total_preds))
        df_result=pandas_classification_report(y_total,y_total_preds)
        df_result.to_csv('task1'+model_name+'_'+report_name, sep=',')
# --- Experiment configuration -------------------------------------------
# Feature set, output file suffixes, and which dataset/task to run.
feature_model = 'google_nopreprocess_tfidf_preprocess_embed'
img_name = 'cm.png'
report_name = 'report.csv'
data_name= 'pd_train_binary'
# Pick the dataset and its label mapping for the chosen task.
if(data_name=='pd_train_binary'):
    X,y=get_feature(f_type=feature_model,data=pd_train_binary)
    label_map = {
        1: 1,
        0: 0
    }
elif(data_name=='pd_train_category'):
    X,y=get_feature(f_type=feature_model,data=pd_train_category)
    label_map = {
        'discredit': 0,
        'sexual_harassment': 1,
        'stereotype': 2,
        'dominance': 3,
        'derailing': 4
    }
elif(data_name=='pd_train_target'):
    X,y=get_feature(f_type=feature_model,data=pd_train_target)
    label_map = {
        'active': 1,
        'passive': 0
    }
else:
    print('give correct data')
#list_of_model = ['decision_tree_classifier', 'gaussian', 'logistic_regression', 'MLPClassifier', 'RandomForestClassifier',
#                'SVC', 'Catboost', 'XGB_classifier']
list_of_model = ['Catboost','XGB_classifier','logistic_regression']
# Train (save_model=True) each selected model on the chosen features.
for each_model in list_of_model:
    model=get_model(m_type=each_model)
    binny_classifier_run(X,y,model,each_model,label_map,img_name,report_name,save_model=True)
```
| github_jupyter |
```
!wget --no-check-certificate \
https://storage.googleapis.com/laurencemoroney-blog.appspot.com/bbc-text.csv \
-O /tmp/bbc-text.csv
import csv
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
#Stopwords list from https://github.com/Yoast/YoastSEO.js/blob/develop/src/config/stopwords.js
# Convert it to a Python list and paste it here
# English stopwords removed from each article before tokenization.
stopwords = [ "a", "about", "above", "after", "again", "against", "all", "am", "an", "and", "any", "are", "as", "at", "be", "because", "been", "before", "being", "below", "between", "both", "but", "by", "could", "did", "do", "does", "doing", "down", "during", "each", "few", "for", "from", "further", "had", "has", "have", "having", "he", "he'd", "he'll", "he's", "her", "here", "here's", "hers", "herself", "him", "himself", "his", "how", "how's", "i", "i'd", "i'll", "i'm", "i've", "if", "in", "into", "is", "it", "it's", "its", "itself", "let's", "me", "more", "most", "my", "myself", "nor", "of", "on", "once", "only", "or", "other", "ought", "our", "ours", "ourselves", "out", "over", "own", "same", "she", "she'd", "she'll", "she's", "should", "so", "some", "such", "than", "that", "that's", "the", "their", "theirs", "them", "themselves", "then", "there", "there's", "these", "they", "they'd", "they'll", "they're", "they've", "this", "those", "through", "to", "too", "under", "until", "up", "very", "was", "we", "we'd", "we'll", "we're", "we've", "were", "what", "what's", "when", "when's", "where", "where's", "which", "while", "who", "who's", "whom", "why", "why's", "with", "would", "you", "you'd", "you'll", "you're", "you've", "your", "yours", "yourself", "yourselves" ]
# Read the BBC dataset: column 0 is the category label, column 1 the article
# text. Stopwords are stripped from each article before tokenization.
sentences = []
labels = []
# Membership tests against a set are O(1), vs O(n) scans of the stopword list.
stopword_set = set(stopwords)
with open("/tmp/bbc-text.csv", 'r') as csvfile:
    csv_items = csv.reader(csvfile, delimiter=',')
    #skip csv header
    next(csv_items)
    for row in csv_items:
        labels.append(row[0])
        # Filter stopwords and re-join with single spaces. Equivalent to the
        # original string-concatenation loop (which was quadratic per article)
        # but linear via str.join.
        kept_words = [word for word in row[1].split() if word not in stopword_set]
        sentences.append(" ".join(kept_words))
print(len(sentences))
print(sentences[0])
#Expected output
# 2225
# tv future hands viewers home theatre systems plasma high-definition tvs digital video recorders moving living room way people watch tv will radically different five years time. according expert panel gathered annual consumer electronics show las vegas discuss new technologies will impact one favourite pastimes. us leading trend programmes content will delivered viewers via home networks cable satellite telecoms companies broadband service providers front rooms portable devices. one talked-about technologies ces digital personal video recorders (dvr pvr). set-top boxes like us s tivo uk s sky+ system allow people record store play pause forward wind tv programmes want. essentially technology allows much personalised tv. also built-in high-definition tv sets big business japan us slower take off europe lack high-definition programming. not can people forward wind adverts can also forget abiding network channel schedules putting together a-la-carte entertainment. us networks cable satellite companies worried means terms advertising revenues well brand identity viewer loyalty channels. although us leads technology moment also concern raised europe particularly growing uptake services like sky+. happens today will see nine months years time uk adam hume bbc broadcast s futurologist told bbc news website. likes bbc no issues lost advertising revenue yet. pressing issue moment commercial uk broadcasters brand loyalty important everyone. will talking content brands rather network brands said tim hanlon brand communications firm starcom mediavest. reality broadband connections anybody can producer content. added: challenge now hard promote programme much choice. means said stacey jolna senior vice president tv guide tv group way people find content want watch simplified tv viewers. means networks us terms channels take leaf google s book search engine future instead scheduler help people find want watch. 
kind channel model might work younger ipod generation used taking control gadgets play them. might not suit everyone panel recognised. older generations comfortable familiar schedules channel brands know getting. perhaps not want much choice put hands mr hanlon suggested. end kids just diapers pushing buttons already - everything possible available said mr hanlon. ultimately consumer will tell market want. 50 000 new gadgets technologies showcased ces many enhancing tv-watching experience. high-definition tv sets everywhere many new models lcd (liquid crystal display) tvs launched dvr capability built instead external boxes. one example launched show humax s 26-inch lcd tv 80-hour tivo dvr dvd recorder. one us s biggest satellite tv companies directtv even launched branded dvr show 100-hours recording capability instant replay search function. set can pause rewind tv 90 hours. microsoft chief bill gates announced pre-show keynote speech partnership tivo called tivotogo means people can play recorded programmes windows pcs mobile devices. reflect increasing trend freeing multimedia people can watch want want.
# Build the article vocabulary; unseen words map to the "<OOV>" token.
tokenizer = Tokenizer(oov_token="<OOV>")
tokenizer.fit_on_texts(sentences)
word_index = tokenizer.word_index
print(len(word_index))
# Expected output
# 29714
# Convert each article to an id sequence, zero-padded at the end ('post')
# to the length of the longest article.
sequences = tokenizer.texts_to_sequences(sentences)
padded = pad_sequences(sequences, padding='post')
print(padded[0])
print(padded.shape)
# Expected output
# [ 96 176 1158 ... 0 0 0]
# (2225, 2442)
# Separate tokenizer for the 5 category labels (ids are 1-based).
label_tokenizer = Tokenizer()
label_tokenizer.fit_on_texts(labels)
label_word_index = label_tokenizer.word_index
label_seq = label_tokenizer.texts_to_sequences(labels)
print(label_seq)
print(label_word_index)
# Expected Output
# [[4], [2], [1], [1], [5], [3], [3], [1], [1], [5], [5], [2], [2], [3], [1], [2], [3], [1], [2], [4], [4], [4], [1], [1], [4], [1], [5], [4], [3], [5], [3], [4], [5], [5], [2], [3], [4], [5], [3], [2], [3], [1], [2], [1], [4], [5], [3], [3], [3], [2], [1], [3], [2], [2], [1], [3], [2], [1], [1], [2], [2], [1], [2], [1], [2], [4], [2], [5], [4], [2], [3], [2], [3], [1], [2], [4], [2], [1], [1], [2], [2], [1], [3], [2], [5], [3], [3], [2], [5], [2], [1], [1], [3], [1], [3], [1], [2], [1], [2], [5], [5], [1], [2], [3], [3], [4], [1], [5], [1], [4], [2], [5], [1], [5], [1], [5], [5], [3], [1], [1], [5], [3], [2], [4], [2], [2], [4], [1], [3], [1], [4], [5], [1], [2], [2], [4], [5], [4], [1], [2], [2], [2], [4], [1], [4], [2], [1], [5], [1], [4], [1], [4], [3], [2], [4], [5], [1], [2], [3], [2], [5], [3], [3], [5], [3], [2], [5], [3], [3], [5], [3], [1], [2], [3], [3], [2], [5], [1], [2], [2], [1], [4], [1], [4], [4], [1], [2], [1], [3], [5], [3], [2], [3], [2], [4], [3], [5], [3], [4], [2], [1], [2], [1], [4], [5], [2], [3], [3], [5], [1], [5], [3], [1], [5], [1], [1], [5], [1], [3], [3], [5], [4], [1], [3], [2], [5], [4], [1], [4], [1], [5], [3], [1], [5], [4], [2], [4], [2], [2], [4], [2], [1], [2], [1], [2], [1], [5], [2], [2], [5], [1], [1], [3], [4], [3], [3], [3], [4], [1], [4], [3], [2], [4], [5], [4], [1], [1], [2], [2], [3], [2], [4], [1], [5], [1], [3], [4], [5], [2], [1], [5], [1], [4], [3], [4], [2], [2], [3], [3], [1], [2], [4], [5], [3], [4], [2], [5], [1], [5], [1], [5], [3], [2], [1], [2], [1], [1], [5], [1], [3], [3], [2], [5], [4], [2], [1], [2], [5], [2], [2], [2], [3], [2], [3], [5], [5], [2], [1], [2], [3], [2], [4], [5], [2], [1], [1], [5], [2], [2], [3], [4], [5], [4], [3], [2], [1], [3], [2], [5], [4], [5], [4], [3], [1], [5], [2], [3], [2], [2], [3], [1], [4], [2], [2], [5], [5], [4], [1], [2], [5], [4], [4], [5], [5], [5], [3], [1], [3], [4], [2], [5], [3], [2], [5], [3], [3], [1], [1], [2], [3], [5], [2], [1], [2], [2], [1], [2], [3], [3], 
[3], [1], [4], [4], [2], [4], [1], [5], [2], [3], [2], [5], [2], [3], [5], [3], [2], [4], [2], [1], [1], [2], [1], [1], [5], [1], [1], [1], [4], [2], [2], [2], [3], [1], [1], [2], [4], [2], [3], [1], [3], [4], [2], [1], [5], [2], [3], [4], [2], [1], [2], [3], [2], [2], [1], [5], [4], [3], [4], [2], [1], [2], [5], [4], [4], [2], [1], [1], [5], [3], [3], [3], [1], [3], [4], [4], [5], [3], [4], [5], [2], [1], [1], [4], [2], [1], [1], [3], [1], [1], [2], [1], [5], [4], [3], [1], [3], [4], [2], [2], [2], [4], [2], [2], [1], [1], [1], [1], [2], [4], [5], [1], [1], [4], [2], [4], [5], [3], [1], [2], [3], [2], [4], [4], [3], [4], [2], [1], [2], [5], [1], [3], [5], [1], [1], [3], [4], [5], [4], [1], [3], [2], [5], [3], [2], [5], [1], [1], [4], [3], [5], [3], [5], [3], [4], [3], [5], [1], [2], [1], [5], [1], [5], [4], [2], [1], [3], [5], [3], [5], [5], [5], [3], [5], [4], [3], [4], [4], [1], [1], [4], [4], [1], [5], [5], [1], [4], [5], [1], [1], [4], [2], [3], [4], [2], [1], [5], [1], [5], [3], [4], [5], [5], [2], [5], [5], [1], [4], [4], [3], [1], [4], [1], [3], [3], [5], [4], [2], [4], [4], [4], [2], [3], [3], [1], [4], [2], [2], [5], [5], [1], [4], [2], [4], [5], [1], [4], [3], [4], [3], [2], [3], [3], [2], [1], [4], [1], [4], [3], [5], [4], [1], [5], [4], [1], [3], [5], [1], [4], [1], [1], [3], [5], [2], [3], [5], [2], [2], [4], [2], [5], [4], [1], [4], [3], [4], [3], [2], [3], [5], [1], [2], [2], [2], [5], [1], [2], [5], [5], [1], [5], [3], [3], [3], [1], [1], [1], [4], [3], [1], [3], [3], [4], [3], [1], [2], [5], [1], [2], [2], [4], [2], [5], [5], [5], [2], [5], [5], [3], [4], [2], [1], [4], [1], [1], [3], [2], [1], [4], [2], [1], [4], [1], [1], [5], [1], [2], [1], [2], [4], [3], [4], [2], [1], [1], [2], [2], [2], [2], [3], [1], [2], [4], [2], [1], [3], [2], [4], [2], [1], [2], [3], [5], [1], [2], [3], [2], [5], [2], [2], [2], [1], [3], [5], [1], [3], [1], [3], [3], [2], [2], [1], [4], [5], [1], [5], [2], [2], [2], [4], [1], [4], [3], [4], [4], [4], [1], [4], [4], [5], 
[5], [4], [1], [5], [4], [1], [1], [2], [5], [4], [2], [1], [2], [3], [2], [5], [4], [2], [3], [2], [4], [1], [2], [5], [2], [3], [1], [5], [3], [1], [2], [1], [3], [3], [1], [5], [5], [2], [2], [1], [4], [4], [1], [5], [4], [4], [2], [1], [5], [4], [1], [1], [2], [5], [2], [2], [2], [5], [1], [5], [4], [4], [4], [3], [4], [4], [5], [5], [1], [1], [3], [2], [5], [1], [3], [5], [4], [3], [4], [4], [2], [5], [3], [4], [3], [3], [1], [3], [3], [5], [4], [1], [3], [1], [5], [3], [2], [2], [3], [1], [1], [1], [5], [4], [4], [2], [5], [1], [3], [4], [3], [5], [4], [4], [2], [2], [1], [2], [2], [4], [3], [5], [2], [2], [2], [2], [2], [4], [1], [3], [4], [4], [2], [2], [5], [3], [5], [1], [4], [1], [5], [1], [4], [1], [2], [1], [3], [3], [5], [2], [1], [3], [3], [1], [5], [3], [2], [4], [1], [2], [2], [2], [5], [5], [4], [4], [2], [2], [5], [1], [2], [5], [4], [4], [2], [2], [1], [1], [1], [3], [3], [1], [3], [1], [2], [5], [1], [4], [5], [1], [1], [2], [2], [4], [4], [1], [5], [1], [5], [1], [5], [3], [5], [5], [4], [5], [2], [2], [3], [1], [3], [4], [2], [3], [1], [3], [1], [5], [1], [3], [1], [1], [4], [5], [1], [3], [1], [1], [2], [4], [5], [3], [4], [5], [3], [5], [3], [5], [5], [4], [5], [3], [5], [5], [4], [4], [1], [1], [5], [5], [4], [5], [3], [4], [5], [2], [4], [1], [2], [5], [5], [4], [5], [4], [2], [5], [1], [5], [2], [1], [2], [1], [3], [4], [5], [3], [2], [5], [5], [3], [2], [5], [1], [3], [1], [2], [2], [2], [2], [2], [5], [4], [1], [5], [5], [2], [1], [4], [4], [5], [1], [2], [3], [2], [3], [2], [2], [5], [3], [2], [2], [4], [3], [1], [4], [5], [3], [2], [2], [1], [5], [3], [4], [2], [2], [3], [2], [1], [5], [1], [5], [4], [3], [2], [2], [4], [2], [2], [1], [2], [4], [5], [3], [2], [3], [2], [1], [4], [2], [3], [5], [4], [2], [5], [1], [3], [3], [1], [3], [2], [4], [5], [1], [1], [4], [2], [1], [5], [4], [1], [3], [1], [2], [2], [2], [3], [5], [1], [3], [4], [2], [2], [4], [5], [5], [4], [4], [1], [1], [5], [4], [5], [1], [3], [4], [2], [1], [5], [2], [2], 
[5], [1], [2], [1], [4], [3], [3], [4], [5], [3], [5], [2], [2], [3], [1], [4], [1], [1], [1], [3], [2], [1], [2], [4], [1], [2], [2], [1], [3], [4], [1], [2], [4], [1], [1], [2], [2], [2], [2], [3], [5], [4], [2], [2], [1], [2], [5], [2], [5], [1], [3], [2], [2], [4], [5], [2], [2], [2], [3], [2], [3], [4], [5], [3], [5], [1], [4], [3], [2], [4], [1], [2], [2], [5], [4], [2], [2], [1], [1], [5], [1], [3], [1], [2], [1], [2], [3], [3], [2], [3], [4], [5], [1], [2], [5], [1], [3], [3], [4], [5], [2], [3], [3], [1], [4], [2], [1], [5], [1], [5], [1], [2], [1], [3], [5], [4], [2], [1], [3], [4], [1], [5], [2], [1], [5], [1], [4], [1], [4], [3], [1], [2], [5], [4], [4], [3], [4], [5], [4], [1], [2], [4], [2], [5], [1], [4], [3], [3], [3], [3], [5], [5], [5], [2], [3], [3], [1], [1], [4], [1], [3], [2], [2], [4], [1], [4], [2], [4], [3], [3], [1], [2], [3], [1], [2], [4], [2], [2], [5], [5], [1], [2], [4], [4], [3], [2], [3], [1], [5], [5], [3], [3], [2], [2], [4], [4], [1], [1], [3], [4], [1], [4], [2], [1], [2], [3], [1], [5], [2], [4], [3], [5], [4], [2], [1], [5], [4], [4], [5], [3], [4], [5], [1], [5], [1], [1], [1], [3], [4], [1], [2], [1], [1], [2], [4], [1], [2], [5], [3], [4], [1], [3], [4], [5], [3], [1], [3], [4], [2], [5], [1], [3], [2], [4], [4], [4], [3], [2], [1], [3], [5], [4], [5], [1], [4], [2], [3], [5], [4], [3], [1], [1], [2], [5], [2], [2], [3], [2], [2], [3], [4], [5], [3], [5], [5], [2], [3], [1], [3], [5], [1], [5], [3], [5], [5], [5], [2], [1], [3], [1], [5], [4], [4], [2], [3], [5], [2], [1], [2], [3], [3], [2], [1], [4], [4], [4], [2], [3], [3], [2], [1], [1], [5], [2], [1], [1], [3], [3], [3], [5], [3], [2], [4], [2], [3], [5], [5], [2], [1], [3], [5], [1], [5], [3], [3], [2], [3], [1], [5], [5], [4], [4], [4], [4], [3], [4], [2], [4], [1], [1], [5], [2], [4], [5], [2], [4], [1], [4], [5], [5], [3], [3], [1], [2], [2], [4], [5], [1], [3], [2], [4], [5], [3], [1], [5], [3], [3], [4], [1], [3], [2], [3], [5], [4], [1], [3], [5], [5], [2], [1], 
[4], [4], [1], [5], [4], [3], [4], [1], [3], [3], [1], [5], [1], [3], [1], [4], [5], [1], [5], [2], [2], [5], [5], [5], [4], [1], [2], [2], [3], [3], [2], [3], [5], [1], [1], [4], [3], [1], [2], [1], [2], [4], [1], [1], [2], [5], [1], [1], [4], [1], [2], [3], [2], [5], [4], [5], [3], [2], [5], [3], [5], [3], [3], [2], [1], [1], [1], [4], [4], [1], [3], [5], [4], [1], [5], [2], [5], [3], [2], [1], [4], [2], [1], [3], [2], [5], [5], [5], [3], [5], [3], [5], [1], [5], [1], [3], [3], [2], [3], [4], [1], [4], [1], [2], [3], [4], [5], [5], [3], [5], [3], [1], [1], [3], [2], [4], [1], [3], [3], [5], [1], [3], [3], [2], [4], [4], [2], [4], [1], [1], [2], [3], [2], [4], [1], [4], [3], [5], [1], [2], [1], [5], [4], [4], [1], [3], [1], [2], [1], [2], [1], [1], [5], [5], [2], [4], [4], [2], [4], [2], [2], [1], [1], [3], [1], [4], [1], [4], [1], [1], [2], [2], [4], [1], [2], [4], [4], [3], [1], [2], [5], [5], [4], [3], [1], [1], [4], [2], [4], [5], [5], [3], [3], [2], [5], [1], [5], [5], [2], [1], [3], [4], [2], [1], [5], [4], [3], [3], [1], [1], [2], [2], [2], [2], [2], [5], [2], [3], [3], [4], [4], [5], [3], [5], [2], [3], [1], [1], [2], [4], [2], [4], [1], [2], [2], [3], [1], [1], [3], [3], [5], [5], [3], [2], [3], [3], [2], [4], [3], [3], [3], [3], [3], [5], [5], [4], [3], [1], [3], [1], [4], [1], [1], [1], [5], [4], [5], [4], [1], [4], [1], [1], [5], [5], [2], [5], [5], [3], [2], [1], [4], [4], [3], [2], [1], [2], [5], [1], [3], [5], [1], [1], [2], [3], [4], [4], [2], [2], [1], [3], [5], [1], [1], [3], [5], [4], [1], [5], [2], [3], [1], [3], [4], [5], [1], [3], [2], [5], [3], [5], [3], [1], [3], [2], [2], [3], [2], [4], [1], [2], [5], [2], [1], [1], [5], [4], [3], [4], [3], [3], [1], [1], [1], [2], [4], [5], [2], [1], [2], [1], [2], [4], [2], [2], [2], [2], [1], [1], [1], [2], [2], [5], [2], [2], [2], [1], [1], [1], [4], [2], [1], [1], [1], [2], [5], [4], [4], [4], [3], [2], [2], [4], [2], [4], [1], [1], [3], [3], [3], [1], [1], [3], [3], [4], [2], [1], [1], [1], [1], [2], 
[1], [2], [2], [2], [2], [1], [3], [1], [4], [4], [1], [4], [2], [5], [2], [1], [2], [4], [4], [3], [5], [2], [5], [2], [4], [3], [5], [3], [5], [5], [4], [2], [4], [4], [2], [3], [1], [5], [2], [3], [5], [2], [4], [1], [4], [3], [1], [3], [2], [3], [3], [2], [2], [2], [4], [3], [2], [3], [2], [5], [3], [1], [3], [3], [1], [5], [4], [4], [2], [4], [1], [2], [2], [3], [1], [4], [4], [4], [1], [5], [1], [3], [2], [3], [3], [5], [4], [2], [4], [1], [5], [5], [1], [2], [5], [4], [4], [1], [5], [2], [3], [3], [3], [4], [4], [2], [3], [2], [3], [3], [5], [1], [4], [2], [4], [5], [4], [4], [1], [3], [1], [1], [3], [5], [5], [2], [3], [3], [1], [2], [2], [4], [2], [4], [4], [1], [2], [3], [1], [2], [2], [1], [4], [1], [4], [5], [1], [1], [5], [2], [4], [1], [1], [3], [4], [2], [3], [1], [1], [3], [5], [4], [4], [4], [2], [1], [5], [5], [4], [2], [3], [4], [1], [1], [4], [4], [3], [2], [1], [5], [5], [1], [5], [4], [4], [2], [2], [2], [1], [1], [4], [1], [2], [4], [2], [2], [1], [2], [3], [2], [2], [4], [2], [4], [3], [4], [5], [3], [4], [5], [1], [3], [5], [2], [4], [2], [4], [5], [4], [1], [2], [2], [3], [5], [3], [1]]
# {'sport': 1, 'business': 2, 'politics': 3, 'tech': 4, 'entertainment': 5}
```
| github_jupyter |
# VQ-VAE training example
Demonstration of how to train the model specified in https://arxiv.org/abs/1711.00937, using Haiku / JAX.
On Mac and Linux, simply execute each cell in turn.
```
# Uncomment the line below if running on colab.research.google.com
# !pip install dm-haiku optax
import haiku as hk
import jax
import optax
import jax.numpy as jnp
import matplotlib.pyplot as plt
import numpy as np
import tensorflow.compat.v2 as tf
import tensorflow_datasets as tfds
# TensorFlow is used only for the tf.data input pipeline; enable 2.x behaviour.
tf.enable_v2_behavior()
print("JAX version {}".format(jax.__version__))
print("Haiku version {}".format(hk.__version__))
print("TF version {}".format(tf.__version__))
```
# Download Cifar10 data
This requires a connection to the internet and will download ~160MB.
```
# Load all of CIFAR-10 (train+test, 60k images) as one batch of numpy arrays.
cifar10 = tfds.as_numpy(tfds.load("cifar10", split="train+test", batch_size=-1))
# Only the images are used (unsupervised training); drop ids and labels.
del cifar10["id"], cifar10["label"]
# Display dtype and shape of each remaining field.
jax.tree_map(lambda x: f'{x.dtype.name}{list(x.shape)}', cifar10)
```
# Load the data into Numpy
We compute the variance of the whole training set to normalise the Mean Squared Error below.
```
# 40k train / 10k validation / 10k test split of the 60k CIFAR-10 images.
train_data_dict = jax.tree_map(lambda x: x[:40000], cifar10)
valid_data_dict = jax.tree_map(lambda x: x[40000:50000], cifar10)
test_data_dict = jax.tree_map(lambda x: x[50000:], cifar10)
def cast_and_normalise_images(data_dict):
  """Cast uint8 images to float32 and shift them into the range [-0.5, 0.5]."""
  images = data_dict['image']
  data_dict['image'] = (tf.cast(images, tf.float32) / 255.0) - 0.5
  return data_dict
# Variance of the raw training images rescaled to [0, 1]; used to normalise
# the reconstruction MSE inside the model.
train_data_variance = np.var(train_data_dict['image'] / 255.0)
print('train data variance: %s' % train_data_variance)
```
# Encoder & Decoder Architecture
```
class ResidualStack(hk.Module):
  """A stack of residual blocks: ReLU -> 3x3 conv -> ReLU -> 1x1 conv, with a
  skip connection around each block and a final output ReLU (ResNet V1)."""

  def __init__(self, num_hiddens, num_residual_layers, num_residual_hiddens,
               name=None):
    super(ResidualStack, self).__init__(name=name)
    self._num_hiddens = num_hiddens
    self._num_residual_layers = num_residual_layers
    self._num_residual_hiddens = num_residual_hiddens
    # One (3x3, 1x1) conv pair per residual block; explicit names keep the
    # parameter tree identical regardless of construction style.
    self._layers = [
        (hk.Conv2D(output_channels=num_residual_hiddens,
                   kernel_shape=(3, 3),
                   stride=(1, 1),
                   name="res3x3_%d" % i),
         hk.Conv2D(output_channels=num_hiddens,
                   kernel_shape=(1, 1),
                   stride=(1, 1),
                   name="res1x1_%d" % i))
        for i in range(num_residual_layers)
    ]

  def __call__(self, inputs):
    h = inputs
    for conv3x3, conv1x1 in self._layers:
      # Pre-activation residual block with an identity skip connection.
      h = h + conv1x1(jax.nn.relu(conv3x3(jax.nn.relu(h))))
    return jax.nn.relu(h)  # Resnet V1 style
class Encoder(hk.Module):
  """Downsampling CNN encoder: two stride-2 4x4 convs (4x spatial reduction),
  a stride-1 3x3 conv, then a residual stack."""

  def __init__(self, num_hiddens, num_residual_layers, num_residual_hiddens,
               name=None):
    super(Encoder, self).__init__(name=name)
    self._num_hiddens = num_hiddens
    self._num_residual_layers = num_residual_layers
    self._num_residual_hiddens = num_residual_hiddens
    self._enc_1 = hk.Conv2D(output_channels=self._num_hiddens // 2,
                            kernel_shape=(4, 4),
                            stride=(2, 2),
                            name="enc_1")
    self._enc_2 = hk.Conv2D(output_channels=self._num_hiddens,
                            kernel_shape=(4, 4),
                            stride=(2, 2),
                            name="enc_2")
    self._enc_3 = hk.Conv2D(output_channels=self._num_hiddens,
                            kernel_shape=(3, 3),
                            stride=(1, 1),
                            name="enc_3")
    self._residual_stack = ResidualStack(self._num_hiddens,
                                         self._num_residual_layers,
                                         self._num_residual_hiddens)

  def __call__(self, x):
    # ReLU after each conv, then the residual stack supplies its own output ReLU.
    h = x
    for conv in (self._enc_1, self._enc_2, self._enc_3):
      h = jax.nn.relu(conv(h))
    return self._residual_stack(h)
class Decoder(hk.Module):
  """Upsampling CNN decoder: 3x3 conv + residual stack, then two stride-2
  transposed convs restoring the input resolution with 3 output channels."""

  def __init__(self, num_hiddens, num_residual_layers, num_residual_hiddens,
               name=None):
    super(Decoder, self).__init__(name=name)
    self._num_hiddens = num_hiddens
    self._num_residual_layers = num_residual_layers
    self._num_residual_hiddens = num_residual_hiddens
    self._dec_1 = hk.Conv2D(output_channels=self._num_hiddens,
                            kernel_shape=(3, 3),
                            stride=(1, 1),
                            name="dec_1")
    self._residual_stack = ResidualStack(self._num_hiddens,
                                         self._num_residual_layers,
                                         self._num_residual_hiddens)
    self._dec_2 = hk.Conv2DTranspose(output_channels=self._num_hiddens // 2,
                                     kernel_shape=(4, 4),
                                     stride=(2, 2),
                                     name="dec_2")
    self._dec_3 = hk.Conv2DTranspose(output_channels=3,
                                     kernel_shape=(4, 4),
                                     stride=(2, 2),
                                     name="dec_3")

  def __call__(self, x):
    h = self._residual_stack(self._dec_1(x))
    h = jax.nn.relu(self._dec_2(h))
    # The final layer is linear: reconstructions are compared to inputs via MSE.
    return self._dec_3(h)
class VQVAEModel(hk.Module):
  """End-to-end VQ-VAE: encoder -> 1x1 pre-VQ conv -> vector quantizer -> decoder.

  The loss is the variance-normalised reconstruction MSE plus the
  quantizer's own loss term.
  """

  def __init__(self, encoder, decoder, vqvae, pre_vq_conv1,
               data_variance, name=None):
    super(VQVAEModel, self).__init__(name=name)
    self._encoder = encoder
    self._decoder = decoder
    self._vqvae = vqvae
    self._pre_vq_conv1 = pre_vq_conv1
    self._data_variance = data_variance

  def __call__(self, inputs, is_training):
    z = self._pre_vq_conv1(self._encoder(inputs))
    vq_output = self._vqvae(z, is_training=is_training)
    x_recon = self._decoder(vq_output['quantize'])
    # Dividing by the data variance makes the MSE scale-free.
    recon_error = jnp.mean((x_recon - inputs) ** 2) / self._data_variance
    return {
        'z': z,
        'x_recon': x_recon,
        'loss': recon_error + vq_output['loss'],
        'recon_error': recon_error,
        'vq_output': vq_output,
    }
```
# Build Model and train
```
# Set hyper-parameters.
batch_size = 32
image_size = 32
# 100k steps should take < 30 minutes on a modern (>= 2017) GPU.
num_training_updates = 100000
num_hiddens = 128
num_residual_hiddens = 32
num_residual_layers = 2
# These hyper-parameters define the size of the model (number of parameters and layers).
# The hyper-parameters in the paper were (For ImageNet):
# batch_size = 128
# image_size = 128
# num_hiddens = 128
# num_residual_hiddens = 32
# num_residual_layers = 2
# This value is not that important, usually 64 works.
# This will not change the capacity in the information-bottleneck.
embedding_dim = 64
# The higher this value, the higher the capacity in the information bottleneck.
num_embeddings = 512
# commitment_cost should be set appropriately. It's often useful to try a couple
# of values. It mostly depends on the scale of the reconstruction cost
# (log p(x|z)). So if the reconstruction cost is 100x higher, the
# commitment_cost should also be multiplied with the same amount.
commitment_cost = 0.25
# Use EMA updates for the codebook (instead of the Adam optimizer).
# This typically converges faster, and makes the model less dependent on choice
# of the optimizer. In the VQ-VAE paper EMA updates were not used (but was
# developed afterwards). See Appendix of the paper for more details.
vq_use_ema = True
# This is only used for EMA updates.
decay = 0.99
learning_rate = 3e-4
# # Data Loading.
# Training pipeline: shuffle, repeat forever, fixed-size batches,
# prefetch(-1) lets tf.data autotune the prefetch buffer.
train_dataset = tfds.as_numpy(
    tf.data.Dataset.from_tensor_slices(train_data_dict)
    .map(cast_and_normalise_images)
    .shuffle(10000)
    .repeat(-1)  # repeat indefinitely
    .batch(batch_size, drop_remainder=True)
    .prefetch(-1))
# Validation pipeline: one pass, no shuffling, ragged final batch allowed.
valid_dataset = tfds.as_numpy(
    tf.data.Dataset.from_tensor_slices(valid_data_dict)
    .map(cast_and_normalise_images)
    .repeat(1)  # 1 epoch
    .batch(batch_size)
    .prefetch(-1))
# # Build modules.
def forward(data, is_training):
  """Build the VQ-VAE and run one forward pass on data['image'].

  Called under hk.transform_with_state, so every module created here
  (including the EMA codebook statistics) is tracked by Haiku.
  """
  # Encoder/decoder towers share the same width/depth hyper-parameters.
  enc = Encoder(num_hiddens, num_residual_layers, num_residual_hiddens)
  dec = Decoder(num_hiddens, num_residual_layers, num_residual_hiddens)
  # 1x1 convolution projecting encoder features down to the embedding size.
  to_vq = hk.Conv2D(
      output_channels=embedding_dim,
      kernel_shape=(1, 1),
      stride=(1, 1),
      name="to_vq")
  # Select EMA-updated or plain (gradient-trained) vector quantizer.
  vq_kwargs = dict(
      embedding_dim=embedding_dim,
      num_embeddings=num_embeddings,
      commitment_cost=commitment_cost)
  if vq_use_ema:
    quantizer = hk.nets.VectorQuantizerEMA(decay=decay, **vq_kwargs)
  else:
    quantizer = hk.nets.VectorQuantizer(**vq_kwargs)
  vqvae = VQVAEModel(enc, dec, quantizer, to_vq,
                     data_variance=train_data_variance)
  return vqvae(data['image'], is_training)
# Wrap the model function so Haiku threads params/state explicitly
# (forward.init / forward.apply), and build the Adam optimizer used below.
forward = hk.transform_with_state(forward)
optimizer = optax.adam(learning_rate)
@jax.jit
def train_step(params, state, opt_state, data):
  """One optimisation step: compute grads, apply Adam, return new state."""
  def loss_fn(p, s, batch):
    # forward.apply returns (output_dict, new_state); expose the scalar
    # loss as the differentiable value and tunnel the rest through aux.
    out, new_s = forward.apply(p, s, None, batch, is_training=True)
    return out['loss'], (out, new_s)

  grads, (model_output, state) = jax.grad(loss_fn, has_aux=True)(
      params, state, data)
  updates, opt_state = optimizer.update(grads, opt_state)
  params = optax.apply_updates(params, updates)
  return params, state, opt_state, model_output
%%time
# Per-step metric logs; the report below prints rolling 100-step means.
train_losses = []
train_recon_errors = []
train_perplexities = []
train_vqvae_loss = []
rng = jax.random.PRNGKey(42)
train_dataset_iter = iter(train_dataset)
# Initialise model params/state from one real batch, then optimizer state.
params, state = forward.init(rng, next(train_dataset_iter), is_training=True)
opt_state = optimizer.init(params)
for step in range(1, num_training_updates + 1):
  data = next(train_dataset_iter)
  params, state, opt_state, train_results = (
      train_step(params, state, opt_state, data))
  # Pull metrics off the accelerator so the lists hold plain host values.
  train_results = jax.device_get(train_results)
  train_losses.append(train_results['loss'])
  train_recon_errors.append(train_results['recon_error'])
  train_perplexities.append(train_results['vq_output']['perplexity'])
  train_vqvae_loss.append(train_results['vq_output']['loss'])
  if step % 100 == 0:
    print(f'[Step {step}/{num_training_updates}] ' +
          ('train loss: %f ' % np.mean(train_losses[-100:])) +
          ('recon_error: %.3f ' % np.mean(train_recon_errors[-100:])) +
          ('perplexity: %.3f ' % np.mean(train_perplexities[-100:])) +
          ('vqvae loss: %.3f' % np.mean(train_vqvae_loss[-100:])))
```
# Plot loss
```
# Left panel: reconstruction error (log scale); right: codebook perplexity.
f = plt.figure(figsize=(16,8))
ax = f.add_subplot(1,2,1)
ax.plot(train_recon_errors)
ax.set_yscale('log')
ax.set_title('NMSE.')
ax = f.add_subplot(1,2,2)
ax.plot(train_perplexities)
ax.set_title('Average codebook usage (perplexity).')
```
# View reconstructions
```
# Reconstructions
train_batch = next(iter(train_dataset))
valid_batch = next(iter(valid_dataset))
# Put data through the model with is_training=False, so that in the case of
# using EMA the codebook is not updated.
# forward.apply returns (output_dict, state); [0] selects the output dict.
train_reconstructions = forward.apply(params, state, rng, train_batch, is_training=False)[0]['x_recon']
valid_reconstructions = forward.apply(params, state, rng, valid_batch, is_training=False)[0]['x_recon']
def convert_batch_to_image_grid(image_batch):
  """Tile a batch of 32 images of shape (32, 32, 3) into a 4x8 mosaic.

  Returns an array of shape (128, 256, 3); the +0.5 undoes the earlier
  normalisation so pixel values land back in a displayable range.
  """
  rows, cols, h, w, c = 4, 8, 32, 32, 3
  grid = image_batch.reshape(rows, cols, h, w, c)
  # Interleave row-of-tiles and pixel-row axes so each image stays contiguous.
  grid = grid.transpose([0, 2, 1, 3, 4])
  grid = grid.reshape(rows * h, cols * w, c)
  return grid + 0.5
# 2x2 panel: originals (left) next to model reconstructions (right),
# training data on the top row and validation data on the bottom row.
f = plt.figure(figsize=(16,8))
ax = f.add_subplot(2,2,1)
ax.imshow(convert_batch_to_image_grid(train_batch['image']),
          interpolation='nearest')
ax.set_title('training data originals')
plt.axis('off')
ax = f.add_subplot(2,2,2)
ax.imshow(convert_batch_to_image_grid(train_reconstructions),
          interpolation='nearest')
ax.set_title('training data reconstructions')
plt.axis('off')
ax = f.add_subplot(2,2,3)
ax.imshow(convert_batch_to_image_grid(valid_batch['image']),
          interpolation='nearest')
ax.set_title('validation data originals')
plt.axis('off')
ax = f.add_subplot(2,2,4)
ax.imshow(convert_batch_to_image_grid(valid_reconstructions),
          interpolation='nearest')
ax.set_title('validation data reconstructions')
plt.axis('off')
```
| github_jupyter |
# UCLQ Quantum Energy Hackathon
## Working example - February 2022
## Updated notebook with the updates of Giancarlo and Mauro
The old code is named "Old code".
### [QAOA](https://qiskit.org/textbook/ch-applications/qaoa.html) (Quantum Approximate Optimization Algorithm) is a heuristic method of using quantum computing hardware as it is to find solutions to optimization problems.
### 1. Install qiskit
```
# uncomment if qiskit is not installed
# !pip install qiskit
# Associated dependency (needed for circuit drawing)
#!pip install pylatexenc
%matplotlib inline
#matplotlib.style.use('seaborn')
import networkx as nx
import numpy as np
import matplotlib.pyplot as plt
from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister
from qiskit import Aer, execute
from qiskit.circuit import Parameter
from qiskit.visualization import plot_histogram
from scipy.optimize import minimize
```
### 2. Define the parameters of your problem
In this example, we considered the cost and revenue for 3 renewable energy sources and optimised their distribution for 4 locations in the United Kingdom. Data used for this example is contained in _EQ_constants.csv_
For each energy source in each location the following parameters were estimated:
1) Revenue: total energy generated by the renewable energy unit
2) Cost: total cost of constructing and maintaining the renewable energy unit
3) Location capacity: the maximum number of units that can be placed in each location; note that a solar unit in this example is a solar panel and a wind unit is a wind turbine.
The total cost is the total energy consumption for the United Kingdom in 2019. NOTE: all parameter values are expressed in pounds sterling (£). For full details consult the provided spreadsheet _EQ_constants.csv_
## Old code
```
# Renewable sources: solar, on-shore and off-shore wind turbines
number_of_renewable_sources = 3
# Locations: England, Northern Ireland, Scotland, Wales
number_of_locations = 4
# Calculate the number of qubits needed,
# NOTE: IBMQ systems accessible have 5 qubits
nqubits = number_of_locations * number_of_renewable_sources
G = nx.complete_graph(nqubits)
total_cost = C = 61510030000.0 # units: £
# Calculate revenue for each energy source in all locations
# dict{location: [energy_source]}
location_revenue_per_unit = {
    0: [2783.55, 336113.74, 153105454.50],
    1: [2332.31, 434451.22, 0.0],
    2: [1088.38, 1020438.50, 85798571.43],
    3: [3573.59, 768855.93, 140790000],
}
# Calculate cost for each energy source in all locations
# dict{location: [energy_source]}
location_cost_per_unit = {
    0: [300.92, 52370.72, 22696783.84],
    1: [273.66, 70226.14, 0.0],
    2: [116.17, 161395.70, 11626961.90],
    3: [382.92, 122708.47, 21872844.44],
}
# Location capacity (max units per location); values are variable and a
# rough estimate, see spreadsheet for current figures.
# dict{location: [energy_source]}
location_unit_capacity = {
    0: [800000, 5000, 40],
    1: [30000, 1400, 0],
    2: [70000, 4000, 10],
    3: [60000, 1000, 5],
}
# lambda and gamma, the initial parameter guess for the cost function
lam, gam = 1.0, 1.0
# Calculate the total costs and revenues, flattened in (location, source)
# order so index N*number_of_renewable_sources + R matches graph node N,R.
a_revs, b_costs = [], []
for N in range(number_of_locations):
    for R in range(number_of_renewable_sources):
        num_units = location_unit_capacity[N][R]
        a_revs.append(num_units * location_revenue_per_unit[N][R])
        b_costs.append(num_units * location_cost_per_unit[N][R])
```
## One additional energy source input (New code)
In the following cell, we are adding one more energy source: sewage gas.
```
# Renewable sources: solar, on-shore, off-shore wind turbines and sewage gas
number_of_renewable_sources = 4
# Locations: England, Northern Ireland, Scotland, Wales
number_of_locations = 4
# Calculate the number of qubits needed,
# NOTE: IBMQ systems accessible have 5 qubits
nqubits = number_of_locations * number_of_renewable_sources
G = nx.complete_graph(nqubits)
total_cost = C = 61510030000.0 # units: £
# Calculate revenue for each energy source in all locations
# dict{location: [energy_source]}
location_revenue_per_unit = {
    0: [2783.55, 336113.74, 153105454.50, 1098154.76],
    1: [2332.31, 434451.22, 0.0, 0.0],
    2: [1088.38, 1020438.50, 85798571.43, 688750],
    3: [3573.59, 768855.93, 140790000, 581875],
}
# Calculate cost for each energy source in all locations
# dict{location: [energy_source]}
location_cost_per_unit = {
    0: [300.92, 52370.72, 22696783.84, 7926.08],
    1: [273.66, 70226.14, 0.0, 0.0],
    2: [116.17, 161395.70, 11626961.90, 5132.75],
    3: [382.92, 122708.47, 21872844.44, 4766.13],
}
# Location capacity (max units per location); values are variable and a
# rough estimate, see spreadsheet for current figures.
# dict{location: [energy_source]}
location_unit_capacity = {
    0: [800000, 5000, 40, 200],
    1: [30000, 1400, 0, 2],
    2: [70000, 4000, 10, 10],
    3: [60000, 1000, 5, 20],
}
# lambda and gamma, the initial parameter guess for the cost function
lam, gam = 1.0, 1.0
# Calculate the total costs and revenues, flattened in (location, source)
# order so index N*number_of_renewable_sources + R matches graph node N,R.
a_revs, b_costs = [], []
for N in range(number_of_locations):
    for R in range(number_of_renewable_sources):
        num_units = location_unit_capacity[N][R]
        a_revs.append(num_units * location_revenue_per_unit[N][R])
        b_costs.append(num_units * location_cost_per_unit[N][R])
```
### 3. Define the cost function
The optimisation problem to be solved can be described by:
$$ \min_{x_i \in \{0,1\}} f( X = x_1, \dotsc,x_n) = \gamma \ f_{\text{cost}} (X) -\lambda f_{\text{yield}}(X)$$
The cost function specific to our example is defined as:
$$ \min_{X \in \{0,1\}^{N\times R}} f(X = x_{11}, \dotsc,x_{nr}) = \lambda\left(C- \sum_i^{N}\sum_j^{R} a_{ij} x_{ij}\right)^2 + \gamma \sum_i^{N}\sum_j^{R} b_{ij} x_{ij}\ ,$$
for more details please consult the pdf document provided.
```
def cost_obj(x, G):
    """
    Evaluate the QUBO objective for a candidate bitstring.

    Computes lam*(C - sum_i a_revs[i]*x_i)^2 + gam*sum_i b_costs[i]*x_i,
    expanded into a constant, linear terms over the nodes, and pairwise
    terms over the edges of the complete graph G. Uses the module-level
    lam, gam, C, a_revs and b_costs.
    Args:
        x: str
            solution bitstring, one '0'/'1' character per qubit/node
        G: networkx graph
    Returns:
        obj: float
            objective value (lower is better)
    """
    # Constant term from expanding the squared revenue constraint.
    total = lam * C**2
    # Linear terms: squared-revenue diagonal, cost, and cross term with C.
    for node in G.nodes():
        coeff = lam * a_revs[node]**2 + gam * b_costs[node] - 2*lam*C*a_revs[node]
        total += coeff * int(x[node])
    # Quadratic cross terms over every pair of nodes (G is complete).
    for u, v in G.edges():
        total += 2 * lam * a_revs[u] * a_revs[v] * int(x[u]) * int(x[v])
    return total
```
### 4. Define the expectation function
```
def compute_expectation(counts, G):
    """
    Shot-weighted mean of the cost objective over measured bitstrings.

    Args:
        counts: dict
            measurement histogram mapping bitstring -> number of shots
        G: networkx graph
    Returns:
        avg: float
            expectation value of cost_obj under the measured distribution
    """
    weighted_total = sum(cost_obj(bitstr, G) * n for bitstr, n in counts.items())
    total_shots = sum(counts.values())
    return weighted_total / total_shots
```
### 5. Define the circuit for QAOA algorithm (Old Code)
The function will bring the different circuit components that build the qaoa circuit under a single function
```
#old function
def create_qaoa_circ(G, theta):
    """
    Creates a parametrized qaoa circuit
    Args:
        G: networkx graph
        theta: list
            2p angles: theta[:p] are mixer angles (beta),
            theta[p:] are problem angles (gamma)
    Returns:
        qc: qiskit circuit
    """
    n = G.number_of_nodes()
    p = len(theta)//2 # number of alternating unitaries
    betas, gammas = theta[:p], theta[p:]
    qc = QuantumCircuit(n)
    # initial state: uniform superposition over all bitstrings
    for q in range(n):
        qc.h(q)
    for layer in range(p):
        # problem unitary: single-qubit bias terms ...
        for q in G.nodes():
            qc.rz(2 * gammas[layer], q)
        # ... and two-qubit coupling terms
        for u, v in G.edges():
            qc.rzz(2 * gammas[layer], u, v)
        # mixer unitary
        for q in range(n):
            qc.rx(2 * betas[layer], q)
    qc.measure_all()
    return qc
```
## Change mixer in the QAOA
One can add the Pauli gate $\sigma_y$ in the mixer Hamiltonian in order to improve the search of the minimum value in the energy.
```
### new function
def newcreate_qaoa_circ(G, theta):
    """
    Creates a parametrized QAOA circuit with an extended (Rx + Ry) mixer.

    Args:
        G: networkx graph
        theta: list
            2p unitary parameters: theta[:p] are mixer angles and
            theta[p:] are problem (cost) angles
    Returns:
        qc: qiskit circuit
    """
    nqubits = len(G.nodes())
    p = len(theta)//2 # number of alternating unitaries
    qc = QuantumCircuit(nqubits)
    betax = theta[:p]
    # NOTE(review): betay is the SAME slice as betax, so the Ry mixer
    # rotates by exactly the same angles as the Rx mixer rather than
    # being an independent parameter. A truly independent y-angle would
    # need theta of length 3p (and matching initial guesses in the
    # optimiser call) — confirm which was intended.
    betay = theta[:p] ### new to the code
    #betaz = theta[:p]
    gamma = theta[p:]
    # initial_state
    for i in range(0, nqubits):
        qc.h(i)
    for irep in range(0, p):
        # problem unitary biases
        for i in list(G.nodes()):
            qc.rz(2 * gamma[irep], i)
        # problem unitary couplings
        for pair in list(G.edges()):
            qc.rzz(2 * gamma[irep], pair[0], pair[1])
        # mixer unitary
        for i in range(0, nqubits):
            qc.rx(2 * betax[irep], i)
            qc.ry(2 * betay[irep], i) ### new to the code
            #qc.rz(2 * betaz[irep], i)
    qc.measure_all()
    return qc
```
### 6. Define function to run the quantum circuit on the backend of choosing (in this example the IBM Air simulator was used)
```
def get_expectation(G, p, shots=1012):
    """
    Build a closure that evaluates the QAOA energy for a given angle vector.

    Args:
        G: networkx graph
        p: int,
            Number of repetitions of unitaries (QAOA depth)
        shots: int
            number of measurement shots per circuit evaluation
    Returns:
        execute_circ: callable mapping an angle vector theta to the
            shot-averaged cost objective (suitable for scipy.optimize).
    """
    backend = Aer.get_backend('qasm_simulator')

    def execute_circ(theta):
        #qc = create_qaoa_circ(G, theta) ### old code
        qc = newcreate_qaoa_circ(G, theta) ### new code
        # Pass the shot count via run()'s `shots` keyword. The original
        # assigned backend.shots (a plain attribute Aer ignores) and passed
        # the misspelled kwarg `nshots`, so the simulator silently fell
        # back to its default shot count.
        counts = backend.run(qc, seed_simulator=10,
                             shots=shots).result().get_counts()
        return compute_expectation(counts, G)
    return execute_circ
```
### 7. Optimize the quantum circuit parameters (New code)
Changing the initial parameters helps to improve the probabilities.
```
### The initial values were [1.0,1.0], now we have changed to [0.1,0.2]. This improves the probability
## of the result 1111111111111111 meanwhile decreases the other results
expectation = get_expectation(G, p=1)
#res = minimize(expectation, [0.1, 0.2], method='Nelder-Mead') ## try a new method
res = minimize(expectation, [0.1, 0.2], method='COBYLA')
# Bare expression: the notebook displays the optimisation result (angles + value).
res
```
### 8. Run quantum circuit
```
backend = Aer.get_backend('aer_simulator')
# NOTE(review): assigning backend.shots sets a plain Python attribute and is
# likely ignored by Aer — configure shots via backend.run(..., shots=...)
# or backend.set_options(shots=...) instead; confirm against the Aer docs.
backend.shots = 1012
#qc_res = create_qaoa_circ(G, res.x) ### old code
qc_res = newcreate_qaoa_circ(G, res.x)
# Visualise the quantum circuit
qc_res.draw(output='mpl')
```
### 8. Analyse the resulting distribution
```
# Obtain simulation results
counts = backend.run(qc_res, seed_simulator=10).result().get_counts()
strings = list(counts.keys())
# NOTE(review): divides by a hard-coded 1024; if the shot count is ever
# configured differently, use sum(counts.values()) to keep these normalised.
probabilities = np.array([counts[string]/1024 for string in strings])
# Plot the first 5 highest scores (energy sources and corresponding locations)
num_highest = 5
perm = np.flip((np.argsort(probabilities)))
indices = perm[0:num_highest]
xs = [strings[i] for i in indices] + ["Other"]
ys = [probabilities[i] for i in indices] + [sum(probabilities[perm[num_highest:]])]
fig, ax = plt.subplots(1, 1,figsize=(7, 5))
ax.bar(xs, ys)
# NOTE(review): newer matplotlib warns when set_xticklabels is called
# without set_xticks; consider ax.set_xticks(range(len(xs)), xs).
ax.set_xticklabels(xs, fontsize=10, rotation=-45)
ax.set_ylabel("Probability")
plt.show()
```
| github_jupyter |
**Reading through Ch. 5 of Simon Wood's book and finding the general iterative approach for determining GAMs by backfitting. The algorithm takes advantage of R's spline.smooth() function, which itself uses Penalized Residual Sum of Squares (PRSS) to compute penalized regression using cubic splines.**
#### This backfit worked initially
    f <- x*0; alpha <- mean(y); ok <- TRUE
    while (ok) { # backfitting loop
      for (i in 1:m) { # loop through the smooth terms
        ep <- y - rowSums(f[,-i]) - alpha
        b <- smooth.spline(x[,i], ep, df=edf[i])
        f[,i] <- predict(b, x[,i])$y
      }
      rss <- sum((y - rowSums(f))^2)
      if (abs(rss - rss0) < 1e-6*rss) ok <- FALSE
      rss0 <- rss
    }
```
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
# Adding R functionality.
from rpy2.robjects import r, pandas2ri, IntVector, Formula
pandas2ri.activate()
#This backfitting algorithm worked, but it was causing kernel to crash when I tried to graph it.
#
%%R
# Backfitting loop: cycle over the m smooth terms, refitting each against
# the partial residuals of the others until the RSS stops changing
# (relative tolerance 1e-6). Assumes x, y, f, alpha, m, ok, rss0 already
# exist in the R session.
# NOTE(review): the textbook version forms the partial residual with
# rowSums(f[,-i]); here f[,-i] is used directly — confirm this is intended.
while (ok){ # backfitting loop
    for (i in 1:m) {ep <- y-f[,-i]-alpha;
        b<-smooth.spline(x[,i],ep,df=10);
        f[,i] <- predict(b,x[,i])$y
        }
    rss<-sum((y-rowSums(f))^2);
    if(abs(rss-rss0)<1e-6*rss) {ok <- FALSE}
    rss0<-rss
}
# Load the mass-spec time series and keep a contiguous, fully-observed slice.
pth = '/Users/phora/GDrive/research/ZIPP/ZIPP2_EN602/EN602_Loose/science/UMS/MS Data/'
massspec_12_1 = pd.read_table(pth+'MSData_7_12_2017 21_08.dfData',sep='\t',parse_dates=[0], header=0, low_memory=False)
# NOTE(review): this dropna result is not assigned back, so it has no effect;
# the ms.dropna(how='any') below is what actually removes rows.
massspec_12_1.dropna(axis=0, how='all')
ms = massspec_12_1.iloc[5858:9895, :]
hdrs = ms.columns.values
# Predictor columns (by position) used as smooth-term inputs.
idx = [62,5,36,28,57,3]
ms = ms.dropna(how='any')
XX = ms[hdrs[idx]]
y = (ms[hdrs[64]])
#for k in range(62,66):
#    idk = (range(0,k-1,1)+range(k+1,12,1))
#    hdrs[idk]
#    XX = ms[hdrs[idk]]
#idk = [62,63,64,65]
# Backfitting state: intercept alpha, fitted components f (all zero to start).
ok = True; alpha = y.mean(); f = XX*0; m = 2; rss0 = 0; hdri = f.columns.values
XX.columns
# Gauss-Seidel backfitting: refit each smooth term against the partial
# residuals of the remaining terms until the RSS converges (rel. tol 1e-6).
while ok:
    for k in range(0,len(XX.columns)):
        # All column indices except k. list(...) + list(...) works on both
        # Python 2 and 3; the original `range + range` concatenation is
        # Python-2-only (range() returned a list there) and raises
        # TypeError on Python 3.
        idk = list(range(0, k)) + list(range(k + 1, len(XX.columns)))
        # Partial residual: response minus intercept and the other terms.
        ep = y - f[hdri[idk]].sum(axis=1) - alpha
        # Penalised cubic smoothing spline fitted via R's smooth.spline.
        b = st.smooth_spline(XX[hdri[k]],ep,df=10)
        newy = st.predict(b,XX[hdri[k]])
        # predict() returns (x, y); row 1 of the transposed frame is the fit.
        t = pandas2ri.ri2py_dataframe(newy).T
        f[hdri[k]] = t[1].values
    rss = ((y-f.sum(axis=1))**2).sum()
    if abs(rss-rss0) < 1e-6*rss:
        ok = False
    rss0 = rss
#Reconstruct is: intercept plus the sum of the fitted component functions.
yr = alpha + f.sum(axis=1)
# Observed response (black) with the GAM reconstruction overlaid (red).
plt.figure()
plt.plot(ms[hdrs[0]],y,'k')
plt.ylim(9e-11,1e-10)
plt.plot(ms[hdrs[0]],yr,'r')
plt.show()
# Raw scatter of predictor column 62 against the response column 64.
plt.figure()
plt.scatter(ms[hdrs[62]],ms[hdrs[64]])
plt.xlim(2e-9,3e-9)
plt.ylim(9e-11,1e-10)
plt.show()
from rpy2.robjects.packages import importr
import rpy2.robjects as ro
from rpy2.robjects import r, pandas2ri
st = importr('stats')  # handle to R's stats package (lm, smooth.spline, predict)
# RSS of the raw (unfitted) predictor sum against the response.
huh = ((y-XX.sum(axis=1))**2).sum()
# NOTE(review): `out` is only defined in a later cell (st.lm below), so
# running this file top-to-bottom raises NameError here — cells are out of
# execution order.
out
# Simple linear model ep ~ fvap, fitted in R via rpy2.
fvap = ms[hdrs[62]]  # predictor column
ep = ms[hdrs[64]]    # response column
fmla = Formula('ep~fvap')
env = fmla.environment
# Bind the Python series into the formula's R environment. The original
# bound the undefined name `vap` here (NameError); the predictor defined
# above is `fvap`.
env['ep'] = ep
env['fvap'] = fvap
out = st.lm(fmla)
yv = st.predict(out, fvap)
# Semi-parametric backfit: column hdrs[62] enters as a linear (lm) term,
# every other predictor column as a smoothing-spline term.
ok = True; alpha = y.mean(); f = XX*0; m = 2; rss0 = 0; hdri = f.columns.values
# Bind the formula data once. The original bound the undefined name `vap`
# (NameError); the predictor is `fvap`, defined in the previous cell.
fmla = Formula('ep~fvap'); env = fmla.environment; env['ep'] = ep; env['fvap'] = fvap
while ok:
    for k in range(0,len(XX.columns)):
        # All column indices except k (Python 2/3 compatible, unlike the
        # original Python-2-only `range + range` concatenation).
        idk = list(range(0, k)) + list(range(k + 1, len(XX.columns)))
        ep = y - f[hdri[idk]].sum(axis=1) - alpha
        if hdri[k] == hdrs[62]:
            # Parametric (linear) component for this column.
            b2 = st.lm(fmla)
            newy = st.predict(b2,pd.DataFrame(XX[hdri[k]]))
            t = pandas2ri.ri2py(newy)
            f[hdri[k]] = t
        else:
            # Nonparametric component via R's smooth.spline.
            b = st.smooth_spline(XX[hdri[k]],ep,df=10)
            newy = st.predict(b,XX[hdri[k]])
            t = pandas2ri.ri2py_dataframe(newy).T
            f[hdri[k]] = t[1].values
    rss = ((y-f.sum(axis=1))**2).sum()
    if abs(rss-rss0) < 1e-6*rss:
        ok = False
    rss0 = rss
#Reconstruct is: intercept plus the sum of fitted component functions.
yr = alpha + f.sum(axis=1)
# NOTE(review): cells are out of order here — y2 is inspected on the first
# line but only assigned on the second (lm prediction over an evenly spaced
# grid of 50 predictor values).
pandas2ri.ri2py(y2).shape
y2 = st.predict(b2,pd.DataFrame(np.linspace(1e-9,5e-9,50)))
plt.figure()
plt.scatter(np.linspace(1e-9,5e-9,50),pandas2ri.ri2py(y2))
plt.show()
# Observed response (black) with the semi-parametric reconstruction (red).
plt.figure()
plt.plot(ms[hdrs[0]],y,'k')
plt.ylim(9e-11,1e-10)
plt.plot(ms[hdrs[0]],yr,'r')
plt.show()
# Minimal rpy2 lm() round-trip example (fit y ~ x on synthetic data, then
# predict). NOTE(review): this rebinds x and y, clobbering the mass-spec
# response defined earlier; `import array` appears unused here.
import array
from rpy2.robjects import IntVector, Formula
from rpy2.robjects.packages import importr
stats = importr('stats')
x = IntVector(range(1, 11))
y = x.ro + stats.rnorm(10, sd=0.2)  # .ro enables R-style elementwise ops
fmla = Formula('y ~ x')
env = fmla.environment
env['x'] = x
env['y'] = y
fit = stats.lm(fmla)
Y = pd.DataFrame(pandas2ri.ri2py(y))
yv = stats.predict(fit,Y)
# Pure-Python alternative: natural cubic regression spline basis via patsy,
# fitted with scikit-learn ridge regression.
XX = ms[hdrs[idx]]
y = (ms[hdrs[64]])
from patsy import dmatrix
import statsmodels.api as sm
import statsmodels.formula.api as smf
from sklearn.linear_model import Ridge
model = Ridge()
# Generate natural cubic spline basis with df+4 knots.
# transformed_x3 = dmatrix("cr(train,df = 3)", {"train": train_x}, return_type='dataframe')
# fit3 = sm.GLM(train_y, transformed_x3).fit()
# NOTE(review): `k` is a leftover loop index from the earlier backfitting
# cell, so which column XX[hdri[k]] selects depends on cell execution
# order — confirm the intended column.
bas = dmatrix("cr(train,df=3)-1", {"train": XX[hdri[k]]})
#Bb = smf.OLS(y, bas).fit()
Bb = model.fit(bas,y)
# Prediction on validation set
pred3 = Bb.predict(dmatrix("cr(valid, df=3)-1", {"valid": XX[hdri[k]]}, return_type='dataframe'))
# Calculating RMSE value
rms = np.sqrt(np.sum((y-pred3)**2))
print(rms)
plt.figure()
plt.plot(ms[hdrs[0]],y,'k')
plt.ylim(9e-11,1e-10)
plt.plot(ms[hdrs[0]],pred3,'r')
plt.show()
XX[hdri[k]]
```
| github_jupyter |
<h1>Table of Contents<span class="tocSkip"></span></h1>
<div class="toc"><ul class="toc-item"><li><span><a href="#Data-visualization-" data-toc-modified-id="Data-visualization--1"><span class="toc-item-num">1 </span>Data visualization <a name="chap:R_viz"></a></a></span><ul class="toc-item"><li><span><a href="#Data-exploration-with-basic-plotting" data-toc-modified-id="Data-exploration-with-basic-plotting-1.1"><span class="toc-item-num">1.1 </span>Data exploration with basic plotting</a></span><ul class="toc-item"><li><span><a href="#Basic-plotting-commands" data-toc-modified-id="Basic-plotting-commands-1.1.1"><span class="toc-item-num">1.1.1 </span>Basic plotting commands</a></span></li><li><span><a href="#Scatter-Plots" data-toc-modified-id="Scatter-Plots-1.1.2"><span class="toc-item-num">1.1.2 </span>Scatter Plots</a></span></li><li><span><a href="#Histograms" data-toc-modified-id="Histograms-1.1.3"><span class="toc-item-num">1.1.3 </span>Histograms</a></span></li><li><span><a href="#Subplots" data-toc-modified-id="Subplots-1.1.4"><span class="toc-item-num">1.1.4 </span>Subplots</a></span></li><li><span><a href="#Overlaying-plots" data-toc-modified-id="Overlaying-plots-1.1.5"><span class="toc-item-num">1.1.5 </span>Overlaying plots</a></span></li><li><span><a href="#Boxplots" data-toc-modified-id="Boxplots-1.1.6"><span class="toc-item-num">1.1.6 </span>Boxplots</a></span></li><li><span><a href="#Combining-plot-types" data-toc-modified-id="Combining-plot-types-1.1.7"><span class="toc-item-num">1.1.7 </span>Combining plot types</a></span></li><li><span><a href="#Lattice-plots" data-toc-modified-id="Lattice-plots-1.1.8"><span class="toc-item-num">1.1.8 </span>Lattice plots</a></span></li><li><span><a href="#Saving-your-graphics" data-toc-modified-id="Saving-your-graphics-1.1.9"><span class="toc-item-num">1.1.9 </span>Saving your graphics</a></span></li><li><span><a href="#Practicals" data-toc-modified-id="Practicals-1.1.10"><span class="toc-item-num">1.1.10 </span>Practicals</a></span></li></ul></li><li><span><a 
href="#High-quality-graphics-in-R" data-toc-modified-id="High-quality-graphics-in-R-1.2"><span class="toc-item-num">1.2 </span>High-quality graphics in R</a></span><ul class="toc-item"><li><span><a href="#Basic-plotting-with-qplot" data-toc-modified-id="Basic-plotting-with-qplot-1.2.1"><span class="toc-item-num">1.2.1 </span>Basic plotting with <code>qplot</code></a></span></li><li><span><a href="#Some-more-important-ggplot-options" data-toc-modified-id="Some-more-important-ggplot-options-1.2.2"><span class="toc-item-num">1.2.2 </span>Some more important ggplot options</a></span></li><li><span><a href="#Various-geom" data-toc-modified-id="Various-geom-1.2.3"><span class="toc-item-num">1.2.3 </span>Various <code>geom</code></a></span></li><li><span><a href="#Advanced-plotting:-ggplot" data-toc-modified-id="Advanced-plotting:-ggplot-1.2.4"><span class="toc-item-num">1.2.4 </span>Advanced plotting: <code>ggplot</code></a></span></li><li><span><a href="#Some-useful-ggplot-examples" data-toc-modified-id="Some-useful-ggplot-examples-1.2.5"><span class="toc-item-num">1.2.5 </span>Some useful ggplot examples</a></span></li><li><span><a href="#Plotting-a-matrix" data-toc-modified-id="Plotting-a-matrix-1.2.6"><span class="toc-item-num">1.2.6 </span>Plotting a matrix</a></span></li><li><span><a href="#Plotting-two-dataframes-(Girko's-circular-Law)" data-toc-modified-id="Plotting-two-dataframes-(Girko's-circular-Law)-1.2.7"><span class="toc-item-num">1.2.7 </span>Plotting two dataframes (Girko's circular Law)</a></span></li><li><span><a href="#Annotating-plots" data-toc-modified-id="Annotating-plots-1.2.8"><span class="toc-item-num">1.2.8 </span>Annotating plots</a></span></li><li><span><a href="#Mathematical-display" data-toc-modified-id="Mathematical-display-1.2.9"><span class="toc-item-num">1.2.9 </span>Mathematical display</a></span></li><li><span><a href="#ggthemes" data-toc-modified-id="ggthemes-1.2.10"><span class="toc-item-num">1.2.10 
</span>ggthemes</a></span></li><li><span><a href="#Practicals" data-toc-modified-id="Practicals-1.2.11"><span class="toc-item-num">1.2.11 </span>Practicals</a></span></li></ul></li><li><span><a href="#Readings-&-Resources" data-toc-modified-id="Readings-&-Resources-1.3"><span class="toc-item-num">1.3 </span>Readings & Resources</a></span></li></ul></li></ul></div>
# Data visualization <a name="chap:R_viz"></a>
<!--NAVIGATION-->
<[Back to Workshop Contents](Index.ipynb)>
<div id="toc"> </div>
This chapter introduces key principles of graphics, along with R packages and commands that will altogether allow you to build a workflow for effective data visualization for exploratory and publication purposes. We will start with some basic plotting and data exploration. You will then learn to generate publication-quality visualizations using the `ggplot2` package.
## Data exploration with basic plotting
Before you do any statistical analyses with data, you must clean, explore, and visualize it. And eventually, you want to produce a finished product that presents visualizations of your data and your results clearly and concisely.
Ultimately, at both, the data exploration and the finished product stages, the goal of graphics is to present information such that it provides intuitive ideas. As Edward Tufte says:
> *"Graphical excellence is that which gives to the viewer the greatest number of ideas in the shortest time with the least ink in the smallest space."*
R can produce beautiful graphics without the time-consuming and fiddly methods that you might have used in Excel or equivalent. You should also make it a habit to quickly plot the data for exploratory analysis. So we are going to learn some basic plotting first.
### Basic plotting commands
Here is a menu of basic R plotting commands (use `?commandname` to learn more about it):
| | |
|:------------- |:------------- |
|`plot(x,y)`| Scatterplot|
|`plot(y~x)`| Scatterplot with `y` as a response variable|
| `hist(mydata)`| Histogram|
|`barplot(mydata)`| Bar plot|
|`points(y1$\sim$x1)`| Add another series of points|
| `boxplot(y$\sim$x)`| Boxplot|
> **R graphics devices**:
> In all that follows, you may often end up plotting multiple plots on the same graphics window without intending to do so, because R by default keeps plotting in the most recent plotting window that was opened. You can close a particular graphics window or “device” by using `dev.off()`, and all open devices/windows with `graphics.off()`. By default, `dev.off()` will close the most recent figure device that was opened.
>Note that there are invisible devices in `R`! For example, if you are printing to pdf (coming up below), the device or graphics window will not be visible on your computer screen.
Now let’s try some simple plotting for data exploration. As a case study, we will use a dataset on Consumer-Resource (e.g., Predator-Prey) body mass ratios taken from the Ecological Archives of the ESA (Barnes *et al.* 2008, Ecology 89:881).
<ul style="list-style-type:square">
<li>Copy the file `EcolArchives-E089-51-D1.csv` from `Data` directory in the master git repository on bitbucket to your own `Data` directory.</li>
</ul>
Now, launch R and read in these data to a data frame (note the relative path):
```
# Read the predator-prey body-mass dataset (path relative to the code dir).
MyDF <- read.csv("../data/EcolArchives-E089-51-D1.csv")
dim(MyDF) #check the size of the data frame you loaded
```
Let's look at what the data contain. Type `MyDF` and hit the TAB key twice in the R commandline. If you are using RStudio, you just can hit it once, and you will get a list of column names.
**In RStudio, you will see a drop-down list of all the column headers when you hit TAB**
You can also use the `str()` and `head()` commands (See the Intro to R Chapter in [these notes](https://github.com/mhasoba/TheMulQuaBio/blob/master/silbiocomp/SilBioComp.pdf).
As you can see, these data contain predator-prey body size information. This is an interesting dataset because it is huge, and covers a wide range of body sizes of aquatic species involved in consumer-resource interactions — from unicells to whales. Analyzing this dataset should tell us a lot about what sizes of prey predators like to eat.
<img src="./graphics/SeaLion.png" alt="A consumer-resource (predator-prey) interaction waiting to happen" style="width: 500px;"/>
### Scatter Plots
Let's start by plotting Predator mass vs. Prey mass.
First I will use the `repr` R package to change the default plot size in this jupyter notebook (you don't have to do this if you are using RStudio):
```
library(repr) # repr lets us resize plots inside Jupyter
options(repr.plot.width=6, repr.plot.height=4) # Change default plot size; not necessary if you are using Rstudio
# Raw (unlogged) masses: the plot is dominated by a few very large values.
plot(MyDF$Predator.mass,MyDF$Prey.mass)
```
That doesn't look very meaningful! Let's try taking logarithms. Why? Because body sizes across species tend to be log-normally distributed, with a lot of small species and a few large ones — taking a log allows you to inspect the body size range on a meaningful (logarithmic) scale and reveals the true relationship. This also illustrates an important point: just like statistical analyses, the effectiveness of data visualization depends on the type of distribution of the data.
```
# Log-log scale reveals the underlying predator-prey mass relationship.
plot(log(MyDF$Predator.mass),log(MyDF$Prey.mass))
```
We can change almost any aspect of the resulting graph; let’s change the
symbols by specifying the <span>p</span>lot <span>ch</span>aracters
using <span>pch</span>:
```
plot(log(MyDF$Predator.mass),log(MyDF$Prey.mass),pch=20) # Change marker
plot(log(MyDF$Predator.mass),log(MyDF$Prey.mass),pch=20,
xlab = "Predator Mass (kg)", ylab = "Prey Mass (kg)") # Add labels
```
A really great summary of basic R graphical parameters can be [found here](https://www.statmethods.net/advgraphs/parameters.html).
### Histograms
Why did we have to take a logarithm to see the relationship between
predator and prey size? Plotting histograms of the two classes
(predator, prey) should be insightful, as we can then see the “marginal”
distributions of the two variables.
Let’s first plot a histogram of predator body masses:
```
hist(MyDF$Predator.mass)
```
Clearly, the data are heavily right skewed, with small body sized organisms dominating (that’s a general pattern, as mentioned above). Let's now take a logarithm and see if we can get a better idea of what the distribution of predator sizes looks like:
```
hist(log(MyDF$Predator.mass),
xlab = "Predator Mass (kg)", ylab = "Count") # include labels
hist(log(MyDF$Predator.mass),xlab="Predator Mass (kg)",ylab="Count",
col = "lightblue", border = "pink") # Change bar and borders colors
```
So, taking a log really makes clearer what the distribution of body predator sizes looks like. *Try the same with prey body masses.*
#### Exercise
We can do a lot of beautification and fine-tuning of your R plots! As an exercise, try adjusting the histogram bin widths to make them the same for the predator and prey, and making the x and y labels larger and in boldface. To get started, look at the help documentation of `hist`.
### Subplots
We can also plot both predator and prey body masses in different sub-plots using `par` so that we can compare them visually.
```
par(mfcol=c(2,1)) #initialize multi-paneled plot
par(mfg = c(1,1)) # specify which sub-plot to use first
hist(log(MyDF$Predator.mass),
xlab = "Predator Mass (kg)", ylab = "Count",
col = "lightblue", border = "pink",
main = 'Predator') # Add title
par(mfg = c(2,1)) # Second sub-plot
hist(log(MyDF$Prey.mass),
xlab="Prey Mass (kg)",ylab="Count",
col = "lightgreen", border = "pink",
main = 'prey')
```
Another option for making multi-panel plots is the `layout` function.
### Overlaying plots
Better still, we would like to see if the predator mass and prey mass distributions are similar by overlaying them.
```
hist(log(MyDF$Predator.mass), # Predator histogram
xlab="Body Mass (kg)", ylab="Count",
col = rgb(1, 0, 0, 0.5), # Note 'rgb', fourth value is transparency
main = "Predator-prey size Overlap")
hist(log(MyDF$Prey.mass), col = rgb(0, 0, 1, 0.5), add = T) # Plot prey
legend('topleft',c('Predators','Prey'), # Add legend
fill=c(rgb(1, 0, 0, 0.5), rgb(0, 0, 1, 0.5))) # Define legend colors
```
Plot annotation with text can be done with either single or double quotes, i.e., ‘Plot Title’ or “Plot Title”, respectively. But it is generally a good idea to use double quotes because sometimes you would like to use an apostrophe in your title or axis label strings.
#### Exercise
* It would be nicer to have both the plots with the same bin sizes – try to do it
### Boxplots
Now, let’s try plotting boxplots instead of histograms. These are useful for getting a visual summary of the distribution of your data.
```
# A single boxplot of (log) predator mass; there is no grouping variable here,
# so the x axis needs no label (the original "Location" label was a copy-paste error).
boxplot(log(MyDF$Predator.mass), ylab = "Predator Mass", main = "Predator mass")
```
Now let’s see how many locations the data are from:
```
boxplot(log(MyDF$Predator.mass) ~ MyDF$Location, # Why the tilde?
xlab = "Location", ylab = "Predator Mass",
main = "Predator mass by location")
```
Note the tilde (\~). This is to tell R to subdivide or categorize your analysis and plot by the “Factor” location. More on this later.
That’s a lot of locations! You will need an appropriately wide plot to see all the boxplots adequately. Now let’s try boxplots by feeding interaction type:
```
# Boxplots of (log) predator mass grouped by feeding interaction type.
# The x axis shows feeding interaction type, not location, so label it accordingly
# (the original "Location" label was carried over from the previous example).
boxplot(log(MyDF$Predator.mass) ~ MyDF$Type.of.feeding.interaction,
    xlab = "Feeding interaction type", ylab = "Predator Mass",
    main = "Predator mass by feeding interaction type")
```
### Combining plot types
It would be nice to see both the predator and prey (marginal) distributions as well as the scatterplot for an exploratory analysis. We can do this by adding boxplots of the marginal variables to the scatterplot.
```
par(fig=c(0,0.8,0,0.8)) # specify figure size as proportion
plot(log(MyDF$Predator.mass),log(MyDF$Prey.mass),
xlab = "Predator Mass (kg)", ylab = "Prey Mass (kg)") # Add labels
par(fig=c(0,0.8,0.55,1), new=TRUE)
boxplot(log(MyDF$Predator.mass), horizontal=TRUE, axes=FALSE)
par(fig=c(0.65,1,0,0.8),new=TRUE)
boxplot(log(MyDF$Prey.mass), axes=FALSE)
mtext("Fancy Predator-prey scatterplot", side=3, outer=TRUE, line=-3)
```
To understand this plotting method, think of the full graph area as going from (0,0) in the lower left corner to (1,1) in the upper right corner. The format of the `fig=` parameter is a numerical vector of the form `c(x1, x2, y1, y2)`. The first `fig= ` sets up the scatterplot going from 0 to 0.8 on the x axis and 0 to 0.8 on the y axis. The top boxplot goes from 0 to 0.8 on the x axis and 0.55 to 1 on the y axis. The right hand boxplot goes from 0.65 to 1 on the x axis and 0 to 0.8 on the y axis. You can experiment with these proportions to change the spacings between plots.
### Lattice plots
You can also make lattice graphs to avoid the somewhat laborious `par()` approach above of getting multi-panel plots. For this, you will need to load a library that isn't included by default when you run R:
```
library(lattice)
```
Let's make a lattice plot of body mass densities (distributions) by type of feeding interaction:
```
densityplot(~log(Predator.mass) | Type.of.feeding.interaction, data=MyDF)
```
Look up [this page](http://www.statmethods.net/advgraphs/trellis.html) and the `lattice` package help for more info.
### Saving your graphics
And you can also save the figure in a vector graphics format like a pdf. It is important to learn to do this, because you want to be able to save your plots in good resolution, and want to avoid the manual steps of
clicking on the figure, doing “save as”, etc. So let’s save the figure as a PDF:
```
pdf("../Results/Pred_Prey_Overlay.pdf", # Open blank pdf page using a relative path
11.7, 8.3) # These numbers are page dimensions in inches
hist(log(MyDF$Predator.mass), # Plot predator histogram (note 'rgb')
xlab="Body Mass (kg)", ylab="Count", col = rgb(1, 0, 0, 0.5), main = "Predator-Prey Size Overlap")
hist(log(MyDF$Prey.mass), # Plot prey weights
col = rgb(0, 0, 1, 0.5),
add = T) # Add to same plot = TRUE
legend('topleft',c('Predators','Prey'), # Add legend
fill=c(rgb(1, 0, 0, 0.5), rgb(0, 0, 1, 0.5)))
graphics.off(); #you can also use dev.off()
```
Always try to save results in a vector format, which can be scaled up to any size. For more on vector vs raster images/graphics, [see this](https://en.wikipedia.org/wiki/Vector_graphics).
Note that you are saving to the `Results` directory now. This is a recommended project organization and workflow: store and retrieve data from a `
Data` directory, keep your code and work from a `Code` directory, and save outputs to a `Results` directory.
You can also try other graphic output formats. For example, `png()` (a raster format) instead of `pdf()`. As always, look at the help documentation of each of these commands!
### Practicals
In this practical, you will write a script that draws and saves three lattice graphs by feeding interaction type: one of predator mass, one of prey mass and one of the size ratio of prey mass over predator mass. Note that you would want to use logarithms of masses (or mass-ratios) for all three plots. In addition, the script will calculate the mean and median predator mass, prey mass and predator-prey size-ratios to a csv file. So:
* Write a script file called `PP_Lattice.R` and save it in the `Code` directory — sourcing or running this script should result in three files called `Pred_Lattice.pdf`, ` Prey_Lattice.pdf`, and `SizeRatio_Lattice.pdf` being
saved in the `Results` directory (the names should be self-explanatory).
In addition, the script should calculate the mean and median log predator mass, prey mass, and predator-prey size ratio, *by feeding type*, and save it as a single csv output table called `PP_Results.csv` to the `Results` directory. The table should have appropriate headers (e.g., Feeding type, mean, median). (Hint: you will have to initialize a new dataframe or matrix in the script to first store the calculations)
The script should be self-sufficient and not need any external inputs — it should import the above predator-prey dataset from the appropriate directory, and save the graphic plots to the appropriate directory (Hint: use relative paths!).
There are multiple ways to do this practical. The plotting and saving component is simple enough. For calculating the statistics by feeding type, you can either use the “loopy” way — first obtaining a list of feeding types (look up the `unique` or `levels` functions) and then loop over them, using `subset` to extract the dataset by feeding type at each iteration, or the R-savvy way, by using `tapply` or `ddply` and avoiding looping altogether see the advanced R Chapter in [these notes](https://github.com/mhasoba/TheMulQuaBio/blob/master/silbiocomp/SilBioComp.pdf).
## High-quality graphics in R
`R` can produce beautiful graphics/visualizations, but it typically takes a lot of work to obtain the desired result. This is because the starting point is pretty much a “bare” plot, and adding features commonly required for publication-grade figures (legends, statistics, regressions, sub-plotting etc.) can require a lot of small and painful additional arguments to the plotting commands *at the same time*, or even additional steps (such as the [fancy predator-prey scatterplot](#Combining-plot-types) above).
Moreover, it is very difficult to switch from one representation of the data to another (i.e., from boxplots to scatterplots), or to plot several datasets together. The `R` package `ggplot2` overcomes these issues, and produces truly high-quality, publication-ready graphics suitable for papers, theses and reports.
> **Tip:** *Currently, `ggplot2` cannot be used to create 3D graphs or mosaic plots. In any case, most of you won’t be needing 3D plots. If you do, there are many ways to do 3D plots using other plotting packages in R. In particular, look up the `scatterplot3d` and `plot3D` packages.*
`ggplot2` differs from other approaches as it attempts to provide a “grammar” for graphics in which each layer is the equivalent of a verb, subject etc. and a plot is the equivalent of a sentence. All graphs start with a layer showing the data, other layers and attributes/styles are added to modify the plot. Specifically, according to this grammar, a statistical graphic is a “mapping” from data to aesthetic attributes (colour, shape, size; set using `aes`) of geometric objects (points, lines, bars; set using `geom`).
For more on the ideas underlying ggplot, see the book “ggplot2: Elegant Graphics for Data Analysis”, by H. Wickham (in your Reading directory). Also, the [ggplot2 website](http://ggplot2.tidyverse.org/) is an excellent resource.
`ggplot2` should be available on your computer. If not, look up the section on installing packages in the Intro to R Chapter in [these notes](https://github.com/mhasoba/TheMulQuaBio/blob/master/silbiocomp/SilBioComp.pdf).
ggplot can be used in two ways: with `qplot` (for `q`uick ` plot`ting) and `ggplot` for fully customized plotting.
*Note that `ggplot2` only accepts data in data frames.*
### Basic plotting with `qplot`
`qplot` can be used to quickly produce graphics for exploratory data analysis, and as a base for more complex graphics. It uses syntax that is closer to the standard R plotting commands.
We will use the same predator-prey body size dataset again – you will soon see how much nicer the same types of plots you made above look when done with ggplot!
First, load the package:
```
require(ggplot2) ## Load the package
```
#### Scatterplots
Let’s start plotting the `Predator.mass` vs `Prey.mass`:
```
qplot(Prey.mass, Predator.mass, data = MyDF)
```
As before, let’s take logarithms and plot:
```
qplot(log(Prey.mass), log(Predator.mass), data = MyDF)
```
Now, color the points according to the type of feeding interaction:
```
qplot(log(Prey.mass), log(Predator.mass), data = MyDF, colour = Type.of.feeding.interaction)
```
The same as above, but changing the shape:
```
qplot(log(Prey.mass), log(Predator.mass), data = MyDF, shape = Type.of.feeding.interaction)
```
#### Aesthetic mappings
These examples demonstrate a key difference between `qplot` and the standard `plot` command: When you want to assign colours, sizes or shapes to the points on your plot, using the `plot` command, it's your responsibility to convert (i.e., “map”) a categorical variable in your data (e.g., type of feeding interaction in the above case) onto colors (or shapes) that `plot` knows how to use (e.g., by specifying “red”, “blue”, “green”, etc).
ggplot does this mapping for you automatically, and also provides a legend! This makes it really easy to quickly include additional data (e.g., if a new feeding interaction type was added to the data) on the plot.
Instead of using ggplot’s automatic mapping, if you want to manually set a color or a shape, you have to use `I()` (meaning “Identity”). To see this in practice, try the following:
```
qplot(log(Prey.mass), log(Predator.mass),
data = MyDF, colour = "red")
```
You chose red, but ggplot used mapping to convert it to a particular
shade of red. To set it manually to the real red, do this:
```
qplot(log(Prey.mass), log(Predator.mass), data = MyDF, colour = I("red"))
```
Similarly, for point size, compare these two:
```
qplot(log(Prey.mass), log(Predator.mass), data = MyDF, size = 3) #with ggplot size mapping
qplot(log(Prey.mass), log(Predator.mass), data = MyDF, size = I(3)) #no mapping
```
But for shape, ggplot doesn’t have a continuous mapping because shapes are a discrete variable. To see this, compare these two:
```
qplot(log(Prey.mass), log(Predator.mass), data = MyDF, shape = 3) #will give error
qplot(log(Prey.mass), log(Predator.mass), data = MyDF, shape= I(3))
```
#### Setting transparency
Because there are so many points, we can make them semi-transparent using `alpha` so that the overlaps can be seen:
```
qplot(log(Prey.mass), log(Predator.mass), data = MyDF, colour = Type.of.feeding.interaction, alpha = I(.5))
```
Here, try using `alpha = .5` instead of `alpha = I(.5)` and see what happens.
#### Adding smoothers and regression lines
Now add a smoother to the points:
```
qplot(log(Prey.mass), log(Predator.mass), data = MyDF, geom = c("point", "smooth"))
```
If we want to have a linear regression, we need to specify the method as
being <span>lm</span>:
```
qplot(log(Prey.mass), log(Predator.mass), data = MyDF, geom = c("point", "smooth")) + geom_smooth(method = "lm")
```
`lm` stands for `l`inear `m`odels (linear regression is a type of linear model).
We can also add a “smoother” for each type of interaction:
```
qplot(log(Prey.mass), log(Predator.mass), data = MyDF, geom = c("point", "smooth"),
colour = Type.of.feeding.interaction) + geom_smooth(method = "lm")
```
To extend the lines to the full range, use `fullrange = TRUE`:
```
qplot(log(Prey.mass), log(Predator.mass), data = MyDF, geom = c("point", "smooth"),
colour = Type.of.feeding.interaction) + geom_smooth(method = "lm",fullrange = TRUE)
```
Now let's see how the ratio between prey and predator mass changes according to the type of interaction:
```
qplot(Type.of.feeding.interaction, log(Prey.mass/Predator.mass), data = MyDF)
```
Because there are so many points, we can “jitter” them to get a better
idea of the spread:
```
qplot(Type.of.feeding.interaction, log(Prey.mass/Predator.mass), data = MyDF, geom = "jitter")
```
#### Boxplots
Or we can draw a boxplot of the data (note the `geom` argument, which stands for `geom`etry):
```
qplot(Type.of.feeding.interaction, log(Prey.mass/Predator.mass), data = MyDF, geom = "boxplot")
```
#### Histograms and density plots
Now let’s draw an histogram of predator-prey mass ratios:
```
qplot(log(Prey.mass/Predator.mass), data = MyDF, geom = "histogram")
```
Color the histogram according to the interaction type:
```
qplot(log(Prey.mass/Predator.mass), data = MyDF, geom = "histogram",
fill = Type.of.feeding.interaction)
```
You may want to define binwidth (in units of x axis):
```
qplot(log(Prey.mass/Predator.mass), data = MyDF, geom = "histogram",
fill = Type.of.feeding.interaction, binwidth = 1)
```
To make it easier to read, we can plot the smoothed density of the data:
```
qplot(log(Prey.mass/Predator.mass), data = MyDF, geom = "density",
fill = Type.of.feeding.interaction)
```
And you can make the densities transparent so that the overlaps are visible:
```
qplot(log(Prey.mass/Predator.mass), data = MyDF, geom = "density",
fill = Type.of.feeding.interaction,
alpha = I(0.5))
```
or using `colour` instead of `fill` draws only the edge of the curve:
```
qplot(log(Prey.mass/Predator.mass), data = MyDF, geom = "density",
colour = Type.of.feeding.interaction)
```
Similarly, `geom = “bar”` produces a barplot, `geom = “line”` a series of points joined by a line, etc.
#### Multi-faceted plots
An alternative way of displaying data belonging to different classes is using “faceting”. We did this using the `lattice` package previously, but ggplot does a much nicer job:
```
qplot(log(Prey.mass/Predator.mass), facets = Type.of.feeding.interaction ~., data = MyDF, geom = "density")
```
The `\~.` (the space is not important) notation tells ggplot whether to do the faceting by row or by column. So if you want a by-column configuration, switch `\~` and `.`, and also swap the position of the `.\~`:
```
qplot(log(Prey.mass/Predator.mass), facets = .~ Type.of.feeding.interaction, data = MyDF, geom = "density")
```
You can also facet by a combination of categories (this is going to be a big plot!):
```
qplot(log(Prey.mass/Predator.mass), facets = .~ Type.of.feeding.interaction + Location,
data = MyDF, geom = "density")
```
And you can also change the order of the combination:
```
qplot(log(Prey.mass/Predator.mass), facets = .~ Location + Type.of.feeding.interaction,
data = MyDF, geom = "density")
```
For more fine-tuned faceting, look up the `facet_grid()` and `facet_wrap()` functions within `ggplot2`. Look up this section of the [R Cookbook](http://www.cookbook-r.com/Graphs/Facets_(ggplot2)) for more examples.
#### Logarithmic axes
A better way to plot data in the log scale is to also set the axes to be logarithmic:
```
qplot(Prey.mass, Predator.mass, data = MyDF, log="xy")
```
#### Plot annotations
Let’s add a title and labels:
```
qplot(Prey.mass, Predator.mass, data = MyDF, log="xy",
main = "Relation between predator and prey mass",
xlab = "log(Prey mass) (g)",
ylab = "log(Predator mass) (g)")
```
Adding `+ theme_bw()` makes it suitable for black and white printing.
```
qplot(Prey.mass, Predator.mass, data = MyDF, log="xy",
main = "Relation between predator and prey mass",
xlab = "Prey mass (g)",
ylab = "Predator mass (g)") + theme_bw()
```
#### Saving your plots
Finally, let’s save a pdf file of the figure (same approach as we [used before](#Saving-your-graphics)):
```
pdf("../Results/MyFirst-ggplot2-Figure.pdf")
print(qplot(Prey.mass, Predator.mass, data = MyDF,log="xy",
main = "Relation between predator and prey mass",
xlab = "log(Prey mass) (g)",
ylab = "log(Predator mass) (g)") + theme_bw())
dev.off()
```
Using `print` ensures that the whole command is kept together and that you can use the command in a script.
### Some more important ggplot options
Other important options to keep in mind:
| | |
|:------------- |:------------- |
|`xlim` | limits for x axis: `xlim = c(0,12)`|
|`ylim` | limits for y axis|
|`log` |log transform variable `log = “x”`, `log = “y”`, `log = “xy”`|
|`main` |title of the plot `main = “My Graph”`|
|`xlab` |x-axis label|
|`ylab` |y-axis label|
|`asp` | aspect ratio `asp = 2`, `asp = 0.5`|
|`margins` |whether or not margins will be displayed|
### Various `geom`
`geom` Specifies the geometric objects that define the graph type. The geom option is expressed as a R character vector with one or more entries. geom values include “point”, “smooth”, “boxplot”, “line”, “histogram”, “density”, “bar”, and “jitter”.
Try the following:
### Advanced plotting: `ggplot`
The command `qplot` allows you to use only a single dataset and a single set of “aesthetics” (x, y, etc.). To make full use of `ggplot2`, we need to use the command `ggplot`, which allows you to use “layering”. Layering is the mechanism by which additional data elements are added to a plot. Each layer can come from a different dataset and have a different aesthetic mapping, allowing us to create plots that could not be generated using `qplot()`, which permits only a single dataset and a single set of aesthetic mappings.
For a `ggplot` plotting command, we need at least:
- The data to be plotted, in a data frame;
- Aesthetics mappings, specifying which variables we want to plot, and how;
- The `geom`, defining the geometry for representing the data;
- (Optionally) some `stat` that transforms the data or performs statistics using the data.
To start a graph, we must specify the data and the aesthetics:
```
p <- ggplot(MyDF, aes(x = log(Predator.mass),
y = log(Prey.mass),
colour = Type.of.feeding.interaction ))
```
Here we have created a graphics object <span>p</span> to which we can add layers and other plot elements.
Now try to plot the graph:
```
p
```
The plot is blank because we are yet to specify a geometry — only then can we see the graph:
```
p + geom_point()
```
We can use the “+” sign to concatenate different commands:
```
p <- ggplot(MyDF, aes(x = log(Predator.mass), y = log(Prey.mass), colour = Type.of.feeding.interaction ))
q <- p + geom_point(size=I(2), shape=I(10)) + theme_bw()
q
```
Let’s remove the legend:
```
q + theme(legend.position = "none")
```
### Some useful ggplot examples
### Plotting a matrix
Let's plot the values of a matrix. This is basically the same as rendering a 2D image. We will visualize random values taken from a uniform distribution $\mathcal U [0,1]$. Because we want to plot a matrix, and ggplot2 accepts only dataframes, we use the package reshape2, which can “melt” a matrix into a dataframe:
```
require(reshape2)
# Generate an N x N matrix of uniform random values in [0, 1].
GenerateMatrix <- function(N){
    M <- matrix(runif(N * N), N, N)
    return(M)
}
M <- GenerateMatrix(10) # a 10 x 10 random matrix
Melt <- melt(M) # "melt" the matrix into a long-format dataframe (Var1, Var2, value)
# Assign the plot object to p so the follow-up commands below (p + ...) work;
# the original omitted this assignment, leaving p pointing at an earlier plot.
p <- ggplot(Melt, aes(Var1, Var2, fill = value))
p + geom_tile()
```
Add a black line dividing cells:
```
p + geom_tile(colour = "black")
```
Remove the legend:
```
p + theme(legend.position = "none")
```
Remove all the rest:
```
p + theme(legend.position = "none",
panel.background = element_blank(),
axis.ticks = element_blank(),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
axis.text.x = element_blank(),
axis.title.x = element_blank(),
axis.text.y = element_blank(),
axis.title.y = element_blank())
```
Explore some colors:
```
p + scale_fill_continuous(low = "yellow", high = "darkgreen")
p + scale_fill_gradient2()
p + scale_fill_gradientn(colours = grey.colors(10))
p + scale_fill_gradientn(colours = rainbow(10))
p + scale_fill_gradientn(colours = c("red", "white", "blue"))
```
### Plotting two dataframes (Girko's circular Law)
According to Girko's circular law, the eigenvalues of a matrix $M$ of size $N \times N$ are approximately contained in a circle in the complex plane with radius $\sqrt{N}$. Let's draw the results of a simulation displaying this result.
First, we need to build a function object that will calculate the ellipse (the predicted bounds of the eigenvalues):
```
# Return a data frame of (x, y) points tracing an ellipse with the given
# horizontal and vertical radii, sampled at 250 segments (251 points, so the
# first and last point coincide and the curve closes).
build_ellipse <- function(hradius, vradius){
    angles <- seq(0, 2 * pi, length = 251)
    data.frame(x = hradius * cos(angles), y = vradius * sin(angles))
}
N <- 250 # Assign size of the matrix
M <- matrix(rnorm(N * N), N, N) # Build the matrix
eigvals <- eigen(M)$values # Find the eigenvalues
eigDF <- data.frame("Real" = Re(eigvals), "Imaginary" = Im(eigvals)) # Build a dataframe
my_radius <- sqrt(N) # The radius of the circle is sqrt(N)
ellDF <- build_ellipse(my_radius, my_radius) # Dataframe to plot the ellipse
names(ellDF) <- c("Real", "Imaginary") # rename the columns
```
Now the plotting:
```
# plot the eigenvalues
p <- ggplot(eigDF, aes(x = Real, y = Imaginary))
p <- p +
geom_point(shape = I(3)) +
theme(legend.position = "none")
# now add the vertical and horizontal line
p <- p + geom_hline(aes(yintercept = 0))
p <- p + geom_vline(aes(xintercept = 0))
# finally, add the ellipse
# fill and alpha are fixed values, not data mappings, so they belong OUTSIDE aes();
# inside aes() ggplot would map them through scales, and the polygon would not
# actually be drawn as 1/20-transparent red.
p <- p + geom_polygon(data = ellDF, aes(x = Real, y = Imaginary), fill = "red", alpha = 1/20)
p
```
#### Exercise
Create a self-sufficient R Script called `Girko.R` that combines all the above commands for plotting the Girko's law simulation, including saving the result figure as a file called `Girko.pdf` in the results directory.
### Annotating plots
In this example, we will use the ggplot `geom` `text` to annotate a plot.
```
a <- read.table("Practicals/Data/Results.txt", header = TRUE)
```
Check what the data look like:
```
head(a)
a$ymin <- rep(0, dim(a)[1]) # append a column of zeros
# Print the first linerange
p <- ggplot(a)
p <- p + geom_linerange(data = a, aes(
x = x,
ymin = ymin,
ymax = y1,
size = (0.5)
),
colour = "#E69F00",
alpha = 1/2, show.legend = FALSE)
# Print the second linerange
p <- p + geom_linerange(data = a, aes(
x = x,
ymin = ymin,
ymax = y2,
size = (0.5)
),
colour = "#56B4E9",
alpha = 1/2, show.legend = FALSE)
# Print the third linerange:
p <- p + geom_linerange(data = a, aes(
x = x,
ymin = ymin,
ymax = y3,
size = (0.5)
),
colour = "#D55E00",
alpha = 1/2, show.legend = FALSE)
# Annotate the plot with labels:
p <- p + geom_text(data = a, aes(x = x, y = -500, label = Label))
# now set the axis labels, remove the legend, and prepare for bw printing
p <- p + scale_x_continuous("My x axis",
breaks = seq(3, 5, by = 0.05)) +
scale_y_continuous("My y axis") +
theme_bw() +
theme(legend.position = "none")
p
```
#### Exercise
Create a self-sufficient R Script called `MyBars.R` that combines all the above commands for annotating a plot, including saving the resulting figure as a file called `MyBars.pdf` in the results directory.
### Mathematical display
Let's try mathematical annotation on a axis, and in the plot area.
First create some linear regression "data":
```
x <- seq(0, 100, by = 0.1)
y <- -4. + 0.25 * x +
rnorm(length(x), mean = 0., sd = 2.5)
# and put them in a dataframe
my_data <- data.frame(x = x, y = y)
# perform a linear regression
my_lm <- summary(lm(y ~ x, data = my_data))
# plot the data
p <- ggplot(my_data, aes(x = x, y = y,
colour = abs(my_lm$residual))
) +
geom_point() +
scale_colour_gradient(low = "black", high = "red") +
theme(legend.position = "none") +
scale_x_continuous(
expression(alpha^2 * pi / beta * sqrt(Theta)))
# add the regression line
p <- p + geom_abline(
intercept = my_lm$coefficients[1][1],
slope = my_lm$coefficients[2][1],
colour = "red")
# throw some math on the plot
p <- p + geom_text(aes(x = 60, y = 0,
label = "sqrt(alpha) * 2* pi"),
parse = TRUE, size = 6,
colour = "blue")
p
```
#### Exercise
Create a self-sufficient R Script called `plotLin.R` that combines all the above commands for annotating a plot, including saving the resulting figure as a file called `MyLinReg.pdf` in the results directory.
### ggthemes
The package `ggthemes` provides you some additional `geom`s, ` scale`s, and `theme`s for `ggplot`. These include a theme based on Tufte's *The Visual Display of Quantitative Information* (see [suggested readings](#Readings)).
Let's try this package. First install it:
```
install.packages("ggthemes")
```
Then try:
```
library(ggthemes)
p <- ggplot(MyDF, aes(x = log(Predator.mass), y = log(Prey.mass),
colour = Type.of.feeding.interaction )) +
geom_point(size=I(2), shape=I(10)) + theme_bw()
p + geom_rangeframe() + # now fine tune the geom to Tufte's range frame
theme_tufte() # and theme to Tufte's minimal ink theme
```
Go to <https://github.com/jrnold/ggthemes> for more information and a list of `geom`s, `theme`s, and `scale`s.
> **Tip:** *Both `library()` and `require()` are commands/functions to load packages. The difference is that `require()` is designed for use inside other functions, so it returns `FALSE` and gives a warning, whereas`library()` returns an error by default if the package does not exist.*
### Practicals
In this practical, you will write a script that draws and saves a pdf file of the following figure, and writes the accompanying regression results to a formatted table in csv. Note that, as the plot suggests, the analysis must be subsetted by the `Predator.lifestage` field of the dataset.

**Guidelines**:
* Write a `R` script file called `PP_Regress.R` and save it in the `Code` directory. Sourcing or running this
script should result in one pdf file containing the following figure being saved in the `Results` directory: (Hint: Use the `print()` command to write to the pdf)
* In addition, the script should calculate the regression results corresponding to the lines fitted in the figure and save it to a csv delimited table called (`PP_Regress_Results.csv`), in the `Results` directory. (Hint: you will have to initialize a new dataframe in the script to first store the calculations and then `write.csv()` or `write.table()` it.)
* All that you are being asked for here is results of an analysis of Linear regression on subsets of the data corresponding to available Feeding Type $\times$ Predator life Stage combination — not a multivariate linear model with these two as separate covariates!
* The regression results should include the following with appropriate headers (e.g., slope, intercept, etc, in each Feeding type $\times$ life stage category): regression slope, regression intercept, R$^2$, F-statistic value, and p-value of the overall regression (Hint: Review the Stats week!).
* The script should be self-sufficient and not need any external inputs — it should import the above predator-prey dataset from the appropriate directory, and save the graphic plots to the appropriate directory (Hint: use relative paths). Anybody should be able to `source` it without errors.
* You can also use the `dplyr` function instead of looping (see advanced R Chapter in [these notes](https://github.com/mhasoba/TheMulQuaBio/blob/master/silbiocomp/SilBioComp.pdf)), and the `ggplot` command instead of `qplot`.
**Extra Credit**:
Do the same as above, but the analysis this time should be separate by the dataset’s `Location`
field. Call it `PP_Regress_loc.R`. No need to generate plots for this (just the analysis results to a `.csv` file), as a combination of `Type.of.feeding.interaction`, `Predator.lifestage`, and ` Location` will be far too busy (faceting by three variables is too much to be meaningful)!
## Readings & Resources
Check out `Visualization` and `R` under [Readings](https://github.com/vectorbite/VBiTraining/tree/master/readings) on this course's repository.
* Rolandi et al. “A Brief Guide to Designing Effective Figures for the Scientific Paper”, doi:10.1002/adma.201102518
* The classic [Tufte](www.edwardtufte.com/tufte/books_vdqi); Available in the Central Library, and I have also added extracts and a related book in pdf on the master repository. BTW, check out what Tufte thinks of [PowerPoint](https://www.edwardtufte.com/tufte/powerpoint).
* Lauren et al. “Graphs, Tables, and Figures in Scientific Publications: The Good, the Bad, and How Not to Be the Latter”, doi:10.1016/j.jhsa.2011.12.041
* [Effective scientific illustrations](www.labtimes.org/labtimes/issues/lt2008/lt05/lt\_2008\_05\_52\_53.pdf)
* [AddictedToR](https://web.archive.org/web/20120310121708/http://addictedtor.free.fr/graphiques/thumbs.php)
| github_jupyter |
## Series Temporais
O Pandas possui 3 tipos de objetos para lidar com séries temporais:
* **Timestamp** - Para marcação de data, hora, minuto, etc. A estrutura de índice associada é ``DatetimeIndex``.
* **Period** - Para período de tempos. A estrutura de índice associada é ``PeriodIndex``.
* **Timedelta** - Para duração de tempo. A estrutura de índice associada é ``TimedeltaIndex``.
Os exemplos a seguir serão apenas uma breve introdução. Para ir mais a fundo, a documentação sobre séries temporais do Pandas encontra-se no link a seguir: http://pandas.pydata.org/pandas-docs/stable/timeseries.html
```
import numpy as np
import pandas as pd

# Parse a day-first date string into a pandas Timestamp
date = pd.to_datetime("16/11/1982")
date
# Name of the weekday this date falls on
date.strftime('%A')
```
**Indexação por tempo**
Quando temos um conjunto de dados indexado por data, podemos executar algumas operações de maneira mais rápida, como por exemplo filtrar intervalos
```
# Build a Series whose index is made of dates
datas = ['2014-07-04', '2014-08-04', '2015-07-04', '2015-08-04']
indice = pd.DatetimeIndex(datas)
dados = pd.Series(range(4), index=indice)
dados
# Partial-string indexing: select every entry from one year
dados['2014']
# Slice between two dates (both endpoints are included)
dados['2014-07-04':'2015-07-04']
```
**Série de datas**
Assim como podemos criar uma série de dados utilizando o ``range()`` para o Python e o ``np.arange()`` para o NumPy, podemos criar também uma sequência de datas com a função ``pd.date_range()``.
```
# Build a range by giving explicit start and end dates
pd.date_range(start='2018-03-15', end='2018-03-20')
# Build a range from a start date plus a number of periods
pd.date_range(start='2018-03-15', periods=6)
# The freq parameter switches the step from days to hours, months, years, etc.
pd.date_range(start='2018-03-15', periods=6, freq='H')
# Frequencies can be combined, e.g. a step of 2 hours and 30 minutes
pd.timedelta_range(start=0, periods=9, freq="2H30T")
```
**resample**
A partir da indexação por data, podemos manipular os dados a partir delas. Uma função interessante é a ``resample()`` que nos permite agregar os valores por média, soma, máximo, etc; e pelo período que desejarmos tais como dia, semana, mês, etc.
```
# Generate sample data: a two-week daily index
data = pd.date_range('2018-07-02', periods=14, freq='D')
dados = np.arange(len(data))  # fourteen sequential integer values
data_dados = pd.Series(dados, index=data)  # pair each value with a date
data_dados
# Downsample from days to weeks, averaging the values inside each week
data_dados.resample('W').mean()
```
| github_jupyter |
# Builder Tutorial number 5
The builder tutorials demonstrate how to build an operational GSFLOW model using `pyGSFLOW` from shapefile, DEM, and other common data sources. These tutorials focus on the `gsflow.builder` classes.
## Building stream networks and cascade information for GSFLOW
In this tutorial, we demonstrate how to generate stream network and cascade routing information for GSFLOW models. The streamflow connectivity and cascade connectivity is needed to define the streamflow network in modflow's SFR package and to define stream and cascade connectivity in PRMS.
```
import os
import shapefile
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
import numpy as np
import flopy
from gsflow.builder import GenerateFishnet
```
### The `FlowAccumulation` class (refresher)
The `FlowAccumulation` class performs many operations including generating flow direction arrays and flow accumulation arrays. This example notebook focuses is on the `flow_direction` and `flow_accumulation` methods of this class. Other methods are presented in following tutorials.
The `FlowAccumulation` class has 3 required parameters and 5 optional input parameters:
**REQUIRED Parameters**
- `data` : resampled dem data array of dimension nrow, ncol (matches modelgrid dimension)
- `xcenters` : a two dimensional array of x coordinate cell centers (dimension nrow, ncol)
- `ycenters` : a two dimensional array of y coordinate cell centers (dimension nrow, ncol)
**OPTIONAL Parameters**
- `acc_type` : flow accumulation type, currently only "d8" is supported
- `hru_type` : optional hru_type array where 0=inactive, 1=land, 2=lake, and 3=swale
- `closed_basin` : If true hru_type 2 is used in the flow direction calculations. False ignores hru_type 2. Default is False.
- `flow_dir_array` : previously calculated flow direction array. This parameter is used to restart the class without performing flow direction analysis
- `verbose` : boolean flag to print verbose output
Let's start with importing the class.
```
from gsflow.builder import FlowAccumulation
```
## Applying the methods to the Sagehen 50m example problem
In this example the methods are applied directly to the Sagehen 50m model as they are presented.
```
# workspace directories for input, shapefile, and output data
_join = os.path.join
input_ws = _join("data", "sagehen", "50m_tutorials")
shp_ws = _join("data", "geospatial")
output_ws = _join("data", "temp")

# binary modelgrid and resampled DEM paths
mg_file = _join(input_ws, "sagehen_50m_grid.bin")
dem_data = _join(input_ws, "sagehen_50m_dem.txt")

# rasters produced by the earlier flow-accumulation tutorial
flowdir_file = _join(input_ws, "sagehen_50m_flowdir.txt")
flowacc_file = _join(input_ws, "sagehen_50m_flowacc.txt")
watershed_file = _join(input_ws, "sagehen_50m_watershed.txt")

# shapefile that contains the watershed pour point
shp_file = _join(shp_ws, "model_points.shp")
```
Load the previously processed data
```
# load modelgrid, dem, flow directions, and flow accumulation
# (all produced and saved by the previous builder tutorials)
modelgrid = GenerateFishnet.load_from_file(mg_file)
# NOTE: dem_data is rebound from a file path (str) to the loaded array here
dem_data = np.genfromtxt(dem_data)
flow_directions = np.genfromtxt(flowdir_file, dtype=float)
flow_accumulation = np.genfromtxt(flowacc_file)
# integer array later passed as hru_type (0=inactive, 1=land, 2=lake, 3=swale)
watershed = np.genfromtxt(watershed_file, dtype=int)
```
### Restarting the `FlowAccumulation` class from existing data
In this tutorial series, the flow direction and flow accumulation calculations were performed in the previous builder tutorial. Instead of re-running these calculations, which can be time consuming for very large models, we can provide the saved flow direction array to the class as a way of restarting the solution.
To restart from the previous solution, the saved flow direction array is passed to the `flow_dir_array` parameter during instantiation as shown in this example. For sagehen 50m the `watershed` variable is also passed to the `hru_type` array to define active and inactive cells for stream network and cascade generation.
```
# Restart FlowAccumulation from the saved solution: passing flow_dir_array
# skips the (potentially slow) flow-direction calculation, and watershed is
# supplied as hru_type to mark active/inactive cells for stream and cascade
# generation.
fa = FlowAccumulation(
    dem_data,
    modelgrid.xcellcenters,
    modelgrid.ycellcenters,
    hru_type=watershed,
    flow_dir_array=flow_directions,
    verbose=True
)
```
Now the `FlowAccumulation` object is ready to create stream network information and cascade information.
### Stream network creation
The stream network creation routine uses information from the stored flow direction and flow accumulation arrays to define stream cells. The user provides a contributing area threshold (represented by number of cells) that define a stream cell. If the flow accumulation array value is greater than the threshold, a given cell will be classified as a stream cell.
The `make_streams` method performs routines to classify and route streams and has the following parameters:
**REQUIRED parameters**
- `fdir_array` : flow direction array
- `fa_array` : flow accumulation array
- `threshold` : accumulated area threshold, represented as number of cells, for defining streams
**OPTIONAL parameters**
- `min_stream_length` : optional minimum stream length in number of cells
- `max_reach` : optional maximum number of reach cells per stream segment
- `default_slope` : optional default value for stream slope (defaults to 0.001), value is applied if slope cannot be calculated by cell center information.
- `min_slope` : optional minimum value for stream slope (defaults to 0.0001)
- `max_slope` : optional maximum value for stream slope (defaults to 1)
This example shows how to run `make_streams` using the required parameters
```
# create our threshold
# contributing-area threshold of 810,000 m^2, converted to a cell count
# for the 50 m x 50 m grid (810000 / 2500 = 324 cells)
threshold_m2 = 810000
cellsize = 50
threshold = threshold_m2 / (cellsize ** 2)
# run make streams
# cells whose flow-accumulation value exceeds the threshold are classified
# as stream cells; segments/reaches are routed from the flow directions
strm_obj = fa.make_streams(
    flow_directions,
    flow_accumulation,
    threshold
)
```
**Data that's returned by `make_streams`**
The `make_streams` method returns a `_StreamsObj` that contains the following information as attributes:
- `iseg` : an array of stream segment numbers
- `ireach` : an array of stream reach numbers
- `outseg` : an array of output segment connectivity
- `irunbnd` : an array of land surface runoff routing to stream segments for UZF
- `sfrtop` : an array of stream top elevation
- `rchlen` : an array of reach length information
- `slope` : an array of reach slopes
- `aspect` : an array of cell aspects
- `reach_data` : a ModflowSfr2 compatible recarray of the `reach_data` parameter
- `segment_data` : a ModflowSfr2 compatible recarray of the ` segment_data` parameter
Let's inspect the `iseg` array:
```
# plot the stream segment numbers (iseg) on the model grid
fig = plt.figure(figsize=(12, 12))
ax = fig.add_subplot(1, 1, 1, aspect="equal")
pmv = flopy.plot.PlotMapView(modelgrid=modelgrid, ax=ax)
# plot the watershed boundary on top
ib = pmv.plot_ibound(ibound=watershed)
# cells with iseg == 0 are masked out of the colored array
pc = pmv.plot_array(strm_obj.iseg, masked_values=[0,])
plt.colorbar(pc, shrink=0.7)
plt.title("Sagehen 50m stream segments");
```
### Cascade routing
Cascade routing calculates the connectivity of each hydrologic response unit in PRMS and connects this information to PRMS stream cells. The cascade routing calculation method is based on the Number of Input Drainage Paths (NIDP) method and allows for many cells to flow to a single cell, but a single cell can only flow downgradient to one neighbor.
The `get_cascades` method performs the cascade routing calculations and has the following parameters:
**REQUIRED parameters**
- `streams` : Stream information object that is returned from the `make_streams()` method
**OPTIONAL parameters (required if watershed delineation has not been performed in the current script!)**
- `pour_point` : three separate input methods can be used to define the pour point as described below
- list of [(xcoordinate, ycoordinate)] location that define the pour point
- list of [(row, column)] location that define the pour point
- shapefile name, file contains a single pour point that defines the basin outlet
- `modelgrid` : modelgrid instance from `GenerateFishnet` (flopy.discretization.StructuredGrid object)
- `fmt` : format of pour point input ("xy" for xy coordinates, "rowcol" for row column, "shp" for shapefile)
This example demonstrates how to create cascades when watershed delineation has not been performed in the current script.
```
# read in our pour point from a shapefile as an xy coordinate
with shapefile.Reader(shp_file) as r:
    # the shapefile holds a single pour-point geometry (index 0)
    shape = r.shape(0)
    pour_point = shape.points
print(pour_point)
```
Now that the pour point is loaded cascade routing information can be calculated
```
# compute cascade routing; fmt="xy" tells get_cascades that pour_point
# holds x/y map coordinates (not row/column indices or a shapefile path)
cascades = fa.get_cascades(
    strm_obj,
    pour_point,
    modelgrid,
    fmt="xy"
)
```
**Data returned by the `get_cascades` method**
The `get_cascades` method returns a `_Cascades` object that contains the following information as attributes:
- `ncascade` : number of PRMS cascades
- `hru_up_id` : numpy array of hru_up_id information for the hru_up_id parameter in PRMS
- `hru_down_id` : numpy array of hru_down_id information for the hru_down_id parameter in PRMS
- `hru_pct_up` : numpy array of hru_pct_up information for the hru_pct_up parameter in PRMS
- `hru_strmseg_down_id` : numpy array of hru_strmseg_down_id for the hru_strmseg_down_id parameter in PRMS
Each of these data sets can be accessed using the following convention:
```
# get ncascades
# total number of PRMS cascade links produced by get_cascades()
print(cascades.ncascade)
```
## Saving the stream information and cascade routing information for later use
The builder methods allow the user to save the stream information and cascade routing information as binary objects. This allows the user to pick up where they left off in another session or script.
These objects can be written to binary file by using the `write()` built in method.
*In the next tutorial we will load this information and begin building model files*
```
# serialize the stream and cascade objects as binary files so later
# tutorials can reload them without re-running the calculations
strm_obj.write(os.path.join(output_ws, "sagehen_50m_streams.bin"))
cascades.write(os.path.join(output_ws, "sagehen_50m_cascades.bin"))
```
| github_jupyter |
```
%matplotlib notebook
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
```
## Rationale
Some loss functions, such as the negative evidence lower bound (NELBO) in variational inference, are generally analytically intractable and thus unavailable in closed form. As such, we might need to resort to taking stochastic estimates of the loss function. In these situations, it is very important to study and understand the robustness of the estimations we are making, particularly in terms of bias and variance. When proposing a new estimator, we may be interested in evaluating the loss at a fine-grained level - not only per batch, but perhaps even per data-point.
This notebook explores storing the recorded losses in Pandas Dataframes. The recorded losses are 3d, with dimensions corresponding to epochs, batches, and data-points. Specifically, they are of shape `(n_epochs, n_batches, batch_size)`. Instead of using the deprecated Panel functionality from Pandas, we explore the preferred MultiIndex Dataframe.
Lastly, we play around with various data serialization formats supported out-of-the-box by Pandas. This might be useful if the training is GPU-intensive, so the script runs and records the loss remotely on a supercomputer, and we must write the results to file, download them and finally analyze them locally. This is usually trivial, but it is unclear what the behaviour is for more complex MultiIndex dataframes. We restrict our attention to the CSV format, which is human-friendly but very slow and inefficient, and the HDF5, which is diametrically opposed - it's basically completely inscrutable, but is very fast and takes up less space.
### Synthetic Data
```
# Gaussian noise shaped (n_epochs, n_batches, batch_size)
a = np.random.randn(50, 600, 100)
a.shape
# a second noise source with doubled standard deviation and a bias of 1
b = np.random.randn(*a.shape) * 2. + 1.
b.shape
# manufacture a decaying loss curve: one recorded value for each of the
# n_epochs * n_batches * batch_size entries
loss = 10 / np.linspace(1, 100, a.size)
loss.shape
```
### MultiIndex Dataframe
```
# the per-axis range iterators whose cartesian product forms the index
list(map(range, a.shape))
# build a three-level MultiIndex over epochs, batches, and datapoints
index = pd.MultiIndex.from_product(
    [range(n) for n in a.shape],
    names=['epoch', 'batch', 'datapoint']
)
# record both loss estimates (true curve plus each noise source) in one frame
df = pd.DataFrame(
    {'loss1': loss + np.ravel(a),
     'loss2': loss + np.ravel(b)},
    index=index
)
df
```
### Visualization
In this contrived scenario, `loss2` is more biased and has higher variance.
```
# some basic plotting
# average over datapoints, leaving one mean loss per (epoch, batch) pair
fig, ax = plt.subplots()
df.groupby(['epoch', 'batch']).mean().plot(ax=ax)
plt.show()
```
### CSV Read/Write
```
%%time
df.to_csv('losses.csv')
!ls -lh losses.csv
%%time
df_from_csv = pd.read_csv('losses.csv', index_col=['epoch', 'batch', 'datapoint'], float_precision='high')
# does not recover exactly due to insufficient floating point precision
df_from_csv.equals(df)
# but it has recovered it up to some tiny epsilon
((df-df_from_csv)**2 < 1e-25).all()
```
### HDF5 Read/Write
HDF5 writing is orders of magnitude faster.
```
%%time
df.to_hdf('store.h5', key='losses')
```
Furthermore, the file sizes are significantly smaller.
```
!ls -lh store.h5
%%time
df_from_hdf = pd.read_hdf('store.h5', key='losses')
```
Lastly, it is far more numerically precise.
```
df.equals(df_from_hdf)
```
| github_jupyter |
# How to create and run a gap-filled FBA from PATRIC
The PATRIC (the Pathosystems Resource Integration Center) contains the best collection of well annotated genomes. They also happen to have been annotated by RAST, and so we should be able to use those integrations directly.
Here we'll walk through taking a genome from PATRIC, building a model, and running it. PATRIC also has model reconstruction built in, but when I tried it (05/24/16) it was not working.
As usual, we'll start by loading some modules that we'll need for our analysis.
```
import sys
import os
import copy
import PyFBA
import re
import inspect
# show where the PyFBA package was loaded from (a useful sanity check)
inspect.getfile(PyFBA)
```
# Find a genome and download the annotations
You need to find your genome in PATRIC and download the annotations.
Once you have identified the genome you would like to build the model for, choose _Feature Table_ from the menu bar:
<img src="img/patric_ft.png">
Next, choose _Download_ and save as a _text file (.txt)_.
<img src="img/patric_dl.png">
That will save a file called _FeatureTable.txt_ to your Downloads location. That file has the following columns:
| Genome | Genome ID | Accession | PATRIC ID | RefSeq Locus Tag | Alt Locus Tag | Feature ID |
| Annotation | Feature Type | Start | End | Length | Strand | FIGfam ID |
| PATRIC genus-specific families (PLfams) | PATRIC cross-genus families (PGfams) | Protein ID | AA Length | Gene Symbol | Product | GO
The key columns are PATRIC ID (Column 3) and Product (Column 19) [Column numbers are 0 based!]
Now that we know that, we need to convert these feature names into functional roles. The key here is to split on adjoiners, such as ' / ', ' # ', and ' @ '.
```
# map each PATRIC ID (column 3) to the functional roles parsed from the
# Product field (column 19); roles_of_function splits multi-role products
assigned_functions = {}
with open(os.path.join('workspace/Citrobacter_sedlakii_genome_features.txt'), 'r') as f:
    for l in f:
        p=l.strip().split("\t")
        assigned_functions[p[3]]=PyFBA.parse.roles_of_function(p[19])
# NOTE(review): i[0] keeps only the FIRST role of each function; if a product
# encodes several roles the remainder are dropped here -- confirm intended
roles = set([i[0] for i in [list(j) for j in assigned_functions.values()]])
print("There are {} unique roles in this genome".format(len(roles)))
```
Next, we convert those roles to reactions. We start with a dict of roles and reactions, but we only need a list of unique reactions, so we convert the keys to a set.
```
# convert the roles to candidate reactions for a Gram-negative organism;
# set verbose=True to see which roles could not be mapped
roles_to_reactions = PyFBA.filters.roles_to_reactions(roles, organism_type="Gram_Negative", verbose=False)
```
If you toggle `verbose=True`, you will see that there are a lot of roles that we skip, even though we have an EC number for them: for whatever reason, the annotation is not quite right. We can check for those too, because our model seed parsed data has EC numbers with reactions.
```
# EC-number-based reaction lookup is disabled here; keep an empty set so
# the downstream update() call still works unchanged
# ecr2r = PyFBA.filters.roles_to_ec_reactions(roles, organism_type="Gram_Negative", verbose=False)
ecr2r = set()
```
We combine `roles_to_reactions` and `ecr2r` and figure out what the unique set of reactions is for our genome.
```
# merge the EC-derived reactions into the role -> reactions mapping
roles_to_reactions.update(ecr2r)
# flatten the per-role reaction sets into one set of unique reaction ids
reactions_to_run = set()
for role in roles_to_reactions:
    reactions_to_run.update(roles_to_reactions[role])
# single format call; the original chained a second .format() onto a string
# with no placeholders, which was a confusing no-op
print("There are {} unique reactions associated with this genome".format(len(reactions_to_run)))
```
### Read all the reactions and compounds in our database
We read all the reactions, compounds, and enzymes in the [ModelSEEDDatabase](https://github.com/ModelSEED/ModelSEEDDatabase) into three data structures. Note, the first time you call this it is a bit slow as it has to parse the files, but if we've parsed them once, we don't need to do it again!
We modify the reactions specifically for Gram negative models (there are also options for Gram positive models, Mycobacterial models, general microbial models, and plant models).
```
# parse the ModelSEED database (results are cached after the first call),
# with reactions adjusted for Gram-negative organisms
compounds, reactions, enzymes = \
    PyFBA.parse.model_seed.compounds_reactions_enzymes('gramnegative')
print(f"There are {len(compounds):,} compounds, {len(reactions):,} reactions, and {len(enzymes):,} enzymes in total")
# report every compound flagged as an uptake/secretion compound
for r in reactions:
    for c in reactions[r].all_compounds():
        if c.uptake_secretion:
            print(f"US: {c}")
```
#### Update reactions to run, making sure that all reactions are in the list!
There are some reactions that come from functional roles that do not appear in the reactions list. We're working on tracking these down, but for now we just check that all reaction IDs in *reactions_to_run* are in *reactions*, too.
```
# Keep only the reaction ids that actually exist in the parsed database;
# report (on stderr) each id that had to be dropped.
missing = {r for r in reactions_to_run if r not in reactions}
for r in missing:
    sys.stderr.write("Reaction ID {} is not in our reactions list. Skipped\n".format(r))
reactions_to_run = reactions_to_run - missing
```
### Test whether these reactions grow on ArgonneLB media
We can test whether this set of reactions grows on ArgonneLB media. The media is the same one we used above, and you can download the [ArgonneLB.txt](https://raw.githubusercontent.com/linsalrob/PyFBA/master/media/ArgonneLB.txt) and text file and put it in the same directory as this iPython notebook to run it.
(Note: we don't need to convert the media components, because the media and compounds come from the same source.)
```
# load the ArgonneLB media definition (absolute path from the author's
# machine -- point this at your local copy of ArgonneLB.txt)
media = PyFBA.parse.read_media_file("/home/redwards/test_media/ArgonneLB.txt")
print("Our media has {} components".format(len(media)))
```
### Define a biomass equation
The biomass equation is the part that says whether the model will grow! This is a [metabolism.reaction.Reaction](https://github.com/linsalrob/PyFBA/blob/master/PyFBA/metabolism/reaction.py) object.
```
# build the default biomass Reaction object, then dump the reaction ids we
# plan to run to a text file for offline inspection
biomass_equation = PyFBA.metabolism.biomass_equation()
biomass_equation.equation
with open('rbad.txt', 'w') as out:
    for r in reactions_to_run:
        out.write(f"{r}\n")
```
### Run the FBA
With the reactions, compounds, reactions_to_run, media, and biomass model, we can test whether the model grows on this media.
```
# Run the FBA and report how the global reaction tally changes around it.
print(f"Before running FBA there are {len(reactions)} reactions")
status, value, growth = PyFBA.fba.run_fba(
    compounds, reactions, reactions_to_run, media, biomass_equation
)
print(f"After running FBA there are {len(reactions)} reactions")
print("Initial run has a biomass flux value of {} --> Growth: {}".format(value, growth))
print(f"There are {len(reactions_to_run)} reactions to run")
# tally the uptake/secretion ('upsr') reactions on each side
upsr = sum(1 for r in reactions_to_run if r.startswith('upsr'))
print(f"There are {upsr} uptake secretion reactions in reactions_to_run")
upsr = sum(1 for r in reactions if r.startswith('upsr'))
print(f"There are {upsr} uptake secretion reactions in reactions")
```
# Will gap filling work?
These are the reactions from the C. sedlakii SBML file, and so if we add these, we should get growth!
```
sbml_addnl = {'rxn00868', 'rxn01923', 'rxn02268', 'rxn10215', 'rxn10219', 'rxn08089', 'rxn10212', 'rxn08083', 'rxn10214', 'rxn10211', 'rxn10218', 'rxn08086', 'rxn10217', 'rxn08087', 'rxn08088', 'rxn08085', 'rxn10216', 'rxn08084', 'rxn10213', 'rxn05572', 'rxn05565', 'rxn00541', 'rxn10155', 'rxn10157', 'rxn05536', 'rxn05544', 'rxn12848', 'rxn12851', 'rxn05539', 'rxn05541', 'rxn05537', 'rxn05543', 'rxn12849', 'rxn05533', 'rxn05540', 'rxn05534', 'rxn05547', 'rxn05546', 'rxn05542', 'rxn05535', 'rxn12850', 'rxn05545', 'rxn05538', 'rxn05168', 'rxn05179', 'rxn05161', 'rxn03061', 'rxn09313', 'rxn08354', 'rxn08356', 'rxn09315', 'rxn05549', 'rxn05160', 'rxn05644', 'rxn05330', 'rxn05335', 'rxn05334', 'rxn05329', 'rxn05333', 'rxn05332', 'rxn05331', 'rxn05415', 'rxn05381', 'rxn05386', 'rxn05427', 'rxn05431', 'rxn05373', 'rxn05377', 'rxn05398', 'rxn05419', 'rxn05402', 'rxn05369', 'rxn05361', 'rxn05394', 'rxn05406', 'rxn05365', 'rxn05390', 'rxn05423', 'rxn05462', 'rxn05411', 'rxn03492', 'rxn04050', 'rxn08258', 'rxn04713', 'rxn00990', 'rxn00875', 'rxn08471', 'rxn05737', 'rxn08467', 'rxn10067', 'rxn08468', 'rxn08469', 'rxn08470', 'rxn02160', 'rxn05422', 'rxn05372', 'rxn05341', 'rxn05376', 'rxn05342', 'rxn05337', 'rxn05385', 'rxn05397', 'rxn05340', 'rxn05461', 'rxn05368', 'rxn05418', 'rxn05393', 'rxn05336', 'rxn05426', 'rxn05364', 'rxn05430', 'rxn05410', 'rxn05339', 'rxn05401', 'rxn05338', 'rxn05360', 'rxn05414', 'rxn05405', 'rxn05389', 'rxn05380', 'rxn03164', 'rxn05229', 'rxn07586', 'rxn05054', 'rxn04384', 'rxn00503', 'rxn00183', 'rxn05187', 'rxn05515', 'rxn02056', 'rxn09134', 'rxn09125', 'rxn09157', 'rxn09128', 'rxn09142', 'rxn09161', 'rxn09147', 'rxn09164', 'rxn09152', 'rxn09124', 'rxn09131', 'rxn09133', 'rxn09138', 'rxn09143', 'rxn09153', 'rxn09160', 'rxn09158', 'rxn09148', 'rxn09144', 'rxn09150', 'rxn09130', 'rxn09149', 'rxn09163', 'rxn09159', 'rxn09132', 'rxn09127', 'rxn09140', 'rxn09145', 'rxn09137', 'rxn09154', 'rxn09151', 'rxn09146', 'rxn09123', 'rxn09139', 'rxn09126', 
'rxn09141', 'rxn09135', 'rxn09136', 'rxn09155', 'rxn09162', 'rxn09129', 'rxn09156', 'rxn02949', 'rxn03241', 'rxn03245', 'rxn02911', 'rxn02167', 'rxn03250', 'rxn02934', 'rxn03240', 'rxn03247', 'rxn05316', 'rxn09687', 'rxn05198', 'rxn09688', 'rxn05199', 'rxn05200', 'rxn09685', 'rxn05318', 'rxn05205', 'rxn05621', 'rxn05656', 'rxn05585', 'rxn05172', 'rxn05594', 'rxn05552', 'rxn05599', 'rxn05512', 'rxn05620', 'rxn01277', 'rxn05518', 'rxn05145', 'rxn05460', 'rxn05396', 'rxn05363', 'rxn05359', 'rxn05367', 'rxn05417', 'rxn05421', 'rxn05392', 'rxn05413', 'rxn05349', 'rxn05388', 'rxn05429', 'rxn05371', 'rxn05400', 'rxn05425', 'rxn05409', 'rxn05404', 'rxn05375', 'rxn05379', 'rxn05384', 'rxn04139', 'rxn00640', 'rxn05507', 'rxn05506', 'rxn01893', 'rxn00671', 'rxn00501', 'rxn10340', 'rxn10334', 'rxn10337', 'rxn10338', 'rxn10341', 'rxn10335', 'rxn10342', 'rxn10339', 'rxn10336', 'rxn00160', 'rxn01285', 'rxn04143', 'rxn01847', 'rxn01103', 'rxn00227', 'rxn05175', 'rxn05163', 'rxn05958', 'rxn05683', 'rxn05484', 'rxn02933', 'rxn04750', 'rxn03244', 'rxn01451', 'rxn03239', 'rxn03246', 'rxn03242', 'rxn03249', 'rxn06777', 'rxn05500', 'rxn01637', 'rxn01122', 'rxn04602', 'rxn02416', 'rxn04601', 'rxn04928', 'rxn05596', 'rxn02775', 'rxn04046', 'rxn07589', 'rxn03491', 'rxn10117', 'rxn10119', 'rxn08333', 'rxn04673', 'rxn10308', 'rxn10311', 'rxn10315', 'rxn10309', 'rxn10307', 'rxn10312', 'rxn10310', 'rxn10314', 'rxn08040', 'rxn10313', 'rxn12147', 'rxn03931', 'rxn03916', 'rxn04674', 'rxn03397', 'rxn10094', 'rxn02286', 'rxn00555', 'rxn08709', 'rxn04052', 'rxn03512', 'rxn04045', 'rxn12224', 'rxn09188', 'rxn02359', 'rxn02008', 'rxn03643', 'rxn09177', 'rxn12512', 'rxn07587', 'rxn02507', 'rxn05202', 'rxn08291', 'rxn06865', 'rxn00303', 'rxn00222', 'rxn09978', 'rxn09979', 'rxn07588', 'rxn03919', 'rxn03435', 'rxn02187', 'rxn02186', 'rxn03436', 'rxn03068', 'rxn05317', 'rxn01219', 'rxn00364', 'rxn03514', 'rxn04048', 'rxn02792', 'rxn00350', 'rxn02791', 'rxn00171', 'rxn01000', 'rxn00675', 'rxn00175', 
'rxn00986', 'rxn03932', 'rxn08712', 'rxn04113', 'rxn04996', 'rxn08756', 'rxn08352', 'rxn06023', 'rxn03136', 'rxn00800', 'rxn05165', 'rxn05181', 'rxn08194', 'rxn09180', 'rxn00670', 'rxn00173', 'rxn03644', 'rxn08619', 'rxn09289', 'rxn00776', 'rxn01360', 'rxn08335', 'rxn08336', 'rxn12500', 'rxn02287', 'rxn02774', 'rxn09167', 'rxn08708', 'rxn05156', 'rxn05151', 'rxn01629', 'rxn12146', 'rxn01123', 'rxn05147', 'rxn05173', 'rxn08707', 'rxn00927', 'rxn01299', 'rxn01226', 'rxn01545', 'rxn02476', 'rxn02011', 'rxn05201', 'rxn01895', 'rxn04604', 'rxn00830', 'rxn01403', 'rxn00179', 'rxn03991', 'rxn03990', 'rxn03975', 'rxn03974', 'rxn00818', 'rxn03838', 'rxn00817', 'rxn02596', 'rxn05555', 'rxn00056', 'rxn00212', 'rxn06979', 'rxn11544', 'rxn03918', 'rxn05559', 'rxn08345', 'rxn00509', 'rxn00006', 'rxn00834', 'rxn05293', 'rxn00634', 'rxn08618', 'rxn06848', 'rxn09997', 'rxn05938', 'rxn04783', 'rxn05206', 'rxn00102', 'rxn05937', 'rxn01644', 'rxn02938', 'rxn00792', 'rxn08711', 'rxn03513', 'rxn04047', 'rxn01265', 'rxn03394', 'rxn00777', 'rxn01106', 'rxn07492', 'rxn03538', 'rxn01480', 'rxn00119', 'rxn01517', 'rxn01966', 'rxn01132', 'rxn05162', 'rxn02277', 'rxn08257', 'rxn01352', 'rxn03540', 'rxn00789', 'rxn00508', 'rxn04386', 'rxn10481', 'rxn05528', 'rxn06077', 'rxn01671', 'rxn02929', 'rxn03917', 'rxn03135', 'rxn00469', 'rxn00791', 'rxn00756', 'rxn03087', 'rxn01329', 'rxn01917', 'rxn01879', 'rxn02285', 'rxn08710', 'rxn07438', 'rxn02321', 'rxn00787', 'rxn01289', 'rxn00851', 'rxn05297', 'rxn00062', 'rxn04132', 'rxn04133', 'rxn05319', 'rxn05467', 'rxn05468', 'rxn02374', 'rxn03012', 'rxn05064', 'rxn02666', 'rxn04457', 'rxn04456', 'rxn01664', 'rxn02916', 'rxn05667', 'rxn10571', 'rxn05195', 'rxn05645', 'rxn05144', 'rxn02988', 'rxn01256', 'rxn12604', 'rxn05039', 'rxn10904', 'rxn05499', 'rxn01152', 'rxn05691', 'rxn12893', 'rxn11116', 'rxn00880', 'rxn05593', 'rxn05469', 'rxn00186', 'rxn05694', 'rxn05491', 'rxn05682', 'rxn01748', 'rxn00327', 'rxn01746', 'rxn09656'}
# start from the genome-derived reactions and add the SBML reaction set
r2r_plussbml = copy.copy(reactions_to_run)
print(f"Before adding sbml reactions there were {len(r2r_plussbml)}")
r2r_plussbml.update(sbml_addnl)
print(f"After adding sbml reactions there were {len(r2r_plussbml)}")
print(f"Before running FBA there are {len(reactions)} reactions")
status, value, growth = PyFBA.fba.run_fba(compounds, reactions, r2r_plussbml,
                                          media, biomass_equation, verbose=True)
print(f"After running FBA there are {len(reactions)} reactions")
print("Initial run has a biomass flux value of {} --> Growth: {}".format(value, growth))
# add every uptake/secretion ('upsr') reaction and rerun the FBA
print(f"Before adding upsr reactions there were {len(r2r_plussbml)} reactions")
for r in reactions:
    if r.startswith('upsr'):
        r2r_plussbml.update({r})
print(f"After adding upsr reactions there were {len(r2r_plussbml)} reactions")
print(f"Before running FBA there are {len(reactions)} reactions")
status, value, growth = PyFBA.fba.run_fba(compounds, reactions, r2r_plussbml,
                                          media, biomass_equation, verbose=True)
print(f"After running FBA there are {len(reactions)} reactions")
print("Initial run has a biomass flux value of {} --> Growth: {}".format(value, growth))
# seems like we need EX_cpd00034
# count the exchange ('EX') reactions on both sides for comparison
upsr = 0
for r in reactions_to_run:
    if r.startswith('EX'):
        upsr += 1
print(f"There are {upsr} EX reactions in reactions_to_run")
upsr = 0
for r in reactions:
    if r.startswith('EX'):
        upsr += 1
print(f"There are {upsr} EX reactions in reactions")
# try the 'standard' biomass equation instead of the default one
biomass_equation = PyFBA.metabolism.biomass_equation('standard')
biomass_equation.equation
print(f"Before running FBA there are {len(reactions)} reactions")
status, value, growth = PyFBA.fba.run_fba(compounds, reactions, r2r_plussbml,
                                          media, biomass_equation, verbose=True)
print(f"After running FBA there are {len(reactions)} reactions")
print("Initial run has a biomass flux value of {} --> Growth: {}".format(value, growth))
# NOTE(review): 'uptake_secretion_reactions' is not defined anywhere in this
# notebook -- this bare expression will raise a NameError; confirm intent
uptake_secretion_reactions
all_compounds = compounds
# Filter for compounds that are boundary compounds
filtered_compounds = set()
for c in all_compounds:
    if not compounds[c].uptake_secretion:
        filtered_compounds.add(c)
print(f"There are {len(all_compounds)} total compounds and {len(filtered_compounds)} filtered compounds")
# load a previously saved reaction list (without EX reactions) from rwex.txt
without_ex = set()
with open('rwex.txt', 'r') as fin:
    for l in fin:
        l = l.strip()
        without_ex.add(l)
without_ex
print(f"Before running FBA there are {len(reactions)} reactions")
status, value, growth = PyFBA.fba.run_fba(compounds, reactions, without_ex,
                                          media, biomass_equation, verbose=True)
print(f"After running FBA there are {len(reactions)} reactions")
print("Initial run has a biomass flux value of {} --> Growth: {}".format(value, growth))
len(without_ex)
len(reactions_to_run)
```
# it is the biomass model that is the problem
Lets take the biomass model from the SBML and see if this work.
```
# SBML biomass equation exported as a single ModelSEED-style string:
# "(coefficient) cpd_id: name (location: c|e)" terms joined by '+', with '>'
# separating substrates (left side) from products (right side).
sbml_equation = '(0.00778132482043096) cpd00063: Ca2 (location: c) + (0.352889948968272) cpd00156: L_Valine (location: e) + (0.00778132482043096) cpd00030: Mn2 (location: e) + (0.00778132482043096) cpd00205: K (location: c) + (0.428732289454499) cpd00035: L_Alanine (location: e) + (0.128039715997337) cpd00060: L_Methionine (location: e) + (0.15480760087483) cpd00066: L_Phenylalanine (location: c) + (0.00778132482043096) cpd00017: S_Adenosyl_L_methionine (location: c) + (0.00778132482043096) cpd00010: CoA (location: c) + (0.0609084652443221) cpd15665: Peptidoglycan_polymer_n_subunits (location: c) + (0.0841036156544863) cpd00052: CTP (location: c) + (0.00778132482043096) cpd10516: fe3 (location: e) + (0.01468498342018) cpd00357: TTP (location: c) + (0.00778132482043096) cpd00099: Cl_ (location: e) + (0.01468498342018) cpd00356: dCTP (location: c) + (0.00778132482043096) cpd10515: Fe2 (location: e) + (0.00778132482043096) cpd00254: Mg (location: c) + (0.242249358141304) cpd00322: L_Isoleucine (location: e) + (0.00778132482043096) cpd00058: Cu2 (location: e) + (0.00778132482043096) cpd00149: Co2 (location: c) + (0.201205267995816) cpd00041: L_Aspartate (location: e) + (1) cpd17043: RNA_transcription (location: c) + (0.219496655995436) cpd00023: L_Glutamate (location: e) + (0.219496655995436) cpd00053: L_Glutamine (location: e) + (0.376088782528765) cpd00107: L_Leucine (location: e) + (0.00778132482043096) cpd00220: Riboflavin (location: e) + (0.179790960093822) cpd00054: L_Serine (location: e) + (0.0472899299502361) cpd00065: L_Tryptophan (location: e) + (0.0609084652443221) cpd02229: Bactoprenyl_diphosphate (location: c) + (0.00778132482043096) cpd11493: ACP (location: c) + (1) cpd17041: Protein_biosynthesis (location: c) + (0.184698405654696) cpd00129: L_Proline (location: e) + (0.135406821203723) cpd00038: GTP (location: c) + (0.01468498342018) cpd00241: dGTP (location: c) + (1) cpd17042: DNA_replication (location: c) + (0.211466290532188) cpd00161: L_Threonine 
(location: e) + (40.1101757365074) cpd00002: ATP (location: c) + (0.00778132482043096) cpd00016: Pyridoxal_phosphate (location: c) + (0.00778132482043096) cpd00048: Sulfate (location: e) + (0.00778132482043096) cpd00003: NAD (location: c) + (0.01468498342018) cpd00115: dATP (location: c) + (0.115101904973216) cpd00069: L_Tyrosine (location: e) + (0.00778132482043096) cpd00015: FAD (location: c) + (0.201205267995816) cpd00132: L_Asparagine (location: e) + (0.00778132482043096) cpd00006: NADP (location: c) + (35.5386858537513) cpd00001: H2O (location: e) + (0.0762884719008526) cpd00084: L_Cysteine (location: c) + (0.0794113918032267) cpd00119: L_Histidine (location: e) + (0.285970236774541) cpd00039: L_Lysine (location: e) + (0.0908319049068452) cpd00062: UTP (location: c) + (0.00778132482043096) cpd00034: Zn2 (location: e) + (0.247156803702178) cpd00051: L_Arginine (location: e) + (0.510820469745475) cpd00033: Glycine (location: e) > (40) cpd00008: ADP (location: c) + (39.9922186751796) cpd00009: Phosphate (location: e) + (0.00778132482043096) cpd12370: apo_ACP (location: c) + (1) cpd11416: Biomass (location: c) + (40) cpd00067: H (location: e) + (0.0609084652443221) cpd15666: Peptidoglycan_polymer_n_1_subunits (location: c) + (0.405833094852252) cpd00012: PPi (location: e)'
# Substrate (left-hand side) compounds of the biomass reaction: maps the
# "cpdNNNNN: name (location: x)" string to its stoichiometric coefficient.
sbml_left_compounds = {'cpd00066: L_Phenylalanine (location: c)' : 0.15480760087483, 'cpd00016: Pyridoxal_phosphate (location: c)' : 0.00778132482043096, 'cpd00132: L_Asparagine (location: e)' : 0.201205267995816, 'cpd00156: L_Valine (location: e)' : 0.352889948968272, 'cpd00099: Cl_ (location: e)' : 0.00778132482043096, 'cpd00038: GTP (location: c)' : 0.135406821203723, 'cpd00003: NAD (location: c)' : 0.00778132482043096, 'cpd17041: Protein_biosynthesis (location: c)' : 1.0, 'cpd00033: Glycine (location: e)' : 0.510820469745475, 'cpd00322: L_Isoleucine (location: e)' : 0.242249358141304, 'cpd00254: Mg (location: c)' : 0.00778132482043096, 'cpd17043: RNA_transcription (location: c)' : 1.0, 'cpd00048: Sulfate (location: e)' : 0.00778132482043096, 'cpd10515: Fe2 (location: e)' : 0.00778132482043096, 'cpd02229: Bactoprenyl_diphosphate (location: c)' : 0.0609084652443221, 'cpd11493: ACP (location: c)' : 0.00778132482043096, 'cpd00161: L_Threonine (location: e)' : 0.211466290532188, 'cpd00006: NADP (location: c)' : 0.00778132482043096, 'cpd00060: L_Methionine (location: e)' : 0.128039715997337, 'cpd00119: L_Histidine (location: e)' : 0.0794113918032267, 'cpd00052: CTP (location: c)' : 0.0841036156544863, 'cpd00051: L_Arginine (location: e)' : 0.247156803702178, 'cpd15665: Peptidoglycan_polymer_n_subunits (location: c)' : 0.0609084652443221, 'cpd00017: S_Adenosyl_L_methionine (location: c)' : 0.00778132482043096, 'cpd00030: Mn2 (location: e)' : 0.00778132482043096, 'cpd10516: fe3 (location: e)' : 0.00778132482043096, 'cpd00065: L_Tryptophan (location: e)' : 0.0472899299502361, 'cpd00084: L_Cysteine (location: c)' : 0.0762884719008526, 'cpd00023: L_Glutamate (location: e)' : 0.219496655995436, 'cpd17042: DNA_replication (location: c)' : 1.0, 'cpd00356: dCTP (location: c)' : 0.01468498342018, 'cpd00035: L_Alanine (location: e)' : 0.428732289454499, 'cpd00069: L_Tyrosine (location: e)' : 0.115101904973216, 'cpd00220: Riboflavin (location: e)' : 0.00778132482043096, 
'cpd00129: L_Proline (location: e)' : 0.184698405654696, 'cpd00357: TTP (location: c)' : 0.01468498342018, 'cpd00205: K (location: c)' : 0.00778132482043096, 'cpd00149: Co2 (location: c)' : 0.00778132482043096, 'cpd00063: Ca2 (location: c)' : 0.00778132482043096, 'cpd00054: L_Serine (location: e)' : 0.179790960093822, 'cpd00001: H2O (location: e)' : 35.5386858537513, 'cpd00010: CoA (location: c)' : 0.00778132482043096, 'cpd00015: FAD (location: c)' : 0.00778132482043096, 'cpd00062: UTP (location: c)' : 0.0908319049068452, 'cpd00107: L_Leucine (location: e)' : 0.376088782528765, 'cpd00241: dGTP (location: c)' : 0.01468498342018, 'cpd00053: L_Glutamine (location: e)' : 0.219496655995436, 'cpd00039: L_Lysine (location: e)' : 0.285970236774541, 'cpd00034: Zn2 (location: e)' : 0.00778132482043096, 'cpd00058: Cu2 (location: e)' : 0.00778132482043096, 'cpd00002: ATP (location: c)' : 40.1101757365074, 'cpd00041: L_Aspartate (location: e)' : 0.201205267995816, 'cpd00115: dATP (location: c)' : 0.01468498342018}
# Product (right-hand side) compounds of the biomass reaction, same format.
sbml_right_compounds = {'cpd00067: H (location: e)' : 40.0, 'cpd00012: PPi (location: e)' : 0.405833094852252, 'cpd00008: ADP (location: c)' : 40.0, 'cpd11416: Biomass (location: c)' : 1.0, 'cpd12370: apo_ACP (location: c)' : 0.00778132482043096, 'cpd00009: Phosphate (location: e)' : 39.9922186751796, 'cpd15666: Peptidoglycan_polymer_n_1_subunits (location: c)' : 0.0609084652443221}
# Build the biomass Reaction object and attach each substrate (left-hand side)
# compound, with its cellular location, at the parsed abundance.
sbml_biomass = PyFBA.metabolism.Reaction('sbml_biomass', 'sbml_biomass')
sbml_biomass.equation = sbml_equation
# Matches e.g. "cpd00002: ATP (location: c)" -> (compound id, name, location).
# Raw string avoids the invalid-escape warning for "\(" in a plain literal.
parsecomp = re.compile(r'^(cpd\d+): (.*?) \(location: (.)\)')
for c in sbml_left_compounds:
    m = parsecomp.match(c)
    if not m:
        sys.stderr.write(f"Can't parse {c}\n")
        # BUG FIX: skip unparseable entries; previously execution fell through
        # to m.group() with m == None and raised AttributeError.
        continue
    if m.group(1) in compounds:
        # The original contained a permanently disabled ("if False and ...")
        # warning about name mismatches between the SBML and the compound
        # database; that dead branch has been removed. The active version of
        # the same check runs in the right-compounds loop below.
        newcomp = PyFBA.metabolism.CompoundWithLocation.from_compound(compounds[m.group(1)], m.group(3))
        sbml_biomass.add_left_compounds({newcomp})
        sbml_biomass.set_left_compound_abundance(newcomp, sbml_left_compounds[c])
    else:
        print(f"{m.group(1)} not found")
# Attach each product (right-hand side) compound to the biomass reaction.
for c in sbml_right_compounds:
    m = parsecomp.match(c)
    if not m:
        sys.stderr.write(f"Can't parse {c}\n")
        # BUG FIX: without this, m.group() below raises AttributeError.
        continue
    if m.group(1) in compounds:
        # Warn when the SBML name disagrees with the compound database.
        # (The redundant "True and" guard from the original was dropped.)
        if compounds[m.group(1)] != m.group(2):
            sys.stderr.write(f"We had |{compounds[m.group(1)]}| for {m.group(1)} in the SBML, but now have |{m.group(2)}|\n")
        newcomp = PyFBA.metabolism.CompoundWithLocation.from_compound(compounds[m.group(1)], m.group(3))
        sbml_biomass.add_right_compounds({newcomp})
        sbml_biomass.set_right_compound_abundance(newcomp, sbml_right_compounds[c])
    else:
        print(f"{m.group(1)} not found")
# First FBA run with the SBML-derived biomass reaction.
print(f"Before running FBA there are {len(reactions)} reactions")
status, value, growth = PyFBA.fba.run_fba(
    compounds, reactions, reactions_to_run, media, sbml_biomass, verbose=True)
print(f"After running FBA there are {len(reactions)} reactions")
print(f"Initial run has a biomass flux value of {value} --> Growth: {growth}")
```
# Add the missing reactions
```
# ModelSEED reaction IDs (rxnNNNNN) known to be missing from the draft model;
# they are merged into a copy of reactions_to_run in the next cell.
all_reactions = {'rxn00868', 'rxn01923', 'rxn02268', 'rxn10215', 'rxn10219', 'rxn08089', 'rxn10212', 'rxn08083', 'rxn10214', 'rxn10211', 'rxn10218', 'rxn08086', 'rxn10217', 'rxn08087', 'rxn08088', 'rxn08085', 'rxn10216', 'rxn08084', 'rxn10213', 'rxn05572', 'rxn05565', 'rxn00541', 'rxn10155', 'rxn10157', 'rxn05536', 'rxn05544', 'rxn12848', 'rxn12851', 'rxn05539', 'rxn05541', 'rxn05537', 'rxn05543', 'rxn12849', 'rxn05533', 'rxn05540', 'rxn05534', 'rxn05547', 'rxn05546', 'rxn05542', 'rxn05535', 'rxn12850', 'rxn05545', 'rxn05538', 'rxn05168', 'rxn05179', 'rxn05161', 'rxn09313', 'rxn08354', 'rxn08356', 'rxn09315', 'rxn05549', 'rxn05160', 'rxn05644', 'rxn05330', 'rxn05335', 'rxn05334', 'rxn05329', 'rxn05333', 'rxn05332', 'rxn05331', 'rxn05415', 'rxn05381', 'rxn05386', 'rxn05427', 'rxn05431', 'rxn05373', 'rxn05377', 'rxn05398', 'rxn05419', 'rxn05402', 'rxn05369', 'rxn05361', 'rxn05394', 'rxn05406', 'rxn05365', 'rxn05390', 'rxn05423', 'rxn05462', 'rxn05411', 'rxn03492', 'rxn04050', 'rxn08258', 'rxn04713', 'rxn00990', 'rxn00875', 'rxn08471', 'rxn05737', 'rxn08467', 'rxn10067', 'rxn08468', 'rxn08469', 'rxn08470', 'rxn01302', 'rxn01301', 'rxn05422', 'rxn05372', 'rxn05341', 'rxn05376', 'rxn05342', 'rxn05337', 'rxn05385', 'rxn05397', 'rxn05340', 'rxn05461', 'rxn05368', 'rxn05418', 'rxn05393', 'rxn05336', 'rxn05426', 'rxn05364', 'rxn05430', 'rxn05410', 'rxn05339', 'rxn05401', 'rxn05338', 'rxn05360', 'rxn05414', 'rxn05405', 'rxn05389', 'rxn05380', 'rxn03164', 'rxn05229', 'rxn07586', 'rxn05054', 'rxn04384', 'rxn00503', 'rxn00183', 'rxn05187', 'rxn05515', 'rxn02056', 'rxn09134', 'rxn09125', 'rxn09157', 'rxn09128', 'rxn09142', 'rxn09161', 'rxn09147', 'rxn09164', 'rxn09152', 'rxn09124', 'rxn09131', 'rxn09133', 'rxn09138', 'rxn09143', 'rxn09153', 'rxn09160', 'rxn09158', 'rxn09148', 'rxn09144', 'rxn09150', 'rxn09130', 'rxn09149', 'rxn09163', 'rxn09159', 'rxn09132', 'rxn09127', 'rxn09140', 'rxn09145', 'rxn09137', 'rxn09154', 'rxn09151', 'rxn09146', 'rxn09123', 'rxn09139', 'rxn09126', 
'rxn09141', 'rxn09135', 'rxn09136', 'rxn09155', 'rxn09162', 'rxn09129', 'rxn09156', 'rxn02949', 'rxn03241', 'rxn03245', 'rxn02911', 'rxn02167', 'rxn03250', 'rxn02934', 'rxn03240', 'rxn03247', 'rxn05316', 'rxn09687', 'rxn05198', 'rxn09688', 'rxn05199', 'rxn05200', 'rxn09685', 'rxn05318', 'rxn05205', 'rxn05621', 'rxn05656', 'rxn05585', 'rxn05172', 'rxn05594', 'rxn05552', 'rxn05599', 'rxn05512', 'rxn05620', 'rxn01277', 'rxn05518', 'rxn05145', 'rxn05460', 'rxn05396', 'rxn05363', 'rxn05359', 'rxn05367', 'rxn05417', 'rxn05421', 'rxn05392', 'rxn05413', 'rxn05349', 'rxn05388', 'rxn05429', 'rxn05371', 'rxn05400', 'rxn05425', 'rxn05409', 'rxn05404', 'rxn05375', 'rxn05379', 'rxn05384', 'rxn04139', 'rxn00640', 'rxn05507', 'rxn05506', 'rxn01893', 'rxn00671', 'rxn00501', 'rxn10340', 'rxn10334', 'rxn10337', 'rxn10338', 'rxn10341', 'rxn10335', 'rxn10342', 'rxn10339', 'rxn10336', 'rxn00160', 'rxn01285', 'rxn04143', 'rxn01847', 'rxn01103', 'rxn00227', 'rxn05175', 'rxn05163', 'rxn05683', 'rxn05484', 'rxn02933', 'rxn04750', 'rxn03244', 'rxn01451', 'rxn03239', 'rxn03246', 'rxn03242', 'rxn03249', 'rxn06777', 'rxn05500', 'rxn01637', 'rxn01122', 'rxn04602', 'rxn02416', 'rxn04601', 'rxn04928', 'rxn05596', 'rxn02762', 'rxn02521', 'rxn02522', 'rxn03483', 'rxn02775', 'rxn04046', 'rxn07589', 'rxn03491', 'rxn10117', 'rxn10119', 'rxn08333', 'rxn04673', 'rxn10308', 'rxn10311', 'rxn10315', 'rxn10309', 'rxn10307', 'rxn10312', 'rxn10310', 'rxn10314', 'rxn08040', 'rxn10313', 'rxn12147', 'rxn03931', 'rxn03916', 'rxn04674', 'rxn03397', 'rxn10094', 'rxn02286', 'rxn02474', 'rxn00555', 'rxn08709', 'rxn04052', 'rxn03512', 'rxn12224', 'rxn09188', 'rxn02359', 'rxn02008', 'rxn08179', 'rxn08178', 'rxn03643', 'rxn09177', 'rxn12512', 'rxn07587', 'rxn02507', 'rxn08291', 'rxn06865', 'rxn00303', 'rxn00222', 'rxn09978', 'rxn09979', 'rxn07588', 'rxn04413', 'rxn03537', 'rxn03536', 'rxn03919', 'rxn03435', 'rxn02187', 'rxn02186', 'rxn03436', 'rxn03068', 'rxn05317', 'rxn01219', 'rxn00364', 'rxn03514', 'rxn04048', 
'rxn00544', 'rxn02792', 'rxn00350', 'rxn02791', 'rxn05221', 'rxn00675', 'rxn00175', 'rxn00986', 'rxn01507', 'rxn02400', 'rxn01670', 'rxn00363', 'rxn00708', 'rxn01218', 'rxn01521', 'rxn01445', 'rxn00913', 'rxn01145', 'rxn00132', 'rxn01961', 'rxn00831', 'rxn08712', 'rxn04113', 'rxn04996', 'rxn08756', 'rxn08352', 'rxn06023', 'rxn02449', 'rxn05165', 'rxn05181', 'rxn08194', 'rxn01093', 'rxn09180', 'rxn03644', 'rxn08619', 'rxn09289', 'rxn00776', 'rxn01360', 'rxn08335', 'rxn08336', 'rxn12500', 'rxn02287', 'rxn02774', 'rxn09167', 'rxn08708', 'rxn05156', 'rxn05151', 'rxn01629', 'rxn12146', 'rxn01123', 'rxn05147', 'rxn05173', 'rxn08707', 'rxn00927', 'rxn01299', 'rxn01226', 'rxn01545', 'rxn02476', 'rxn02011', 'rxn05201', 'rxn01895', 'rxn04604', 'rxn00830', 'rxn00179', 'rxn03991', 'rxn03990', 'rxn03975', 'rxn03974', 'rxn00818', 'rxn03838', 'rxn00817', 'rxn02596', 'rxn05555', 'rxn00056', 'rxn06979', 'rxn11544', 'rxn03918', 'rxn05559', 'rxn08345', 'rxn00509', 'rxn00205', 'rxn00006', 'rxn02473', 'rxn00834', 'rxn05293', 'rxn00105', 'rxn00634', 'rxn08618', 'rxn06848', 'rxn09997', 'rxn05938', 'rxn04783', 'rxn05206', 'rxn00102', 'rxn01644', 'rxn02938', 'rxn00792', 'rxn08711', 'rxn03513', 'rxn04047', 'rxn01265', 'rxn01404', 'rxn03394', 'rxn00777', 'rxn01106', 'rxn07492', 'rxn03538', 'rxn01480', 'rxn00119', 'rxn01517', 'rxn01966', 'rxn01132', 'rxn05162', 'rxn02277', 'rxn08257', 'rxn05197', 'rxn01352', 'rxn03540', 'rxn00789', 'rxn00508', 'rxn04386', 'rxn10481', 'rxn05528', 'rxn06077', 'rxn01671', 'rxn02929', 'rxn03917', 'rxn03135', 'rxn00469', 'rxn00756', 'rxn03087', 'rxn01329', 'rxn01917', 'rxn01879', 'rxn01538', 'rxn02285', 'rxn08710', 'rxn07438', 'rxn02321', 'rxn00787', 'rxn01289', 'rxn00851', 'rxn05297', 'rxn00062', 'rxn04132', 'rxn04133', 'rxn05319', 'rxn05467', 'rxn05468', 'rxn02374', 'rxn03012', 'rxn05064', 'rxn02666', 'rxn04457', 'rxn04456', 'rxn01664', 'rxn02916', 'rxn05667', 'rxn10571', 'rxn05195', 'rxn05645', 'rxn05144', 'rxn02988', 'rxn01256', 'rxn12604', 'rxn05039', 
'rxn10904', 'rxn05499', 'rxn01152', 'rxn05691', 'rxn12893', 'rxn11116', 'rxn00880', 'rxn05593', 'rxn05469', 'rxn00186', 'rxn05694', 'rxn05491', 'rxn05682', 'rxn01748', 'rxn00327', 'rxn01746', 'rxn09656'}
print(f"Before updating there are {len(reactions_to_run)} reactions")
# Work on a copy so reactions_to_run itself is untouched at this stage.
r2ra = copy.copy(reactions_to_run)
r2ra.update(all_reactions)
print(f"After updating there are {len(r2ra)} reactions")
print(f"Before running FBA there are {len(reactions)} reactions")
# BUG FIX: the FBA was previously run with the un-updated reactions_to_run,
# so the reactions merged into r2ra above were never actually tested.
status, value, growth = PyFBA.fba.run_fba(compounds, reactions, r2ra,
                                          media, sbml_biomass, verbose=True)
print(f"After running FBA there are {len(reactions)} reactions")
print(f"Initial run has a biomass flux value of {value} --> Growth: {growth}")
# Suggest reactions from the media, keep only the transporters, and re-run FBA.
new_reactions = PyFBA.gapfill.suggest_from_media(
    compounds, reactions, reactions_to_run, media, verbose=False)
print(f"There are {len(new_reactions)} new reactions to add")
# Restrict the suggestions to transport reactions only.
transrct = {rid for rid in new_reactions if reactions[rid].is_transport}
print(f"There are {len(transrct)} new transport reactions")
reactions_to_run.update(transrct)
print(f"Before running FBA there are {len(reactions)} reactions")
status, value, growth = PyFBA.fba.run_fba(
    compounds, reactions, reactions_to_run, media, biomass_equation)
print(f"After running FBA there are {len(reactions)} reactions")
print(f"Initial run has a biomass flux value of {value} --> Growth: {growth}")
print(f"There are {len(reactions_to_run)} reactions to run")
```
## Gap-fill the model
Since the model does not grow on ArgonneLB we need to gap-fill it to ensure growth. There are several ways that we can gap-fill, and we will work through them until we get growth.
As you will see, we update the *reactions_to_run list* each time, and keep the media and everything else consistent. Then we just need to run the FBA like we have done above and see if we get growth.
We also keep a copy of the original *reactions_to_run*, and a list with all the reactions that we are adding, so once we are done we can go back and bisect the reactions that are added.
```
# Log of (method, reaction_set) tuples for every gap-fill step, plus a pristine
# copy of reactions_to_run, so the additions can be minimized (bisected) later.
added_reactions = []
original_reactions_to_run = copy.copy(reactions_to_run)
```
### Media import reactions
We need to make sure that the cell can import everything that is in the media... otherwise it won't be able to grow. Be sure to only do this step if you are certain that the cell can grow on the media you are testing.
```
# Gap-fill step 1: make sure every compound in the media can be imported.
update_type = 'media'
new_reactions = PyFBA.gapfill.suggest_from_media(
    compounds, reactions, reactions_to_run, media, verbose=True)
added_reactions.append((update_type, new_reactions))
print(f"Before adding {update_type} reactions, we had {len(reactions_to_run)} reactions.")
reactions_to_run.update(new_reactions)
print(f"After adding {update_type} reactions, we had {len(reactions_to_run)} reactions.")
# List every reaction flagged as a transporter.
for rid in reactions:
    if reactions[rid].is_transport:
        print(rid)
# Flag non-transport reactions that nonetheless consume an external compound.
for rid in reactions:
    if reactions[rid].is_transport:
        continue
    for cmpd in reactions[rid].left_compounds:
        if cmpd.location == 'e':
            print(f"Check {rid}")
status, value, growth = PyFBA.fba.run_fba(
    compounds, reactions, reactions_to_run, media, biomass_equation)
print(f"Run has a biomass flux value of {value} --> Growth: {growth}")
```
### Essential reactions
There are ~100 reactions that are in every model we have tested, and we construe these to be essential for all models, so we typically add these next!
```
# Gap-fill step 2: add the ~100 reactions found in every model we have tested.
update_type = 'essential'
new_reactions = PyFBA.gapfill.suggest_essential_reactions()
added_reactions.append((update_type, new_reactions))
print(f"Before adding {update_type} reactions, we had {len(reactions_to_run)} reactions.")
reactions_to_run.update(new_reactions)
print(f"After adding {update_type} reactions, we had {len(reactions_to_run)} reactions.")
status, value, growth = PyFBA.fba.run_fba(
    compounds, reactions, reactions_to_run, media, biomass_equation)
print(f"Run has a biomass flux value of {value} --> Growth: {growth}")
```
### Subsystems
The reactions connect us to subsystems (see [Overbeek et al. 2014](http://www.ncbi.nlm.nih.gov/pmc/articles/PMC3965101/)), and this test ensures that all the subsystems are complete. We add reactions required to complete the subsystem.
```
# Gap-fill step 3: complete subsystems that are more than half present.
update_type = 'subsystems'
new_reactions = PyFBA.gapfill.suggest_reactions_from_subsystems(
    reactions, reactions_to_run, threshold=0.5)
added_reactions.append((update_type, new_reactions))
print(f"Before adding {update_type} reactions, we had {len(reactions_to_run)} reactions.")
reactions_to_run.update(new_reactions)
print(f"After adding {update_type} reactions, we had {len(reactions_to_run)} reactions.")
status, value, growth = PyFBA.fba.run_fba(
    compounds, reactions, reactions_to_run, media, biomass_equation)
print(f"Run has a biomass flux value of {value} --> Growth: {growth}")
# Snapshot the state before the orphan-compound step so it can be revisited.
pre_orphan = copy.copy(reactions_to_run)
pre_o_added = copy.copy(added_reactions)
print(f"Pre orphan has {len(pre_orphan)} reactions")
```
### Orphan compounds
Orphan compounds are those compounds which are only associated with one reaction. They are either produced, or trying to be consumed. We need to add reaction(s) that complete the network of those compounds.
You can change the maximum number of reactions that a compound is in to be considered an orphan (try increasing it to 2 or 3).
```
# Gap-fill step 4: compounds touched by only one reaction ("orphans") leave the
# network incomplete; suggest reactions that complete their connectivity.
update_type = 'orphan compounds'
new_reactions = PyFBA.gapfill.suggest_by_compound(
    compounds, reactions, reactions_to_run, max_reactions=1)
added_reactions.append((update_type, new_reactions))
print(f"Before adding {update_type} reactions, we had {len(reactions_to_run)} reactions.")
reactions_to_run.update(new_reactions)
print(f"After adding {update_type} reactions, we had {len(reactions_to_run)} reactions.")
status, value, growth = PyFBA.fba.run_fba(
    compounds, reactions, reactions_to_run, media, biomass_equation)
print(f"Run has a biomass flux value of {value} --> Growth: {growth}")
```
## Trimming the model
Now that the model has been shown to grow on ArgonneLB media after several gap-fill iterations, we should trim down the reactions to only the required reactions necessary to observe growth.
```
# Trim the gap-filled model: walk back through the gap-fill steps (most recent
# first) and keep only the reactions each step genuinely needs for growth.
reqd_additional = set()
while added_reactions:
    base = copy.copy(original_reactions_to_run)
    base.update(reqd_additional)
    # Take the most recently recorded gap-fill set (one per method above).
    how, candidate = added_reactions.pop()
    sys.stderr.write(f"Testing reactions from {how}\n")
    # Every earlier gap-fill set stays in the model while this one is minimized.
    for _method, rxns in added_reactions:
        base.update(rxns)
    # Minimize the candidate set down to the reactions required for growth.
    new_essential = PyFBA.gapfill.minimize_additional_reactions(
        base, candidate, compounds, reactions, media, biomass_equation)
    sys.stderr.write(f"Saved {len(new_essential)} reactions from {how}\n")
    for rid in new_essential:
        sys.stderr.write(rid + "\n")
    # Record how each surviving reaction was gap-filled.
    for rid in new_essential:
        reactions[rid].is_gapfilled = True
        reactions[rid].gapfill_method = how
    reqd_additional.update(new_essential)
# Final model: the original reactions plus the minimized gap-fill additions.
all_reactions = original_reactions_to_run.union(reqd_additional)
status, value, growth = PyFBA.fba.run_fba(
    compounds, reactions, all_reactions, media, biomass_equation)
print(f"The biomass reaction has a flux of {value} --> Growth: {growth}")
```
| github_jupyter |
# Keras tutorial - the Happy House
Welcome to the first assignment of week 2. In this assignment, you will:
1. Learn to use Keras, a high-level neural networks API (programming framework), written in Python and capable of running on top of several lower-level frameworks including TensorFlow and CNTK.
2. See how you can in a couple of hours build a deep learning algorithm.
Why are we using Keras? Keras was developed to enable deep learning engineers to build and experiment with different models very quickly. Just as TensorFlow is a higher-level framework than Python, Keras is an even higher-level framework and provides additional abstractions. Being able to go from idea to result with the least possible delay is key to finding good models. However, Keras is more restrictive than the lower-level frameworks, so there are some very complex models that you can implement in TensorFlow but not (without more difficulty) in Keras. That being said, Keras will work fine for many common models.
In this exercise, you'll work on the "Happy House" problem, which we'll explain below. Let's load the required packages and solve the problem of the Happy House!
```
import numpy as np
from keras import layers
from keras.layers import Input, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D
from keras.layers import AveragePooling2D, MaxPooling2D, Dropout, GlobalMaxPooling2D, GlobalAveragePooling2D
from keras.models import Model
from keras.preprocessing import image
from keras.utils import layer_utils
from keras.utils.data_utils import get_file
from keras.applications.imagenet_utils import preprocess_input
import pydot
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot
from keras.utils import plot_model
from kt_utils import *
import keras.backend as K
K.set_image_data_format('channels_last')
import matplotlib.pyplot as plt
from matplotlib.pyplot import imshow
%matplotlib inline
```
**Note**: As you can see, we've imported a lot of functions from Keras. You can use them easily just by calling them directly in the notebook. Ex: `X = Input(...)` or `X = ZeroPadding2D(...)`.
## 1 - The Happy House
For your next vacation, you decided to spend a week with five of your friends from school. It is a very convenient house with many things to do nearby. But the most important benefit is that everybody has committed to be happy when they are in the house. So anyone wanting to enter the house must prove their current state of happiness.
<img src="images/happy-house.jpg" style="width:350px;height:270px;">
<caption><center> <u> <font color='purple'> **Figure 1** </u><font color='purple'> : **the Happy House**</center></caption>
As a deep learning expert, to make sure the "Happy" rule is strictly applied, you are going to build an algorithm that uses pictures from the front door camera to check if the person is happy or not. The door should open only if the person is happy.
You have gathered pictures of your friends and yourself, taken by the front-door camera. The dataset is labeled.
<img src="images/house-members.png" style="width:550px;height:250px;">
Run the following code to normalize the dataset and learn about its shapes.
```
X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = load_dataset()
# Scale pixel values from [0, 255] down to [0, 1].
X_train = X_train_orig / 255.
X_test = X_test_orig / 255.
# Labels arrive as row vectors; transpose so each example is a row.
Y_train = Y_train_orig.T
Y_test = Y_test_orig.T
print(f"number of training examples = {X_train.shape[0]}")
print(f"number of test examples = {X_test.shape[0]}")
print(f"X_train shape: {X_train.shape}")
print(f"Y_train shape: {Y_train.shape}")
print(f"X_test shape: {X_test.shape}")
print(f"Y_test shape: {Y_test.shape}")
```
**Details of the "Happy" dataset**:
- Images are of shape (64,64,3)
- Training: 600 pictures
- Test: 150 pictures
It is now time to solve the "Happy" Challenge.
## 2 - Building a model in Keras
Keras is very good for rapid prototyping. In just a short time you will be able to build a model that achieves outstanding results.
Here is an example of a model in Keras:
```python
def model(input_shape):
    """Example Keras Functional-API model: PAD -> CONV -> BN -> RELU -> MAXPOOL -> FC.

    The prose below this cell walks through these lines one by one, so the
    structure is left exactly as written.
    """
    # Define the input placeholder as a tensor with shape input_shape. Think of this as your input image!
    X_input = Input(input_shape)
    # Zero-Padding: pads the border of X_input with zeroes
    X = ZeroPadding2D((3, 3))(X_input)
    # CONV -> BN -> RELU Block applied to X
    X = Conv2D(32, (7, 7), strides = (1, 1), name = 'conv0')(X)
    X = BatchNormalization(axis = 3, name = 'bn0')(X)
    X = Activation('relu')(X)
    # MAXPOOL
    X = MaxPooling2D((2, 2), name='max_pool')(X)
    # FLATTEN X (means convert it to a vector) + FULLYCONNECTED
    X = Flatten()(X)
    X = Dense(1, activation='sigmoid', name='fc')(X)
    # Create model. This creates your Keras model instance, you'll use this instance to train/test the model.
    model = Model(inputs = X_input, outputs = X, name='HappyModel')
    return model
```
Note that Keras uses a different convention with variable names than we've previously used with numpy and TensorFlow. In particular, rather than creating and assigning a new variable on each step of forward propagation such as `X`, `Z1`, `A1`, `Z2`, `A2`, etc. for the computations for the different layers, in Keras code each line above just reassigns `X` to a new value using `X = ...`. In other words, during each step of forward propagation, we are just writing the latest value in the computation into the same variable `X`. The only exception was `X_input`, which we kept separate and did not overwrite, since we needed it at the end to create the Keras model instance (`model = Model(inputs = X_input, ...)` above).
**Exercise**: Implement a `HappyModel()`. This assignment is more open-ended than most. We suggest that you start by implementing a model using the architecture we suggest, and run through the rest of this assignment using that as your initial model. But after that, come back and take initiative to try out other model architectures. For example, you might take inspiration from the model above, but then vary the network architecture and hyperparameters however you wish. You can also use other functions such as `AveragePooling2D()`, `GlobalMaxPooling2D()`, `Dropout()`.
**Note**: You have to be careful with your data's shapes. Use what you've learned in the videos to make sure your convolutional, pooling and fully-connected layers are adapted to the volumes you're applying it to.
```
# GRADED FUNCTION: HappyModel
def HappyModel(input_shape):
"""
Implementation of the HappyModel.
Arguments:
input_shape -- shape of the images of the dataset
Returns:
model -- a Model() instance in Keras
"""
### START CODE HERE ###
# Feel free to use the suggested outline in the text above to get started, and run through the whole
# exercise (including the later portions of this notebook) once. The come back also try out other
# network architectures as well.
### END CODE HERE ###
return model
```
You have now built a function to describe your model. To train and test this model, there are four steps in Keras:
1. Create the model by calling the function above
2. Compile the model by calling `model.compile(optimizer = "...", loss = "...", metrics = ["accuracy"])`
3. Train the model on train data by calling `model.fit(x = ..., y = ..., epochs = ..., batch_size = ...)`
4. Test the model on test data by calling `model.evaluate(x = ..., y = ...)`
If you want to know more about `model.compile()`, `model.fit()`, `model.evaluate()` and their arguments, refer to the official [Keras documentation](https://keras.io/models/model/).
**Exercise**: Implement step 1, i.e. create the model.
```
### START CODE HERE ### (1 line)
# TODO: replace None with a call to HappyModel, passing one image's shape.
happyModel = None
### END CODE HERE ###
```
**Exercise**: Implement step 2, i.e. compile the model to configure the learning process. Choose the 3 arguments of `compile()` wisely. Hint: the Happy Challenge is a binary classification problem.
```
### START CODE HERE ### (1 line)
# TODO: call happyModel.compile(...) with an optimizer, a binary-classification
# loss, and metrics=["accuracy"].
None
### END CODE HERE ###
```
**Exercise**: Implement step 3, i.e. train the model. Choose the number of epochs and the batch size.
```
### START CODE HERE ### (1 line)
# TODO: call happyModel.fit(...) on X_train/Y_train, choosing epochs and batch_size.
None
### END CODE HERE ###
```
Note that if you run `fit()` again, the `model` will continue to train with the parameters it has already learnt instead of reinitializing them.
**Exercise**: Implement step 4, i.e. test/evaluate the model.
```
### START CODE HERE ### (1 line)
# TODO: evaluate on the test set; model.evaluate returns [loss, metrics...].
preds = None
### END CODE HERE ###
print()
# preds[0] is the loss and preds[1] the accuracy metric configured in compile().
print ("Loss = " + str(preds[0]))
print ("Test Accuracy = " + str(preds[1]))
```
If your `happyModel()` function worked, you should have observed much better than random-guessing (50%) accuracy on the train and test sets.
To give you a point of comparison, our model gets around **95% test accuracy in 40 epochs** (and 99% train accuracy) with a mini batch size of 16 and "adam" optimizer. But our model gets decent accuracy after just 2-5 epochs, so if you're comparing different models you can also train a variety of models on just a few epochs and see how they compare.
If you have not yet achieved a very good accuracy (let's say more than 80%), here're some things you can play around with to try to achieve it:
- Try using blocks of CONV->BATCHNORM->RELU such as:
```python
X = Conv2D(32, (3, 3), strides = (1, 1), name = 'conv0')(X)
X = BatchNormalization(axis = 3, name = 'bn0')(X)
X = Activation('relu')(X)
```
until your height and width dimensions are quite low and your number of channels quite large (≈32 for example). You are encoding useful information in a volume with a lot of channels. You can then flatten the volume and use a fully-connected layer.
- You can use MAXPOOL after such blocks. It will help you lower the dimension in height and width.
- Change your optimizer. We find Adam works well.
- If the model is struggling to run and you get memory issues, lower your batch_size (12 is usually a good compromise)
- Run on more epochs, until you see the train accuracy plateauing.
Even if you have achieved a good accuracy, please feel free to keep playing with your model to try to get even better results.
**Note**: If you perform hyperparameter tuning on your model, the test set actually becomes a dev set, and your model might end up overfitting to the test (dev) set. But just for the purpose of this assignment, we won't worry about that here.
## 3 - Conclusion
Congratulations, you have solved the Happy House challenge!
Now, you just need to link this model to the front-door camera of your house. We unfortunately won't go into the details of how to do that here.
<font color='blue'>
**What we would like you to remember from this assignment:**
- Keras is a tool we recommend for rapid prototyping. It allows you to quickly try out different model architectures. Are there any applications of deep learning to your daily life that you'd like to implement using Keras?
- Remember how to code a model in Keras and the four steps leading to the evaluation of your model on the test set. Create->Compile->Fit/Train->Evaluate/Test.
## 4 - Test with your own image (Optional)
Congratulations on finishing this assignment. You can now take a picture of your face and see if you could enter the Happy House. To do that:
1. Click on "File" in the upper bar of this notebook, then click "Open" to go on your Coursera Hub.
2. Add your image to this Jupyter Notebook's directory, in the "images" folder
3. Write your image's name in the following code
4. Run the code and check if the algorithm is right (0 is unhappy, 1 is happy)!
The training/test sets were quite similar; for example, all the pictures were taken against the same background (since a front door camera is always mounted in the same position). This makes the problem easier, but a model trained on this data may or may not work on your own data. But feel free to give it a try!
```
### START CODE HERE ###
img_path = 'images/my_image.jpg'
### END CODE HERE ###
# Load the image at the 64x64 size the model was trained on, and display it.
img = image.load_img(img_path, target_size=(64, 64))
imshow(img)
# Convert to an array and add a batch dimension -> shape (1, 64, 64, 3).
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
# Prediction: 0 means unhappy, 1 means happy (per the prose above this cell).
print(happyModel.predict(x))
```
## 5 - Other useful functions in Keras (Optional)
Two other basic features of Keras that you'll find useful are:
- `model.summary()`: prints the details of your layers in a table with the sizes of its inputs/outputs
- `plot_model()`: plots your graph in a nice layout. You can even save it as ".png" using SVG() if you'd like to share it on social media ;). It is saved in "File" then "Open..." in the upper bar of the notebook.
Run the following code.
```
# Print a per-layer table of the model with input/output sizes.
happyModel.summary()
# Render the computation graph to a PNG file and display it inline as SVG.
plot_model(happyModel, to_file='HappyModel.png')
SVG(model_to_dot(happyModel).create(prog='dot', format='svg'))
```
| github_jupyter |
# Tutorial #1: Train a raw sound classification model with Azure Machine Learning
In this tutorial, you train a machine learning model on remote compute resources. You'll use the training and deployment workflow for Azure Machine Learning service (preview) in a Python Jupyter notebook. You can then use the notebook as a template to train your own machine learning model with your own data. This tutorial is part one of a two-part tutorial series.
This tutorial trains a CNN model using a raw sound dataset captured by [SoundCaptureModule](https://github.com/ms-iotkithol-jp/MicCaptureIoTSoundSample) on an Azure IoT Edge device with Azure Machine Learning. The sound dataset consists of a raw-format file and a CSV-format file for each timespan. The goal is to create a multi-class classifier to identify the major or minor chord played on a guitar.
Learn how to:
- Set up your development environment
- Access and examine the data
- Train a CNN model on a remote cluster
- Review training results, find and register the best model
- You'll learn how to select a model and deploy it in part two of this tutorial later.
## Prerequisites
See prerequisites in the Azure Machine Learning documentation.
## Set up your development environment
All the setup for your development work can be accomplished in a Python notebook. Setup includes:
- Importing Python packages
- Connecting to a workspace to enable communication between your local computer and remote resources
- Creating an experiment to track all your runs
- Creating a remote compute target to use for training
- Import packages
- Import Python packages you need in this session. Also display the Azure Machine Learning SDK version.
```
# l-1
# Notebook setup: inline plotting plus the core Azure ML SDK imports.
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import azureml.core
from azureml.core import Workspace
# check core SDK version number
print("Azure ML SDK Version: ", azureml.core.VERSION)
```
## Connect to workspace
Create a workspace object from the existing workspace. Workspace.from_config() reads the file config.json and loads the details into an object named ws.
```
# load workspace configuration from the config.json file in the current folder.
# Workspace.from_config() looks for config.json in the current directory (or a
# parent) and authenticates against the workspace described there.
ws = Workspace.from_config()
print(ws.name, ws.location, ws.resource_group, sep='\t')
```
## Create experiment
Create an experiment to track the runs in your workspace. A workspace can have multiple experiments.
```
# Create (or attach to) the experiment under which all training runs from
# this notebook are tracked in the workspace.
experiment_name = 'raw-sound-major-miner-cnn'
from azureml.core import Experiment
exp = Experiment(workspace=ws, name=experiment_name)
```
Specify dataset and testset names and positions
```
# l-2
# Names and paths shared by later cells: the dataset/testset registration
# names in the workspace, the local folders the wav files are downloaded to,
# and the two configuration file names.
dataset_name = 'sound_data'
testset_name = 'sound_test'
data_folder_path = 'data-wav'
test_folder_path = 'test-wav'
soundDataDefFile = 'sounddata-wav.yml'
# NOTE(review): "Srorage" is a typo, but later cells reference this name
# as-is, so it is deliberately left unchanged.
dataSrorageConfigFile = 'data-storage-config.yml'
```
## Create or Attach existing compute resource
By using Azure Machine Learning Compute, a managed service, data scientists can train machine learning models on clusters of Azure virtual machines. Examples include VMs with GPU support. In this tutorial, you create Azure Machine Learning Compute as your training environment. The code below creates the compute clusters for you if they don't already exist in your workspace.
<b>Creation of compute takes approximately 5 minutes</b>. If the AmlCompute with that name is already in your workspace the code will skip the creation process.
```
from azureml.core.compute import AmlCompute
from azureml.core.compute import ComputeTarget
import os

# Create the AmlCompute cluster used for remote training, or reuse it if a
# cluster with the same name already exists in the workspace. All cluster
# parameters can be overridden through environment variables.

# choose a name for your cluster
compute_name = os.environ.get("AML_COMPUTE_CLUSTER_NAME", "cpu-cluster")
compute_min_nodes = os.environ.get("AML_COMPUTE_CLUSTER_MIN_NODES", 0)
compute_max_nodes = os.environ.get("AML_COMPUTE_CLUSTER_MAX_NODES", 4)

# This example uses CPU VM. For using GPU VM, set SKU to STANDARD_NC6
vm_size = os.environ.get("AML_COMPUTE_CLUSTER_SKU", "STANDARD_D3_V2")

if compute_name in ws.compute_targets:
    # A target with this name exists; only accept it if it is AmlCompute.
    compute_target = ws.compute_targets[compute_name]
    if compute_target and type(compute_target) is AmlCompute:
        print("found compute target: " + compute_name)
else:
    print("creating new compute target...")
    provisioning_config = AmlCompute.provisioning_configuration(vm_size = vm_size,
                                                                min_nodes = compute_min_nodes,
                                                                max_nodes = compute_max_nodes)
    # create the cluster
    compute_target = ComputeTarget.create(ws, compute_name, provisioning_config)

    # can poll for a minimum number of nodes and for a specific timeout.
    # if no min node count is provided it will use the scale settings for the cluster
    compute_target.wait_for_completion(show_output=True, min_node_count=None, timeout_in_minutes=20)

    # For a more detailed view of current AmlCompute status, use get_status()
    print(compute_target.get_status().serialize())
```
You now have the necessary packages and compute resources to train a model in the cloud.
## Explore data
Before you train a model, you need to understand the data that you are using to train it. In this section you learn how to:
- Download the captured raw-sound dataset (CSV files)
- Split each csv file into chunks when captured and convert it to a format that is suitable for CNN training
- Create label dataset using user specified csv file. the csv file should be provided by user apart from sound csv files.
- Display some sounds
The format of a sound CSV file name is...
<i>deviceid</i>-sound-<i>yyyyMMddHHmmssffffff</i>.csv
The part of <i>yyyyMMddHHmmss</i> means that the start time of sound capturing so that user can specify the appropriate files by specifying the start and end times.
So each record of the label-dataset CSV file has the following format:
<i>label_name</i>,<i>start timestamp</i>,<i>end timestamp</i>
Both timestamps should use the following format:
<i>yyyy</i>/<i>MM</i>/<i>dd</i> <i>HH</i>:<i>mm</i>:<i>ss</i>
In the following code label dataset definition csv files name are
- for training 'train-label-range.csv'
- for testing 'test-label-range.csv'
The following cell should be run only when dataset is ready or updated.
※ You can use sound dataset stored in [sample guitar raw sound dataset](https://egstorageiotkitvol5.blob.core.windows.net/sound-ml-data/raw-sound-data.zip). please use a dataset that uploads a set of files that be extracted from the zip file to the blob which you create by your own Azure account.
```
# l-3(onetime only)
import os

# Parse the two label-range CSV files into dicts of the form
#   {label_name: [[start_timestamp, end_timestamp], ...]}
# Each record is "label,start,end" with timestamps as "yyyy/MM/dd HH:mm:ss".
train_label_range_csv_file = 'train-label-range.csv'
test_label_range_csv_file = 'test-label-range.csv'
csv_files = [train_label_range_csv_file, test_label_range_csv_file]
parsed_specs = []
for csv in csv_files:
    specs = {}
    parsed_specs.append(specs)
    with open(csv, "rt") as f:
        ranges = f.readlines()
    for r in ranges:
        spec = r.rstrip().split(',')
        if spec[0] not in specs:
            specs[spec[0]] = []
        specs[spec[0]].append([spec[1], spec[2]])
duration_for_train = parsed_specs[0]
duration_for_test = parsed_specs[1]

# Echo the parsed specs for a quick sanity check.
# Fix: the original printed duration_for_train under BOTH file headers;
# each file's own parsed spec (ps) is printed now.
for i, ps in enumerate(parsed_specs):
    print('spec for - ' + csv_files[i])
    for d in ps.keys():
        for s in ps[d]:
            print(' {0}:{1}-{2}'.format(d, s[0], s[1]))
```
## Download guitar sound dataset from your own blob container
This code downloads only the files whose capture timespan meets the label-specified criteria.
Before you run following code, please set <i>source_azure_storage_account_connection_string</i> and <i>source_container_name</i> to match your storage account that contains the sound data files.
The <i>source_azure_storage_account_connection_string</i> is configured into [data-storage-config.yml](./data-storage-config.yml)
The files satisfied criteria will be downloaded in data and test folder.
```
# l-4 (onetime only)
#!pip install -U azure-storage-blob>=12.2.0
#!pip list
import os
import datetime
import yaml

# Read the blob connection string from the storage config file so that
# credentials are not hard-coded in the notebook.
yml = {}
with open(dataSrorageConfigFile,'r') as ymlfile:
    yml.update(yaml.safe_load(ymlfile))
#print('config - {}'.format(yml))

# Specify datasource
source_azure_storage_account_connection_string = yml['blob_connection_string']
source_container_name = 'edgesounds'
# Specify start and end time of duration for each chord
# duration_for_train = {
# 'major':[['2020/02/09 16:58:34', '2020/02/09 16:58:40'], ['2020/02/09 17:06:02','2020/02/09 17:06:10'], ['2020/02/09 16:59:42','2020/02/09 16:59:50'], ['2020/02/09 17:00:41','2020/02/09 17:00:49'],['2020/02/18 11:27:20', '2020/02/18 11:27:26'],['2020/02/18 11:28:05', '2020/02/18 11:28:11'],['2020/02/18 11:28:41', '2020/02/18 11:28:44'],['2020/02/18 11:29:18', '2020/02/18 11:29:20'],['2020/02/18 11:29:51', '2020/02/18 11:29:57'],['2020/02/18 11:30:25', '2020/02/18 11:30:29'],['2020/02/18 11:31:05', '2020/02/18 11:31:12']
# ],
# 'minor':[['2020/02/09 16:58:49', '2020/02/09 16:59:00'], ['2020/02/09 17:06:26','2020/02/09 17:06:36'], ['2020/02/09 16:59:57','2020/02/09 17:00:05'], ['2020/02/09 17:00:56','2020/02/09 17:01:03'],['2020/02/18 11:27:41', '2020/02/18 11:27:47'],['2020/02/18 11:28:22', '2020/02/18 11:28:26'],['2020/02/18 11:28:59', '2020/02/18 11:29:03'],['2020/02/18 11:29:33', '2020/02/18 11:29:38'],['2020/02/18 11:30:07', '2020/02/18 11:30:13'],['2020/02/18 11:30:45', '2020/02/18 11:30:49'],['2020/02/18 11:31:24', '2020/02/18 11:31:35']
# ]
# }
# duration_for_test = {
# 'major':[['2020/02/18 11:36:37', '2020/02/18 11:36:44'],['2020/02/18 11:37:12', '2020/02/18 11:37:18'],['2020/02/18 11:37:43', '2020/02/18 11:37:49'],['2020/02/18 11:38:18', '2020/02/18 11:38:23'],['2020/02/18 11:38:58', '2020/02/18 11:39:06'],['2020/02/18 11:39:36', '2020/02/18 11:39:40'],['2020/02/18 11:40:14', '2020/02/18 11:40:20']],
# 'minor':[['2020/02/18 11:36:56', '2020/02/18 11:37:01'],['2020/02/18 11:37:25', '2020/02/18 11:37:33'],['2020/02/18 11:38:00', '2020/02/18 11:38:08'],['2020/02/18 11:38:35', '2020/02/18 11:38:42'],['2020/02/18 11:39:15', '2020/02/18 11:39:21'],['2020/02/18 11:39:53', '2020/02/18 11:39:59'],['2020/02/18 11:40:34', '2020/02/18 11:40:41']]
# }
def pickup_target_files(target_folder_name, duration_for_target):
    """Prepare per-label download folders and numeric time-range filters.

    For every label in *duration_for_target* this creates the folder
    <cwd>/<target_folder_name>/<label>, converts each [start, end] pair of
    "yyyy/MM/dd HH:mm:ss" timestamps into integers of the form
    yyyyMMddHHmmss000000 (matching the timestamp embedded in blob names),
    and initialises an empty download list per label.

    Note: the timestamp lists inside *duration_for_target* are consumed
    (emptied) while converting, mirroring the original behaviour.

    Returns (folder_for_label, condition_for_label, target_data).
    """
    folder_for_label = {}
    condition_for_label = {}
    target_data = {}
    base_folder = os.path.join(os.getcwd(), target_folder_name)
    for label, spans in duration_for_target.items():
        label_folder = os.path.join(base_folder, label)
        os.makedirs(label_folder, exist_ok=True)
        folder_for_label[label] = label_folder
        converted = []
        for span in spans:
            numeric_span = []
            while span:
                stamp = span.pop(0)
                parsed = datetime.datetime.strptime(stamp, '%Y/%m/%d %H:%M:%S')
                numeric_span.append(int(parsed.strftime('%Y%m%d%H%M%S') + '000000'))
            converted.append(numeric_span)
        condition_for_label[label] = converted
        target_data[label] = []
    return folder_for_label, condition_for_label, target_data
data_folder_name = 'data-wav'
test_folder_name = 'test-wav'
# Build the folder layout and numeric time filters for both the training and
# the test download targets.
train_folder_for_label, train_condition_for_label, train_data = pickup_target_files(data_folder_name, duration_for_train)
test_folder_for_label, test_condition_for_label, test_data = pickup_target_files(test_folder_name, duration_for_test)

from azure.storage.blob import BlobServiceClient
import datetime
import re
import numpy as np

# Connect to our blob via the BlobService
blobServiceClient = BlobServiceClient.from_connection_string(source_azure_storage_account_connection_string)
containerClient = blobServiceClient.get_container_client(source_container_name)
def load_targeted_blobs(container, condition_for_target, folder_for_target, data_for_target ):
    """Download the wav blobs whose embedded timestamp falls inside the
    label time ranges, into the per-label local folders.

    Parameters
    ----------
    container : ContainerClient for the source blob container.
    condition_for_target : {label: [[start_num, end_num], ...]} numeric ranges.
    folder_for_target : {label: local folder path} download destinations.
    data_for_target : {label: []} -- filled with the matching blob lists.

    Returns {label: number_of_downloaded_files}.

    Fix: the function previously ignored its 'container' parameter and used
    the module-level containerClient instead; the parameter is used now.
    """
    # Fetch the sound-data definition yml that accompanies the wav blobs.
    with open(soundDataDefFile, "wb") as ymlFile:
        blobClient = container.get_blob_client(soundDataDefFile)
        stream = blobClient.download_blob()
        blobContent = stream.readall()
        ymlFile.write(blobContent)
    target_blobs = []
    loaded_num_of_files = {}
    # Collect blobs named like '...sound-<timestamp>.wav' together with the
    # numeric timestamp used for range filtering. (The dot before 'wav' is
    # now escaped so '.' is matched literally.)
    # NOTE(review): the timestamp is taken as the FIRST digit run in the blob
    # name; if a device id contains digits this picks the wrong number --
    # confirm against the actual blob naming scheme.
    for blob in container.list_blobs():
        matching = re.findall(r'sound-[0-9]+\.wav', blob.name)
        if len(matching)>0:
            target_blobs.append({'blob':blob, 'num-of-ts':int(re.findall('[0-9]+',blob.name)[0])})
    for l in condition_for_target.keys():
        for cfl in condition_for_target[l]:
            filtered = list(filter(lambda b: cfl[0] <= b['num-of-ts'] and b['num-of-ts'] <= cfl[1], target_blobs))
            data_for_target[l].append(filtered)
    # Download every matching blob into its label folder.
    for l in data_for_target.keys():
        num_of_files = 0
        print('Label - '+l)
        for dft in data_for_target[l]:
            for ltd in dft:
                blobClient = container.get_blob_client(ltd['blob'])
                stream = blobClient.download_blob()
                wavFilePath = os.path.join(folder_for_target[l], ltd['blob'].name)
                print(' Downloading - ' + ltd['blob'].name)
                with open(wavFilePath,"wb") as wavFile:
                    blobContent = stream.readall()
                    wavFile.write(blobContent)
                num_of_files = num_of_files + 1
        loaded_num_of_files[l] = num_of_files
    return loaded_num_of_files
# Download all files that fall inside the label time ranges, train then test.
result = load_targeted_blobs(containerClient, train_condition_for_label, train_folder_for_label, train_data)
for k in result.keys():
    print('Loaded file for train:{0} - {1}'.format(k, result[k]))
result = load_targeted_blobs(containerClient, test_condition_for_label, test_folder_for_label, test_data)
for k in result.keys():
    print('Loaded file for test:{0} - {1}'.format(k, result[k]))

import shutil
# Place a copy of the sound-data definition yml in each dataset folder so it
# is uploaded to the datastore together with the wav files.
for fldr in [data_folder_name, test_folder_name]:
    destFName = os.path.join(fldr,soundDataDefFile)
    shutil.copy(soundDataDefFile, destFName)
```
### Create Training and Test data
reform data for training and test
<i>local jupyternotebook execution enable l-5 (onetime only)</i>
```
%%writefile loadwavsounds.py
import os
import librosa
import wave
from scipy.io.wavfile import read
import numpy as np
import random
import yaml
def take_fft(wav_rawdata, sample_rate, fft, mels):
    """Compute a mel spectrogram for one channel of raw samples.

    Returns (melspec, float_samples): the librosa mel spectrogram and the
    samples converted to a float numpy array.
    """
    samples = np.array([float(v) for v in wav_rawdata])
    spectrogram = librosa.feature.melspectrogram(
        y=samples, sr=sample_rate,
        n_fft=fft, n_mels=mels
    )
    return spectrogram, samples
def load_wavdata(wav_file_path, fft=2048, mels=128):
    """Load a wav file and compute a mel spectrogram per channel.

    Parameters
    ----------
    wav_file_path : path of the wav file to load.
    fft : FFT window size (librosa n_fft).
    mels : number of mel bands (librosa n_mels).

    Returns a dict with:
      'fft-data'      : dB-scaled mel spectrograms, one entry per channel
      'raw-data'      : raw float sample arrays, one entry per channel
      'sampling-rate' : sampling rate reported by scipy's read()
      'fft-freq'      : the n_fft used
      'fft-mels'      : the n_mels used
    """
    # print(wav_file_path)
    # Fix: the wave handle was never closed before (resource leak); it is
    # only needed to read the channel count.
    wv = wave.open(wav_file_path,'rb')
    try:
        wvinfo = wv.getparams()
    finally:
        wv.close()
    fs, data = read(wav_file_path)
    melspecs = []
    rawdata = []
    tdata = []
    if (wvinfo.nchannels == 1):
        tdata.append(data)
    else:
        # scipy returns (frames, channels); transpose to per-channel rows.
        tdata = data.T
    if wvinfo.nchannels == len(tdata):
        for data in tdata:
            fftdata = take_fft(data, fs, fft, mels)
            melspecs.append(fftdata[0])
            rawdata.append(fftdata[1])
    result = {}
    result['fft-data'] = librosa.power_to_db(melspecs, ref=np.max)
    result['raw-data'] = rawdata
    result['sampling-rate'] = fs
    result['fft-freq'] = fft
    result['fft-mels'] = mels
    return result
def load_csvdata(file):
    """Legacy CSV-era helper: load wav samples scaled into [-1, 1).

    Fix: load_wavdata() returns a dict, so the previous m[1] lookup always
    raised KeyError; the raw per-channel samples live under 'raw-data'.
    """
    m = load_wavdata(file)
    return np.array(m['raw-data']).T.astype(np.int16) / 32768
# data_folder = 'data'
# data_folder_path = os.path.join(os.getcwd(), data_folder)
def load_data_definition(data_def_file_path):
    """Read the sound-data definition YAML file and return it as a dict."""
    loaded = {}
    with open(data_def_file_path, "r") as fh:
        loaded.update(yaml.safe_load(fh))
    return loaded
def reshape_dataset(sound_data, data_chunk):
    """Pack 1-D sound chunks into an array of shape (N, 1, 1, data_chunk)."""
    packed = np.zeros((len(sound_data), 1, 1, data_chunk))
    for i, chunk in enumerate(sound_data):
        packed[i, 0, 0] = chunk
    return packed
def get_minimum_times(tdata):
    """Return the smallest time dimension (shape[1]) across every 2-D array
    in the label -> [array, ...] mapping *tdata*."""
    widths = [unit.shape[1] for units in tdata.values() for unit in units]
    return np.array(widths).min()
def divid_data_by_minimum_shape(unit, dlen):
    """Cut a 2-D (mels, time) array into chunks of time width *dlen*.

    Non-overlapping chunks are taken left-to-right; when the width is not an
    exact multiple of *dlen*, additional chunks are taken right-to-left so
    the trailing remainder is also covered (those chunks may overlap the
    first pass). An array narrower than *dlen* yields an empty result.
    """
    width = unit.shape[1]
    if width == dlen:
        return np.array([unit])
    chunks = []
    start = 0
    while start + dlen <= width:
        chunks.append(unit[:, start:start + dlen])
        start += dlen
    if width % dlen != 0:
        end = width
        while end >= dlen:
            chunks.append(unit[:, end - dlen:end])
            end -= dlen
    return np.array(chunks)
def load_data(data_folder_path, fft=2048, mels=128, minimum=0):
    """Load every wav file under the per-label subfolders of
    *data_folder_path* and build a shuffled, CNN-ready dataset.

    Each label subfolder's wav files are converted to mel spectrograms, cut
    into equally sized chunks of the smallest time width found (or *minimum*
    if that is larger), then stacked and shuffled with one permutation.

    Returns [data, labels, label_names, sampling_rate, fft, mels] where
    data has shape (N, 1, mels, min_width).
    """
    tbdata = {}
    rate = 0
    num_of_data = 0
    # One spectrogram list per label subfolder; plain files at the top level
    # (e.g. the yml definition file) are skipped.
    for df in os.listdir(path=data_folder_path):
        if os.path.isfile(os.path.join(data_folder_path, df)):
            continue
        tbdata[df] = []
        ldata_folder_path = os.path.join(data_folder_path, df)
        for datafile in os.listdir(path=ldata_folder_path):
            datafile_path = os.path.join(ldata_folder_path, datafile)
            fftwav = load_wavdata(datafile_path, fft=fft, mels=mels)
            tbdata[df].extend(fftwav['fft-data'])
            fft = fftwav['fft-freq']
            mels = fftwav['fft-mels']
            rate = fftwav['sampling-rate']
    # All chunks share the smallest time width present in the dataset unless
    # the caller forces a larger 'minimum' (used for the test set so its
    # chunk width matches the training set).
    mintsize = get_minimum_times(tbdata)
    if mintsize < minimum:
        print('minimum time range of dataset is {} but select minimum arg value:{} '.format(mintsize, minimum))
        mintsize = minimum
    else:
        print('minimum time range of dataset is {}'.format(mintsize))
    tdata = {}
    num_of_data = 0
    for l in tbdata.keys():
        tdata[l] = []
        for u in tbdata[l]:
            if u.shape[1] < mintsize:
                print('this data is too short - {}'.format(u.shape))
                continue
            divided = divid_data_by_minimum_shape(u, mintsize)
            tdata[l].extend(divided)
            num_of_data = num_of_data + len(divided)
    # Fix: np.float was a deprecated alias removed in NumPy 1.24; the builtin
    # float keeps the identical float64 dtype.
    data_of_sounds = np.zeros((num_of_data, 1, mels, mintsize), dtype=float)
    label_of_sounds = np.zeros(num_of_data, dtype=int)
    label_matrix_of_sounds = np.zeros((num_of_data, len(tdata.keys())))
    labelname_of_sounds = np.empty(num_of_data, dtype=object)
    index = 0
    lindex = 0
    labeling_for_train = {}
    for l in tdata.keys():
        for fftdata in tdata[l]:
            data_of_sounds[index][0] = fftdata
            label_of_sounds[index] = lindex
            label_matrix_of_sounds[index, lindex] = 1.
            labelname_of_sounds[index] = l
            index = index + 1
        labeling_for_train[l] = lindex
        lindex = lindex + 1
    print('num_of_data={},size of data_of_sounds={}'.format(num_of_data, len(data_of_sounds)))
    # Shuffle all parallel arrays with the same permutation to keep them aligned.
    indexx = np.arange(num_of_data)
    random.shuffle(indexx)
    data_of_sounds = data_of_sounds[indexx]
    label_matrix_of_sounds = label_matrix_of_sounds[indexx]
    label_of_sounds = label_of_sounds[indexx]
    labelname_of_sounds = labelname_of_sounds[indexx]
    # dataShape = data_of_sounds[0].shape
    # data_of_sounds = data_of_sounds.reshape(len(data_of_sounds), dataShape[1], dataShape[2], dataShape[0])
    # train_dataset is labeled sound data set
    train_dataset = [data_of_sounds, label_of_sounds, labelname_of_sounds, rate, fft, mels]
    return train_dataset
```
### Check sound data for training
You can inspect the content of the sound data with the following code.
```
# l-6
# Load the downloaded train/test wav data locally and preview nine mel
# spectrograms with their labels.
from loadwavsounds import load_data
import librosa, librosa.display

# FFT parameters shared with the training script; the test data is forced to
# the same chunk width (minimum=...) as the training data so shapes match.
mels = 256
fft_freq = 4096
data_def_file = 'sounddata-wav.yml'
print('loading train data...')
train_dataset = load_data(data_folder_path, fft=fft_freq, mels=mels)
print('loading test data...')
test_dataset = load_data(test_folder_path, fft=fft_freq, mels=mels, minimum=train_dataset[0].shape[3])
print("Loaded {} of train data and {} of test data".format(len(train_dataset[0]),len(test_dataset[0])))

import matplotlib.pyplot as plt
#gx = np.arange(1,data_chunk,1)
sample_size = 9
#for d in train_dataset[0]:
# print('data shape - {}'.format(d.shape))
figure = plt.figure(figsize=(16,16))
# NOTE(review): this prints len(train_dataset) (the number of entries in the
# dataset list, always 6), not the sampling rate (train_dataset[3]).
print('rate={}'.format(len(train_dataset)))
for d in range(0,sample_size):
    plt.subplot(3,3,d+1)
    librosa.display.specshow(train_dataset[0][d][0],x_axis='time', y_axis='mel', fmax=train_dataset[3])
    plt.colorbar(format='%+2.0f dB')
    plt.title(train_dataset[2][d])
plt.show()
```
## Create Data store for training on remote computer
This logic execution is necessary for only when data is updated.
```
from azureml.core import Workspace, Datastore, Dataset

# Upload the local train/test wav folders to the workspace's default
# datastore and register them as named FileDatasets so the remote run can
# mount them.

# retrieve current datastore in the workspace
datastore = ws.get_default_datastore()

# Upload files to dataset on datastore
datastore.upload(src_dir=data_folder_path,
                 target_path= dataset_name,
                 overwrite=True,
                 show_progress=True)
datastore.upload(src_dir=test_folder_path,
                 target_path= testset_name,
                 overwrite=True,
                 show_progress=True)

# create a FileDataset pointing to files in 'animals' folder and its subfolders recursively
datastore_paths = [(datastore, dataset_name)]
sound_ds = Dataset.File.from_files(path=datastore_paths)
teststore_paths = [(datastore, testset_name)]
sound_ts = Dataset.File.from_files(path=teststore_paths)
print(sound_ds)

# Register dataset to current workspace
sound_dataset = sound_ds.register(workspace=ws,
                                  name=dataset_name,
                                  description='sound classification training data')
sound_testset = sound_ts.register(workspace=ws,
                                  name=testset_name,
                                  description='sound classification test data')
```
### Construct CNN model
The following cell shows a sample CNN model for sound classification and trains it locally.
The cell logic will be run on this computing environment.
```
# l-7
# https://www.tensorflow.org/tutorials/images/intro_to_cnns?hl=ja
# Local training: build and fit the same CNN used by the remote train.py,
# plot the accuracy curves, show sample predictions, then save the model and
# update the sound-data definition yml with the trained chunk width.
import os
import yaml
import tensorflow as tf
from tensorflow.keras import datasets, layers, models
print('tensorflow version - '+tf.__version__)

dataShape = train_dataset[0][0].shape
# Apparently the data just needs to be reshaped to (256, 5, 1).
# Reshape (N, 1, mels, width) into channels-last (N, mels, width, 1) for Conv2D.
print('train data shape is - {}'.format(dataShape))
train_ds = train_dataset[0].reshape(len(train_dataset[0]),dataShape[1],dataShape[2],dataShape[0])
tdataShape = test_dataset[0][0].shape
print('test data shape is - {}'.format(tdataShape))
test_ds = test_dataset[0].reshape(len(test_dataset[0]), dataShape[1],dataShape[2],dataShape[0])

# Three Conv2D+MaxPooling stages followed by two Dense layers; the final
# layer has 2 units (one per chord class).
model = models.Sequential()
model.add(layers.Conv2D(16,input_shape=(dataShape[1],dataShape[2],1),kernel_size=(8,1),padding='same', strides=(4,2), activation='relu'))
model.add(layers.MaxPooling2D(pool_size=(1,2), padding='same'))
model.add(layers.Conv2D(filters=16,input_shape=(1,128,5),kernel_size=(8, 1),padding='same', strides=(4,2), activation='relu'))
model.add(layers.MaxPooling2D(pool_size=(1,2), padding='same'))
model.add(layers.Conv2D(filters=16,input_shape=(1, 16,5),kernel_size=(8,1),padding='same', strides=(4,1), activation='relu'))
model.add(layers.MaxPooling2D(pool_size=(1,2), padding='same'))
model.add(layers.Flatten())
model.add(layers.Dense(10, activation='relu'))
model.add(layers.Dense(2, activation='sigmoid'))
# Above code part is same as training logic on remote computer
model.summary()

model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
hist = model.fit(train_ds, train_dataset[1], epochs=5, validation_data=(test_ds, test_dataset[1]))
test_loss, test_acc = model.evaluate(test_ds,test_dataset[1])
print('Test accuracy - '+str(test_acc))
for hk in hist.history.keys():
    print(hk)

import matplotlib.pyplot as plt
epoch_list = list(range(1, len(hist.history['accuracy']) + 1))
plt.plot(epoch_list, hist.history['accuracy'], epoch_list, hist.history['val_accuracy'])
plt.legend(('Training Accuracy', "Validation Accuracy"))
plt.show()

predictions = model.predict(test_ds)
# Plot a random sample of 10 test images, their predicted labels and ground truth
# NOTE(review): np.random.choice here draws VALUES from the label array
# (mostly 0/1), not sample indices, and test_dataset[1][index] is a scalar so
# np.argmax of it is always 0 -- the sample selection and the red/green
# colouring look wrong; confirm the intended behaviour before relying on
# this plot.
figure = plt.figure(figsize=(20, 8))
for i, index in enumerate(np.random.choice(test_dataset[1], size=15, replace=False)):
    ax = figure.add_subplot(3, 5, i + 1, xticks=[], yticks=[])
    # Display each image
    ax.plot(test_dataset[0][index][0][0])
    librosa.display.specshow(test_dataset[0][index][0],x_axis='time', y_axis='mel', fmax=test_dataset[3])
    predict_index = np.argmax(predictions[index])
    true_index = np.argmax(test_dataset[1][index])
    # print('{}-{}'.format(predict_index, true_index))
    # Set the title for each image
    ax.set_title("{} ({})".format(test_dataset[2][predict_index],
                                  test_dataset[2][true_index]),
                 color=("green" if predict_index == true_index else "red"))

# When you need h5 format file, change .pkl -> .h5
model_path = 'sound-classification-wav-model'
#model_path_ext = '.h5'
model_path_ext = '.pkl'
output_dir = 'outputs'
os.makedirs(output_dir, exist_ok=True)
model_pathname = os.path.join(output_dir, model_path + model_path_ext)
model.save(model_pathname)
#model_output_path = 'outputs'
#os.makedirs(model_output_path, exist_ok=True)
#save_model_name = 'sound-classification-model'
#save_model_name_ext = ['pkl','h5']
#for ext in save_model_name_ext:
# save_model_path_name = save_model_name + "." + ext
# save_model_path = os.path.join(model_output_path, save_model_path_name)
# print('Saving learned model as {}'.format(save_model_path))
# model.save(save_model_path)

# Save target data config in sounddata.yml
yml = {}
with open(soundDataDefFile,'r') as ymlfile:
    yml.update(yaml.safe_load(ymlfile))
yml['data-width'] = dataShape[2]
yml['fft-freq'] = fft_freq
yml['fft-mels'] = mels
with open(soundDataDefFile, 'w') as ymlfile:
    yaml.dump(yml, ymlfile)
print('data width is saved in {}'.format(soundDataDefFile))
```
### Export learned model
exported model will be used in IoT Edge SoundClassifierService module
```
import os
import datetime
import tarfile
def compress_files(top, archive, dest_folder):
    """Archive the directory *top* into <archive>.tar.gz under *dest_folder*.

    Entries are stored inside the archive as '<basename(top)>/<relative
    path>' using forward slashes. Returns the path of the created tar.gz.

    Fixes over the original:
    - the 'tarfilename is None' fallback was dead code (tarfilename was
      assigned just above); the timestamped default now applies when
      *archive* is None,
    - entry names are built with os.path.relpath so they are correct on
      POSIX too (the old prefix stripping only handled backslashes),
    - the tar file is closed via a context manager even on error.
    """
    topbase = os.path.basename(top)
    if archive is None:
        now = datetime.datetime.now()
        tarfilename = '{0}-{1:%Y%m%d%H%M%S}.tar.gz'.format(topbase, now)
    else:
        tarfilename = archive + '.tar.gz'
    if dest_folder is not None:
        tarfilename = os.path.join(dest_folder, tarfilename)
        os.makedirs(dest_folder, exist_ok=True)
    with tarfile.open(tarfilename, "w:gz") as tar:
        for root, dirs, files in os.walk(top):
            for filename in files:
                rel = os.path.relpath(os.path.join(root, filename), top)
                archname = os.path.join(topbase, rel).replace('\\', '/', -1)
                tar.add(os.path.join(root, filename), archname)
    return tarfilename
# Export the saved model as a tar.gz for the IoT Edge SoundClassifierService
# module (only the SavedModel-directory '.pkl' variant is archived).
export_folder_path = 'export'
os.makedirs(export_folder_path,exist_ok=True)
if model_path_ext == '.pkl':
    compressed = compress_files(model_pathname, model_path + model_path_ext, export_folder_path)
    print('Learned model is exproted as ' + compressed)
```
### Convert and create tensorflow lite model for micro edge device.
Please refer https://www.tensorflow.org/lite/guide/inference and [/SoundAIonMicroEdge/README.md](../../SoundAIonMicroEdge/README.md) to use the converted file.
```
import tensorflow as tf
import os

# Convert the saved Keras model into a quantized TensorFlow Lite model for
# micro edge devices.
# NOTE(review): model.save() to a path without a .h5 suffix creates a
# SavedModel directory, which is what from_saved_model expects -- confirm
# that outputs/<model>.pkl is indeed a directory.
model_file = 'sound-classification-wav-model.pkl' # this value should be same name of above logic
output_dir = 'outputs'
model_pathname = os.path.join(output_dir, model_file)
converter = tf.lite.TFLiteConverter.from_saved_model(model_pathname)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
quantized_model = converter.convert()
tflite_model_file = 'sound-classification-wav-model.tflite'
export_folder_path = 'export'
tflite_model_pathname = os.path.join(export_folder_path, tflite_model_file)
# Fix: use a context manager so the output file is flushed and closed
# (previously the handle from a bare open().write() was leaked).
with open(tflite_model_pathname, "wb") as tflite_file:
    tflite_file.write(quantized_model)
```
### Create Training script
```
import os

# Folder holding the training script (train.py) and its helper module; it is
# shipped to the remote compute as the estimator's source_directory.
script_folder = os.path.join(os.getcwd(), "sklearn-script")
os.makedirs(script_folder, exist_ok=True)
```
## CNN model training script
The following cell is the script of model definition, model training for running on remote computing cluster.
```
%%writefile $script_folder/train.py
import argparse
import os
import numpy as np
import glob
from sklearn.externals import joblib
from azureml.core import Run
from loadwavsounds import load_data
import tensorflow as tf
from tensorflow.keras import datasets, layers, models
parser = argparse.ArgumentParser()
parser.add_argument('--data-folder', type=str, dest='data_folder', help='data folder mounting point')
parser.add_argument('--test-folder', type=str, dest='test_folder', help='test folder mounting point')
#parser.add_argument('--regularization', type=float, dest='reg', default=0.01, help='regularization rate')
args = parser.parse_args()
data_folder = args.data_folder
test_folder = args.test_folder
print('Data folder:{0}, Test folder:{1}'.format( data_folder, test_folder))
data_def_file = 'sounddata-wav.yml'
train_dataset = load_data(data_folder,data_def_file)
test_dataset = load_data(test_folder, data_def_file)
mels = 256
fft_freq = 4096
print('loading train data...')
train_dataset = load_data(data_folder, fft=fft_freq, mels=mels)
print('loading test data...')
test_dataset = load_data(test_folder, fft=fft_freq, mels=mels, minimum=train_dataset[0].shape[3])
print("Loaded {} of train data and {} of test data".format(len(train_dataset[0]),len(test_dataset[0])))
# get hold of the current run
run = Run.get_context()
print('tensorflow version - '+tf.__version__)
dataShape = train_dataset[0][0].shape
# データを、reshape(256,5,1)すればいいらしい。
print('train data shape is - {}'.format(dataShape))
train_ds = train_dataset[0].reshape(len(train_dataset[0]),dataShape[1],dataShape[2],dataShape[0])
tdataShape = test_dataset[0][0].shape
print('test data shape is - {}'.format(tdataShape))
test_ds = test_dataset[0].reshape(len(test_dataset[0]), dataShape[1],dataShape[2],dataShape[0])
model = models.Sequential()
model.add(layers.Conv2D(16,input_shape=(dataShape[1],dataShape[2],1),kernel_size=(8,1),padding='same', strides=(4,2), activation='relu'))
model.add(layers.MaxPooling2D(pool_size=(1,2), padding='same'))
model.add(layers.Conv2D(filters=16,input_shape=(1,128,5),kernel_size=(8, 1),padding='same', strides=(4,2), activation='relu'))
model.add(layers.MaxPooling2D(pool_size=(1,2), padding='same'))
model.add(layers.Conv2D(filters=16,input_shape=(1, 16,5),kernel_size=(8,1),padding='same', strides=(4,1), activation='relu'))
model.add(layers.MaxPooling2D(pool_size=(1,2), padding='same'))
model.add(layers.Flatten())
model.add(layers.Dense(10, activation='relu'))
model.add(layers.Dense(2, activation='sigmoid'))
# Above code part is same as training logic on remote computer
model.summary()
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
hist = model.fit(train_ds, train_dataset[1], epochs=5, validation_data=(test_ds, test_dataset[1]))
test_loss, test_acc = model.evaluate(test_ds,test_dataset[1])
print('Test accuracy - '+str(test_acc))
for hk in hist.history.keys():
print(hk)
# Save learned model as .pkl and .h5
model_output_path = 'outputs'
os.makedirs(model_output_path, exist_ok=True)
save_model_name = 'sound-classification-model'
save_model_name_ext = ['pkl','h5']
for ext in save_model_name_ext:
save_model_path_name = save_model_name + "." + ext
save_model_path = os.path.join(model_output_path, save_model_path_name)
print('Saving learned model as {}'.format(save_model_path))
model.save(save_model_path)
# Save target data config in sounddata.yml
yml = {}
with open(soundDataDefFile,'r') as ymlfile:
yml.update(yaml.safe_load(ymlfile))
yml['data-width'] = dataShape[2]
yml['fft-mels'] = mels
yml['fft-freq'] = fft-freq
with open(soundDataDefFile, 'w') as ymlfile:
yaml.dump(yml, ymlfile)
print('data width is saved in {}'.format(soundDataDefFile))
import shutil
shutil.copy('loadwavsounds.py', script_folder)
```
### Training and Learning!
train CNN model with sound dataset
```
from azureml.core.environment import Environment
from azureml.core.conda_dependencies import CondaDependencies

# Runtime environment for the remote run: azureml plus the audio/ML packages
# that train.py imports.
# to install required packages
env = Environment('my_env')
cd = CondaDependencies.create(pip_packages=['azureml-sdk','scikit-learn','azureml-dataprep[pandas,fuse]>=1.1.14','tensorflow==2.1.0','matplotlib', 'librosa', 'pyyaml'])
env.python.conda_dependencies = cd

from azureml.train.sklearn import SKLearn
from azureml.core import Dataset, Run

# Get a dataset by name
sound_dataset = Dataset.get_by_name(workspace=ws, name=dataset_name)
data_mount = sound_dataset.as_named_input('sound_data').as_mount()
test_dataset = Dataset.get_by_name(workspace=ws, name=testset_name)
# Fix: the test mount was previously created from sound_dataset (the
# TRAINING data); mount the registered test dataset instead.
test_mount = test_dataset.as_named_input('sound_test').as_mount()

script_params = {
    # mount points handed to train.py as --data-folder / --test-folder
    '--data-folder': data_mount,
    '--test-folder': test_mount
}
est = SKLearn(source_directory=script_folder,
              script_params=script_params,
              compute_target=compute_target,
              environment_definition=env,
              entry_script='train.py')
```
### For debug
Following three blocks are used to check specified parameters and dataset validity.
When you don't need to debug, please go forward to ["Submit the job to the cluster"](#Submit-the-job-to-the-cluster)
```
%%writefile $script_folder/testargs.py
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--data-folder', type=str, dest='data_folder', help='data folder mounting point')
parser.add_argument('--test-folder', type=str, dest='test_folder', help='test folder mounting point')
#parser.add_argument('--regularization', type=float, dest='reg', default=0.01, help='regularization rate')
args = parser.parse_args()
data_folder = args.data_folder
test_folder = args.test_folder
print('Data folder:{0}', 'Test folder:{1}'.format( data_folder, test_folder))
import os
chdir = os.getcwd()
print('Current Dir - '+chdir)
folders = {'data':data_folder,'test':test_folder}
for fld in folders.keys():
cfld = folders[fld]
print('Check content of {0} - {1}'.format(fld, cfld))
for f in os.listdir(cfld):
print(' '+f)
cdir = os.path.join(data_folder, f)
if os.path.isdir(cdir):
for cf in os.listdir(cdir):
print(' '+cf)
from azureml.core.environment import Environment
from azureml.core.conda_dependencies import CondaDependencies

# Minimal environment for the argument/dataset debug run.
# to install required packages
env = Environment('my_env')
cd = CondaDependencies.create(pip_packages=['azureml-sdk','matplotlib','azureml-dataprep[pandas,fuse]>=1.1.14'])
env.python.conda_dependencies = cd

from azureml.train.sklearn import SKLearn
from azureml.core import Dataset, Run

# Get a dataset by name
sound_dataset = Dataset.get_by_name(workspace=ws, name=dataset_name)
data_mount = sound_dataset.as_named_input('sound_data').as_mount()
test_dataset = Dataset.get_by_name(workspace=ws, name=testset_name)
# Fix: mount the test dataset, not the training dataset, as 'sound_test'.
test_mount = test_dataset.as_named_input('sound_test').as_mount()
print(data_mount)
print(test_mount)
script_params = {
    # mount points handed to testargs.py
    '--data-folder': data_mount,
    '--test-folder': test_mount
}
est = SKLearn(source_directory=script_folder,
              script_params=script_params,
              compute_target=compute_target,
              environment_definition=env,
              entry_script='testargs.py')
```
### Local Debug
```
# l-8
# Local debug: load one test wav file, slice it to the trained chunk width
# and run the locally saved model on it.
import numpy as np
from loadwavsounds import load_wavdata, divid_data_by_minimum_shape, load_data_definition
import os
import random

#data_def_file = 'sounddata-wav.yml'
#datafile = 'cherry-sound-20200218113643319806.csv'
data_def = load_data_definition(data_def_file)
#csv_dataset = parse_file(datafile,np.array([]),data_chunk)
#sound_dataset = np.zeros((csv_dataset[0], 1, 1, data_chunk))
#index = 0
#for d in csv_dataset[1]:
# sound_dataset[index][0][0] = d
# index = index + 1

# Collect all wav files under the test folder's label subfolders and pick
# one at random.
test_data_files = []
for d in os.listdir(test_folder_path):
    dname = os.path.join(test_folder_path, d)
    if os.path.isdir(dname):
        for f in os.listdir(dname):
            if f.rfind('.wav') >= 0:
                test_data_files.append(os.path.join(dname,f))
random.shuffle(test_data_files)

# Use the same FFT parameters and chunk width the model was trained with.
mels = data_def['fft-mels']
dataWidth = data_def['data-width']
fft=data_def['fft-freq']
print('load data by fft={},mels={},data-width={}'.format(fft, mels, dataWidth))
wavdata = load_wavdata(test_data_files[0], fft=fft, mels=mels)
wav_dataset = divid_data_by_minimum_shape(wavdata['fft-data'][0], dataWidth)
print('wav_dataset.shape - {}, len={}'.format(wav_dataset.shape, len(wav_dataset)))
# Fix: np.float was a deprecated alias removed in NumPy 1.24; the builtin
# float keeps the identical float64 dtype.
data_of_sounds = np.zeros((len(wav_dataset),1, mels, dataWidth), dtype=float)
for index, wd in enumerate(wav_dataset):
    data_of_sounds[index][0] = wd
dataShape = data_of_sounds[0].shape
data_of_sounds = data_of_sounds.reshape(len(data_of_sounds),dataShape[1],dataShape[2],dataShape[0])
print('test data shape - {}'.format(data_of_sounds.shape))

import tensorflow as tf
from tensorflow.keras import datasets, layers, models
print('tensorflow version - '+tf.__version__)
#model_file_path ='outputs/sound-classification-model.h5'
model_folder_path = 'outputs'
# NOTE(review): the l-7 cell saved 'sound-classification-wav-model.pkl';
# this name matches only the commented-out alternative save path -- confirm
# which file actually exists under outputs/.
model_file_name = 'sound-classification-model.pkl'
model_file_path = os.path.join(model_folder_path, model_file_name)
# model name should be used other style
model = models.load_model(model_file_path)
predicted = model.predict(data_of_sounds)
result = predicted.tolist()
# Print the two class scores for every chunk of the file.
for r in result:
    print('{0}<->{1}'.format(r[0],r[1]))
```
### Submit the job to the cluster
Run the experiment by submitting the estimator object. And you can navigate to Azure portal to monitor the run.
```
# Submit the configured estimator to the experiment; returns a Run handle.
run = exp.submit(config=est)
# Displaying the Run object renders a summary (with portal link) in the notebook.
run
```
### Since the call is asynchronous, it returns a **Preparing** or **Running** state as soon as the job is started.
## Monitor a remote run
In total, the first run takes **approximately 10 minutes**. But for subsequent runs, as long as the dependencies (`conda_packages` parameter in the above estimator constructor) don't change, the same image is reused and hence the container start up time is much faster.
Here is what's happening while you wait:
- **Image creation**: A Docker image is created matching the Python environment specified by the estimator. The image is built and stored in the ACR (Azure Container Registry) associated with your workspace. Image creation and uploading takes **about 5 minutes**.
This stage happens once for each Python environment since the container is cached for subsequent runs. During image creation, logs are streamed to the run history. You can monitor the image creation progress using these logs.
- **Scaling**: If the remote cluster requires more nodes to execute the run than currently available, additional nodes are added automatically. Scaling typically takes **about 5 minutes.**
- **Running**: In this stage, the necessary scripts and files are sent to the compute target, then data stores are mounted/copied, then the entry_script is run. While the job is running, stdout and the files in the ./logs directory are streamed to the run history. You can monitor the run's progress using these logs.
- **Post-Processing**: The ./outputs directory of the run is copied over to the run history in your workspace so you can access these results.
You can check the progress of a running job in multiple ways. This tutorial uses a Jupyter widget as well as a `wait_for_completion` method.
### Jupyter widget
Watch the progress of the run with a Jupyter widget. Like the run submission, the widget is asynchronous and provides live updates every 10-15 seconds until the job completes.
```
from azureml.widgets import RunDetails
# Live-updating Jupyter widget for monitoring the submitted run.
RunDetails(run).show()
# Block until the remote run finishes, streaming logs to the notebook.
run.wait_for_completion(show_output=True) # specify True for a verbose log
```
### Evaluate the model output
```
# Metrics logged by the training script and files captured from ./outputs.
print(run.get_metrics())
print(run.get_file_names())
```
## Are you happy with the model??? Register it in Azure Machine Learning to manage
```
# register model
# NOTE(review): 'sound_clasification_model' is misspelled ("clasification");
# kept as-is because the deploy notebook may reference this exact name — confirm.
model = run.register_model(model_name='sound_clasification_model', model_path='outputs/')
print(model.name, model.id, model.version, sep = '\t')
```
## Next step
In this Azure Machine Learning tutorial, you used Python to:
> * Set up your development environment
> * Access and examine the data
> * Train multiple models on a remote cluster using the tensorflow keras machine learning library
> * Review training details and register the best model
You are ready to deploy this registered model using the instructions in the next part of the tutorial series:
> [Tutorial 2 - Deploy models](ai-sound-major-miner-classification-part2-deploy.ipynb)
| github_jupyter |
```
from PIL import Image
from numpy import *
from pylab import *
# The data is still available from web archive
# https://web.archive.org/web/20161203110733/http://research.microsoft.com/en-us/um/cambridge/projects/visionimagevideoediting/segmentation/grabcut.htm
from scipy.misc import imresize
import graphcut
graphcut = reload(graphcut)
# Load the test image and downscale it so graph construction stays tractable
# (the graph built below has one node per pixel).
im = array(Image.open('229036.jpg'))
scale = 0.1
im = imresize(im, scale, interp='bilinear')
figure()
imshow(im)
axis('off')
show()
# Python 2 print statement — this cell targets Python 2 (builtin reload() above).
print im.shape
# Rough user annotations as [[x0, y0], [x1, y1]] rectangles:
# fg marks known foreground pixels, bg known background pixels.
fg = [[16, 24], [32, 32]]
bg = [[0, 0], [48, 8]]
def create_msr_labels(size, fg, bg):
    """ Create labels matrix for training from user annotations.

    size : (rows, cols) shape of the label matrix.
    fg, bg : [[x0, y0], [x1, y1]] rectangles (half-open bounds, like
        slicing) marking foreground (+1) and background (-1) pixels;
        everything else stays 0. fg is applied last, so it wins on overlap
        (same as the original loop order). """
    labels = zeros(size)
    # Vectorized rectangle fill replaces the original per-pixel double loops.
    # Note the [y, x] index order: rows are y, columns are x.
    labels[bg[0][1]:bg[1][1], bg[0][0]:bg[1][0]] = -1
    labels[fg[0][1]:fg[1][1], fg[0][0]:fg[1][0]] = 1
    return labels
# Build the {+1, -1, 0} label map and show it next to the input image.
labels = create_msr_labels(im.shape[:2], fg, bg)
figure()
gray()
subplot(1, 3, 1)
imshow(im)
axis('off')
subplot(1, 3, 2)
imshow(labels)
axis('off')
show()
from pygraph.classes.digraph import digraph
from pygraph.algorithms.minmax import maximum_flow
import bayes
def build_bayes_graph(im, labels, sigma=1e-2, kappa=2, weight=1):
    """ Build a graph from 4-neighborhood of pixels.
        Foreground and background is determined from
        labels (1 for foreground, -1 for background, 0 otherwise)
        and is modeled with naive Bayes classifiers.

        im     : (m, n, 3) RGB image.
        labels : (m, n) array of {1, -1, 0} user annotations.
        sigma  : bandwidth of the pixel-similarity kernel on neighbor edges.
        kappa  : scale factor for neighbor edge weights.
        weight : edge weight tying hard-labeled pixels to source/sink.

        Returns a pygraph digraph with m*n pixel nodes plus a source node
        (index m*n) and a sink node (index m*n+1), ready for max-flow. """
    m, n = im.shape[:2]
    # RGB vector version (one pixel per row)
    vim = im.astype('float')
    vim = vim.reshape((-1, 3))
    # RGB for foreground and background
    foreground = im[labels == 1].reshape((-1, 3))
    background = im[labels == -1].reshape((-1, 3))
    train_data = [foreground, background]
    # train naive Bayes classifier
    bc = bayes.BayesClassifier()
    bc.train(train_data, labels)
    # get probabilities for all pixels
    # NOTE(review): 'bc_lables' (sic) is never used; only the per-class
    # probabilities are needed below.
    bc_lables, prob = bc.classify(vim)
    prob_fg = prob[0]
    prob_bg = prob[1]
    # create graph with m*n+2 nodes
    gr = digraph()
    gr.add_nodes(range(m*n+2))
    source = m*n # second to last is source
    sink = m*n+1 # last node is sink
    # normalize
    # NOTE(review): 'pos' is never used below — looks like leftover debug code.
    pos = m*n/2-100
    for i in range(vim.shape[0]):
        vim[i] = vim[i] / linalg.norm(vim[i])
    # go through all nodes and add edges
    lb = labels.copy()
    lb = lb.flatten()
    for i in range(m*n):
        # add edge from source
        if lb[i]==1:
            gr.add_edge((source, i), wt=weight)
        elif lb[i]==-1:
            gr.add_edge((i, sink), wt=weight)
        elif (prob_fg[i]>prob_bg[i]):
            gr.add_edge((source, i), wt=(prob_fg[i]/(prob_fg[i] + prob_bg[i])))
        else:
            # add edge to sink
            gr.add_edge((i, sink), wt=(prob_bg[i]/(prob_fg[i] + prob_bg[i])))
        # add edges to neighbors (4-connectivity on the row-major pixel grid)
        if i % n != 0: # left exists
            edge_wt = kappa*exp(-1.0*sum((vim[i] - vim[i-1])**2)/sigma)
            gr.add_edge((i, i-1), wt=edge_wt)
        if (i+1) % n != 0: # right exists
            edge_wt = kappa*exp(-1.0*sum((vim[i] - vim[i+1])**2)/sigma)
            gr.add_edge((i, i+1), wt=edge_wt)
        if i//n != 0: # up exists
            edge_wt = kappa*exp(-1.0*sum((vim[i] - vim[i-n])**2)/sigma)
            gr.add_edge((i, i-n), wt=edge_wt)
        if i//n != m-1: # down exists
            edge_wt = kappa*exp(-1.0*sum((vim[i] - vim[i+n])**2)/sigma)
            gr.add_edge((i, i+n), wt=edge_wt)
    return gr
# Python 2 print statement (this cell targets Python 2).
print labels
# Build the graph with strong ties (weight=100) for user-labeled pixels,
# cut it, and visualize the resulting segmentation.
g = build_bayes_graph(im, labels, sigma=1e-2, kappa=2, weight=100)
res = graphcut.cut_graph(g, im.shape[:2])
figure()
graphcut.show_labeling(im, res)
show()
```
| github_jupyter |
<img src="../assets/UpLabel.png" width="400" align="left"><br><br><br><br>
# Introduction to UpLabel
UpLabel is a lightweight, Python-based and modular tool which serves to support your machine learning tasks by making the data labeling process more efficient and structured. UpLabel is presented and tested within the MLADS-Session *"Distributed and Automated Data Labeling using Active Learning: Insights from the Field"*.
## Session Description
High-quality training data is essential for succeeding at any supervised machine learning task. There are numerous open source tools that allow for a structured approach to labeling. Instead of randomly choosing labeling data, we make use of machine learning itself for continuously improving the training data quality. Based on the expertise of the labelers as well as the complexity of the data, labeling tasks can be distributed in an intelligent way. Based on a real-world example from one of our customers, we will show how to apply the latest technology to optimize the task of labeling data for NLP problems.
## Software Component and User Flow
The following images serve to illustrate the user labeler flow and the software component flow.
### Software Component Flow
---
<p><img src="../assets/MLADS_Components.png" width="60%" align="center"></p>
### User Flow
---
<p><img src="../assets/MLADS_UserFlow.png" width="60%" align="center"></p>
### Prepare Workspace
Required libraries are loaded below, for the most part they get imported by the main-script.
```
import matplotlib as plt
import sys
sys.path.append('../code')
import main
%matplotlib inline
```
## Task Setup
There are two possible ways to go for this session:
1. You can use our example data (German news data)
2. Or your own data, if you brought some.
#### If you want to use our example:
- Use 'lab' as your project reference below (see step *"Run Iteration #0"*). The example case will be loaded.
- Set the `dir` parameter to the folder, where the lab data is located, e.g. `C:/uplabel/data/lab/`
#### If you brought your own data:
- Either create a task config (either copy the code below and save it as `params.yml`) and save it in a subfolder of `task`
- The task can be named as you like
- Or simply rename the folder "sample" to your desired project name and use the sample file in it
- Set the `dir` parameter to the folder, where your data is going to be located
```yaml
data:
dir: ~/[YOUR DIRECTORY GOES HERE]/[projectname]
source: input.txt
cols: ['text','label']
extras: []
target_column: label
text_column: text
parameters:
task: cat
language: de
labelers: 3
min_split_size: 0
max_split_size : 300
quality: 1
estimate_clusters: True
quality_size: 0.1
overlap_size: 0.1
```
```
# Project/task reference: must match a folder name under \tasks and \data.
# NOTE(review): the walkthrough above suggests 'lab' for the bundled sample
# data — 'news_en' assumes you created that project yourself; confirm.
project_name = 'news_en'
```
## Run Iteration #0
- This is the start of the initial iteration of the UpLabel process.
- Feel free to create your own project, by adding a parameter file to `\tasks` and your data to `\data\[project name]`. Don't forget to update the `'project_name'` variable above, with the name of your task.
Note: you can add `'debug_iter_id=X'` to repeat an iteration, where X is your iteration number.
```
# Iteration #0: loads the task config, splits the data, and writes
# per-labeler Excel files to the configured data directory.
main.Main(project_name)
```
## Fun part: label your data
- After the first iteration, you can start labeling your data
- You can find the data splits in the folder you have set to the `dir`-parameter
- File names are named this way:
- `[original file name]-it_[iteration number]-split_[split number].xlsx`, like `data-it_1-split_1.xlsx`
- Open your data and label it!
## Run Iteration #1
```
# Iteration #1: re-run after labeling the split files from iteration #0.
main.Main(project_name, debug_iter_id=1)
```
## Label some more!
## Run Iteration #2
```
# Iteration #2: re-run after the second labeling pass.
main.Main(project_name, debug_iter_id=2)
```
| github_jupyter |
```
# Refer to Random Forest
```
## Bagging
In parallel methods we fit the different considered learners independently from each other and, so, it is possible to train them concurrently. The most famous such approach is “bagging” (standing for “bootstrap aggregating”) that aims at producing an ensemble model that is more robust than the individual models composing it.
### Bootstrapping
Let’s begin by defining bootstrapping. This statistical technique consists in generating samples of size B (called bootstrap samples) from an initial dataset of size N by randomly drawing with replacement B observations.
<p>
<img src ="assets/6.png" height = 800px width = 800px/>
</p>
Under some assumptions, these samples have pretty good statistical properties: in first approximation, they can be seen as being drawn both directly from the true underlying (and often unknown) data distribution and independently from each others. So, they can be considered as representative and independent samples of the true data distribution (almost i.i.d. samples). The hypothesis that have to be verified to make this approximation valid are twofold. First, the size N of the initial dataset should be large enough to capture most of the complexity of the underlying distribution so that sampling from the dataset is a good approximation of sampling from the real distribution (representativity). Second, the size N of the dataset should be large enough compared to the size B of the bootstrap samples so that samples are not too much correlated (independence).
Bootstrap samples are often used, for example, to evaluate the variance or confidence intervals of a statistical estimator. By definition, a statistical estimator is a function of some observations and, so, a random variable with variance coming from these observations. In order to estimate the variance of such an estimator, we need to evaluate it on several independent samples drawn from the distribution of interest. In most of the cases, considering truly independent samples would require too much data compared to the amount really available. We can then use bootstrapping to generate several bootstrap samples that can be considered as being “almost-representative” and “almost-independent” (almost i.i.d. samples). These bootstrap samples will allow us to approximate the variance of the estimator, by evaluating its value for each of them.
<p>
<img src ="assets/7.png" height = 800px width = 800px/>
</p>
### Bagging
When training a model, no matter if we are dealing with a classification or a regression problem, we obtain a function that takes an input, returns an output and that is defined with respect to the training dataset. Due to the theoretical variance of the training dataset (we remind that a dataset is an observed sample coming from a true unknown underlying distribution), the fitted model is also subject to variability: if another dataset had been observed, we would have obtained a different model.
**The idea of bagging is then simple: we want to fit several independent models and “average” their predictions in order to obtain a model with a lower variance. However, we can’t, in practice, fit fully independent models because it would require too much data. So, we rely on the good “approximate properties” of bootstrap samples (representativity and independence) to fit models that are almost independent.**
First, we create multiple bootstrap samples so that each new bootstrap sample will act as another (almost) independent dataset drawn from the true distribution. Then, we can fit a weak learner for each of these samples and finally aggregate them such that we kind of “average” their outputs and, so, obtain an ensemble model with less variance than its components. Roughly speaking, as the bootstrap samples are approximatively independent and identically distributed (i.i.d.), so are the learned base models. Then, “averaging” weak learners’ outputs does not change the expected answer but reduces its variance (just like averaging i.i.d. random variables preserves the expected value but reduces variance).
So, assuming that we have L bootstrap samples (approximations of L independent datasets) of size B denoted
<p>
<img src ="assets/8.png" height = 800px width = 800px/>
</p>
we can fit L almost independent weak learners (one on each dataset)
<p>
<img src ="assets/9.png" />
</p>
and then aggregate them into some kind of averaging process in order to get an ensemble model with a lower variance. For example, we can define our strong model such that
<p>
<img src ="assets/10.png" height = 800px width = 800px/>
</p>
There are several possible ways to aggregate the multiple models fitted in parallel. For a regression problem, the outputs of individual models can literally be averaged to obtain the output of the ensemble model. For classification problem the class outputted by each model can be seen as a vote and the class that receives the majority of the votes is returned by the ensemble model (this is called hard-voting). Still for a classification problem, we can also consider the probabilities of each classes returned by all the models, average these probabilities and keep the class with the highest average probability (this is called soft-voting). Averages or votes can either be simple or weighted if any relevant weights can be used.
Finally, we can mention that one of the big advantages of bagging is that it can be parallelised. As the different models are fitted independently from each others, intensive parallelisation techniques can be used if required.
<p>
<img src ="assets/11.png" height = 800px width = 800px/>
</p>
### Random forests
Learning trees are very popular base models for ensemble methods. Strong learners composed of multiple trees can be called “forests”. Trees that compose a forest can be chosen to be either shallow (few depths) or deep (lot of depths, if not fully grown). Shallow trees have less variance but higher bias and then will be better choice for sequential methods that we will described thereafter. Deep trees, on the other side, have low bias but high variance and, so, are relevant choices for bagging method that is mainly focused at reducing variance.
The random forest approach is a bagging method where deep trees, fitted on bootstrap samples, are combined to produce an output with lower variance. However, random forests also use another trick to make the multiple fitted trees a bit less correlated with each others: when growing each tree, instead of only sampling over the observations in the dataset to generate a bootstrap sample, we also sample over features and keep only a random subset of them to build the tree.
Sampling over features has indeed the effect that all trees do not look at the exact same information to make their decisions and, so, it reduces the correlation between the different returned outputs. Another advantage of sampling over the features is that it makes the decision making process more robust to missing data: observations (from the training dataset or not) with missing data can still be regressed or classified based on the trees that take into account only features where data are not missing. Thus, random forest algorithm combines the concepts of bagging and random feature subspace selection to create more robust models.
<p>
<img src ="assets/12.png" height = 800px width = 800px/>
</p>
| github_jupyter |
##### Copyright 2018 The TensorFlow Authors.
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# Using the SavedModel format
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/guide/saved_model"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/guide/saved_model.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/guide/saved_model.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/docs/site/en/guide/saved_model.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
</td>
</table>
A SavedModel contains a complete TensorFlow program, including weights and computation. It does not require the original model building code to run, which makes it useful for sharing or deploying (with [TFLite](https://tensorflow.org/lite), [TensorFlow.js](https://js.tensorflow.org/), [TensorFlow Serving](https://www.tensorflow.org/tfx/serving/tutorials/Serving_REST_simple), or [TensorFlow Hub](https://tensorflow.org/hub)).
This document dives into some of the details of how to use the low-level `tf.saved_model` api:
- If you are using a `tf.keras.Model` the `keras.Model.save(output_path)` method may be all you need: See the [Keras save and serialize](keras/save_and_serialize.ipynb)
- If you just want to save/load weights during training see the [guide to training checkpoints](./checkpoint.ipynb).
## Creating a SavedModel from Keras
For a quick introduction, this section exports a pre-trained Keras model and serves image classification requests with it. The rest of the guide will fill in details and discuss other ways to create SavedModels.
```
import os
import tempfile
from matplotlib import pyplot as plt
import numpy as np
import tensorflow as tf
# Export everything into a throwaway temp directory.
tmpdir = tempfile.mkdtemp()
# Enable memory growth so TF doesn't grab all GPU memory up front.
physical_devices = tf.config.experimental.list_physical_devices('GPU')
for device in physical_devices:
  tf.config.experimental.set_memory_growth(device, True)
# Download the sample image and resize it to MobileNet's 224x224 input.
file = tf.keras.utils.get_file(
    "grace_hopper.jpg",
    "https://storage.googleapis.com/download.tensorflow.org/example_images/grace_hopper.jpg")
img = tf.keras.preprocessing.image.load_img(file, target_size=[224, 224])
plt.imshow(img)
plt.axis('off')
# Add a batch axis and apply MobileNet's input preprocessing.
x = tf.keras.preprocessing.image.img_to_array(img)
x = tf.keras.applications.mobilenet.preprocess_input(
    x[tf.newaxis,...])
```
You'll use an image of Grace Hopper as a running example, and a Keras pre-trained image classification model since it's easy to use. Custom models work too, and are covered in detail later.
```
# ImageNet class-name strings, one per line.
labels_path = tf.keras.utils.get_file(
    'ImageNetLabels.txt',
    'https://storage.googleapis.com/download.tensorflow.org/data/ImageNetLabels.txt')
imagenet_labels = np.array(open(labels_path).read().splitlines())
pretrained_model = tf.keras.applications.MobileNet()
result_before_save = pretrained_model(x)
# Top-5 classes by score; the +1 presumably offsets for a leading extra
# entry in the label file — confirm against ImageNetLabels.txt.
decoded = imagenet_labels[np.argsort(result_before_save)[0,::-1][:5]+1]
print("Result before saving:\n", decoded)
```
The top prediction for this image is "military uniform".
```
# Version-numbered path ("1/") following the TF Serving layout convention.
mobilenet_save_path = os.path.join(tmpdir, "mobilenet/1/")
tf.saved_model.save(pretrained_model, mobilenet_save_path)
```
The save-path follows a convention used by TensorFlow Serving where the last path component (`1/` here) is a version number for your model - it allows tools like Tensorflow Serving to reason about the relative freshness.
You can load the SavedModel back into Python with `tf.saved_model.load` and see how Admiral Hopper's image is classified.
```
# Reload the SavedModel; a Keras export gets a default serving signature.
loaded = tf.saved_model.load(mobilenet_save_path)
print(list(loaded.signatures.keys())) # ["serving_default"]
```
Imported signatures always return dictionaries. To customize signature names and output dictionary keys, see [Specifying signatures during export](#specifying_signatures_during_export).
```
infer = loaded.signatures["serving_default"]
# Signatures return dictionaries; structured_outputs shows the output keys/specs.
print(infer.structured_outputs)
```
Running inference from the SavedModel gives the same result as the original model.
```
# Run the restored signature and decode the top-5 exactly as before saving.
labeling = infer(tf.constant(x))[pretrained_model.output_names[0]]
decoded = imagenet_labels[np.argsort(labeling)[0,::-1][:5]+1]
print("Result after saving and loading:\n", decoded)
```
## Running a SavedModel in TensorFlow Serving
SavedModels are usable from Python (more on that below), but production environments typically use a dedicated service for inference without running Python code. This is easy to set up from a SavedModel using TensorFlow Serving.
See the [TensorFlow Serving REST tutorial](https://www.tensorflow.org/tfx/tutorials/serving/rest_simple) for an end-to-end tensorflow-serving example.
## The SavedModel format on disk
A SavedModel is a directory containing serialized signatures and the state needed to run them, including variable values and vocabularies.
```
!ls {mobilenet_save_path}
```
The `saved_model.pb` file stores the actual TensorFlow program, or model, and a set of named signatures, each identifying a function that accepts tensor inputs and produces tensor outputs.
SavedModels may contain multiple variants of the model (multiple `v1.MetaGraphDefs`, identified with the `--tag_set` flag to `saved_model_cli`), but this is rare. APIs which create multiple variants of a model include [`tf.Estimator.experimental_export_all_saved_models`](https://www.tensorflow.org/api_docs/python/tf/estimator/Estimator#experimental_export_all_saved_models) and in TensorFlow 1.x `tf.saved_model.Builder`.
```
!saved_model_cli show --dir {mobilenet_save_path} --tag_set serve
```
The `variables` directory contains a standard training checkpoint (see the [guide to training checkpoints](./checkpoint.ipynb)).
```
!ls {mobilenet_save_path}/variables
```
The `assets` directory contains files used by the TensorFlow graph, for example text files used to initialize vocabulary tables. It is unused in this example.
SavedModels may have an `assets.extra` directory for any files not used by the TensorFlow graph, for example information for consumers about what to do with the SavedModel. TensorFlow itself does not use this directory.
## Saving a custom model
`tf.saved_model.save` supports saving `tf.Module` objects and its subclasses, like `tf.keras.Layer` and `tf.keras.Model`.
Let's look at an example of saving and restoring a `tf.Module`.
```
class CustomModule(tf.Module):
  """Minimal tf.Module: one variable, a traced __call__, and a mutator."""
  def __init__(self):
    super(CustomModule, self).__init__()
    # Saved automatically because it is a tf.Variable attribute of the module.
    self.v = tf.Variable(1.)
  @tf.function
  def __call__(self, x):
    """Scale x by v; no fixed input_signature, so each new input shape/dtype
    triggers a fresh trace (hence the print below)."""
    print('Tracing with', x)
    return x * self.v
  @tf.function(input_signature=[tf.TensorSpec([], tf.float32)])
  def mutate(self, new_v):
    """Assign a new value to v (signature fixed to a float32 scalar)."""
    self.v.assign(new_v)
module = CustomModule()
```
When you save a `tf.Module`, any `tf.Variable` attributes, `tf.function`-decorated methods, and `tf.Module`s found via recursive traversal are saved. (See the [Checkpoint tutorial](./checkpoint.ipynb) for more about this recursive traversal.) However, any Python attributes, functions, and data are lost. This means that when a `tf.function` is saved, no Python code is saved.
If no Python code is saved, how does SavedModel know how to restore the function?
Briefly, `tf.function` works by tracing the Python code to generate a ConcreteFunction (a callable wrapper around `tf.Graph`). When saving a `tf.function`, you're really saving the `tf.function`'s cache of ConcreteFunctions.
To learn more about the relationship between `tf.function` and ConcreteFunctions, see the [tf.function guide](../../guide/function).
```
module_no_signatures_path = os.path.join(tmpdir, 'module_no_signatures')
# Call once so a ConcreteFunction is traced and can be saved.
module(tf.constant(0.))
print('Saving model...')
tf.saved_model.save(module, module_no_signatures_path)
```
## Loading and using a custom model
When you load a SavedModel in Python, all `tf.Variable` attributes, `tf.function`-decorated methods, and `tf.Module`s are restored in the same object structure as the original saved `tf.Module`.
```
imported = tf.saved_model.load(module_no_signatures_path)
# The restored object keeps the same structure: __call__ and mutate still work.
assert imported(tf.constant(3.)).numpy() == 3
imported.mutate(tf.constant(2.))
assert imported(tf.constant(3.)).numpy() == 6
```
Because no Python code is saved, calling a `tf.function` with a new input signature will fail:
```python
imported(tf.constant([3.]))
```
<pre>
ValueError: Could not find matching function to call for canonicalized inputs ((<tf.Tensor 'args_0:0' shape=(1,) dtype=float32>,), {}). Only existing signatures are [((TensorSpec(shape=(), dtype=tf.float32, name=u'x'),), {})].
</pre>
### Basic fine-tuning
Variable objects are available, and you can backprop through imported functions. That is enough to fine-tune (i.e. retrain) a SavedModel in simple cases.
```
optimizer = tf.optimizers.SGD(0.05)
def train_step():
  """One SGD step pulling imported(2.) toward 10 (so v toward 5)."""
  with tf.GradientTape() as tape:
    loss = (10. - imported(tf.constant(2.))) ** 2
  # Gradients flow through the imported function's restored variables.
  variables = tape.watched_variables()
  grads = tape.gradient(loss, variables)
  optimizer.apply_gradients(zip(grads, variables))
  return loss
for _ in range(10):
  # "v" approaches 5, "loss" approaches 0
  print("loss={:.2f} v={:.2f}".format(train_step(), imported.v.numpy()))
```
### General fine-tuning
A SavedModel from Keras provides [more details](https://github.com/tensorflow/community/blob/master/rfcs/20190509-keras-saved-model.md#serialization-details) than a plain `__call__` to address more advanced cases of fine-tuning. TensorFlow Hub recommends to provide the following of those, if applicable, in SavedModels shared for the purpose of fine-tuning:
* If the model uses dropout or another technique in which the forward pass differs between training and inference (like batch normalization), the `__call__` method takes an optional, Python-valued `training=` argument that defaults to `False` but can be set to `True`.
* Next to the `__call__` attribute, there are `.variables` and `.trainable_variables` attributes with the corresponding lists of variables. A variable that was originally trainable but is meant to be frozen during fine-tuning is omitted from `.trainable_variables`.
* For the sake of frameworks like Keras that represent weight regularizers as attributes of layers or sub-models, there can also be a `.regularization_losses` attribute. It holds a list of zero-argument functions whose values are meant for addition to the total loss.
Going back to the initial MobileNet example, you can see some of those in action:
```
loaded = tf.saved_model.load(mobilenet_save_path)
print("MobileNet has {} trainable variables: {}, ...".format(
    len(loaded.trainable_variables),
    ", ".join([v.name for v in loaded.trainable_variables[:5]])))
# Non-trainables = all variables minus the trainables, compared by identity.
trainable_variable_ids = {id(v) for v in loaded.trainable_variables}
non_trainable_variables = [v for v in loaded.variables
                           if id(v) not in trainable_variable_ids]
print("MobileNet also has {} non-trainable variables: {}, ...".format(
    len(non_trainable_variables),
    ", ".join([v.name for v in non_trainable_variables[:3]])))
```
## Specifying signatures during export
Tools like TensorFlow Serving and `saved_model_cli` can interact with SavedModels. To help these tools determine which ConcreteFunctions to use, you need to specify serving signatures. `tf.keras.Model`s automatically specify serving signatures, but you'll have to explicitly declare a serving signature for our custom modules.
By default, no signatures are declared in a custom `tf.Module`.
```
# A custom tf.Module declares no serving signatures by default.
assert len(imported.signatures) == 0
```
To declare a serving signature, specify a ConcreteFunction using the `signatures` kwarg. When specifying a single signature, its signature key will be `'serving_default'`, which is saved as the constant `tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY`.
```
module_with_signature_path = os.path.join(tmpdir, 'module_with_signature')
# A single ConcreteFunction passed as `signatures` is keyed 'serving_default'.
call = module.__call__.get_concrete_function(tf.TensorSpec(None, tf.float32))
tf.saved_model.save(module, module_with_signature_path, signatures=call)
imported_with_signatures = tf.saved_model.load(module_with_signature_path)
list(imported_with_signatures.signatures.keys())
```
To export multiple signatures, pass a dictionary of signature keys to ConcreteFunctions. Each signature key corresponds to one ConcreteFunction.
```
module_multiple_signatures_path = os.path.join(tmpdir, 'module_with_multiple_signatures')
# Dict form: one signature key per ConcreteFunction.
signatures = {"serving_default": call,
              "array_input": module.__call__.get_concrete_function(tf.TensorSpec([None], tf.float32))}
tf.saved_model.save(module, module_multiple_signatures_path, signatures=signatures)
imported_with_multiple_signatures = tf.saved_model.load(module_multiple_signatures_path)
list(imported_with_multiple_signatures.signatures.keys())
```
By default, the output tensor names are fairly generic, like `output_0`. To control the names of outputs, modify your `tf.function` to return a dictionary that maps output names to outputs. The names of inputs are derived from the Python function arg names.
```
class CustomModuleWithOutputName(tf.Module):
    """tf.Module whose __call__ returns a dict, so the exported SavedModel
    signature names its output 'custom_output_name' instead of the generic
    'output_0'."""

    def __init__(self):
        super(CustomModuleWithOutputName, self).__init__()
        # Single trainable scalar; exists so the module has state to save.
        self.v = tf.Variable(1.)

    @tf.function(input_signature=[tf.TensorSpec([], tf.float32)])
    def __call__(self, x):
        # Returning a dict maps each output name -> tensor in the signature.
        return {'custom_output_name': x * self.v}
module_output = CustomModuleWithOutputName()
call_output = module_output.__call__.get_concrete_function(tf.TensorSpec(None, tf.float32))
module_output_path = os.path.join(tmpdir, 'module_with_output_name')
tf.saved_model.save(module_output, module_output_path,
signatures={'serving_default': call_output})
imported_with_output_name = tf.saved_model.load(module_output_path)
imported_with_output_name.signatures['serving_default'].structured_outputs
```
## Load a SavedModel in C++
The C++ version of the SavedModel [loader](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/cc/saved_model/loader.h) provides an API to load a SavedModel from a path, while allowing SessionOptions and RunOptions. You have to specify the tags associated with the graph to be loaded. The loaded version of SavedModel is referred to as SavedModelBundle and contains the MetaGraphDef and the session within which it is loaded.
```C++
const string export_dir = ...
SavedModelBundle bundle;
...
LoadSavedModel(session_options, run_options, export_dir, {kSavedModelTagTrain},
&bundle);
```
<a id=saved_model_cli/>
## Details of the SavedModel command line interface
You can use the SavedModel Command Line Interface (CLI) to inspect and
execute a SavedModel.
For example, you can use the CLI to inspect the model's `SignatureDef`s.
The CLI enables you to quickly confirm that the input
Tensor dtype and shape match the model. Moreover, if you
want to test your model, you can use the CLI to do a sanity check by
passing in sample inputs in various formats (for example, Python
expressions) and then fetching the output.
### Install the SavedModel CLI
Broadly speaking, you can install TensorFlow in either of the following
two ways:
* By installing a pre-built TensorFlow binary.
* By building TensorFlow from source code.
If you installed TensorFlow through a pre-built TensorFlow binary,
then the SavedModel CLI is already installed on your system
at pathname `bin/saved_model_cli`.
If you built TensorFlow from source code, you must run the following
additional command to build `saved_model_cli`:
```
$ bazel build tensorflow/python/tools:saved_model_cli
```
### Overview of commands
The SavedModel CLI supports the following two commands on a SavedModel:
* `show`, which shows the computations available from a SavedModel.
* `run`, which runs a computation from a SavedModel.
### `show` command
A SavedModel contains one or more model variants (technically, `v1.MetaGraphDef`s), identified by their tag-sets. To serve a model, you might wonder what kind of `SignatureDef`s are in each model variant, and what their inputs and outputs are. The `show` command lets you examine the contents of the SavedModel in hierarchical order. Here's the syntax:
```
usage: saved_model_cli show [-h] --dir DIR [--all]
[--tag_set TAG_SET] [--signature_def SIGNATURE_DEF_KEY]
```
For example, the following command shows all available tag-sets in the SavedModel:
```
$ saved_model_cli show --dir /tmp/saved_model_dir
The given SavedModel contains the following tag-sets:
serve
serve, gpu
```
The following command shows all available `SignatureDef` keys for a tag set:
```
$ saved_model_cli show --dir /tmp/saved_model_dir --tag_set serve
The given SavedModel `MetaGraphDef` contains `SignatureDefs` with the
following keys:
SignatureDef key: "classify_x2_to_y3"
SignatureDef key: "classify_x_to_y"
SignatureDef key: "regress_x2_to_y3"
SignatureDef key: "regress_x_to_y"
SignatureDef key: "regress_x_to_y2"
SignatureDef key: "serving_default"
```
If there are *multiple* tags in the tag-set, you must specify
all tags, each tag separated by a comma. For example:
<pre>
$ saved_model_cli show --dir /tmp/saved_model_dir --tag_set serve,gpu
</pre>
To show all inputs and outputs TensorInfo for a specific `SignatureDef`, pass in
the `SignatureDef` key to `signature_def` option. This is very useful when you
want to know the tensor key value, dtype and shape of the input tensors for
executing the computation graph later. For example:
```
$ saved_model_cli show --dir \
/tmp/saved_model_dir --tag_set serve --signature_def serving_default
The given SavedModel SignatureDef contains the following input(s):
inputs['x'] tensor_info:
dtype: DT_FLOAT
shape: (-1, 1)
name: x:0
The given SavedModel SignatureDef contains the following output(s):
outputs['y'] tensor_info:
dtype: DT_FLOAT
shape: (-1, 1)
name: y:0
Method name is: tensorflow/serving/predict
```
To show all available information in the SavedModel, use the `--all` option.
For example:
<pre>
$ saved_model_cli show --dir /tmp/saved_model_dir --all
MetaGraphDef with tag-set: 'serve' contains the following SignatureDefs:
signature_def['classify_x2_to_y3']:
The given SavedModel SignatureDef contains the following input(s):
inputs['inputs'] tensor_info:
dtype: DT_FLOAT
shape: (-1, 1)
name: x2:0
The given SavedModel SignatureDef contains the following output(s):
outputs['scores'] tensor_info:
dtype: DT_FLOAT
shape: (-1, 1)
name: y3:0
Method name is: tensorflow/serving/classify
...
signature_def['serving_default']:
The given SavedModel SignatureDef contains the following input(s):
inputs['x'] tensor_info:
dtype: DT_FLOAT
shape: (-1, 1)
name: x:0
The given SavedModel SignatureDef contains the following output(s):
outputs['y'] tensor_info:
dtype: DT_FLOAT
shape: (-1, 1)
name: y:0
Method name is: tensorflow/serving/predict
</pre>
### `run` command
Invoke the `run` command to run a graph computation, passing
inputs and then displaying (and optionally saving) the outputs.
Here's the syntax:
```
usage: saved_model_cli run [-h] --dir DIR --tag_set TAG_SET --signature_def
SIGNATURE_DEF_KEY [--inputs INPUTS]
[--input_exprs INPUT_EXPRS]
[--input_examples INPUT_EXAMPLES] [--outdir OUTDIR]
[--overwrite] [--tf_debug]
```
The `run` command provides the following three ways to pass inputs to the model:
* `--inputs` option enables you to pass numpy ndarray in files.
* `--input_exprs` option enables you to pass Python expressions.
* `--input_examples` option enables you to pass `tf.train.Example`.
#### `--inputs`
To pass input data in files, specify the `--inputs` option, which takes the
following general format:
```bsh
--inputs <INPUTS>
```
where *INPUTS* is either of the following formats:
* `<input_key>=<filename>`
* `<input_key>=<filename>[<variable_name>]`
You may pass multiple *INPUTS*. If you do pass multiple inputs, use a semicolon
to separate each of the *INPUTS*.
`saved_model_cli` uses `numpy.load` to load the *filename*.
The *filename* may be in any of the following formats:
* `.npy`
* `.npz`
* pickle format
A `.npy` file always contains a numpy ndarray. Therefore, when loading from
a `.npy` file, the content will be directly assigned to the specified input
tensor. If you specify a *variable_name* with that `.npy` file, the
*variable_name* will be ignored and a warning will be issued.
When loading from a `.npz` (zip) file, you may optionally specify a
*variable_name* to identify the variable within the zip file to load for
the input tensor key. If you don't specify a *variable_name*, the SavedModel
CLI will check that only one file is included in the zip file and load it
for the specified input tensor key.
When loading from a pickle file, if no `variable_name` is specified in the
square brackets, whatever that is inside the pickle file will be passed to the
specified input tensor key. Otherwise, the SavedModel CLI will assume a
dictionary is stored in the pickle file and the value corresponding to
the *variable_name* will be used.
#### `--input_exprs`
To pass inputs through Python expressions, specify the `--input_exprs` option.
This can be useful for when you don't have data
files lying around, but still want to sanity check the model with some simple
inputs that match the dtype and shape of the model's `SignatureDef`s.
For example:
```bsh
`<input_key>=[[1],[2],[3]]`
```
In addition to Python expressions, you may also pass numpy functions. For
example:
```bsh
`<input_key>=np.ones((32,32,3))`
```
(Note that the `numpy` module is already available to you as `np`.)
#### `--input_examples`
To pass `tf.train.Example` as inputs, specify the `--input_examples` option.
For each input key, it takes a list of dictionaries, where each dictionary is an
instance of `tf.train.Example`. The dictionary keys are the features and the
values are the value lists for each feature.
For example:
```bsh
`<input_key>=[{"age":[22,24],"education":["BS","MS"]}]`
```
#### Save output
By default, the SavedModel CLI writes output to stdout. If a directory is
passed to `--outdir` option, the outputs will be saved as `.npy` files named after
output tensor keys under the given directory.
Use `--overwrite` to overwrite existing output files.
| github_jupyter |
# Merging Tables With Different Join Types
**Take your knowledge of joins to the next level. In this chapter, you’ll work with TMDb movie data as you learn about left, right, and outer joins. You’ll also discover how to merge a table to itself and merge on a DataFrame index.**
```
import pandas as pd
import numpy as np
```
## Counting missing rows with left join
The Movie Database is supported by volunteers going out into the world, collecting data, and entering it into the database. This includes financial data, such as movie budget and revenue. If you wanted to know which movies are still missing data, you could use a left join to identify them. Practice using a left join by merging the `movies` table and the `financials` table.
```
movies = pd.read_pickle('movies.p')
financials = pd.read_pickle('financials.p')
movies.head()
financials.head()
```
- Merge the `movies` table, as the left table, with the `financials` table using a left join, and save the result to `movies_financials`.
- Count the number of rows in `movies_financials` with a null value in the `budget` column.
```
# Merge the movies table with the financials table with a left join
movies_financials = movies.merge(financials, on='id', how='left')
# Count the number of rows in the budget column that are missing
number_of_missing_fin = movies_financials['budget'].isnull().sum()
# Print the number of movies missing financials
print(number_of_missing_fin)
movies_financials.sort_values('budget', ascending=False).head(10)
movies_financials.sort_values('popularity', ascending=False).head(10)
```
## Enriching a dataset
Setting `how='left'` with the `.merge()` method is a useful technique for enriching or enhancing a dataset with additional information from a different table. In this exercise, you will start off with a sample of movie data from the movie series Toy Story. Your goal is to enrich this data by adding the marketing tag line for each movie. You will compare the results of a left join versus an inner join.
The `toy_story` DataFrame contains the *Toy Story* movies.
- Merge `toy_story` and `taglines` on the `id` column with a **left join**, and save the result as `toystory_tag_l`.
- With `toy_story` as the left table, merge to it `taglines` on the `id` column with an **inner join**, and save as `toystory_tag`.
```
toy_story = movies[movies['title'].str.contains('Toy Story')]
toy_story
taglines = pd.read_pickle('taglines.p')
taglines.head()
# Merge the toy_story and taglines tables with a left join
toystory_tag_l = toy_story.merge(taglines, on='id', how='left')
toystory_tag_l
# Print shape of toystory_tag_l
print(toystory_tag_l.shape)
# Merge the toy_story and taglines tables with a inner join
toystory_tag = toy_story.merge(taglines, on='id')
toystory_tag
# Print shape of toystory_tag
print(toystory_tag.shape)
```
## Right join to find unique movies
Most of the recent big-budget science fiction movies can also be classified as action movies. You are given a table of science fiction movies called `scifi_movies` and another table of action movies called `action_movies`. Your goal is to find which movies are considered only science fiction `movies`. Once you have this table, you can merge the movies table in to see the movie names. Since this exercise is related to science fiction movies, use a right join as your superhero power to solve this problem.
```
action_movies = pd.read_pickle('action_movies.p')
scifi_movies = pd.read_pickle('scifi_movies.p')
```
- Merge `action_movies` and `scifi_movies` tables with a right join on `movie_id`. Save the result as `action_scifi`.
- Update the merge to add suffixes, where `'_act'` and `'_sci'` are suffixes for the left and right tables, respectively.
- From `action_scifi`, subset only the rows where the `genre_act` column is null.
- Merge `movies` and `scifi_only` using the `id` column in the left table and the `movie_id` column in the right table with an inner join.
```
# Merge action_movies to scifi_movies with right join
action_scifi = action_movies.merge(scifi_movies, on='movie_id', how='right'
,suffixes=['_act', '_sci'])
# Print the first few rows of action_scifi to see the structure
print(action_scifi.head(10))
# From action_scifi,
# select only the rows where the genre_act column is null
scifi_only = action_scifi[action_scifi['genre_act'].isnull()]
# Merge the movies and scifi_only tables with an inner join
movies_and_scifi_only = movies.merge(scifi_only, left_on='id', right_on='movie_id')
# Print the first few rows
movies_and_scifi_only.head()
```
## Popular genres with right join
What are the genres of the most popular movies? To answer this question, you need to merge data from the `movies` and `movie_to_genres` tables. In a table called `pop_movies`, the top 10 most popular movies in the `movies` table have been selected. To ensure that you are analyzing all of the popular movies, merge it with the `movie_to_genres` table using a right join. To complete your analysis, count the number of different genres. Also, the two tables can be merged by the movie ID. However, in `pop_movies` that column is called `id`, and in `movies_to_genres` it's called `movie_id`.
- Merge `movie_to_genres` and `pop_movies` using a **right join**. Save the results as `genres_movies`.
- Group `genres_movies` by `genre` and count the number of `id` values.
```
pop_movies = movies.sort_values('popularity', ascending=False).head(10)
movie_to_genres = pd.read_pickle('movie_to_genres.p')
# Use right join to merge the movie_to_genres and pop_movies tables
genres_movies = movie_to_genres.merge(pop_movies, how='right'
, right_on='id', left_on='movie_id')
# Count the number of genres
genre_count = genres_movies.groupby('genre').agg({'id':'count'})
# Import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
# Plot a bar chart of the genre_count
genre_count.plot(kind='bar')
plt.show()
```
## Using outer join to select actors
One cool aspect of using an outer join is that, because it returns all rows from both merged tables and null where they do not match, you can use it to find rows that do not have a match in the other table. To try for yourself, you have been given two tables with a list of actors from two popular movies: *Iron Man 1* and *Iron Man 2*. Most of the actors played in both movies. Use an outer join to find actors who **did not** act in both movies.
The *Iron Man 1* table is called `iron_1_actors`, and *Iron Man 2* table is called `iron_2_actors`.

```
iron_1_actors = pd.read_pickle('iron_1_actors.p')
iron_2_actors = pd.read_pickle('iron_2_actors.p')
```
- Save to `iron_1_and_2` the merge of `iron_1_actors` (left) with `iron_2_actors` tables with an outer join on the id column, and set suffixes to `('_1','_2')`.
- Create an index that returns `True` if `name_1` or `name_2` are *null*, and `False` otherwise.
```
# Merge iron_1_actors to iron_2_actors on id with outer join using suffixes
iron_1_and_2 = iron_1_actors.merge(iron_2_actors, on='id', how='outer', suffixes=['_1', '_2'])
# Create an index that returns true if name_1 or name_2 are null
m = ((iron_1_and_2['name_1'].isnull()) | (iron_1_and_2['name_2'].isnull()))
# Print the first few rows of iron_1_and_2
iron_1_and_2[m].head()
iron_1_and_2[m].sample(8)
```
## Self join
Merging a table to itself can be useful when you want to compare values in a column to other values in the same column. In this exercise, you will practice this by creating a table that for each movie will list the movie director and a member of the crew on one row. You have been given a table called `crews`, which has columns `id`, `job`, and `name`. First, merge the table to itself using the movie ID. This merge will give you a larger table where for each movie, every job is matched against each other. Then select only those rows with a director in the left table, and avoid having a row where the director's job is listed in both the left and right tables. This filtering will remove job combinations that aren't with the director.
```
crews = pd.read_pickle('crews.p')
crews.head()
```
- To a variable called `crews_self_merged`, merge the `crews` table to itself on the `id` column using an inner join, setting the suffixes to `'_dir'` and `'_crew'` for the left and right tables respectively.
- Create a Boolean index, named `boolean_filter`, that selects rows from the left table with the *job* of `'Director'` and avoids rows with the *job* of `'Director'` in the right table.
- Use the `.head()` method to print the first few rows of `direct_crews`.
```
# Merge the crews table to itself
crews_self_merged = crews.merge(crews, on='id', suffixes=['_dir', '_crew'])
# Create a Boolean index to select the appropriate
boolean_filter = ((crews_self_merged['job_dir'] == 'Director')
& (crews_self_merged['job_crew'] != 'Director'))
# Print the first few rows of direct_crews
direct_crews = crews_self_merged[boolean_filter]
direct_crews.head()
direct_crews.sample(5)
```
## How does pandas handle self joins?
Select the **false** statement about merging a table to itself.
1. You can merge a table to itself with a right join.
2. Merging a table to itself can allow you to compare values in a column to other values in the same column.
3. ~The Pandas module limits you to one merge where you merge a table to itself. You cannot repeat this process over and over.~
4. Merging a table to itself is like working with two separate tables.
**Answer: 3**
Pandas treats a merge of a table to itself the same as any other merge. Therefore, it does not limit you from chaining multiple `.merge()` methods together.
## Index merge for movie ratings
To practice merging on indexes, you will merge `movies` and a table called `ratings` that holds info about movie ratings. Make sure your merge returns all of the rows from the `movies` table; not all of the rows of the `ratings` table need to be included in the result.
```
ratings = pd.read_pickle('ratings.p')
ratings.head()
```
- Merge `movies` and `ratings` on the index and save to a variable called `movies_ratings`, ensuring that all of the rows from the `movies` table are returned.
```
# Merge to the movies table the ratings table on the index.
# how='left' keeps every row of `movies` even when it has no rating —
# the exercise asks for all movies; the default inner join would drop
# any movie missing from `ratings`.
movies_ratings = movies.merge(ratings, on='id', how='left')
# Print the first few rows of movies_ratings
movies_ratings.head()
print(movies_ratings.shape)
```
## Do sequels earn more?
It is time to put together many of the aspects that you have learned in this chapter. In this exercise, you'll find out which movie sequels earned the most compared to the original movie. To answer this question, you will merge a modified version of the `sequels` and `financials` tables where their index is the movie ID. You will need to choose a merge type that will return all of the rows from the `sequels` table and not all the rows of `financials` table need to be included in the result. From there, you will join the resulting table to itself so that you can compare the revenue values of the original movie to the sequel. Next, you will calculate the difference between the two revenues and sort the resulting dataset.
```
sequels = pd.read_pickle('sequels.p')
financials = pd.read_pickle('financials.p')
```
- With the `sequels` table on the left, merge to it the `financials` table on the index named `id`, ensuring that all the rows from `sequels` are returned while some rows from the other table may not be. Save the results to `sequels_fin`.
```
# Merge sequels and financials on index id
sequels_fin = sequels.merge(financials, on='id', how='left')
sequels_fin.head()
```
- Merge the `sequels_fin` table to itself with an inner join, where the left and right tables merge on `sequel` and `id` respectively with suffixes equal to `('_org','_seq')`, saving to `orig_seq`.
```
# Self merge with suffixes as inner join with left on sequel and right on id
orig_seq = sequels_fin.merge(sequels_fin, how='inner',
left_on='sequel', right_on='id',
suffixes=['_org', '_seq'])
# Add calculation to subtract revenue_org from revenue_seq
orig_seq['diff'] = orig_seq['revenue_seq'] - orig_seq['revenue_org']
```
- Select the `title_org`, `title_seq`, and `diff` columns of `orig_seq` and save this as `titles_diff`.
- Sort `titles_diff` by `diff` in descending order and print the first few rows.
```
# Select the title_org, title_seq, and diff
titles_diff = orig_seq[['title_org', 'title_seq', 'diff']]
# Print the first rows of the sorted titles_diff
titles_diff.sort_values('diff', ascending=False).head()
```
***It is found out that Jurassic World had one of the highest of all, improvement in revenue compared to the original movie.***
| github_jupyter |
```
import bert_model as modeling
import re
import numpy as np
import pandas as pd
import collections
from sklearn import metrics
from sklearn.preprocessing import LabelEncoder
import tensorflow as tf
from sklearn.model_selection import train_test_split
from unidecode import unidecode
from tqdm import tqdm
import time
rules_normalizer = {
'experience': 'pengalaman',
'bagasi': 'bagasi',
'kg': 'kampung',
'kilo': 'kilogram',
'g': 'gram',
'grm': 'gram',
'k': 'okay',
'abgkat': 'abang dekat',
'abis': 'habis',
'ade': 'ada',
'adoi': 'aduh',
'adoii': 'aduhh',
'aerodarat': 'kapal darat',
'agkt': 'angkat',
'ahh': 'ah',
'ailior': 'air liur',
'airasia': 'air asia x',
'airasiax': 'penerbangan',
'airline': 'penerbangan',
'airlines': 'penerbangan',
'airport': 'lapangan terbang',
'airpot': 'lapangan terbang',
'aje': 'sahaja',
'ajelah': 'sahajalah',
'ajer': 'sahaja',
'ak': 'aku',
'aq': 'aku',
'all': 'semua',
'ambik': 'ambil',
'amek': 'ambil',
'amer': 'amir',
'amik': 'ambil',
'ana': 'saya',
'angkt': 'angkat',
'anual': 'tahunan',
'apapun': 'apa pun',
'ape': 'apa',
'arab': 'arab',
'area': 'kawasan',
'aritu': 'hari itu',
'ask': 'tanya',
'astro': 'astro',
'at': 'pada',
'attitude': 'sikap',
'babi': 'khinzir',
'back': 'belakang',
'bag': 'beg',
'bang': 'abang',
'bangla': 'bangladesh',
'banyk': 'banyak',
'bard': 'pujangga',
'bargasi': 'bagasi',
'bawak': 'bawa',
'bawanges': 'bawang',
'be': 'jadi',
'behave': 'berkelakuan baik',
'belagak': 'berlagak',
'berdisiplin': 'berdisplin',
'berenti': 'berhenti',
'beskal': 'basikal',
'bff': 'rakan karib',
'bg': 'bagi',
'bgi': 'bagi',
'biase': 'biasa',
'big': 'besar',
'bike': 'basikal',
'bile': 'bila',
'binawe': 'binatang',
'bini': 'isteri',
'bkn': 'bukan',
'bla': 'bila',
'blom': 'belum',
'bnyak': 'banyak',
'body': 'tubuh',
'bole': 'boleh',
'boss': 'bos',
'bowling': 'boling',
'bpe': 'berapa',
'brand': 'jenama',
'brg': 'barang',
'briefing': 'taklimat',
'brng': 'barang',
'bro': 'abang',
'bru': 'baru',
'bruntung': 'beruntung',
'bsikal': 'basikal',
'btnggjwb': 'bertanggungjawab',
'btul': 'betul',
'buatlh': 'buatlah',
'buh': 'letak',
'buka': 'buka',
'but': 'tetapi',
'bwk': 'bawa',
'by': 'dengan',
'byr': 'bayar',
'bz': 'sibuk',
'camera': 'kamera',
'camni': 'macam ini',
'cane': 'macam mana',
'cant': 'tak boleh',
'carakerja': 'cara kerja',
'care': 'jaga',
'cargo': 'kargo',
'cctv': 'kamera litar tertutup',
'celako': 'celaka',
'cer': 'cerita',
'cheap': 'murah',
'check': 'semak',
'ciput': 'sedikit',
'cite': 'cerita',
'citer': 'cerita',
'ckit': 'sikit',
'ckp': 'cakap',
'class': 'kelas',
'cm': 'macam',
'cmni': 'macam ini',
'cmpak': 'campak',
'committed': 'komited',
'company': 'syarikat',
'complain': 'aduan',
'corn': 'jagung',
'couldnt': 'tak boleh',
'cr': 'cari',
'crew': 'krew',
'cube': 'cuba',
'cuma': 'cuma',
'curinyaa': 'curinya',
'cust': 'pelanggan',
'customer': 'pelanggan',
'd': 'di',
'da': 'dah',
'dn': 'dan',
'dahh': 'dah',
'damaged': 'rosak',
'dapek': 'dapat',
'day': 'hari',
'dazrin': 'dazrin',
'dbalingnya': 'dibalingnya',
'de': 'ada',
'deep': 'dalam',
'deliberately': 'sengaja',
'depa': 'mereka',
'dessa': 'desa',
'dgn': 'dengan',
'dh': 'dah',
'didunia': 'di dunia',
'diorang': 'mereka',
'diorng': 'mereka',
'direct': 'secara terus',
'diving': 'junam',
'dkt': 'dekat',
'dlempar': 'dilempar',
'dlm': 'dalam',
'dlt': 'padam',
'dlu': 'dulu',
'done': 'siap',
'dont': 'jangan',
'dorg': 'mereka',
'dpermudhkn': 'dipermudahkan',
'dpt': 'dapat',
'dr': 'dari',
'dri': 'dari',
'dsb': 'dan sebagainya',
'dy': 'dia',
'educate': 'mendidik',
'ensure': 'memastikan',
'everything': 'semua',
'ewahh': 'wah',
'expect': 'sangka',
'fb': 'facebook',
'fired': 'pecat',
'first': 'pertama',
'fkr': 'fikir',
'flight': 'kapal terbang',
'for': 'untuk',
'free': 'percuma',
'friend': 'kawan',
'fyi': 'untuk pengetahuan anda',
'gantila': 'gantilah',
'gantirugi': 'ganti rugi',
'gentlemen': 'lelaki budiman',
'gerenti': 'jaminan',
'gile': 'gila',
'gk': 'juga',
'gnti': 'ganti',
'go': 'pergi',
'gomen': 'kerajaan',
'goment': 'kerajaan',
'good': 'baik',
'ground': 'tanah',
'guarno': 'macam mana',
'hampa': 'mereka',
'hampeh': 'teruk',
'hanat': 'jahanam',
'handle': 'kawal',
'handling': 'kawalan',
'hanta': 'hantar',
'haritu': 'hari itu',
'hate': 'benci',
'have': 'ada',
'hawau': 'celaka',
'henpon': 'telefon',
'heran': 'hairan',
'him': 'dia',
'his': 'dia',
'hmpa': 'mereka',
'hntr': 'hantar',
'hotak': 'otak',
'hr': 'hari',
'i': 'saya',
'hrga': 'harga',
'hrp': 'harap',
'hu': 'sedih',
'humble': 'merendah diri',
'ibon': 'ikon',
'ichi': 'inci',
'idung': 'hidung',
'if': 'jika',
'ig': 'instagram',
'iklas': 'ikhlas',
'improve': 'menambah baik',
'in': 'masuk',
'isn t': 'tidak',
'isyaallah': 'insyallah',
'ja': 'sahaja',
'japan': 'jepun',
'jd': 'jadi',
'je': 'saja',
'jee': 'saja',
'jek': 'saja',
'jepun': 'jepun',
'jer': 'saja',
'jerr': 'saja',
'jez': 'saja',
'jg': 'juga',
'jgk': 'juga',
'jgn': 'jangan',
'jgnla': 'janganlah',
'jibake': 'celaka',
'jjur': 'jujur',
'job': 'kerja',
'jobscope': 'skop kerja',
'jogja': 'jogjakarta',
'jpam': 'jpam',
'jth': 'jatuh',
'jugak': 'juga',
'ka': 'ke',
'kalo': 'kalau',
'kalu': 'kalau',
'kang': 'nanti',
'kantoi': 'temberang',
'kasi': 'beri',
'kat': 'dekat',
'kbye': 'ok bye',
'kearah': 'ke arah',
'kecik': 'kecil',
'keja': 'kerja',
'keje': 'kerja',
'kejo': 'kerja',
'keksongan': 'kekosongan',
'kemana': 'ke mana',
'kene': 'kena',
'kenekan': 'kenakan',
'kesah': 'kisah',
'ketempat': 'ke tempat',
'kije': 'kerja',
'kijo': 'kerja',
'kiss': 'cium',
'kite': 'kita',
'kito': 'kita',
'kje': 'kerja',
'kjr': 'kerja',
'kk': 'okay',
'kmi': 'kami',
'kt': 'kat',
'tlg': 'tolong',
'kl': 'kuala lumpur',
'klai': 'kalau',
'klau': 'kalau',
'klia': 'klia',
'klo': 'kalau',
'klu': 'kalau',
'kn': 'kan',
'knapa': 'kenapa',
'kne': 'kena',
'ko': 'kau',
'kompom': 'sah',
'korang': 'kamu semua',
'korea': 'korea',
'korg': 'kamu semua',
'kot': 'mungkin',
'krja': 'kerja',
'ksalahan': 'kesalahan',
'kta': 'kita',
'kuar': 'keluar',
'kut': 'mungkin',
'la': 'lah',
'laa': 'lah',
'lahabau': 'celaka',
'lahanat': 'celaka',
'lainda': 'lain dah',
'lak': 'pula',
'last': 'akhir',
'le': 'lah',
'leader': 'ketua',
'leave': 'pergi',
'ler': 'lah',
'less': 'kurang',
'letter': 'surat',
'lg': 'lagi',
'lgi': 'lagi',
'lngsong': 'langsung',
'lol': 'hehe',
'lorr': 'lah',
'low': 'rendah',
'lps': 'lepas',
'luggage': 'bagasi',
'lumbe': 'lumba',
'lyak': 'layak',
'maap': 'maaf',
'maapkan': 'maafkan',
'mahai': 'mahal',
'mampos': 'mampus',
'mart': 'kedai',
'mau': 'mahu',
'mcm': 'macam',
'mcmtu': 'macam itu',
'memerlukn': 'memerlukan',
'mengembirakan': 'menggembirakan',
'mengmbilnyer': 'mengambilnya',
'mengtasi': 'mengatasi',
'mg': 'memang',
'mihak': 'memihak',
'min': 'admin',
'mingu': 'minggu',
'mintak': 'minta',
'mjtuhkn': 'menjatuhkan',
'mkyong': 'mak yong',
'mlibatkn': 'melibatkan',
'mmg': 'memang',
'mmnjang': 'memanjang',
'mmpos': 'mampus',
'mn': 'mana',
'mna': 'mana',
'mntak': 'minta',
'mntk': 'minta',
'mnyusun': 'menyusun',
'mood': 'suasana',
'most': 'paling',
'mr': 'tuan',
'msa': 'masa',
'msia': 'malaysia',
'mst': 'mesti',
'mu': 'awak',
'much': 'banyak',
'muko': 'muka',
'mum': 'emak',
'n': 'dan',
'nah': 'nah',
'nanny': 'nenek',
'napo': 'kenapa',
'nati': 'nanti',
'ngan': 'dengan',
'ngn': 'dengan',
'ni': 'ini',
'nie': 'ini',
'nii': 'ini',
'nk': 'nak',
'nmpk': 'nampak',
'nye': 'nya',
'ofis': 'pejabat',
'ohh': 'oh',
'oii': 'hoi',
'one': 'satu',
'online': 'dalam talian',
'or': 'atau',
'org': 'orang',
'orng': 'orang',
'otek': 'otak',
'p': 'pergi',
'paid': 'dah bayar',
'palabana': 'kepala otak',
'pasni': 'lepas ini',
'passengers': 'penumpang',
'passengger': 'penumpang',
'pastu': 'lepas itu',
'pd': 'pada',
'pegi': 'pergi',
'pekerje': 'pekerja',
'pekrja': 'pekerja',
'perabih': 'perabis',
'perkerja': 'pekerja',
'pg': 'pergi',
'phuii': 'puih',
'pikir': 'fikir',
'pilot': 'juruterbang',
'pk': 'fikir',
'pkerja': 'pekerja',
'pkerjaan': 'pekerjaan',
'pki': 'pakai',
'please': 'tolong',
'pls': 'tolong',
'pn': 'pun',
'pnh': 'pernah',
'pnt': 'penat',
'pnya': 'punya',
'pon': 'pun',
'priority': 'keutamaan',
'properties': 'harta benda',
'ptugas': 'petugas',
'pub': 'kelab malam',
'pulak': 'pula',
'puye': 'punya',
'pwrcuma': 'percuma',
'pyahnya': 'payahnya',
'quality': 'kualiti',
'quit': 'keluar',
'ramly': 'ramly',
'rege': 'harga',
'reger': 'harga',
'report': 'laporan',
'resigned': 'meletakkan jawatan',
'respect': 'hormat',
'rizal': 'rizal',
'rosak': 'rosak',
'rosok': 'rosak',
'rse': 'rasa',
'sacked': 'buang',
'sado': 'tegap',
'salute': 'sanjung',
'sam': 'sama',
'same': 'sama',
'samp': 'sampah',
'sbb': 'sebab',
'sbgai': 'sebagai',
'sblm': 'sebelum',
'sblum': 'sebelum',
'sbnarnya': 'sebenarnya',
'sbum': 'sebelum',
'sdg': 'sedang',
'sebb': 'sebab',
'sebijik': 'sebiji',
'see': 'lihat',
'seen': 'dilihat',
'selangor': 'selangor',
'selfie': 'swafoto',
'sempoi': 'cantik',
'senaraihitam': 'senarai hitam',
'seorg': 'seorang',
'service': 'perkhidmatan',
'sgt': 'sangat',
'shared': 'kongsi',
'shirt': 'kemeja',
'shut': 'tutup',
'sib': 'nasib',
'skali': 'sekali',
'sket': 'sikit',
'sma': 'sama',
'smoga': 'semoga',
'smpoi': 'cantik',
'sndiri': 'sendiri',
'sndr': 'sendiri',
'sndri': 'sendiri',
'sne': 'sana',
'so': 'jadi',
'sop': 'tatacara pengendalian piawai',
'sorang': 'seorang',
'spoting': 'pembintikan',
'sronok': 'seronok',
'ssh': 'susah',
'staff': 'staf',
'standing': 'berdiri',
'start': 'mula',
'steady': 'mantap',
'stiap': 'setiap',
'stress': 'stres',
'student': 'pelajar',
'study': 'belajar',
'studycase': 'kajian kes',
'sure': 'pasti',
'sykt': 'syarikat',
'tah': 'entah',
'taik': 'tahi',
'takan': 'tak akan',
'takat': 'setakat',
'takde': 'tak ada',
'takkan': 'tak akan',
'taknak': 'tak nak',
'tang': 'tentang',
'tanggungjawab': 'bertanggungjawab',
'taraa': 'sementara',
'tau': 'tahu',
'tbabit': 'terbabit',
'team': 'pasukan',
'terbaekk': 'terbaik',
'teruknye': 'teruknya',
'tgk': 'tengok',
'that': 'itu',
'thinking': 'fikir',
'those': 'itu',
'time': 'masa',
'tk': 'tak',
'tnggongjwb': 'tanggungjawab',
'tngok': 'tengok',
'tngu': 'tunggu',
'to': 'kepada',
'tosak': 'rosak',
'tp': 'tapi',
'tpi': 'tapi',
'tpon': 'telefon',
'transfer': 'pindah',
'trgelak': 'tergelak',
'ts': 'tan sri',
'tstony': 'tan sri tony',
'tu': 'itu',
'tuh': 'itu',
'tula': 'itulah',
'umeno': 'umno',
'unfortunately': 'malangnya',
'unhappy': 'tidak gembira',
'up': 'naik',
'upkan': 'naikkan',
'ur': 'awak',
'utk': 'untuk',
'very': 'sangat',
'viral': 'tular',
'vote': 'undi',
'warning': 'amaran',
'warranty': 'waranti',
'wassap': 'whatsapp',
'wat': 'apa',
'weii': 'wei',
'well': 'maklumlah',
'win': 'menang',
'with': 'dengan',
'wt': 'buat',
'x': 'tak',
'tw': 'tahu',
'ye': 'ya',
'yee': 'ya',
'yg': 'yang',
'yng': 'yang',
'you': 'awak',
'your': 'awak',
'sakai': 'selekeh',
'rmb': 'billion ringgit',
'rmj': 'juta ringgit',
'rmk': 'ribu ringgit',
'rm': 'ringgit',
}
# Known Malay affixes used by the naive stemmer: prefixes (permulaan)
# and suffixes (hujung).
permulaan = [
    'bel', 'se', 'ter', 'men', 'meng', 'mem', 'memper', 'di', 'pe', 'me',
    'ke', 'ber', 'pen', 'per',
]

hujung = ['kan', 'kah', 'lah', 'tah', 'nya', 'an', 'wan', 'wati', 'ita']


def naive_stemmer(word):
    """Crudely stem a Malay word.

    Strips the single longest matching suffix from `hujung`, then the
    single longest matching prefix from `permulaan`, and returns the
    remainder. Raises AssertionError if `word` is not a string.
    """
    assert isinstance(word, str), 'input must be a string'
    matched_suffixes = [s for s in hujung if word.endswith(s)]
    if matched_suffixes:
        word = word[: -len(max(matched_suffixes, key=len))]
    matched_prefixes = [p for p in permulaan if word.startswith(p)]
    if matched_prefixes:
        word = word[len(max(matched_prefixes, key=len)):]
    return word
def build_dataset(words, n_words):
    """Build an integer vocabulary over *words*, keeping the n_words most common.

    Returns:
        data: *words* mapped to vocabulary ids (out-of-vocab words map to UNK = 3).
        count: [token, frequency] pairs; the four special tokens come first and
            count[3][1] holds the number of out-of-vocabulary occurrences.
        dictionary: token -> id mapping.
        reversed_dictionary: id -> token mapping.
    """
    UNK = 3
    # Special tokens occupy ids 0..3 in insertion order.
    count = [['GO', 0], ['PAD', 1], ['EOS', 2], ['UNK', 3]]
    count.extend(collections.Counter(words).most_common(n_words))
    dictionary = dict()
    for word, _ in count:
        dictionary[word] = len(dictionary)
    data = list()
    unk_count = 0
    for word in words:
        index = dictionary.get(word, UNK)
        if index == UNK:
            # BUGFIX: the original compared against 0 (the 'GO' id), so the
            # UNK counter never incremented and the result was written into
            # the 'GO' slot instead of the 'UNK' slot.
            unk_count += 1
        data.append(index)
    count[UNK][1] = unk_count
    reversed_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
    return data, count, dictionary, reversed_dictionary
def classification_textcleaning(string):
    """Normalise a raw Malay/English string for sentiment classification.

    Steps: drop hashtag/mention tokens and URLs, transliterate to ASCII,
    keep letters only, lowercase, expand slang via ``rules_normalizer``,
    stem with ``naive_stemmer`` and drop 1-character tokens.
    """
    # Remove tokens containing '#' or '@', then strip URLs.
    # BUGFIX: use raw strings and escape the dot so 'www\.' matches a
    # literal dot rather than any character.
    string = re.sub(
        r'http\S+|www\.\S+',
        '',
        ' '.join(
            [i for i in string.split() if i.find('#') < 0 and i.find('@') < 0]
        ),
    )
    # Transliterate unicode to ASCII and isolate sentence punctuation.
    string = unidecode(string).replace('.', ' . ').replace(',', ' , ')
    # Keep alphabetic characters only, then collapse runs of spaces.
    string = re.sub(r'[^A-Za-z ]+', ' ', string)
    string = re.sub(r'[ ]+', ' ', string.lower()).strip()
    # Expand known slang/abbreviations, then stem each token.
    string = [rules_normalizer.get(w, w) for w in string.split()]
    string = [naive_stemmer(word) for word in string]
    return ' '.join([word for word in string if len(word) > 1])
def str_idx(corpus, dic, maxlen, UNK = 3):
    """Convert sentences into a right-aligned (len(corpus), maxlen) id matrix.

    Each sentence keeps at most ``maxlen`` leading tokens; ids are written
    from the right edge of the row so shorter sentences are left-padded
    with zeros. Tokens missing from ``dic`` map to ``UNK``.
    """
    X = np.zeros((len(corpus), maxlen))
    for row, sentence in enumerate(corpus):
        kept = sentence.split()[:maxlen]
        # Walk the kept tokens from last to first, filling the row from
        # its right edge inward.
        for offset, token in enumerate(reversed(kept)):
            X[row, -1 - offset] = dic.get(token, UNK)
    return X
# Sanity-check the cleaning pipeline on a sample sentence.
classification_textcleaning('kerajaan sebenarnya sangat bencikan rakyatnya, minyak naik dan segalanya')

# Labelled Malay sentiment data: a CSV plus translated polarity corpora
# (0 = negative, 1 = positive throughout).
df = pd.read_csv('sentiment-data-v2.csv')
Y = LabelEncoder().fit_transform(df.label)
with open('polarity-negative-translated.txt','r') as fopen:
    texts = fopen.read().split('\n')
labels = [0] * len(texts)
with open('polarity-positive-translated.txt','r') as fopen:
    positive_texts = fopen.read().split('\n')
labels += [1] * len(positive_texts)
texts += positive_texts
texts += df.iloc[:,1].tolist()
labels += Y.tolist()
assert len(labels) == len(texts)

# Translated Amazon / IMDB / Yelp review corpora, each keyed by polarity.
import json
with open('bm-amazon.json') as fopen:
    amazon = json.load(fopen)
with open('bm-imdb.json') as fopen:
    imdb = json.load(fopen)
with open('bm-yelp.json') as fopen:
    yelp = json.load(fopen)
texts += amazon['negative']
labels += [0] * len(amazon['negative'])
texts += amazon['positive']
labels += [1] * len(amazon['positive'])
texts += imdb['negative']
labels += [0] * len(imdb['negative'])
texts += imdb['positive']
labels += [1] * len(imdb['positive'])
texts += yelp['negative']
labels += [0] * len(yelp['negative'])
texts += yelp['positive']
labels += [1] * len(yelp['positive'])

# Extra JSON corpora stored one file per source; skip macOS '.DS_Store'.
import os
for i in [i for i in os.listdir('negative') if 'Store' not in i]:
    with open('negative/'+i) as fopen:
        a = json.load(fopen)
    texts += a
    labels += [0] * len(a)
import os
for i in [i for i in os.listdir('positive') if 'Store' not in i]:
    with open('positive/'+i) as fopen:
        a = json.load(fopen)
    texts += a
    labels += [1] * len(a)

# Clean every document, then build the vocabulary over the full corpus.
for i in range(len(texts)):
    texts[i] = classification_textcleaning(texts[i])
concat = ' '.join(texts).split()
vocabulary_size = len(list(set(concat)))
data, count, dictionary, rev_dictionary = build_dataset(concat, vocabulary_size)
print('vocab from size: %d'%(vocabulary_size))
print('Most common words', count[4:10])
print('Sample data', data[:10], [rev_dictionary[i] for i in data[:10]])

# Model hyperparameters.
size_layer = 256
num_layers = 2
embedded_size = 256
dimension_output = len(np.unique(labels))
learning_rate = 5e-4
maxlen = 80
batch_size = 32

# Fresh TF1 graph and session; `modeling` is the BERT reference implementation.
tf.reset_default_graph()
sess = tf.InteractiveSession()
bert_config = modeling.BertConfig(
    vocab_size = len(dictionary),
    hidden_size = size_layer,
    num_hidden_layers = num_layers,
    num_attention_heads = size_layer // 4,
    intermediate_size = size_layer * 2,
)
# Named placeholders so the frozen graph can locate them later.
input_ids = tf.placeholder(tf.int32, [None, maxlen], name = 'Placeholder_input_ids')
input_mask = tf.placeholder(tf.int32, [None, maxlen], name = 'Placeholder_input_mask')
segment_ids = tf.placeholder(tf.int32, [None, maxlen], name = 'Placeholder_segment_ids')
label_ids = tf.placeholder(tf.int32, [None], name = 'Placeholder_label_ids')
is_training = tf.placeholder(tf.bool, name = 'Placeholder_is_training')
def create_model(
    bert_config,
    is_training,
    input_ids,
    input_mask,
    segment_ids,
    labels,
    num_labels,
    use_one_hot_embeddings,
    reuse_flag = False,
):
    """Build a BERT encoder plus a linear classification head.

    Returns the (loss, logits, probabilities, model, accuracy) tensors.
    `is_training` is a bool placeholder so dropout can be toggled at run
    time through the feed_dict rather than by rebuilding the graph.
    """
    model = modeling.BertModel(
        config = bert_config,
        is_training = is_training,
        input_ids = input_ids,
        input_mask = input_mask,
        token_type_ids = segment_ids,
        use_one_hot_embeddings = use_one_hot_embeddings,
    )
    # Pooled sentence-level representation from the encoder.
    output_layer = model.get_pooled_output()
    hidden_size = output_layer.shape[-1].value
    with tf.variable_scope('weights', reuse = reuse_flag):
        output_weights = tf.get_variable(
            'output_weights',
            [num_labels, hidden_size],
            initializer = tf.truncated_normal_initializer(stddev = 0.02),
        )
        output_bias = tf.get_variable(
            'output_bias', [num_labels], initializer = tf.zeros_initializer()
        )
    with tf.variable_scope('loss'):

        def apply_dropout_last_layer(output_layer):
            # keep_prob 0.9 == 10% dropout on the pooled output.
            output_layer = tf.nn.dropout(output_layer, keep_prob = 0.9)
            return output_layer

        def not_apply_dropout(output_layer):
            return output_layer

        # Apply dropout only when the is_training placeholder is fed True.
        output_layer = tf.cond(
            is_training,
            lambda: apply_dropout_last_layer(output_layer),
            lambda: not_apply_dropout(output_layer),
        )
        logits = tf.matmul(output_layer, output_weights, transpose_b = True)
        print(
            'output_layer:',
            output_layer.shape,
            ', output_weights:',
            output_weights.shape,
            ', logits:',
            logits.shape,
        )
        # Named 'logits' so the node can be referenced after freezing.
        logits = tf.nn.bias_add(logits, output_bias, name = 'logits')
        probabilities = tf.nn.softmax(logits)
        loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels = labels, logits = logits
        )
        loss = tf.reduce_mean(loss)
        correct_pred = tf.equal(tf.argmax(logits, 1, output_type = tf.int32), labels)
        accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
    return loss, logits, probabilities, model, accuracy
# Instantiate the classifier graph and the Adam training op.
use_one_hot_embeddings = False
loss, logits, probabilities, model, accuracy = create_model(
    bert_config,
    is_training,
    input_ids,
    input_mask,
    segment_ids,
    label_ids,
    dimension_output,
    use_one_hot_embeddings,
)
global_step = tf.Variable(0, trainable = False, name = 'Global_Step')
optimizer = tf.contrib.layers.optimize_loss(
    loss,
    global_step = global_step,
    learning_rate = learning_rate,
    optimizer = 'Adam'
)
sess.run(tf.global_variables_initializer())

# Collect node names needed at inference time (variables, placeholders,
# logits), excluding optimizer bookkeeping; this comma-separated list is
# later passed to freeze_graph() as the output node set.
strings = ','.join(
    [
        n.name
        for n in tf.get_default_graph().as_graph_def().node
        if ('Variable' in n.op
        or 'Placeholder' in n.name
        or 'logits' in n.name
        or 'alphas' in n.name)
        and 'Adam' not in n.name
        and 'beta' not in n.name
        and 'OptimizeLoss' not in n.name
        and 'Global_Step' not in n.name
    ]
)
strings.split(',')
saver = tf.train.Saver(tf.trainable_variables())
saver.save(sess, 'bert/model.ckpt')

# NOTE(review): `vectors` appears unused below — batches are re-encoded with
# str_idx on the fly; confirm before removing.
vectors = str_idx(texts, dictionary, maxlen)
train_X, test_X, train_Y, test_Y = train_test_split(
    texts, labels, test_size = 0.2
)

from tqdm import tqdm
import time

# Early stopping: stop after 3 consecutive epochs without a validation
# accuracy improvement.
EARLY_STOPPING, CURRENT_CHECKPOINT, CURRENT_ACC, EPOCH = 3, 0, 0, 0
# Train until validation accuracy stops improving for EARLY_STOPPING epochs.
while True:
    lasttime = time.time()
    if CURRENT_CHECKPOINT == EARLY_STOPPING:
        print('break epoch:%d\n' % (EPOCH))
        break
    train_acc, train_loss, test_acc, test_loss = 0, 0, 0, 0
    pbar = tqdm(
        range(0, len(train_X), batch_size), desc = 'train minibatch loop'
    )
    for i in pbar:
        # Encode each minibatch lazily; the final batch may be smaller.
        batch_x = str_idx(train_X[i : min(i + batch_size, len(train_X))], dictionary, maxlen)
        batch_y = train_Y[i : min(i + batch_size, len(train_X))]
        # All-ones mask/segment ids: attend to every position, one segment.
        np_mask = np.ones((len(batch_x), maxlen), dtype = np.int32)
        np_segment = np.ones((len(batch_x), maxlen), dtype = np.int32)
        acc, cost, _ = sess.run(
            [accuracy, loss, optimizer],
            feed_dict = {
                input_ids: batch_x,
                label_ids: batch_y,
                input_mask: np_mask,
                segment_ids: np_segment,
                is_training: True
            },
        )
        # Abort immediately on a diverged (NaN) loss.
        assert not np.isnan(cost)
        train_loss += cost
        train_acc += acc
        pbar.set_postfix(cost = cost, accuracy = acc)
    pbar = tqdm(range(0, len(test_X), batch_size), desc = 'test minibatch loop')
    for i in pbar:
        batch_x = str_idx(test_X[i : min(i + batch_size, len(test_X))], dictionary, maxlen)
        batch_y = test_Y[i : min(i + batch_size, len(test_X))]
        np_mask = np.ones((len(batch_x), maxlen), dtype = np.int32)
        np_segment = np.ones((len(batch_x), maxlen), dtype = np.int32)
        # Evaluation only: no optimizer op, dropout disabled.
        acc, cost = sess.run(
            [accuracy, loss],
            feed_dict = {
                input_ids: batch_x,
                label_ids: batch_y,
                input_mask: np_mask,
                segment_ids: np_segment,
                is_training: False
            },
        )
        test_loss += cost
        test_acc += acc
        pbar.set_postfix(cost = cost, accuracy = acc)
    # Convert running sums into per-batch averages.
    train_loss /= len(train_X) / batch_size
    train_acc /= len(train_X) / batch_size
    test_loss /= len(test_X) / batch_size
    test_acc /= len(test_X) / batch_size
    if test_acc > CURRENT_ACC:
        print(
            'epoch: %d, pass acc: %f, current acc: %f'
            % (EPOCH, CURRENT_ACC, test_acc)
        )
        CURRENT_ACC = test_acc
        CURRENT_CHECKPOINT = 0
    else:
        CURRENT_CHECKPOINT += 1
    print('time taken:', time.time() - lasttime)
    print(
        'epoch: %d, training loss: %f, training acc: %f, valid loss: %f, valid acc: %f\n'
        % (EPOCH, train_loss, train_acc, test_loss, test_acc)
    )
    EPOCH += 1
# Final evaluation: collect hard class predictions over the held-out split.
real_Y, predict_Y = [], []
pbar = tqdm(
    range(0, len(test_X), batch_size), desc = 'validation minibatch loop'
)
for i in pbar:
    batch_x = str_idx(test_X[i : min(i + batch_size, len(test_X))], dictionary, maxlen)
    batch_y = test_Y[i : min(i + batch_size, len(test_X))]
    np_mask = np.ones((len(batch_x), maxlen), dtype = np.int32)
    np_segment = np.ones((len(batch_x), maxlen), dtype = np.int32)
    # argmax over logits gives the predicted class id per example.
    predict_Y += np.argmax(
        sess.run(
            logits,
            feed_dict = {
                input_ids: batch_x,
                label_ids: batch_y,
                input_mask: np_mask,
                segment_ids: np_segment,
                is_training: False,
            },
        ),
        1,
    ).tolist()
    real_Y += batch_y

from sklearn import metrics

print(metrics.classification_report(real_Y, predict_Y, target_names = ['negative','positive']))
# Two single-sentence inference demos (batch of one).
np_mask = np.ones((1, maxlen), dtype = np.int32)
np_segment = np.ones((1, maxlen), dtype = np.int32)
text = classification_textcleaning(
    'kerajaan sebenarnya sangat bencikan rakyatnya, minyak naik dan segalanya'
)
# BUGFIX: classification_textcleaning returns a plain string, so the
# original str_idx([text[0]], ...) encoded only the first character of the
# cleaned sentence; feed the whole sentence instead.
new_vector = str_idx([text], dictionary, maxlen)
sess.run(
    tf.nn.softmax(logits),
    feed_dict = {
        input_ids: new_vector,
        input_mask: np_mask,
        segment_ids: np_segment,
        is_training: False,
    },
)

np_mask = np.ones((1, maxlen), dtype = np.int32)
np_segment = np.ones((1, maxlen), dtype = np.int32)
text = classification_textcleaning(
    'kerajaan sebenarnya sangat sayangkan rakyatnya, tetapi sebenarnya benci'
)
# Same fix as above: encode the full cleaned string, not its first char.
new_vector = str_idx([text], dictionary, maxlen)
sess.run(
    tf.nn.softmax(logits),
    feed_dict = {
        input_ids: new_vector,
        input_mask: np_mask,
        segment_ids: np_segment,
        is_training: False,
    },
)
# Persist the trained weights and the vocabulary needed to reload them.
saver.save(sess, 'bert/model.ckpt')

import json
with open('bert-sentiment.json','w') as fopen:
    fopen.write(json.dumps({'dictionary':dictionary,'reverse_dictionary':rev_dictionary}))
def freeze_graph(model_dir, output_node_names):
    """Freeze the latest checkpoint in *model_dir* into frozen_model.pb.

    Variables are folded into constants so the graph can be served without
    checkpoint files. *output_node_names* is a comma-separated node list.
    """
    if not tf.gfile.Exists(model_dir):
        raise AssertionError(
            "Export directory doesn't exists. Please specify an export "
            'directory: %s' % model_dir
        )
    ckpt_state = tf.train.get_checkpoint_state(model_dir)
    ckpt_path = ckpt_state.model_checkpoint_path
    frozen_path = '/'.join(ckpt_path.split('/')[:-1]) + '/frozen_model.pb'
    with tf.Session(graph = tf.Graph()) as sess:
        # clear_devices drops device pins recorded in the meta graph.
        loader = tf.train.import_meta_graph(
            ckpt_path + '.meta', clear_devices = True
        )
        loader.restore(sess, ckpt_path)
        frozen_def = tf.graph_util.convert_variables_to_constants(
            sess,
            tf.get_default_graph().as_graph_def(),
            output_node_names.split(','),
        )
        with tf.gfile.GFile(frozen_path, 'wb') as f:
            f.write(frozen_def.SerializeToString())
        print('%d ops in the final graph.' % len(frozen_def.node))

# Freeze the graph we just trained, keeping only the inference nodes.
freeze_graph('bert', strings)
def load_graph(frozen_graph_filename):
    """Deserialize a frozen GraphDef file into a new, standalone tf.Graph.

    Imported node names carry the default 'import/' prefix.
    """
    proto = tf.GraphDef()
    with tf.gfile.GFile(frozen_graph_filename, 'rb') as f:
        proto.ParseFromString(f.read())
    with tf.Graph().as_default() as imported_graph:
        tf.import_graph_def(proto)
    return imported_graph
# Reload the frozen graph and re-run the last prediction through it to
# verify the freeze kept the inference path intact.
g = load_graph('bert/frozen_model.pb')
[n.name for n in g.as_graph_def().node]
placeholder_input_ids = g.get_tensor_by_name('import/Placeholder_input_ids:0')
placeholder_input_mask = g.get_tensor_by_name('import/Placeholder_input_mask:0')
placeholder_segment_ids = g.get_tensor_by_name('import/Placeholder_segment_ids:0')
placeholder_is_training = g.get_tensor_by_name('import/Placeholder_is_training:0')
loss_logits = g.get_tensor_by_name('import/loss/logits:0')
test_sess = tf.InteractiveSession(graph = g)
test_sess.run(
    tf.nn.softmax(loss_logits),
    feed_dict = {
        placeholder_input_ids: new_vector,
        placeholder_input_mask: np_mask,
        placeholder_segment_ids: np_segment,
        placeholder_is_training: False,
    },
)
```
| github_jupyter |
## Training the High Level Feature classifier with TensorFlow/Keras using data in TFRecord format
**4.0 Tensorflow/Keras and Petastorm, HLF classifier** This notebook trains a dense neural network for the particle classifier using High Level Features. It uses TensorFlow/Keras on a single node. Spark is not used in this case; data is read from TFRecord format.
To run this notebook we used the following configuration:
* *Software stack*: TensorFlow 1.14.0 or 2.0.0_rc0
* *Platform*: CentOS 7, Python 3.6
## Create the Keras model
```
import tensorflow as tf
import numpy as np
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation

# Display the TensorFlow version in use (1.14.0 or 2.0.0rc0 expected).
tf.version.VERSION
# only needed for TensorFlow 1.x
# tf.enable_eager_execution()
```
### Create model
```
def create_model(nh_1, nh_2, nh_3):
    """Build and compile a three-hidden-layer dense classifier.

    Input: 14 high-level features. Output: softmax over 3 classes.
    nh_1, nh_2, nh_3 are the widths of the hidden layers.
    """
    model = Sequential([
        Dense(nh_1, input_shape=(14,), activation='relu'),
        Dense(nh_2, activation='relu'),
        Dense(nh_3, activation='relu'),
        Dense(3, activation='softmax'),
    ])
    model.compile(
        loss='categorical_crossentropy',
        optimizer='Adam',
        metrics=["accuracy"],
    )
    return model

keras_model = create_model(50,20,10)
keras_model.summary()
```
## Load data and train the Keras model
```
# test dataset
PATH = "<..PATH..>/testUndersampled_HLF_features.tfrecord"
files_test_dataset = tf.data.Dataset.list_files(PATH+"/part-r*", shuffle=False)
# training dataset
PATH = "<..PATH..>/trainUndersampled_HLF_features.tfrecord"
files_train_dataset = tf.data.Dataset.list_files(PATH+"/part-r*", seed=4242)
# NOTE(review): the four assignments above are immediately overwritten by the
# "<..EDIT_PATH..>" versions below; only the second set takes effect.
# test dataset
PATH = "<..EDIT_PATH..>/testUndersampled_HLF_features.tfrecord"
files_test_dataset = tf.data.Dataset.list_files(PATH+"/part-r*", shuffle=False)
# training dataset
PATH = "<..EDIT_PATH..>/trainUndersampled_HLF_features.tfrecord"
files_train_dataset = tf.data.Dataset.list_files(PATH+"/part-r*", seed=4242)
# Raw serialized tf.Example records for the test and training splits.
test_dataset=tf.data.TFRecordDataset(files_test_dataset)
train_dataset=tf.data.TFRecordDataset(files_train_dataset)
# use for debug
# for record in test_dataset.take(1):
#     print(record)
# Function to decode TF records into the required features and labels
def decode(serialized_example):
    """Parse one serialized tf.Example into (HLF_input, encoded_label).

    Both keys are required, so no default values are specified.
    """
    feature_spec = {
        'encoded_label': tf.io.FixedLenFeature((3), tf.float32),
        'HLF_input': tf.io.FixedLenFeature((14), tf.float32),
    }
    parsed = tf.io.parse_single_example(serialized_example, features=feature_spec)
    return (parsed['HLF_input'], parsed['encoded_label'])
# Decode records in parallel into (features, label) pairs.
parsed_test_dataset=test_dataset.map(decode, num_parallel_calls=tf.data.experimental.AUTOTUNE)
parsed_train_dataset=train_dataset.map(decode, num_parallel_calls=tf.data.experimental.AUTOTUNE)

# Show an example of the parsed data
for record in parsed_test_dataset.take(1):
    print(record)

#tunables
shuffle_size = 100000
batch_size = 128

# Training input pipeline: shuffle, cache in memory, prefetch, repeat
# across epochs, then batch.
train=parsed_train_dataset.shuffle(shuffle_size)
train=train.cache()
train=train.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
train=train.repeat()
train=train.batch(batch_size)
train
steps_per_epoch=3426083//batch_size # there are 3426083 samples in the training dataset
steps_per_epoch
# Test input pipeline: repeat so validation can run every epoch, then batch.
# BUGFIX: the original rebuilt `test` from parsed_test_dataset on the second
# line, silently discarding the .repeat() applied on the first.
test=parsed_test_dataset.repeat()
test=test.batch(batch_size)
validation_steps=856090//batch_size # there are 856090 samples in the test dataset
validation_steps
# train the Keras model (%time is an IPython magic timing the fit call)
%time history = keras_model.fit(train, steps_per_epoch=steps_per_epoch, \
                    validation_data=test, validation_steps=validation_steps, \
                    epochs=5, verbose=1)
```
## Performance metrics
```
%matplotlib notebook
import matplotlib.pyplot as plt
plt.style.use('seaborn-darkgrid')

# Graph with loss vs. epoch
plt.figure()
plt.plot(history.history['loss'], label='train')
plt.plot(history.history['val_loss'], label='validation')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(loc='upper right')
plt.title("HLF classifier loss")
plt.show()

# Graph with accuracy vs. epoch
# NOTE(review): the 'accuracy'/'val_accuracy' keys match TF 2.x naming;
# older Keras used 'acc'/'val_acc' — confirm against the TF version in use.
%matplotlib notebook
plt.figure()
plt.plot(history.history['accuracy'], label='train')
plt.plot(history.history['val_accuracy'], label='validation')
plt.ylabel('Accuracy')
plt.xlabel('epoch')
plt.legend(loc='lower right')
plt.title("HLF classifier accuracy")
plt.show()
```
## Confusion Matrix
```
# Predict on the test set and recover the matching ground-truth labels.
y_pred = history.model.predict(test, steps=validation_steps)
num_entries=y_pred[:,0].size
# Materialize the one-hot labels from the (unshuffled) parsed test dataset.
y_test_all = [labels.numpy() for features,labels in parsed_test_dataset.__iter__()]
y_true = np.stack(y_test_all[:num_entries])

from sklearn.metrics import accuracy_score

print('Accuracy of the HLF classifier: {:.4f}'.format(
    accuracy_score(np.argmax(y_true, axis=1),np.argmax(y_pred, axis=1))))
import seaborn as sns
from sklearn.metrics import confusion_matrix

labels_name = ['qcd', 'tt', 'wjets']
labels = [0,1,2]
# sklearn convention: rows = true class, columns = predicted class.
cm = confusion_matrix(np.argmax(y_true, axis=1), np.argmax(y_pred, axis=1), labels=labels)
## Normalize CM so each row (true class) sums to 1.
# BUGFIX: the original divided by sum(axis=1) without keepdims, which
# broadcasts the row sums across columns and mis-normalizes every
# off-diagonal entry; np.float is also deprecated for the builtin float.
cm = cm / cm.astype(float).sum(axis=1, keepdims=True)
fig, ax = plt.subplots()
ax = sns.heatmap(cm, annot=True, fmt='g')
ax.xaxis.set_ticklabels(labels_name)
ax.yaxis.set_ticklabels(labels_name)
# BUGFIX: with the orientation above, the heatmap's y-axis holds the true
# labels and the x-axis the predicted ones; the original captions were swapped.
plt.xlabel('Predicted labels')
plt.ylabel('True labels')
plt.show()
```
## ROC and AUC
```
from sklearn.metrics import roc_curve, auc

# One-vs-rest ROC curve and AUC for each of the three classes.
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(3):
    fpr[i], tpr[i], _ = roc_curve(y_true[:, i], y_pred[:, i])
    roc_auc[i] = auc(fpr[i], tpr[i])

# Dictionary containing ROC-AUC for the three classes
roc_auc

%matplotlib notebook
# Plot roc curve
import matplotlib.pyplot as plt
plt.style.use('seaborn-darkgrid')

# NOTE(review): this plots class index 0 ('qcd') while the title says
# '$tt$ selector' (class 1) — confirm the intended index.
plt.figure()
plt.plot(fpr[0], tpr[0], lw=2,
         label='HLF classifier (AUC) = %0.4f' % roc_auc[0])
plt.plot([0, 1], [0, 1], linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('Background Contamination (FPR)')
plt.ylabel('Signal Efficiency (TPR)')
plt.title('$tt$ selector')
plt.legend(loc="lower right")
plt.show()
```
| github_jupyter |
# Ted talks keyword labeling with pre-trained word embeddings
In this notebook, we'll use pre-trained [GloVe word embeddings](http://nlp.stanford.edu/projects/glove/) for keyword labeling using Keras (version $\ge$ 2 is required). This notebook is largely based on the blog post [Using pre-trained word embeddings in a Keras model](https://blog.keras.io/using-pre-trained-word-embeddings-in-a-keras-model.html) by François Chollet.
**Note that using a GPU with this notebook is highly recommended.**
First, the needed imports. Keras tells us which backend (Theano, Tensorflow, CNTK) it will be using.
```
%matplotlib inline

from keras.preprocessing import sequence, text
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.layers import Embedding
from keras.layers import Conv1D, MaxPooling1D, GlobalMaxPooling1D
from keras.layers import LSTM
from keras.utils import to_categorical

from distutils.version import LooseVersion as LV
from keras import __version__
from keras import backend as K

from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot

import xml.etree.ElementTree as ET
import os
import sys

import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns

# Report the Keras backend in use and require Keras >= 2.
print('Using Keras version:', __version__, 'backend:', K.backend())
assert(LV(__version__) >= LV("2.0.0"))
```
## GloVe word embeddings
Let's begin by loading a datafile containing pre-trained word embeddings. The datafile contains 100-dimensional embeddings for 400,000 English words.
```
# Download the GloVe embeddings if not already present (-nc = no clobber).
!wget --content-disposition -nc https://kannu.csc.fi/s/rrCNCRdJf9LZSCE/download
GLOVE_DIR = "/home/jovyan/machine-learning-scripts/courses/sr2018"

print('Indexing word vectors.')

# Map each vocabulary word to its 100-dimensional embedding vector.
embeddings_index = {}
with open(os.path.join(GLOVE_DIR, 'glove.6B.100d.txt')) as f:
    for line in f:
        # Each line is: word followed by its embedding components.
        values = line.split()
        word = values[0]
        coefs = np.asarray(values[1:], dtype='float32')
        embeddings_index[word] = coefs

print('Found %s word vectors.' % len(embeddings_index))

print('Examples of embeddings:')
for w in ['some', 'random', 'words']:
    print(w, embeddings_index[w])
```
## Ted talks data set
Next we'll load the Ted talks data set.
The dataset contains transcripts and metadata of 2085 Ted talks. Each talk is annotated with a set of keywords. In this notebook, we'll use the 10 most common keywords.
```
!wget --content-disposition -nc https://kannu.csc.fi/s/zPtriL3qqnycDFK/download
TEXT_DATA_DIR = "/home/jovyan/machine-learning-scripts/courses/sr2018"

# The 10 most common Ted-talk keywords, mapped to label indices.
keywords = {"technology": 0, "culture": 1, "science": 2, "global issues": 3, "design": 4,
            "business": 5, "entertainment": 6, "arts": 7, "education": 8, "politics": 9}

print('Processing xml')

tree = ET.parse(TEXT_DATA_DIR+"/ted_en-20160408.xml")
root = tree.getroot()

texts = []  # list of text samples
labels = []  # list of label ids

# Each talk gets a 10-dim multi-hot label vector over the keyword set.
for i in root:
    l = np.zeros(10)
    for j in i.findall("./head/keywords"):
        kws = j.text.split(",")
        kws = [x.strip() for x in kws]
        for k in kws:
            if k in keywords:
                l[keywords[k]] = 1.
    labels.append(l)
    for c in i.findall("./content"):
        texts.append(c.text)

print('Found %s texts, %s labels.' % (len(texts), len(labels)))
```
First talk and its labels:
```
# Inspect the first transcript and its multi-hot keyword vector.
print(texts[0])
print('labels:', labels[0])
```
Vectorize the text samples into a 2D integer tensor:
```
MAX_NUM_WORDS = 10000  # vocabulary size kept by the tokenizer
MAX_SEQUENCE_LENGTH = 1000  # talks are padded/truncated to this many tokens

# Fit the tokenizer on all talks and turn each talk into a token-id sequence.
tokenizer = text.Tokenizer(num_words=MAX_NUM_WORDS)
tokenizer.fit_on_texts(texts)
sequences = tokenizer.texts_to_sequences(texts)

word_index = tokenizer.word_index
print('Found %s unique tokens.' % len(word_index))

# Left-pad/truncate to a fixed length for the embedding layer.
data = sequence.pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)

labels = np.asarray(labels)
print('Shape of data tensor:', data.shape)
print('Shape of labels tensor:', labels.shape)
```
Split the data into a training set and a validation set:
```
VALIDATION_SPLIT = 0.2

# Shuffle before splitting.
# NOTE(review): no RNG seed is fixed, so the split differs between runs.
indices = np.arange(data.shape[0])
np.random.shuffle(indices)
data = data[indices]
labels = labels[indices]
num_validation_samples = int(VALIDATION_SPLIT * data.shape[0])

x_train = data[:-num_validation_samples]
y_train = labels[:-num_validation_samples]
x_val = data[-num_validation_samples:]
y_val = labels[-num_validation_samples:]

print('Shape of training data tensor:', x_train.shape)
print('Shape of training label tensor:', y_train.shape)
print('Shape of validation data tensor:', x_val.shape)
print('Shape of validation label tensor:', y_val.shape)
```
Prepare the pre-trained embedding matrix:
```
print('Preparing embedding matrix.')

num_words = min(MAX_NUM_WORDS, len(word_index) + 1)
embedding_dim = 100

# Row i holds the GloVe vector for tokenizer index i (row 0 stays zero for
# the padding index).
embedding_matrix = np.zeros((num_words, embedding_dim))
for word, i in word_index.items():
    if i >= MAX_NUM_WORDS:
        continue
    embedding_vector = embeddings_index.get(word)
    if embedding_vector is not None:
        # words not found in embedding index will be all-zeros.
        embedding_matrix[i] = embedding_vector

print('Shape of embedding matrix:', embedding_matrix.shape)
```
## 1-D CNN
### Initialization
```
print('Build model...')

# Frozen GloVe embedding -> three Conv1D/pooling stages -> dense head.
# Sigmoid outputs + binary_crossentropy because keyword labeling is
# multi-label (a talk can carry several keywords at once).
model = Sequential()
model.add(Embedding(num_words,
                    embedding_dim,
                    weights=[embedding_matrix],
                    input_length=MAX_SEQUENCE_LENGTH,
                    trainable=False))
#model.add(Dropout(0.2))

model.add(Conv1D(128, 5, activation='relu'))
model.add(MaxPooling1D(5))
model.add(Conv1D(128, 5, activation='relu'))
model.add(MaxPooling1D(5))
model.add(Conv1D(128, 5, activation='relu'))
model.add(GlobalMaxPooling1D())

model.add(Dense(128, activation='relu'))
model.add(Dense(10, activation='sigmoid'))

model.compile(loss='binary_crossentropy',
              optimizer='rmsprop')

print(model.summary())

SVG(model_to_dot(model, show_shapes=True).create(prog='dot', format='svg'))
```
### Learning
```
%%time
epochs = 20

history = model.fit(x_train, y_train, batch_size=16,
                    epochs=epochs,
                    validation_data=(x_val, y_val))

# Training vs. validation loss per epoch.
plt.figure(figsize=(5,3))
plt.plot(history.epoch,history.history['loss'], label='training')
plt.plot(history.epoch,history.history['val_loss'], label='validation')
plt.title('loss')
plt.legend(loc='best');
```
To further analyze the results, we can produce the actual predictions for the validation data.
```
# Per-class probabilities for every validation talk.
predictions = model.predict(x_val)
```
Let's look at the correct and predicted labels for some talks in the validation set.
```
threshold = 0.5  # probability cut-off for predicting a keyword
nb_talks = 10

# Compare correct vs. predicted keyword sets for the first few talks.
inv_keywords = {v: k for k, v in keywords.items()}
for t in range(nb_talks):
    print(t,':')
    print(' correct: ', end='')
    for idx in np.where(y_val[t]>0.5)[0].tolist():
        sys.stdout.write('['+inv_keywords[idx]+'] ')
    print()
    print(' predicted: ', end='')
    for idx in np.where(predictions[t]>threshold)[0].tolist():
        sys.stdout.write('['+inv_keywords[idx]+'] ')
    print()
```
Scikit-learn has some applicable [multilabel ranking metrics](http://scikit-learn.org/stable/modules/model_evaluation.html#multilabel-ranking-metrics) we can try:
```
from sklearn.metrics import coverage_error, label_ranking_average_precision_score

# Multilabel ranking metrics: lower coverage and higher LRAP are better.
print('Coverage:', coverage_error(y_val, predictions))
print('LRAP:', label_ranking_average_precision_score(y_val, predictions))
```
## LSTM
### Initialization
```
print('Build model...')

# Same frozen GloVe embedding, but an LSTM encoder instead of the CNN stack.
model = Sequential()
model.add(Embedding(num_words,
                    embedding_dim,
                    weights=[embedding_matrix],
                    input_length=MAX_SEQUENCE_LENGTH,
                    trainable=False))
#model.add(Dropout(0.2))

model.add(LSTM(128))

model.add(Dense(128, activation='relu'))
model.add(Dense(10, activation='sigmoid'))

model.compile(loss='binary_crossentropy',
              optimizer='rmsprop')

print(model.summary())

SVG(model_to_dot(model, show_shapes=True).create(prog='dot', format='svg'))
```
### Learning
```
%%time
epochs = 3

history = model.fit(x_train, y_train, batch_size=16,
                    epochs=epochs,
                    validation_data=(x_val, y_val))

# Training vs. validation loss per epoch.
plt.figure(figsize=(5,3))
plt.plot(history.epoch,history.history['loss'], label='training')
plt.plot(history.epoch,history.history['val_loss'], label='validation')
plt.title('loss')
plt.legend(loc='best');

# Per-class probabilities for the validation talks.
predictions = model.predict(x_val)

threshold = 0.5  # probability cut-off for predicting a keyword
nb_talks = 10

# Compare correct vs. predicted keyword sets for the first few talks.
inv_keywords = {v: k for k, v in keywords.items()}
for t in range(nb_talks):
    print(t,':')
    print(' correct: ', end='')
    for idx in np.where(y_val[t]>0.5)[0].tolist():
        sys.stdout.write('['+inv_keywords[idx]+'] ')
    print()
    print(' predicted: ', end='')
    for idx in np.where(predictions[t]>threshold)[0].tolist():
        sys.stdout.write('['+inv_keywords[idx]+'] ')
    print()

from sklearn.metrics import coverage_error, label_ranking_average_precision_score

# Multilabel ranking metrics: lower coverage and higher LRAP are better.
print('Coverage:', coverage_error(y_val, predictions))
print('LRAP:', label_ranking_average_precision_score(y_val, predictions))
```
| github_jupyter |
# Masses of compact remnant from CO core masses
author: [M. Renzo](mrenzo@flatironinstitute.org)
```
import numpy as np
import sys
import scipy
from scipy.optimize import curve_fit

# optional for prettier plots
sys.path.append('/mnt/home/mrenzo/codes/python_stuff/plotFunc/')
from plotDefaults import set_plot_defaults_from_matplotlibrc
# Apply the author's shared matplotlib style defaults.
set_plot_defaults_from_matplotlibrc()
```
# Introduction
We want to develop a new mapping between star (and core) mass and compact object remnant for rapid population synthesis calculations.
Our aim is to have one way to calculate this across the entire mass range (from neutron stars to above the pair-instability black hole mass gap).
Moreover, we want the mapping to be continuous. This is not because it is a priori unphysical to have discontinuities, but because we don't want to artificially introduce features.
The idea is to calculate the mass of the compact object remnant as total mass minus varius mass loss terms:
$$ M_\mathrm{remnant} = M_\mathrm{tot} - \left( \Delta M_\mathrm{PPI} + \Delta M_\mathrm{NLW} + \Delta M_\mathrm{SN} + \Delta M_{\nu, \mathrm{core}} + \Delta M_\mathrm{lGRB} + \cdots \right) $$
In this way, pre-explosion binary interactions reduce $M_\mathrm{tot}$ already (and possibly modify the core masses), and then each mass loss process at core-collapse can be added separately.
This can also be extended to add, say, long gamma-ray burst mass loss (as a function of core-spin), etc.
Note that "building" the compact object mass from the bottom up (e.g., the [Fryer et al. 2012](https://ui.adsabs.harvard.edu/abs/2012ApJ...749...91F/abstract) approach of starting with a proto neutron star mass and accreting the fallback onto it) makes it very difficult to use observationally informed values for some of the terms in parentheses. Conversely, in our approach of "building" the compact object by removing the ejecta from the total mass, we can easily use observationally informed quantities for each term.
If one (or more) of these terms have a stochastic component, this can naturally produce the scatter in compact object masses expected because of the stochasticity in supernova explosions
(e.g., [Mandel & Mueller 2020](https://ui.adsabs.harvard.edu/abs/2020MNRAS.499.3214M/abstract)).
In the following, we explain and calculate each mass loss term separately.
## Pulsational-pair instability mass loss $\Delta M_\mathrm{PPI}\equiv M_\mathrm{PPI}(M_\mathrm{CO})$
This term represents the amount of mass lost in pulsational pair-instability SNe. Although the delay times between pulses (and core-collapse) can be very long (especially at the highest mass end),
this is treated as instantaneous mass loss at the time of core-collapse in rapid population synthesis calculations. We do not improve on this here.
Many codes use the fit from [Farmer et al. 2019](https://ui.adsabs.harvard.edu/abs/2019ApJ...887...53F/abstract) which however is
discontinuous with [Fryer et al. 2012](https://ui.adsabs.harvard.edu/abs/2012ApJ...749...91F/abstract) typically used for core-collapse SNe.
However, this is not a fit to the amount of mass *lost*, which is what we need here. One is provided in [Renzo et al. 2020](https://ui.adsabs.harvard.edu/abs/2020A%26A...640A..56R/abstract),
but it does not contain the metallicity dependence, which is desirable.
Thus, we re-fit the Z-dependent data from [Farmer et al. 2019](https://ui.adsabs.harvard.edu/abs/2019ApJ...887...53F/abstract).
Below, `datafile1.txt` is a cleaned up version of `datafile1.txt` available on [zenodo](https://zenodo.org/record/3346593).
We note that [Farmer et al. 2019](https://ui.adsabs.harvard.edu/abs/2019ApJ...887...53F/abstract) simulated only He cores,
and [Renzo et al. 2020](https://ui.adsabs.harvard.edu/abs/2020A%26A...640A..56R/abstract) showed that the H-rich envelope,
if present, is likely to fly away during the first pulse.
Therefore to the amount of mass loss $\Delta M_\mathrm{PPI}$ we fit here one should *add any residual H-rich envelope present in the star at the time of pulsations*.
```
# Cleaned Farmer et al. 2019 table (datafile1.txt from their Zenodo record).
datafile = "datafile1.txt"
src = np.genfromtxt(datafile, skip_header=1)
# Read the column names from the header line only.
with open(datafile, 'r') as f:
    for i, line in enumerate(f):
        if i==0:
            col = line.split()
            print(col)
            break
def linear(x, a, b):
    """Straight line a*x + b."""
    return a * x + b

def fitting_func_Z(data, a, b, c, d):
    """Shifted cubic plus quadratic in (M_CO - c).

    The cubic coefficient varies linearly with log10(Z):
        (a*log10(Z) + b) * (M_CO - c)**3 + d * (M_CO - c)**2
    ``data`` packs [M_CO, Z] as expected by scipy.optimize.curve_fit.
    """
    mco, Z = data[0], data[1]
    shifted = mco - c
    cubic_coeff = linear(np.log10(Z), a, b)
    return cubic_coeff * shifted ** 3 + d * shifted ** 2
# Figure: one panel per metallicity, stacked vertically with no spacing.
fig=plt.figure(figsize=(12,20))
gs = gridspec.GridSpec(7, 1)
gs.update(wspace=0.00, hspace=0.00)
ax1 = fig.add_subplot(gs[0])
ax2 = fig.add_subplot(gs[1])
ax3 = fig.add_subplot(gs[2])
ax4 = fig.add_subplot(gs[3])
ax5 = fig.add_subplot(gs[4])
ax6 = fig.add_subplot(gs[5])
ax7 = fig.add_subplot(gs[6])
axes = [ax1,ax2,ax3,ax4,ax5,ax6,ax7]
rainbow = plt.cm.rainbow(np.linspace(0,1,8))
# --------------------------------------------------------------------------------------
# fit happens here!
# reload data: column indices are looked up by name in the header list `col`
Mco = src[:, col.index("Mco")]
Z = src[:, col.index('Z')]
Mhe = src[:, col.index('Mhe')]
dMpulse = src[:, col.index('dMpulse')]
# fit only in the PPISN range -- neglect the Z dependence of this range
ind_for_fit = (Mco>=38) & (Mco<=60)
popt, pcov = curve_fit(fitting_func_Z, [Mco[ind_for_fit], Z[ind_for_fit]], dMpulse[ind_for_fit])
print(popt)
# Human-readable LaTeX rendering of the fitted formula, used as the plot title.
fit = "$\Delta M_\mathrm{PPI} = ("+f"{popt[0]:.4f}"+r"\log_{10}(Z)+"+f"{popt[1]:.4f})"+r"\times (M_\mathrm{CO}+"+f"{popt[2]:.1f}"+")^3"+f"{popt[3]:.4f}"+r"\times (M_\mathrm{CO}+"+f"{popt[2]:.1f}"+")^2$"
ax1.set_title(fit, fontsize=20)
# --------------------------------------------------------------------------------------
# One panel per metallicity: scatter the data, overplot the fit in the fitted
# range (solid) and the same fit over a wider mass range (dashed, faded).
for i, metallicity in enumerate(sorted(np.unique(Z))):
    ax = axes[i]
    ax.axhline(0, 0,1,lw='1', c='k', ls='--', zorder=0)
    # first plot data
    x = Mco[Z==metallicity]
    y = dMpulse[Z==metallicity]
    ax.scatter(x, y, color=rainbow[i], label=r"$Z="+f"{metallicity:.0e}"+"$")
    # then plot fit
    ind_for_fit = (x>=38) & (x<=60)
    x = x[ind_for_fit]
    ax.plot(x, fitting_func_Z([x,[metallicity]*len(x)],*popt), c=rainbow[i])
    # larger range to show the fit
    xx = np.linspace(30,60,1000)
    yy = fitting_func_Z([xx,[metallicity]*len(xx)],*popt)
    ax.plot(xx, yy, c=rainbow[i], ls="--", lw=8, alpha=0.5, zorder=0)
    # ----------
    ax.legend(fontsize=20, handletextpad=0.1, frameon=True)
    ax.set_ylim(-5,42)
    ax.set_xlim(30,75)
    # only the bottom panel keeps its x tick labels
    if ax != ax7:
        ax.set_xticklabels([])
ax4.set_ylabel(r"$\Delta M_\mathrm{PPI} \ [M_\odot]$")
ax7.set_xlabel(r"$M_\mathrm{CO} \ [M_\odot]$")
plt.savefig('fit1.png')
```
### Notes on the PPI mass loss formula
Therefore we recommend the fit above for $38<M_\mathrm{CO} / M_\odot<60$ and $\Delta M_\mathrm{PPI}=M_\mathrm{tot}$ for $60\leq M_\mathrm{CO} / M_\odot< 130$ and 0 above.
If the pre-pulse star has a H-rich envelope, the entirety of the H-rich envelope should be added to $\Delta M_\mathrm{PPI}$ - and then we set $\Delta M_\mathrm{NLW} =0$.
Note that our fit:
- neglects the mild Z-dependence of the edges of the gap (see [Farmer et al. 2019](https://ui.adsabs.harvard.edu/abs/2019ApJ...887...53F/abstract))
- neglects the delay between pulses and intra-pulse binary interactions (see [Marchant et al. 2019](https://ui.adsabs.harvard.edu/abs/2019ApJ...882...36M/abstract))
- the least massive BHs that can be made post-pulse might not be resolved properly (see [Marchant et al. 2019](https://ui.adsabs.harvard.edu/abs/2019ApJ...882...36M/abstract))
## Neutrino caused envelope losses $\Delta M_{\rm NLW}$
This is the mass loss caused by the [Nadhezin 1980](https://ui.adsabs.harvard.edu/abs/1980Ap%26SS..69..115N/abstract) -
[Lovegrove & Woosley](https://ui.adsabs.harvard.edu/search/p_=0&q=%5Elovegrove%202013%20&sort=date%20desc%2C%20bibcode%20desc) mechanism: the losses of
the neutrinos (see above) change the gravitational potential of the core and cause a shock wave that can
eject loosely bound envelopes. If the envelope is not present (because another mechanism has removed it)
before (e.g., binary interactions or pulsational pair instability), this should be zero.
```
def delta_m_nadhezin_lovegrove_woosley(star):
    """ See Nadhezin 1980, Lovegrove & Woosley 2013, Fernandez et al. 2018, Ivanov & Fernandez 2021 """
    # The bare string below is a statement, not part of the docstring; it notes
    # that this term must also vanish after a PPISN (the envelope is gone).
    """ this should also be zero post-PPISN """
    # NOTE(review): `RSG` is not defined anywhere in this notebook -- this
    # function is pseudocode sketching the intended logic ("if the star is a
    # red supergiant, eject the H-rich envelope").  Confirm how `star` encodes
    # its evolutionary state before using this.
    if star == RSG:
        """ if H-rich and large radius """
        # Eject the loosely bound envelope: everything above the He core.
        return star.mtot - star.mhe
    else:
        return 0
```
## Core-collapse SN mass loss $\Delta M_\mathrm{SN}\equiv\Delta M_\mathrm{SN}(M_\mathrm{CO})$
This is a very uncertain amount of mass loss: the supernova ejecta.
We still use the *delayed* algorithm from [Fryer et al. 2012](https://ui.adsabs.harvard.edu/abs/2012ApJ...749...91F/abstract) though these results should be revisited.
```
def delta_m_SN(star):
    """ this is Fryer+12 """
    # NOTE(review): stub -- the "delayed" Fryer et al. 2012 prescription is
    # not implemented here; as written the function returns None.
## Neutrino core losses $\Delta M_{\nu, \mathrm{core}}\equiv \Delta M_{\nu, \mathrm{core}}(M_\mathrm{remnant})$
When a core collapses it releases about $10^{53}$ ergs of gravitational potential energy to neutrinos.
These leave the core. The neutrino emission is estimated following [Fryer et al. 2012](https://ui.adsabs.harvard.edu/abs/2012ApJ...749...91F/abstract), but
we cap it at $10^{54}\ \mathrm{erg}/c^2\simeq0.5\,M_\odot$.
```
def delta_m_neutrino_core_losses(m_compact_object):
    """Mass (in Msun) lost to neutrinos by the collapsing core.

    Follows Fryer et al. 2012 (10% of the compact-object mass), capped at
    0.5 Msun ~ 1e54 erg / c^2.
    """
    fryer_estimate = 0.1 * m_compact_object
    cap = 0.5
    return cap if fryer_estimate > cap else fryer_estimate
```
# Miscellanea and sanity checks
One should always check that:
$$ M_{\rm remnant} \leq M_{\rm tot} $$
The fallback fraction, needed for kick-related calculations, can then be easily calculated as:
$$ f_b = (M_{\rm tot}-M_{\rm remnant})/M_{\rm tot} $$
Moreover, if the PPISN removes the H-rich envelope, then $\Delta M_{\rm NLW}=0$ (there is no envelope to be lost!)
```
# Farmer+19 Eq. 1
def farmer19(mco, Z=0.001):
    """
    Remnant (BH) mass from Eq. 1 of Farmer et al. 2019.

    Takes the CO core mass in Msun (scalar or array-like) and returns an
    array of remnant masses.  If a metallicity Z is not given, assume the
    baseline value of Farmer+19.
    N.B. this fit is accurate at the ~20% level.
    """
    mco = np.atleast_1d(mco)
    # PISN leaves no remnant: start from zeros and fill in the other regimes.
    remnant = np.zeros_like(mco, dtype=float)
    # Below the PPISN regime: simple offset from the CO core mass.
    low = mco < 38
    remnant[low] = mco[low] + 4
    # PPISN regime: quadratic in Mco with a log10(Z) shift (fit coefficients
    # from Farmer+19).
    c2, c1, cz, c0 = -0.096, 8.564, -2.07, -152.97
    ppisn = (mco >= 38) & (mco <= 60)
    m = mco[ppisn]
    remnant[ppisn] = c2 * m ** 2 + c1 * m + cz * np.log10(Z) + c0
    # Above the gap: direct collapse, the full CO core mass is retained.
    high = mco >= 130
    remnant[high] = mco[high]
    return remnant
# minimum post PPI BH mass: evaluate Farmer+19 Eq. 1 at the upper edge of the
# PPISN range (Mco = 60 Msun) for the baseline metallicity Z = 0.001.
a1 = -0.096
a2 = 8.564
a3 = -2.07
a4 = -152.97
mco = 60
m_remnant = a1*mco**2+a2*mco+a3*np.log10(0.001)+a4
print(m_remnant)
# Plot remnant mass vs. CO core mass; the flat zero region between ~60 and
# 130 Msun is the (P)PISN black-hole mass gap.
fig=plt.figure()
gs = gridspec.GridSpec(100, 110)
ax = fig.add_subplot(gs[:,:])
mco = np.linspace(25, 250, 2000)
m_bh = farmer19(mco)
ax.scatter(mco, m_bh)
ax.set_xlabel(r"$M_\mathrm{CO} \ [M_\odot]$")
ax.set_ylabel(r"$M_\mathrm{remnant}\ [M_\odot]$")
```
| github_jupyter |
# Úkol č. 2 - předzpracování dat a binární klasifikace (do 10. listopadu)
* Cílem tohoto úkolu je vyzkoušet si naučit prediktivní model pro binární klasifikaci.
* Budete se muset vypořádat s příznaky, které jsou různých typů a které bude třeba nějakým způsobem převést do číselné reprezentace.
> **Úkoly jsou zadány tak, aby Vám daly prostor pro invenci. Vymyslet _jak přesně_ budete úkol řešit, je důležitou součástí zadání a originalita či nápaditost bude také hodnocena!**
## Zdroj dat
Budeme se zabývat predikcí přežití pasažérů Titaniku.
K dispozici máte trénovací data v souboru **data.csv** a data na vyhodnocení v souboru **evaluation.csv**.
#### Seznam příznaků:
* survived - zda přežil, 0 = Ne, 1 = Ano, **vysvětlovaná proměnná**, kterou chcete predikovat
* pclass - Třída lodního lístku, 1 = první, 2 = druhá, 3 = třetí
* name - jméno
* sex - pohlaví
* age - věk v letech
* sibsp - počet sourozenců / manželů, manželek na palubě
* parch - počet rodičů / dětí na palubě
* ticket - číslo lodního lístku
* fare - cena lodního lístku
* cabin - číslo kajuty
* embarked - místo nalodění, C = Cherbourg, Q = Queenstown, S = Southampton
* home.dest - Bydliště/Cíl
## Pokyny k vypracování
**Základní body zadání**, za jejichž (poctivé) vypracování získáte **8 bodů**:
* V Jupyter notebooku načtěte data ze souboru **data.csv**. Vhodným způsobem si je rozdělte na trénovací, testovací a případně i validační množinu (preferujeme ale použití cross-validation).
* Projděte si jednotlivé příznaky a transformujte je do vhodné podoby pro použití ve vybraném klasifikačním modelu.
* Podle potřeby si můžete vytvářet nové příznaky (na základě existujících), například tedy můžete vytvořit příznak měřící délku jména. Některé příznaky můžete také úplně zahodit.
* Nějakým způsobem se vypořádejte s chybějícími hodnotami.
* Následně si vyberte vhodný klasifikační model z přednášek. Najděte vhodné hyperparametry a určete jeho přesnost (accuracy) na trénovací množině. Také určete jeho přesnost na testovací/validační množině.
* Načtěte vyhodnocovací data ze souboru **evaluation.csv**. Napočítejte predikce pro tyto data (vysvětlovaná proměnná v nich již není). Vytvořte **results.csv** soubor, ve kterém tyto predikce uložíte do dvou sloupců: ID, predikce přežití. Tento soubor nahrajte do repozitáře.
**Další body zadání** za případné další body (můžete si vybrat, maximum bodů za úkol je každopádně 12 bodů):
* (až +4 body) Aplikujte všechny klasifikační modely z přednášek a určete (na základě přesnosti na validační množině), který je nejlepší. Přesnost tohoto nejlepšího modelu odhadněte pomocí testovací množiny. K predikcím na vyhodnocovacích datech využijte tento model.
* (až +4 body) Zkuste použít nějaké (alespoň dvě) netriviální metody doplňování chybějících hodnot u věku. Zaměřte na vliv těchto metod na přesnost predikce výsledného modelu. K predikcím na vyhodnocovacích datech využijte ten přístup, který Vám vyjde jako nejlepší.
## Poznámky k odevzdání
* Řiďte se pokyny ze stránky https://courses.fit.cvut.cz/BI-VZD/homeworks/index.html.
* Odevzdejte nejen Jupyter Notebook, ale i _csv_ soubor(y) s predikcemi pro vyhodnocovací data.
* Opravující Vám může umožnit úkol dodělat či opravit a získat tak další body. **První verze je ale důležitá a bude-li odbytá, budete za to penalizováni**
## Podpůrné funkce
Podpůrné funkce na práci s daty
```
from sklearn.model_selection import cross_val_score
def ConvertNameToVector(name):
    """Encode a passenger name as [name_length, title_index].

    The index refers to the last entry of `titles` found as a substring of
    the name; -1 when no title matches.
    """
    titles = ['Sir.','Master.','Mr.','Mrs.','Miss.']
    title_index = -1
    for idx, title in enumerate(titles):
        if title in name:
            title_index = idx
    return [len(name), title_index]
def ConvertEmbarkedToVector(embarked):
    """One-hot encode the port of embarkation (C, Q, S).

    Unknown or missing values map to [0, 0, 0].
    """
    encoding = {'C': [1, 0, 0], 'Q': [0, 1, 0], 'S': [0, 0, 1]}
    return encoding.get(embarked, [0, 0, 0])
# NOTE: a second, byte-identical definition of ConvertEmbarkedToVector used to
# live here; it has been removed because redefining the function merely
# overwrote the first definition with the exact same behavior.
def ConvertFile(path): #Write missing values as -1
    """Parse the Titanic CSV at `path` into a feature matrix X and a list Y.

    For data.csv, Y holds the `survived` labels; for evaluation.csv (detected
    by 'evaluation' in the path), Y holds the passenger IDs instead and every
    later column index is shifted one to the left (`cnt` starts at -1).
    Missing values are encoded as -1.

    NOTE(review): this is a hand-rolled CSV parser; commas inside the quoted
    name field are handled by the `while` loop below, which concatenates the
    split fragments until it reaches the `sex` column.
    """
    with open(path,'r') as f:
        lines = [line.rstrip('\n') for line in f]
    X = []
    Y = []
    for line in lines[1:]:
        # cnt tracks the column-index shift caused by the file layout and by
        # commas embedded in the name field.
        cnt = 0
        tmpArr = line.split(',')
        if not 'evaluation' in path:
            Y.append(int(tmpArr[1]))
        else:
            cnt -= 1
            Y.append(int(tmpArr[0]))
        tmpX = []
        # pclass; -1 when missing
        pClass = -1 if tmpArr[2+cnt] == '' else (int(tmpArr[2+cnt]))
        name = ''
        # The name may contain commas: glue fragments back together until the
        # `sex` column (male/female/empty) is reached.
        while tmpArr[3+cnt] != 'male' and tmpArr[3+cnt] != 'female' and tmpArr[3+cnt] != '':
            name += tmpArr[3+cnt]
            cnt += 1
        tmpX.extend(ConvertNameToVector(name))
        cnt -= 1
        # sex: male=1, female=0, missing=-1
        maleXFemale = -1 if tmpArr[4+cnt] == '' else (1 if tmpArr[4+cnt] == 'male' else 0)
        tmpX.append(pClass)
        tmpX.append(maleXFemale)
        # numeric columns (age, sibsp, parch, ticket, fare); -1 when missing
        for i in range(5+cnt,10+cnt):
            try:
                val = -1 if tmpArr[i] == '' else (float(tmpArr[i]))
            except Exception as ex: # cabin with string at the beginning
                tmp = tmpArr[i].split(' ')
                if len(tmp) == 1:
                    val = -1
                else:
                    val = float(tmp[-1])
            tmpX.append(val)
        tmpX.extend(ConvertEmbarkedToVector(tmpArr[11+cnt]))
        X.append(tmpX)
    return X, Y
```
## Odhad chybějících hodnot věků lidí
Funkce FixMissingAges1 a FixMissingAges2 jsou dvě netriviální metody doplňování chybějících hodnot u věku.
* Funkce FixMissingAges1 určuje chybějící hodnotu věku takovým způsobem, aby měla množina doplněných věku podobnou distribuci věku jako původní již vyplněná množina věků.
* Funkce FixMissingAges2 predikuje chybějící hodnotu věku regresním modelem. Regresní model byl naučený na datech (příznacích), která obsahovala hodnotu věku.
Z těchto dvou funkcí měly klasifikační modely větší přesnost s použitím funkce FixMissingAges1
```
# Pool of observed (non-missing) ages, filled when processing training data.
currentAges = []
# This one achieved higher accuracy
def FixMissingAges1(X, test=False): # Predict missing ages using distribution of current ages
    """Impute missing ages (marker -1, column 4) by sampling observed ages.

    On training data (test=False) the known ages are first collected into the
    module-level `currentAges` pool; on test data (test=True) the pool built
    from the training set is reused so the distributions match.
    """
    import random
    if not test:
        currentAges.extend(row[4] for row in X if row[4] != -1)
    for row in X:
        if row[4] == -1:
            row[4] = random.choice(currentAges)
    return X
def FixMissingAges2(X): # Predict missing ages using regression
    """Impute missing ages (marker -1, column 4) with a random-forest model.

    The regressor is trained on the rows whose age is known, using every
    feature except the age column itself, and its predictions fill in the
    rows whose age is missing.  X is modified in place and returned.
    """
    from sklearn.ensemble import RandomForestRegressor
    def drop_age(row):
        # Feature vector without the age column (index 4).
        return row[:4] + row[5:]
    known = [row for row in X if row[4] != -1]
    missing = [row for row in X if row[4] == -1]
    train_features = [drop_age(row) for row in known]
    train_ages = [row[4] for row in known]
    predict_features = [drop_age(row) for row in missing]
    regressor = RandomForestRegressor(100)
    regressor.fit(train_features, train_ages)
    predictions = regressor.predict(predict_features)
    # `missing` holds references into X, so assigning here updates X itself.
    for row, age in zip(missing, predictions):
        row[4] = age
    return X
```
## Normalizace dat
Funkce myNorm nejprve doplní do množiny dat chybějící hodnoty a pak data normalizuje. Data jsou normalizována tak, aby každý příznak měl hodnotu mezi 0 a 1. Toho je docíleno tak, že pro každý příznak je nalezena maximální hodnota v trénovací množině a tímto polem nejvyšších hodnot příznaků je pak dělena celá trénovací množina.
Toto pole nejvyšších hodnot příznaků trénovací množiny (pole "myMaxes") je pak použito na normalizaci testovací množiny
```
# Per-feature maxima found on the training set, reused for the test set.
myMaxes = []
def myNorm(X, test=False):
    """Impute missing ages, then scale each feature by its training maximum.

    On the training set (test=False) the per-feature maxima are stored in the
    module-level `myMaxes`; the test set is then divided by the same maxima so
    both sets share one scale.  Non-negative features end up in [0, 1]
    (missing-value markers of -1 stay negative).

    Fix: features whose maximum is 0 (e.g. a one-hot column that never fires)
    are left unscaled instead of raising ZeroDivisionError as before.
    """
    FixMissingAges1(X,test)
    if not test:
        myMaxes.extend(0 for _ in range(len(X[0])))
        for item in X:
            for i in range(len(item)):
                if item[i] > myMaxes[i]:
                    myMaxes[i] = item[i]
    for j in range(len(X)):
        for i in range(len(X[j])):
            if myMaxes[i] != 0:
                X[j][i] /= float(myMaxes[i])
    return X
```
## Načtení dat
```
# Load and preprocess the data: the normalisation (and the age pool) is
# fitted on the training set, then applied unchanged to the evaluation set.
x_train, y_train = ConvertFile('data.csv')
x_train = myNorm(x_train)
# For evaluation.csv, y_test holds passenger IDs rather than labels.
x_test, y_test = ConvertFile('evaluation.csv')
x_test = myNorm(x_test, True)
```
### SVM
```
from sklearn import svm
# Support-vector classifier; accuracy estimated with 10-fold cross-validation.
clf = svm.SVC(max_iter=1000,gamma='auto')
print('SVM accuracy:',cross_val_score(clf, x_train, y_train,cv=10).mean())
```
### Extremely randomised trees
```
from sklearn.ensemble import ExtraTreesClassifier
# Extremely randomised trees (100 estimators), classes reweighted to balance.
trees = ExtraTreesClassifier(100,class_weight='balanced')
print('Extremely randomised trees accuracy:',cross_val_score(trees, x_train, y_train,cv=10).mean())
```
### Random forest
```
from sklearn.ensemble import RandomForestClassifier
# Random forest baseline with 100 trees and default hyper-parameters.
forest = RandomForestClassifier(100)
print('Random Forest accuracy:',cross_val_score(forest, x_train, y_train,cv=10).mean())
```
### k-nearest neighbors
```
from sklearn.neighbors import KNeighborsClassifier
# k-nearest neighbours with k=5 (features were scaled to comparable ranges above).
knn = KNeighborsClassifier(n_neighbors=5)
print('kNN accuracy:',cross_val_score(knn, x_train, y_train,cv=10).mean())
```
## Ladění hyperparametrů
Jelikož se Random Forest ukázal jako nejpřesnější, budeme hledat které jeho parametry produkují nejlepší klasifikační přesnost
```
# Find optimal hyper parameters for Random Forest
# Exhaustive grid search over (n_estimators, max_depth, max_features), scored
# by mean 10-fold cross-validation accuracy; the best index into each list is
# remembered so the winning combination can be re-used below.
nOfEstimators = [10,50,100,150]
bestIndexEst = 0
max_depth = [4,8,16,32,64]
bestIndexDepth = 0
max_features = ['log2','sqrt']
bestIndexFeat = 0
bestCurrentVal = 0
for i in range(len(nOfEstimators)):
    for j in range(len(max_depth)):
        for k in range(len(max_features)):
            forest = RandomForestClassifier(n_estimators=nOfEstimators[i],max_depth=max_depth[j],max_features=max_features[k])
            val = cross_val_score(forest, x_train, y_train,cv=10).mean()
            # progress: flat index of the current combination / total grid size
            print(str(i*(len(max_depth)*len(max_features)) + j*len(max_features) + k) +'/'+str(len(nOfEstimators)*len(max_depth)*len(max_features))+ ' searching hyperparameters')
            if val > bestCurrentVal:
                bestCurrentVal = val
                bestIndexEst = i
                bestIndexDepth = j
                bestIndexFeat = k
print('Best cross-validation accuracy:',bestCurrentVal)
print('Best parameters:')
print('Estimators:',nOfEstimators[bestIndexEst])
print('Max depth:',max_depth[bestIndexDepth])
print('Max features:',max_features[bestIndexFeat])
```
## Výsledná predikce
Nyní natrénujeme Random Forest s nalezenými optimálními hyperparametry na celé trénovací množině. Výsledný model použijeme na klasifikaci testovací množiny.
```
# Train the best random forest on the full training set and write the
# evaluation-set predictions to results.csv as "ID,prediction" rows.
forest = RandomForestClassifier(n_estimators=nOfEstimators[bestIndexEst],max_depth=max_depth[bestIndexDepth],max_features=max_features[bestIndexFeat])
forest.fit(x_train, y_train)
predictions = forest.predict(x_test)
with open('results.csv','w') as f:
    for i in range(len(predictions)):
        # y_test holds the passenger IDs parsed from evaluation.csv
        line = str(y_test[i]) + ',' + str(predictions[i]) + '\n'
        f.write(line)
```
| github_jupyter |
<a href="https://colab.research.google.com/github/Blackman9t/Advanced-Data-Science/blob/master/assignment3_1.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Assignment 3
Welcome to Assignment 3. This will be even more fun. Now we will calculate statistical measures.
## You only have to pass 4 out of 7 functions
Just make sure you hit the play button on each cell from top to down. There are seven functions you have to implement. Please also make sure than on each change on a function you hit the play button again on the corresponding cell to make it available to the rest of this notebook.
All functions can be implemented using DataFrames, ApacheSparkSQL or RDDs. We are only interested in the result. You are given the reference to the data frame in the "df" parameter and in case you want to use SQL just use the "spark" parameter which is a reference to the global SparkSession object. Finally if you want to use RDDs just use "df.rdd" for obtaining a reference to the underlying RDD object. But we discourage using RDDs at this point in time.
Let's start with the first function. Please calculate the minimal temperature for the test data set you have created. We've provided a little skeleton for you in case you want to use SQL. Everything can be implemented using SQL only if you like.
```
# Let's point Colaboratory to our Google Drive
# Mounting makes Drive files readable/writable under /content/gdrive.
from google.colab import drive
drive.mount('/content/gdrive')
```
Now it is time to grab a PARQUET file and create a dataframe out of it. Using SparkSQL you can handle it like a database.
```
!wget https://github.com/IBM/coursera/blob/master/coursera_ds/washing.parquet?raw=true
!mv washing.parquet?raw=true washing.parquet
```
### Install spark-related dependencies in Colab
```
!apt-get install openjdk-8-jdk-headless -qq > /dev/null
!wget -q http://apache.osuosl.org/spark/spark-2.4.4/spark-2.4.4-bin-hadoop2.7.tgz
!tar xf spark-2.4.4-bin-hadoop2.7.tgz
!pip install -q findspark
!pip install pyspark
# Set up required environment variables
# Point JAVA_HOME/SPARK_HOME at the versions installed above so that
# findspark/pyspark can locate them.
import os
os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64"
os.environ["SPARK_HOME"] = "/content/spark-2.4.4-bin-hadoop2.7"
```
### Next, we set up a SparkContext connection
```
from pyspark import SparkConf, SparkContext
# Create a local SparkContext.  If one already exists (e.g. this cell was run
# twice) SparkContext raises; the except branch just prints the error so the
# notebook can continue with the existing context.
try:
    conf = SparkConf().setMaster("local").setAppName("My App")
    sc = SparkContext(conf = conf)
    print('SparkContext Initialised Successfully!')
except Exception as e:
    print(e)
```
**Let's view the connection**
```
sc
```
### Next we set up a Spark Session named spark
```
from pyspark.sql import SparkSession
# Create (or reuse) a SparkSession for DataFrame / SQL work.
spark = SparkSession.builder.appName('My App').getOrCreate()
spark
```
### Let's now read the washing df into a Spark DataFrame so we can view it
```
# Read the parquet file and register it as the temporary view "washing" so
# that it can be queried with spark.sql(...) below.
df = spark.read.parquet('washing.parquet')
df.createOrReplaceTempView('washing')
df.show()
```
**Let's confirm the washing table is in the spark session tables catalog**
```
spark.catalog.listTables()
def minTemperature(df,spark):
    """Return the minimum of the `temperature` column of the washing view."""
    row = spark.sql("SELECT min(temperature) as mintemp from washing").first()
    return row.mintemp
```
Please now do the same for the mean of the temperature
```
def meanTemperature(df,spark):
    """Return the arithmetic mean of the `temperature` column of the washing view."""
    row = spark.sql("SELECT mean(temperature) as meantemp from washing").first()
    return row.meantemp
```
Please now do the same for the maximum of the temperature
```
def maxTemperature(df,spark):
    """Return the maximum of the `temperature` column of the washing view."""
    row = spark.sql("SELECT max(temperature) as maxtemp from washing").first()
    return row.maxtemp
```
Please now do the same for the standard deviation of the temperature
```
def sdTemperature(df,spark):
    """Return the sample standard deviation of the `temperature` column.

    Fix: the original referenced `col` without importing it, which raised a
    NameError at call time; `stddev` accepts the column name directly, so the
    extra import is unnecessary.
    """
    from pyspark.sql.functions import stddev
    stats = df.select(stddev('temperature').alias('std')).collect()
    return stats[0]['std']
```
Please now do the same for the skew of the temperature. Since the SQL statement for this is a bit more complicated we've provided a skeleton for you. You have to insert custom code at four positions in order to make the function work. Alternatively you can also remove everything and implement if on your own. Note that we are making use of two previously defined functions, so please make sure they are correct. Also note that we are making use of python's string formatting capabilitis where the results of the two function calls to "meanTemperature" and "sdTemperature" are inserted at the "%s" symbols in the SQL string.
```
df.count()
def skewTemperature(df,spark):
    """Third standardized moment (skewness) of `temperature`.

    Builds the sum of cubed standardized deviations in SQL, reusing
    meanTemperature and sdTemperature for the centering and scaling.
    Fix: the sample size is taken from df.count() instead of being
    hard-coded to 2058, so the function works for any dataset size.
    """
    n = df.count()
    return spark.sql("""
    SELECT
    (
        1/%d.0
    ) *
    SUM (
        POWER(washing.temperature - %s,3)/POWER(%s,3)
    )
    as sktemperature from washing
    """ %(n, meanTemperature(df,spark), sdTemperature(df,spark))).first().sktemperature
skew_temperature = skewTemperature(df,spark)
print(skew_temperature)
```
Kurtosis is the 4th statistical moment, so if you are smart you can make use of the code for skew which is the 3rd statistical moment. Actually only two things are different.
```
def kurtosisTemperature(df,spark):
    """Fourth standardized moment (kurtosis) of `temperature`.

    Same structure as skewTemperature with the powers raised from 3 to 4.
    Fix: the sample size is taken from df.count() instead of being
    hard-coded to 2058, so the function works for any dataset size.
    """
    n = df.count()
    return spark.sql("""
    SELECT
    (
        1/%d.0
    ) *
    SUM (
        POWER(washing.temperature-%s,4)/POWER(%s,4)
    )
    as ktemperature from washing
    """ %(n, meanTemperature(df,spark), sdTemperature(df,spark))).first().ktemperature
```
Just a hint. This can be solved easily using SQL as well, but as shown in the lecture also using RDDs.
```
def correlationTemperatureHardness(df,spark):
    """Pearson correlation between `temperature` and `hardness` in the washing view."""
    query = "SELECT corr(temperature,hardness) as temperaturehardness from washing"
    return spark.sql(query).first().temperaturehardness
```
Now let's test the functions you've implemented
```
# Exercise every statistic defined above and print the results.
min_temperature = minTemperature(df,spark)
print(min_temperature)
mean_temperature = meanTemperature(df,spark)
print(mean_temperature)
max_temperature = maxTemperature(df,spark)
print(max_temperature)
sd_temperature = sdTemperature(df,spark)
print(sd_temperature)
# To check the standard deviation of the temperature column using pandas dataframe
# NOTE(review): pandas fills missing temperatures with 0 here, whereas the
# Spark stddev above skips nulls -- the two values need not agree exactly.
pdf = df.toPandas()
pdf.temperature.fillna(0, inplace=True)
print(pdf.loc[:,"temperature"].std())
skew_temperature = skewTemperature(df,spark)
print(skew_temperature)
kurtosis_temperature = kurtosisTemperature(df,spark)
print(kurtosis_temperature)
correlation_temperature = correlationTemperatureHardness(df,spark)
print(correlation_temperature)
```
Congratulations, you are done, please submit this notebook to the grader.
We have to install a little library in order to submit to coursera first.
Then, please provide your email address and obtain a submission token on the grader’s submission page in coursera, then execute the subsequent cells
### Note: We've changed the grader in this assignment and will do so for the others soon since it gives less errors
This means you can directly submit your solutions from this notebook
```
# Download the grader helper library and submit all computed statistics.
!rm -f rklib.py
!wget https://raw.githubusercontent.com/IBM/coursera/master/rklib.py
from rklib import submitAll
import json
key = "Suy4biHNEeimFQ479R3GjA"
# NOTE(review): a personal email and submission token are committed in clear
# text here -- they should be removed/rotated before sharing this notebook.
email = 'sisokels@gmail.com'
token = 'PgHloDdn6XIpW8PB'
# Map each grader part-ID to its JSON-encoded answer.
parts_data = {}
parts_data["FWMEL"] = json.dumps(min_temperature)
parts_data["3n3TK"] = json.dumps(mean_temperature)
parts_data["KD3By"] = json.dumps(max_temperature)
parts_data["06Zie"] = json.dumps(sd_temperature)
parts_data["Qc8bI"] = json.dumps(skew_temperature)
parts_data["LoqQi"] = json.dumps(kurtosis_temperature)
parts_data["ehNGV"] = json.dumps(correlation_temperature)
submitAll(email, token, key, parts_data)
```
| github_jupyter |
# Dogs-vs-cats classification with ViT
In this notebook, we'll finetune a [Vision Transformer](https://arxiv.org/abs/2010.11929) (ViT) to classify images of dogs from images of cats using TensorFlow 2 / Keras and HuggingFace's [Transformers](https://github.com/huggingface/transformers).
**Note that using a GPU with this notebook is highly recommended.**
First, the needed imports.
```
%matplotlib inline
from transformers import ViTFeatureExtractor, TFViTForImageClassification
from transformers.utils import check_min_version
from transformers import __version__ as transformers_version
import tensorflow as tf
from tensorflow.keras.utils import plot_model
from PIL import Image
import os, sys
import pathlib
from natsort import natsorted
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# Apply seaborn's default plot styling.
sns.set()
# Fail early if the installed transformers version is too old for TFViT.
check_min_version("4.13.0.dev0")
print('Using TensorFlow version:', tf.__version__,
      'Keras version:', tf.keras.__version__,
      'Transformers version:', transformers_version)
```
## Data
The training dataset consists of 2000 images of dogs and cats, split in half. In addition, the validation set consists of 1000 images, and the test set of 22000 images. Here are some random training images:

```
# Location of the dogs-vs-cats splits and the expected image count per split.
datapath = "/media/data/dogs-vs-cats/train-2000/"
nimages = {'train':2000, 'validation':1000, 'test':22000}
```
### Image paths and labels
```
def get_paths(dataset):
    """Return the image file paths of `dataset` (train/validation/test).

    Collects every file matching <datapath>/<dataset>/*/* and verifies that
    exactly the expected number of images is present.
    """
    data_root = pathlib.Path(datapath+dataset)
    paths = [str(p) for p in data_root.glob('*/*')]
    image_count = len(paths)
    assert image_count == nimages[dataset], "Found {} images, expected {}".format(image_count, nimages[dataset])
    return paths
# Collect the image paths for each split.
image_paths = dict()
image_paths['train'] = get_paths('train')
image_paths['validation'] = get_paths('validation')
image_paths['test'] = get_paths('test')
# Class names are the subdirectory names of train/, mapped to integer
# indices in sorted order.
label_names = sorted(item.name for item in pathlib.Path(datapath+'train').glob('*/') if item.is_dir())
label_to_index = dict((name, index) for index,name in enumerate(label_names))
def get_labels(dataset):
    """Map each image path of `dataset` to the integer index of its class,
    taken from the name of its parent directory."""
    labels = []
    for path in image_paths[dataset]:
        class_name = pathlib.Path(path).parent.name
        labels.append(label_to_index[class_name])
    return labels
# Integer class labels for each split, aligned index-by-index with image_paths.
image_labels = dict()
image_labels['train'] = get_labels('train')
image_labels['validation'] = get_labels('validation')
image_labels['test'] = get_labels('test')
```
### Data loading
We now define a function to load the images.
```
def pil_loadimg(path: str):
    """Load the image at `path` and return it converted to RGB.

    Fix: the conversion now happens inside the `with` block.  PIL's
    Image.open is lazy -- the original returned `im.convert(...)` after the
    file had already been closed, which can raise "seek of closed file"
    when the deferred read finally happens.
    """
    with open(path, "rb") as f:
        im = Image.open(f)
        return im.convert("RGB")
def pil_loader(imglist: list):
    """Load every path in `imglist` into an RGB PIL image and return the list."""
    return [pil_loadimg(path) for path in imglist]
```
Next we specify the pre-trained ViT model we are going to use. The model [`"google/vit-base-patch16-224"`](https://huggingface.co/google/vit-base-patch16-224) is pre-trained on ImageNet-21k (14 million images, 21,843 classes) at resolution 224x224, and fine-tuned on ImageNet 2012 (1 million images, 1,000 classes) at resolution 224x224.
We'll use a pre-trained ViT feature extractor that matches the ViT model to preprocess the input images.
```
# Pre-trained ViT checkpoint; the matching feature extractor resizes and
# normalizes input images exactly the way this checkpoint expects.
VITMODEL = 'google/vit-base-patch16-224'
feature_extractor = ViTFeatureExtractor.from_pretrained(VITMODEL)
```
We load and preprocess the training and validation images:
```
%%time
# Preprocess all training and validation images into TF tensors up front.
# NOTE(review): this loads every image into memory at once -- fine for 3000
# images, but a streaming tf.data pipeline would be needed for larger sets.
inputs_train = feature_extractor(images=pil_loader(image_paths['train']),
                                 return_tensors="tf")
inputs_validation = feature_extractor(images=pil_loader(image_paths['validation']),
                                      return_tensors="tf")
```
### TF Datasets
Let's now define our TF `Dataset`s for training, validation, and test data.
```
BATCH_SIZE = 32
# Training data: reshuffle over the full dataset each epoch; drop the last
# partial batch so every batch has exactly BATCH_SIZE examples.
dataset_train = tf.data.Dataset.from_tensor_slices((inputs_train.data, image_labels['train']))
dataset_train = dataset_train.shuffle(len(dataset_train)).batch(BATCH_SIZE,
                                                                drop_remainder=True)
# Validation data: no shuffling needed.
dataset_validation = tf.data.Dataset.from_tensor_slices((inputs_validation.data,
                                                         image_labels['validation']))
dataset_validation = dataset_validation.batch(BATCH_SIZE, drop_remainder=True)
```
## Model
### Initialization
```
# Load the pre-trained ViT with a fresh 1-unit classification head
# (num_labels=1 -> single binary dog/cat output); ignore_mismatched_sizes
# discards the original 1000-class head weights instead of erroring.
model = TFViTForImageClassification.from_pretrained(VITMODEL, num_labels=1,
                                                    ignore_mismatched_sizes=True)
LR = 1e-5
optimizer = tf.keras.optimizers.Adam(learning_rate=LR)
# NOTE(review): the ViT classification head outputs raw logits, so
# from_logits=False looks suspicious here -- confirm whether
# BinaryCrossentropy should be constructed with from_logits=True.
loss = tf.keras.losses.BinaryCrossentropy(from_logits=False)
metric = 'accuracy'
model.compile(optimizer=optimizer, loss=loss, metrics=[metric])
print(model.summary())
```
### Learning
```
%%time
EPOCHS = 4
# Fine-tune for a few epochs; verbose=2 prints one line per epoch.
history = model.fit(dataset_train,
                    validation_data=dataset_validation,
                    epochs=EPOCHS, verbose=2) #callbacks=callbacks)
# Training curves: loss (left panel) and accuracy (right panel) per epoch.
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10,3))
ax1.plot(history.epoch,history.history['loss'], label='training')
ax1.plot(history.epoch,history.history['val_loss'], label='validation')
ax1.set_title('loss')
ax1.set_xlabel('epoch')
ax1.legend(loc='best')
ax2.plot(history.epoch,history.history['accuracy'], label='training')
ax2.plot(history.epoch,history.history['val_accuracy'], label='validation')
ax2.set_title('accuracy')
ax2.set_xlabel('epoch')
ax2.legend(loc='best');
```
### Inference
```
%%time
# Evaluate on the validation set and report accuracy as a percentage.
scores = model.evaluate(dataset_validation, verbose=2)
print("Validation set %s: %.2f%%" % (model.metrics_names[1], scores[1]*100))
```
| github_jupyter |
```
from pyspark.ml import Transformer, Estimator, Pipeline
from pyspark.ml.classification import LogisticRegression
from mmlspark.downloader import ModelDownloader
import os, sys, time
# Download a pre-trained ResNet50 from the MMLSpark model zoo into DBFS.
model = ModelDownloader(spark, "dbfs:/models/").downloadByName("ResNet50")
# Load the images
# use flowers_and_labels.parquet on larger cluster in order to get better results
# (only a 10% sample is taken here to keep the demo fast)
imagesWithLabels = spark.read.parquet("wasbs://publicwasb@mmlspark.blob.core.windows.net/flowers_and_labels2.parquet") \
    .withColumnRenamed("bytes","image").sample(.1)
imagesWithLabels.printSchema()
```

```
from mmlspark.opencv import ImageTransformer
from mmlspark.image import UnrollImage, ImageFeaturizer
from mmlspark.stages import *
# Make some featurizers
# Baseline pipeline: downscale each image to 60x60, unroll the raw pixels into
# a flat feature vector, and fit a logistic regression on them.
it = ImageTransformer()\
    .setOutputCol("scaled")\
    .resize(height = 60, width = 60)
ur = UnrollImage()\
    .setInputCol("scaled")\
    .setOutputCol("features")
dc1 = DropColumns().setCols(["scaled", "image"])
lr1 = LogisticRegression().setMaxIter(8).setFeaturesCol("features").setLabelCol("labels")
dc2 = DropColumns().setCols(["features"])
basicModel = Pipeline(stages=[it, ur, dc1, lr1, dc2])
# Deep pipeline: use the pre-trained ResNet50 (final layer cut off) as a fixed
# feature extractor, then fit a logistic regression on the deep features.
resnet = ImageFeaturizer()\
    .setInputCol("image")\
    .setOutputCol("features")\
    .setModelLocation(model.uri)\
    .setLayerNames(model.layerNames)\
    .setCutOutputLayers(1)
dc3 = DropColumns().setCols(["image"])
lr2 = LogisticRegression().setMaxIter(8).setFeaturesCol("features").setLabelCol("labels")
dc4 = DropColumns().setCols(["features"])
deepModel = Pipeline(stages=[resnet, dc3, lr2, dc4])
```

### How does it work?

### Run the experiment
```
def timedExperiment(model, train, test):
    """Fit `model` on `train`, score `test`, and print the wall-clock time.

    Returns the test-set output collected into a pandas DataFrame.
    """
    start = time.time()
    fitted = model.fit(train)
    result = fitted.transform(test).toPandas()
    print("Experiment took {}s".format(time.time() - start))
    return result
# 80/20 train/test split, then time both pipelines end-to-end.
train, test = imagesWithLabels.randomSplit([.8,.2])
train.count(), test.count()
basicResults = timedExperiment(basicModel, train, test)
deepResults = timedExperiment(deepModel, train, test)
```
### Plot confusion matrix.
```
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
import numpy as np
def evaluate(results, name):
    """Plot a row-normalized confusion matrix for *results*, annotated with accuracy.

    Args:
        results: pandas DataFrame holding a "labels" column (true labels) and a
            "prediction" column, as produced by ``timedExperiment``.
        name: model name interpolated into the plot title.
    """
    y, y_hat = results["labels"], results["prediction"]
    y = [int(l) for l in y]  # labels may arrive as strings/floats from Spark
    accuracy = np.mean([1. if pred==true else 0. for (pred,true) in zip(y_hat,y)])
    cm = confusion_matrix(y, y_hat)
    # Normalize each row so cells read as per-class fractions.
    cm = cm.astype("float") / cm.sum(axis=1)[:, np.newaxis]
    # Raw string: "\%" is an invalid escape sequence in a normal string literal
    # (SyntaxWarning on modern Python); mathtext needs the literal backslash.
    plt.text(40, 10, r"$Accuracy$ $=$ ${}\%$".format(round(accuracy*100,1)), fontsize=14)
    plt.imshow(cm, interpolation="nearest", cmap=plt.cm.Blues)
    plt.colorbar()
    plt.xlabel("$Predicted$ $label$", fontsize=18)
    plt.ylabel("$True$ $Label$", fontsize=18)
    plt.title("$Normalized$ $CM$ $for$ ${}$".format(name))
# Side-by-side normalized confusion matrices for the deep and baseline models.
plt.figure(figsize=(12,5))
plt.subplot(1,2,1)
evaluate(deepResults,"CNTKModel + LR")
plt.subplot(1,2,2)
evaluate(basicResults,"LR")
# Note that on the larger dataset the accuracy will bump up from 44% to >90%
# NOTE(review): `display` is the Databricks notebook builtin — confirm the runtime.
display(plt.show())
```
| github_jupyter |
# Demonstration of the Metrics To-Date
For a complete list of metrics and their documentation, please see the API Metrics [documentation](../API/simulation_api.md#metrics-computation).
This demonstration will rely on the results produced in the "How To" notebook.
```
from pprint import pprint
import pandas as pd
from wombat.core import Simulation
from wombat.core.library import load_yaml
# Display formatting: thousands separators with two decimals, and effectively
# no row/column truncation when rendering result frames.
pd.set_option("display.float_format", '{:,.2f}'.format)
pd.set_option("display.max_rows", 1000)
pd.set_option("display.max_columns", 1000)
```
## Setup
The simulations from the How To notebook will be rerun here because creating a `Metrics` object from scratch is not recommended: it requires a
large number of inputs, and the initialization is already provided by the simulation API's `run` method.
```
# Re-run the Dinwoodie base case; Simulation.run() initializes sim.metrics.
simulation_name = "dinwoodie_base"
sim = Simulation(simulation_name, "DINWOODIE", "base.yaml")
sim.run()
# For convenience only
metrics = sim.metrics
```
## Availability
There are two methods to produce availability, which have their own function calls:
- energy: `production_based_availability`
- time: `time_based_availability`
Here, we will go through the various input definitions to get time-based availability data as both methods use the same inputs, and provide outputs in the same format.
`frequency` options:
- project: computed across the whole simulation
- annual: computed on a yearly basis
- monthly: computed across years on a monthly basis
- month-year: computed on a month-by-year basis
`by` options:
- windfarm: computed across all turbines
- turbine: computed for each turbine
```
# Time-based availability at every supported frequency/grouping combination.
# Project total at the whole windfarm level
# At project/windfarm granularity the result is a scalar (formatted below).
total = metrics.time_based_availability(frequency="project", by="windfarm")
print(f"Project total: {total * 100:.1f}%")
# Project total at the turbine level
metrics.time_based_availability(frequency="project", by="turbine")
# Project annual totals at the windfarm level
metrics.time_based_availability(frequency="annual", by="windfarm")
# Project monthly totals at the windfarm level
metrics.time_based_availability(frequency="monthly", by="windfarm")
# Project month-by-year totals at the windfarm level
# NOTE: This is limited to the first two years for cleanliness of the notebook
metrics.time_based_availability(frequency="month-year", by="windfarm").head(24)
```
## Capacity Factor
Here, we will go through the various input definitions to get capacity factor data. The inputs are very similar to that of the availability calculation.
`which` options:
 - net: net capacity factor, actual production
- gross: gross capacity factor, potential production
`frequency` options:
- project: computed across the whole simulation
- annual: computed on a yearly basis
- monthly: computed across years on a monthly basis
- month-year: computed on a month-by-year basis
`by` options:
- windfarm: computed across all turbines
- turbine: computed for each turbine
```
# Project total at the whole windfarm level
cf = metrics.capacity_factor(which="net", frequency="project", by="windfarm")
print(f" Net Capacity Factor: {cf:.2f}%")
cf = metrics.capacity_factor(which="gross", frequency="project", by="windfarm")
print(f"Gross Capacity Factor: {cf:.2f}%")
# Project total at the turbine level
metrics.capacity_factor(which="net", frequency="project", by="turbine")
# Project annual totals at the windfarm level
metrics.capacity_factor(which="net", frequency="annual", by="windfarm")
# Project monthly totals at the windfarm level
metrics.capacity_factor(which="net", frequency="monthly", by="windfarm")
# Project month-by-year totals at the windfarm level
# NOTE: This is limited to the first two years for cleanliness of the notebook
metrics.capacity_factor(which="net", frequency="month-year", by="windfarm").head(24)
```
## Task Completion Rate
Here, we will go through the various input definitions to get the task completion rates. The inputs are very similar to that of the availability calculation.
`which` options:
 - scheduled: scheduled maintenance only (classified as maintenance tasks in inputs)
- unscheduled: unscheduled maintenance only (classified as failure events in inputs)
 - both: scheduled and unscheduled maintenance combined
`frequency` options:
- project: computed across the whole simulation
- annual: computed on a yearly basis
- monthly: computed across years on a monthly basis
- month-year: computed on a month-by-year basis
```
# Project total at the whole windfarm level
total = metrics.task_completion_rate(which="scheduled", frequency="project")
print(f" Scheduled Task Completion Rate: {total * 100:.0f}%")
total = metrics.task_completion_rate(which="unscheduled", frequency="project")
print(f"Unscheduled Task Completion Rate: {total * 100:.0f}%")
total = metrics.task_completion_rate(which="both", frequency="project")
print(f" Overall Task Completion Rate: {total * 100:.0f}%")
# Project annual totals at the windfarm level
metrics.task_completion_rate(which="both", frequency="annual")
# Project monthly totals at the windfarm level
metrics.task_completion_rate(which="both", frequency="monthly")
# Project month-by-year totals at the windfarm level
# NOTE: This is limited to the first two years for cleanliness of the notebook
metrics.task_completion_rate(which="both", frequency="month-year").head(24)
```
## Equipment Costs
Here, we will go through the various input definitions to get the equipment cost data.
`frequency` options:
- project: computed across the whole simulation
- annual: computed on a yearly basis
- monthly: computed across years on a monthly basis
- month-year: computed on a month-by-year basis
`by_equipment` options:
- `True`: computed across all equipment used
- `False`: computed for each piece of equipment
```
# Servicing-equipment costs, normalized by capacity for the project total.
# Project total at the whole windfarm level
total = metrics.equipment_costs(frequency="project", by_equipment=False)
print(f"Project total: ${total / metrics.project_capacity:,.2f}/MW")
# Project totals at the equipment level
metrics.equipment_costs(frequency="project", by_equipment=True)
# Project annual totals at the windfarm level
metrics.equipment_costs(frequency="annual", by_equipment=False)
# Project monthly totals at the equipment level
metrics.equipment_costs(frequency="monthly", by_equipment=True)
# Project month-by-year totals at the equipment level
# NOTE: This is limited to the two years only
metrics.equipment_costs(frequency="month-year", by_equipment=True).head(24)
```
## Service Equipment Utilization Rate
Here, we will go through the various input definitions to get the service equipment utilization rates.
`frequency` options:
- project: computed across the whole simulation
- annual: computed on a yearly basis
```
# Utilization rate of each piece of servicing equipment.
# Project totals at the project level
total = metrics.service_equipment_utilization(frequency="project")
total
# Annualized project totals
total = metrics.service_equipment_utilization(frequency="annual")
total
```
## Labor Costs
Here, we will go through the various input definitions to get the labor cost data.
`frequency` options:
- project: computed across the whole simulation
- annual: computed on a yearly basis
- monthly: computed across years on a monthly basis
- month-year: computed on a month-by-year basis
`by_type` options:
- `True`: computed across each labor type
- `False`: computed for both labor types used
```
# Labor costs, normalized by capacity for the project total.
# Project total at the whole windfarm level
total = metrics.labor_costs(frequency="project", by_type=False)
print(f"Project total: ${total / metrics.project_capacity:,.2f}/MW")
# Project totals for each type of labor
# NOTE: Only salaried labor was defined for these analyses
metrics.labor_costs(frequency="project", by_type=True)
# Project annual totals for all labor
metrics.labor_costs(frequency="annual", by_type=False)
# Project monthly totals for all labor
metrics.labor_costs(frequency="monthly", by_type=False)
# Project month-by-year totals for all labor
# NOTE: This is limited to the first two years only
metrics.labor_costs(frequency="month-year", by_type=False).head(24)
```
## Equipment and Labor Costs
Here, we will go through the various input definitions to get the equipment and labor cost data broken out by expense categories.
`frequency` options:
- project: computed across the whole simulation
- annual: computed on a yearly basis
- monthly: computed across years on a monthly basis
- month-year: computed on a month-by-year basis
`by_category` options:
- `True`: computed across as equipment, plus each labor type, plus totals
- `False`: computed as single total
##### **NOTE:** For this breakdown the expense category (reason) is distributed across the rows in addition to time.
`reason` definitions:
- Maintenance: routine maintenance
- Repair: unscheduled maintenance, ranging from inspections to replacements
- Weather Delay: Any delays caused by unsafe weather conditions
- No Requests: Equipment and labor is active, but there are no repairs or maintenance tasks to be completed
- Not in Shift: Any time outside of the operating hours of the windfarm
```
# Combined equipment + labor costs, optionally split by expense category.
# Project totals
metrics.equipment_labor_cost_breakdowns(frequency="project", by_category=False)
# Project totals by each category
metrics.equipment_labor_cost_breakdowns(frequency="project", by_category=True)
# Project annual totals
# NOTE: This is limited to the first two years
metrics.equipment_labor_cost_breakdowns(frequency="annual", by_category=False).head(10)
# Project monthly totals
# NOTE: This is limited to the first two years
metrics.equipment_labor_cost_breakdowns(frequency="monthly", by_category=False).head(10)
# Project month-by-year totals
# NOTE: This is limited to the first two years
metrics.equipment_labor_cost_breakdowns(frequency="month-year", by_category=False).head(20)
```
## Component
Here, we will go through the various input definitions to get the component cost data broken out by various categories.
**NOTE**: The component costs will not sum to the whole project's operations costs, because some delays (such as periods with no requests to process) are not associated with any repair or maintenance task.
`frequency` options:
- project: computed across the whole simulation
- annual: computed on a yearly basis
- monthly: computed across years on a monthly basis
- month-year: computed on a month-by-year basis
`by_category` options:
- `True`: computed across each cost category (includes total)
- `False`: computed as single total
`by_action` options:
- `True`: computed by each of "repair", "maintenance", and "delay"
- `False`: computed as single total
##### **NOTE:** For this breakdown the expense category (reason) is distributed across the rows in addition to time.
`action` definitions:
- maintenance: routine maintenance
- repair: unscheduled maintenance, ranging from inspections to replacements
- delay: Any delays caused by unsafe weather conditions or not being able to finish a process within a single shift
```
# Per-component costs, optionally split by cost category and/or action type.
# Project totals by component
metrics.component_costs(frequency="project", by_category=False, by_action=False)
# Project totals by each category and action type
metrics.component_costs(frequency="project", by_category=True, by_action=True)
# Project annual totals by category
# NOTE: This is limited to the first two years
metrics.component_costs(frequency="annual", by_category=True, by_action=False).head(28)
# Project monthly totals
# NOTE: This is limited to the first two months
metrics.component_costs(frequency="monthly", by_category=True, by_action=False).head(28)
# Project month-by-year totals
# NOTE: This is limited to the first two months
metrics.component_costs(frequency="month-year", by_category=True, by_action=False).head(28)
```
## Fixed Cost Impacts
Here, we will go through the various input definitions to get the fixed cost data
`frequency` options:
- project: computed across the whole simulation
- annual: computed on a yearly basis
`resolution` options:
- high: computed across the lowest itemized cost levels
- medium: computed across overarching cost levels
- low: computed as single total
```
# Fixed costs at each level of the itemization hierarchy.
# The resolution hierarchy for fixed costs
pprint(metrics.fixed_costs.hierarchy)
# Project totals at the highest level
metrics.project_fixed_costs(frequency="project", resolution="low")
# Project totals at the medium level
metrics.project_fixed_costs(frequency="project", resolution="medium")
# Project totals at the lowest level
metrics.project_fixed_costs(frequency="project", resolution="high")
# Project annualized totals at the medium level
metrics.project_fixed_costs(frequency="annual", resolution="medium")
```
## Process Times
There are no inputs for the process timing as it is a slow calculation, so aggregation is left to the user for now. The results correspond to the number of hours required to complete any of the repair or maintenance activities.
```
# Hours required to complete each repair/maintenance process (no parameters).
# Project totals at the project level
total = metrics.process_times()
total
```
## Power Production
Here, we will go through the various input definitions to get the power production data.
`frequency` options:
- project: computed across the whole simulation
- annual: computed on a yearly basis
- monthly: computed across years on a monthly basis
- month-year: computed on a month-by-year basis
`by_turbine` options:
 - `True`: computed for each turbine
- `False`: computed for the whole windfarm
```
# Energy production at every frequency, farm-wide or per turbine.
# Project total at the whole windfarm level
total = metrics.power_production(frequency="project", by_turbine=False)
total
# Project totals at the turbine level
metrics.power_production(frequency="project", by_turbine=True)
# Project annual totals for the windfarm
metrics.power_production(frequency="annual", by_turbine=False)
# Project monthly totals for the windfarm
metrics.power_production(frequency="monthly", by_turbine=False)
# Project month-by-year totals for the windfarm
# NOTE: This is limited to the first two years only
metrics.power_production(frequency="month-year", by_turbine=False).head(24)
```
## PySAM-Powered Results
For a number of project financial metrics, the PySAM library is utilized.
<div class="alert alert-block alert-warning">
<b>NOTE:</b> If a "SAM_settings" file is not provided to the simulation, then the following metrics will not be able to be calculated and will raise a `NotImplementedError`.
</div>
With the above warning in mind, the appropriate simulation outputs are provided as inputs to PySAM upon initialization to ensure all values are aligned.
### Net Present Value (NPV)
```
# NPV via PySAM; raises NotImplementedError if no SAM settings were supplied.
try:
    npv = metrics.pysam_npv()
    print(f"NPV: ${npv:,.0f}")
except NotImplementedError as e:
    print(e)
```
### Real Levelized Cost of Energy (LCOE)
```
# Real (inflation-adjusted) LCOE via PySAM; raises NotImplementedError if no
# SAM settings were supplied to the simulation.
try:
    lcoe = metrics.pysam_lcoe_real()
    # LCOE is a cost per unit of *energy*: report $/kWh, not $/kW.
    print(f"Real LCOE: ${lcoe:,.2f}/kWh")
except NotImplementedError as e:
    print(e)
```
### Nominal Levelized Cost of Energy (LCOE)
```
# Nominal LCOE via PySAM; raises NotImplementedError if no SAM settings were
# supplied to the simulation.
try:
    lcoe = metrics.pysam_lcoe_nominal()
    # LCOE is a cost per unit of *energy*: report $/kWh, not $/kW.
    print(f"Nominal LCOE: ${lcoe:,.2f}/kWh")
except NotImplementedError as e:
    print(e)
```
### After-tax Internal Return Rate (IRR)
```
# After-tax IRR via PySAM; raises NotImplementedError if no SAM settings were
# supplied to the simulation.
try:
    # Renamed the local from `npv` to `irr` — this call returns the internal
    # rate of return, not a net present value.
    irr = metrics.pysam_irr()
    print(f"IRR: {irr:,.1f}%")
except NotImplementedError as e:
    print(e)
```
### One Data Frame to Rule Them All
For this demonstration we will manually load a PySAM settings file and trigger the setup for demonstration purposes, but it should be noted that this practice should be avoided.
```
# Manually attach SAM settings and build the PySAM model (demo only; normally
# the settings are passed to the Simulation up front).
SAM_settings = "SAM_Singleowner_defaults.yaml"
metrics.sam_settings = load_yaml(sim.env.data_dir / "windfarm", SAM_settings)
metrics._setup_pysam()
metrics.pysam_all_outputs()
# Remove the simulation's log files now that we are done with them.
sim.env.cleanup_log_files(log_only=False)
```
| github_jupyter |
```
import numpy as np
import pandas as pd
import random
import matplotlib.pyplot as plt
from timeit import default_timer as timer
from datetime import timedelta
from itertools import count
import os
import sys
import pickle
# (Removed duplicated `timeit`/`datetime` imports that repeated the two above.)
from IPython.display import clear_output
import gym
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, Sequential, optimizers
import tensorflow_probability as tfp

# Silence TensorFlow C++ INFO/WARNING log spam.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# tf.config.list_physical_devices(device_type='GPU')

# Seed every RNG source so CPU runs are reproducible.
seed = 1
random.seed(seed)
np.random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)  # disable hash randomization so experiments are reproducible
tf.random.set_seed(seed)
# NOTE: it is still unclear how to make TensorFlow fully deterministic on GPU.

# hyperparameter
training_env_seed = 123
gamma = 0.99  # discount factor
lr = 1e-4     # Adam learning rate
```
# Policy
```
class Policy(keras.Model):
    """Small softmax policy network: Dense(128) -> Dropout -> ReLU -> Dense(actions)."""

    def __init__(self, action_dim):
        super(Policy, self).__init__()
        self.action_dim = action_dim
        # Attribute names are part of the checkpoint layout — keep them stable.
        self.affine1 = layers.Dense(128)
        self.dropout = layers.Dropout(rate=0.6)
        self.affine2 = layers.Dense(self.action_dim)

    def call(self, obs, training=None):
        """Map a batch of observations to a batch of action probabilities."""
        hidden = self.affine1(tf.convert_to_tensor(obs, dtype=tf.float32))
        hidden = tf.nn.relu(self.dropout(hidden))
        return tf.nn.softmax(self.affine2(hidden))
```
# Agent
```
class Agent(object):
    """REINFORCE (Monte-Carlo policy-gradient) agent for a discrete-action gym env."""

    def __init__(self, env_name, policy=Policy):
        self.env = gym.make(env_name)
        self.action_dim = self.env.action_space.n
        self.policy = policy(self.action_dim)
        self.optimizer = optimizers.Adam(learning_rate=lr)
        self.gamma = gamma  # discount factor from the module-level hyperparameters

    def get_action(self, obs, training=None):
        """Sample one action from the policy's categorical distribution for `obs`."""
        prob = self.policy(np.expand_dims(obs, 0), training=training)
        m = tfp.distributions.Categorical(probs=prob)
        action = m.sample()
        return int(action.numpy()[0])

    def train(self, obses, rewards, actions):
        """Run one REINFORCE update over a complete episode trajectory."""
        # Compute discounted reward-to-go by scanning the rewards backwards.
        R = 0
        discounted_returns = []
        for r in rewards[::-1]:
            R = self.gamma * R + r
            discounted_returns.insert(0, R)
        # One gradient step per timestep; the forward pass must happen inside
        # the tape so tape.gradient can see the policy's variables.
        for obs, reward_to_go, action in zip(obses, discounted_returns, actions):
            with tf.GradientTape() as tape:
                p = self.policy(np.expand_dims(obs, 0))
                loss = self.compute_loss(p, reward_to_go, action)
            grads = tape.gradient(loss, self.policy.trainable_variables)
            self.optimizer.apply_gradients(zip(grads, self.policy.trainable_variables))

    def compute_loss(self, prob, reward_to_go, action):
        """Policy-gradient loss for one step: -G_t * log pi(a_t | s_t)."""
        # NOTE(review): Categorical(prob) passes `prob` positionally, which tfp
        # interprets as *logits*, while get_action passes probs=prob. This
        # asymmetry looks unintended — confirm before relying on it.
        m = tfp.distributions.Categorical(prob)
        log_prob = m.log_prob(action)
        loss = reward_to_go * -log_prob
        return loss

    def eval_(self, env, n_trajs=5):
        """Return the mean undiscounted episode return over `n_trajs` rollouts."""
        returns = []
        for i in range(n_trajs):
            ep_return = 0
            obs = env.reset()
            for step in range(10000):  # hard cap on episode length
                action = self.get_action(obs, training=False)
                obs, reward, done, _ =env.step(action)
                ep_return += reward
                if done:
                    returns.append(ep_return)
                    break
        return np.array(returns).mean()

    def render(self, env):
        """Play one episode with on-screen rendering, for visual inspection."""
        obs = env.reset()
        for _ in range(10000):
            env.render()
            action = self.get_action(obs, training=False)
            obs, reward, done, _ = env.step(action)
            if done:
                break

    def save(self, step):
        """Checkpoint the policy weights, tagged with `step`."""
        self.policy.save_weights('./reinforce_{}.ckpt'.format(step))

    def load(self, path):
        """Restore policy weights from `path` if the checkpoint exists."""
        if os.path.isfile(path):
            self.policy.load_weights(path)
        else:
            print('No "{}" exits for loading'.format(path))
```
## TensorFlow and PyTorch differ in how this must be written: porting the PyTorch-style implementation verbatim causes `tape.gradient(loss, self.policy.trainable_variables)` to fail to obtain the corresponding gradients.
# Training Loop
```
env_name = 'CartPole-v0'
env_eval = gym.make(env_name)  # separate env used only for evaluation rollouts
agent_reinforce = Agent(env_name, Policy)
start = timer()
running_returns = []
for episode in count(1):
    total_reward = 0
    obs = agent_reinforce.env.reset()
    obses = []
    actions = []
    rewards = []
    done = False
    # Roll out one full episode, recording the trajectory for the update.
    while not done:
        action = agent_reinforce.get_action(obs)
        obses.append(obs)
        actions.append(action)
        obs, reward, done, _ = agent_reinforce.env.step(action)
        rewards.append(reward)
        total_reward += reward
        if done:
            # Monte-Carlo policy-gradient update on the completed episode.
            agent_reinforce.train(obses, rewards, actions)
            running_returns.append(total_reward)
            print('total reward after {} episodes is {}'.format(episode, total_reward))
    if episode % 10 == 0:
        clear_output(True)
        plt.plot(pd.Series(running_returns).rolling(100, 20).mean())
        # Fixed typo in the displayed title: 'episide' -> 'episode'.
        plt.title('episode:{}, time:{}, returns'.format(episode, timedelta(seconds=int(timer()-start))))
        plt.show()
    # CartPole-v0 counts as solved at an average return above 195.
    if np.array(running_returns)[-20:].mean() > 195:
        eval_return = agent_reinforce.eval_(env_eval, 100)
        if eval_return > 195:
            print('Solved, the eval return is {}'.format(eval_return))
            break
agent_reinforce.policy.save_weights('./reinforce_cartpole.ckpt')
```
| github_jupyter |
# 5.1 Introduction to pandas Data Structures
```
# --- pandas Series basics: values, index, vectorized ops, missing data ---
import pandas as pd
import numpy as np
obj = pd.Series([1,2,3])
obj
obj.values
obj.index
# A custom index labels each value.
obj2 = pd.Series([4, 7, -4, 9], index=['d','b','a','c'])
obj2
obj2.index
obj2['a']
obj2['d']=1
obj2
obj2[['a','b','c']]
obj2[obj2>2]
obj2 *2
np.exp(obj2)
# `in` tests membership in the *index*, not the values.
'b' in obj2
'e' in obj2
# Building a Series from a dict; re-indexing introduces NaN for missing keys.
sdata = {'Ohio':3500, 'Texas':71000,'Oregon':16000, 'Utah':5000}
obj3 = pd.Series(sdata)
obj3
states = ['California','Ohio','Oregon','Texas']
obj4 = pd.Series(sdata, index=states)
obj4
pd.isnull(obj4)
# Fixed: was pd.notnull(obj) — this example inspects obj4's missing entries.
pd.notnull(obj4)
obj3
obj4
# Arithmetic aligns on index labels; non-overlapping labels become NaN.
obj3 + obj4
obj4.name = 'population'
obj4.index.name = 'state'
obj4
obj
# Series index can be altered in-place by assignment
obj.index = ['Bob', 'Steve', 'Jeff']
obj
```
### DataFrame
```
# --- pandas DataFrame basics: construction, column access, assignment ---
df = pd.DataFrame(obj4)
df
data = {'state': ['Ohio', 'Ohio', 'Ohio', 'Nevada', 'Nevada', 'Nevada'],
        'year': [2000, 2001, 2002, 2001, 2002, 2003],
        'pop': [1.5, 1.7, 3.6, 2.4, 2.9, 3.2]}
frame = pd.DataFrame(data)
frame
# If you specify a sequence of coulumns, the dataframe will be arranged in that order
pd.DataFrame(data, columns= ['year','state', 'pop'])
# A column absent from the data ('debt') is created as all-NaN.
frame2 = pd.DataFrame(data, columns=['year', 'state', 'pop', 'debt'],
                      index= ['one', 'two', 'three', 'four', 'five', 'six'])
frame2.index.name = 'index_name'
frame2
frame2['state']
frame2.year
frame2.loc['one']
frame2['debt'] = 16.5
frame2
frame2['debt'] = np.arange(6)
frame2
# Assigning a Series aligns on the index; unmatched rows get NaN.
val = pd.Series([-1.2, -1.5, -1.7], index=['two', 'four', 'five'])
frame2['debt'] = val
frame2
frame2['eastern'] = frame2.state == 'Ohio'
frame2
del frame2['eastern']
frame2.columns
# Nested dict of dicts: outer keys become columns, inner keys become the index.
pop = {'Nevada': {2001:2.4, 2002: 2.9},
       'Ohio': {2000: 1.5, 2002: 3.6}}
frame3 = pd.DataFrame(pop)
frame3
frame3.T
# Fixed: the index must be integer years to match the dict's int keys;
# string years ('2001', ...) would align to nothing and yield an all-NaN frame.
pd.DataFrame(pop, index=[2001, 2002, 2003])
pdata = {'Ohio': frame3['Ohio'][:-1],
         'Nevada': frame3['Nevada'][:2]}
pd.DataFrame(pdata)
frame3.index.name='year'
frame3.columns.name = 'state'
frame3
frame3.values
frame2.values
```
### Index Objects
```
# --- Index objects: immutable containers of axis labels ---
obj = pd.Series(range(3), index=['a', 'b', 'c'])
index = obj.index
index
index[1:]
labels = pd.Index(np.arange(3))
labels
obj2 = pd.Series([1.5, -2.5, 0], index=labels)
obj2
# The Series reuses the exact Index object (identity, not just equality).
obj2.index is labels
frame3
frame3.columns
'Ohio' in frame3.columns
2001 in frame3.index
# Unlike Python sets, a pandas Index may contain duplicate labels.
dup_labels = pd.Index(['foo', 'foo', 'bar', 'bar'])
dup_labels
```
# 5.2 Essential Functionality
### Reindexing
```
# --- reindex: conform data to a new index, introducing NaN where missing ---
obj = pd.Series([4.5, 7.2, -5.3, 3.6], index=['d', 'b', 'a', 'c'])
obj2 = obj.reindex(['a', 'b', 'c', 'd', 'e'])
obj2
obj3 = pd.Series(['blue', 'purple', 'yellow'], index=[0,2,4])
obj3
# method='ffill' forward-fills gaps when reindexing an ordered index.
obj3.reindex(range(6), method='ffill')
frame = pd.DataFrame(np.arange(9).reshape((3,3)),
                     index=['a', 'c', 'd'],
                     columns=['Ohio', 'Texas', 'California'])
frame
frame2 = frame.reindex(['a', 'b', 'c', 'd'])
frame2
# Columns can be reindexed with the `columns` keyword.
states = ['Texas', 'Ohio', 'California']
frame.reindex(columns=states)
frame.loc[['a', 'c', 'd'], states]
```
### Dropping Entries from an Axis
```
# --- drop: remove entries from an axis (returns a copy unless inplace=True) ---
obj = pd.Series(np.arange(5.), index=['a', 'b', 'c', 'd', 'e'])
obj
new_obj = obj.drop('c')
new_obj
obj.drop(['d', 'c'], inplace=True)
obj
# drop with dataframe
data = pd.DataFrame(np.arange(16).reshape((4,4)),
                    index=['Ohio', 'Colorado', 'Utah', 'New York'],
                    columns=['one', 'two', 'three', 'four'])
data
data.drop(['Ohio', 'Colorado']) # does not do 'inplace'
# Dropping columns requires axis=1 / axis='columns'.
data.drop('two', axis=1)
data.drop(['two', 'four'], axis='columns')
```
### Indexing, Selection and Filtering
```
# --- indexing, selection, filtering on Series and DataFrame ---
obj = pd.Series(np.arange(4.), index=['a', 'b', 'c', 'd'])
obj
obj['b']
obj[1]
obj[2:4]
obj[['b', 'a', 'd']]
obj[[1, 3]]
obj[obj<2]
# Label-based slicing is INCLUSIVE of the endpoint, unlike integer slicing.
obj['b':'c']
obj['b':'c']=5
obj
data
data['two']
data[['three', 'four']]
data[:2]
data[data['three'] > 5]
data < 5
# Boolean-mask assignment modifies the frame in place.
data[data < 5] = 0
data
```
### Selection with loc and iloc
```
# --- loc (label-based) vs iloc (position-based) selection ---
data.loc['Colorado']
data.loc['Colorado', ['two', 'three']]
data.loc['Colorado', :]
data.iloc[2, [3, 0, 1]]
data.iloc[2]
data.iloc[[1, 2], [3, 0, 1]]
data.loc[:'Utah', 'two']
data.iloc[:, :3][data.three > 3]
data.loc['Colorado', 'two']
# .at is the fast scalar-access equivalent of .loc.
data.at['Colorado', 'two']
```
### Integer Indexes
```
# --- integer indexes: loc slices are inclusive, iloc is purely positional ---
ser = pd.Series(np.arange(3.))
ser
ser[1]
ser.loc[:1]
ser.iloc[1]
```
### Arithmetic and Data Alignment
```
s1 = pd.Series([7.3, -2.5, 3.4, 1.5], index=['a', 'c', 'd', 'e'])
s2 = pd.Series([2.1, 3.6, -1.5, 4, 3.1], index=['a', 'c', 'e', 'f', 'g'])
s1
s2
s1+s2
df1 = pd.DataFrame(np.arange(9.).reshape((3,3)),
columns=list('bcd'),
index=['Ohio', 'Texas', 'Colorado'])
df2 = pd.DataFrame(np.arange(12.).reshape((4, 3)),
columns=list('bde'),
index=['Utah', 'Ohio', 'Texas', 'Oregeon'])
df1
df2
df1 + df2
```
### Arithmetic methods with fill values
```
# --- arithmetic methods with fill values for non-overlapping labels ---
df1 = pd.DataFrame(np.arange(12.).reshape((3, 4)),
                   columns=list('abcd'))
df2 = pd.DataFrame(np.arange(20.).reshape((4, 5)),
                   columns=list('abcde'))
df1
df2
df2.loc[1, 'b'] = np.nan
df2
df1 + df2
# fill_value substitutes 0 for missing labels before adding (NaN+0 stays NaN
# only where BOTH sides are missing).
df1.add(df2, fill_value=0)
1/df1
# rdiv is division with the arguments reversed: df1.rdiv(1) == 1 / df1.
df1.rdiv(1)
df1.reindex(columns=df2.columns, fill_value=0)
```
### Operations between DataFrame and Series
```
# --- operations between a DataFrame and a Series (broadcasting) ---
arr = np.arange(12.).reshape((3, 4))
arr
arr[0]
arr - arr[0]
frame = pd.DataFrame(np.arange(12.).reshape((4, 3)),
                     columns=list('bde'),
                     index=['Utah', 'Ohio', 'Texas', 'Oregeon'])
series = frame.iloc[0]
frame
series
# By default the Series index matches the frame's COLUMNS, broadcasting down rows.
frame - series
series2 = pd.Series(range(3), index=['b', 'e', 'f'])
frame + series2
series3 = frame['d']
frame
series3
# axis='index' broadcasts over columns, matching on the row labels instead.
frame.sub(series3, axis='index')
# frame.sub(series3, axis=0)
```
### Function Application and Mapping
```
# --- function application: apply (per column/row) and applymap (elementwise) ---
frame = pd.DataFrame(np.random.randn(4, 3),
                     columns=list('bde'),
                     index=['Utah', 'Ohio', 'Texas', 'Oregon'])
frame
np.abs(frame)
f = lambda x: x.max() - x.min()
frame.apply(f)
# The applied function may return a Series, producing a DataFrame result.
def f(x):
    return pd.Series([x.min(), x.max()],
                     index=['min', 'max'])
frame.apply(f)
format = lambda x: '%.2f' % x
# NOTE(review): DataFrame.applymap is deprecated in pandas >= 2.1 in favor of
# DataFrame.map — confirm the pandas version in use.
frame.applymap(format)
frame['e'].map(format)
```
### Sorting and Ranking
```
# --- sorting (by index or values) and ranking ---
obj = pd.Series(range(4), index=['d', 'a', 'b', 'c'])
obj.sort_index()
frame = pd.DataFrame(np.arange(8).reshape((2, 4)),
                     index=['three', 'one'],
                     columns=['d', 'a', 'b', 'c'])
frame
frame.sort_index()
frame.sort_index(axis=1, ascending=False)
obj = pd.Series([4, 7, -3, 2])
obj.sort_values()
# Missing values are sorted to the end by default.
obj = pd.Series([4, np.nan, 7, np.nan, -3, 2])
obj.sort_values()
frame = pd.DataFrame({'b': [4, 7, -3, 2],
                      'a': [0, 1, 0, 1]})
frame
frame.sort_values(by='b')
frame.sort_values(by=['a', 'b'])
obj = pd.Series([7, -5, 7, 4, 2, 0, 4])
obj.rank() # by default rank breaks ties by assigning each group the mean rank
# ranks can also be assigned according to the order in which they-re observed in the data
obj.rank(method='first')
obj.rank(ascending=False, method='max')
frame = pd.DataFrame({'b': [4.3, 7, -3, 2],
                      'a': [0, 1, 0, 1],
                      'c': [-2, 5, 8, -2.5]})
frame
# axis='columns' ranks across each row instead of down each column.
frame.rank(axis='columns')
```
### Axis Indexes with Duplicate Labels
```
# --- axis indexes with duplicate labels: selection returns a Series/DataFrame ---
obj = pd.Series(range(5), index=['a', 'a', 'b', 'b', 'c'])
obj
obj.index.is_unique
obj['a']
obj['c']
df = pd.DataFrame(np.random.randn(4, 3),
                  index=['a', 'a', 'b', 'b'])
df
df.loc['b']
```
### Summarizing and Computing Descriptive Statistics
```
# --- summarizing and descriptive statistics (NaN-aware by default) ---
df = pd.DataFrame([[1.4, np.nan], [7.1, -4.5],[np.nan, np.nan], [0.75, -1.3]],
                  index=['a', 'b', 'c', 'd'],
                  columns=['one', 'two'])
df
df.sum()
df.sum(axis='columns')
# skipna=False propagates NaN instead of excluding it.
df.mean(axis='columns', skipna=False)
df.idxmax()
# accumulations
df.cumsum(axis=0)
df.describe()
# For non-numeric data, describe() reports count/unique/top/freq instead.
obj = pd.Series(['a', 'a', 'b', 'c'] * 4)
obj.describe()
```
### Correlation and Covariance
```
import pandas_datareader.data as web
all_data = {ticker: web.get_data_yahoo(ticker) for ticker in ['AAPL', 'IBM', 'MSFT', 'GOOG']}
price = pd.DataFrame({ticker: data['Adj Close'] for ticker, data in all_data.items()})
volume = pd.DataFrame({ticker: data['Volume'] for ticker, data in all_data.items()})
returns = price.pct_change()
returns.tail()
returns['MSFT'].corr(returns['IBM'])
returns['MSFT'].cov(returns['IBM'])
returns.MSFT.corr(returns.IBM)
returns.corr()
returns.cov()
returns.corrwith(returns.IBM)
returns.corrwith(volume)
```
### Unique Values, Value Counts, and Membership
```
obj = pd.Series(['c', 'a', 'd', 'a', 'a', 'b','b', 'c', 'c'])
uniques = obj.unique()
uniques
obj.value_counts()
pd.value_counts(obj.values, sort=False)
obj
mask = obj.isin(['b', 'c'])
mask
obj[mask]
to_match = pd.Series(['c', 'a', 'b', 'b', 'c', 'a'])
unique_vals = pd.Series(['c', 'b', 'a'])
pd.Index(unique_vals).get_indexer(to_match)
data = pd.DataFrame({'Qu1': [1, 3, 4, 3, 4],
'Qu2': [2, 3, 1, 2, 3],
'Qu3': [1, 5, 2, 4, 4]})
data
result = data.apply(pd.value_counts)
result
```
| github_jupyter |
<a href="https://colab.research.google.com/github/BreakoutMentors/Data-Science-and-Machine-Learning/blob/main/basics/Python_Intro_and_Object_Oriented_Programming.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Python Introduction with Object Oriented Programming
This section gives you the information you need to understand how to use Python!
# Variables
Variables are containers for storing values in Python, and they are very important in understanding python and programming in general!
```
# Assigning the number 5 to the variable x
print("Assigning the number 5 to the variable x")
x = 5
print('x equals', x)
# Changing x to be assigned to the number 3
print("\nChanging x to be assigned to the number 3")
x = 3
print('x =', x)
# Initializing the variable y to have the number 1 assigned to it
print('\nInitializing the variable y to have the number 1 assigned to it')
y = 1
print('y =', y)
# Assigning the value of x to y
print('\nAssigning the value of x to y')
y = x
print('y =', y)
# Incrementing y by +3
print('\nIncrementing y by +3')
y = y + 3
print('y =', y)
# Checking if x was changed, since we wrote y = x
print('\nChecking if x was changed, since we wrote y = x')
print('x =', x)
print('You can see when we used y = x; x was not changed when we changed y, therefore the value of x is copied and assigned to y')
```
# Strings
A string is a datatype that holds text in our variables.
## String Usage
```
# String section
# Initalizing variables first_name and last_name; Please enter your own
first_name = 'John' # Replace this with your own name
last_name = "Rivera" # Replace this with your own name
# Adding strings
name = first_name + " " + last_name
print(name)
# Indexing characters in a string
print('First letter of your first name:', first_name[0])
# Looping strings
print('\nLooping through a string a character at a time')
for character in name:
print(character)
```
## Python Built-in String methods
Python has built-in string methods that allow us to do many useful changes to the strings. I will show some popular methods, but [here is a full list](https://www.w3schools.com/python/python_ref_string.asp).
```
# Capitalize method; capitalizes the first letter of the string
lower_case_sentence = "adam is the person writing this code"
upper_case_sentence = lower_case_sentence.capitalize()
print('Capitalize Method:', upper_case_sentence)
# Split method; splits the string by a character into a list with different values
print('\nName Before Split:', name)
split_name = name.split(' ')
print('Splitting Name with a space:', split_name)
# Join method; joins items in a list by a character
print('\nJoining items in split_name by a space:', ' '.join(split_name))
# Stripping whitespace of a string; whitespace means empty space values before and after a string
white_space = ' lots of spaces '
print('\nThis is a string with lots of white space:', white_space)
print('\nThis is the same string with white space removed:', white_space.strip())
```
# Integers
An integer is a datatype that holds whole numbers (e.g. 1, 10, 15).
You are able to do mathematical operations with integers.
1. Addition (+)
2. Subtraction (-)
3. Multiplication (*)
4. Division (/)
5. Floor Division (//)
6. Exponents (**)
7. [Modulo](https://www.freecodecamp.org/news/the-python-modulo-operator-what-does-the-symbol-mean-in-python-solved/) (%):
- This gives you the remainder of division, for example `10%3` gives 1 since 3 divides into 10 only 3 times, so the remainder is 1.
```
# Assigning the number 15 to x
x = 15
# Adding 3 to x
print('Adding 3 to x to become 18')
x = x + 3
print('x:', x)
# Subtracting 3 from x
print('\nSubtracting 3 from x to become 15')
x = x - 3
print('x:', x)
# Multipling 2 times x
print('\nMultiplying x by 2 to become 30')
x = 2 * x
print('x:', x)
# Dividing x by 3
print('\nDividing x by 3 to become 10')
x = x / 3
print('x:', x)
print('\nUsing Floor Division to divide x by 3')
print('First using regular division which will give a decimal(x):', x/3)
x = x // 3
print('Floor division rounds the result to the lower whole number')
print('x:', x)
# Squaring x or giving the power of 2 to x
print('\nSquaring x to become 9')
x = x ** 2
print('x:', x)
# Getting the remainder of dividing x by 2
print('\nRemainder of dividing x by 2')
x = x % 2
print('x:', x)
```
# Floats
A float is a datatype that holds decimal values as well as whole numbers (e.g. 1.5, 10.11, -12.33, -5.0).
All of the same mathematical operations can be used on floats.
```
# Assigning the number 12.5 to x
x = 12.5
# Adding 3 to x
print('Adding 3 to x to become 15.5')
x = x + 3
print('x:', x)
# Subtracting 3 from x
print('\nSubtracting 3 from x to become 12.5')
x = x - 3
print('x:', x)
# Multipling 2 times x
print('\nMultiplying x by 2 to become 25')
x = 2 * x
print('x:', x)
# Dividing x by 3
print('\nDividing x by 3 to become 8.333')
x = x / 3
print('x:', x)
print('\nUsing Floor Division to divide x by 3')
print('First using regular division which will give a decimal(x):', x/3)
x = x // 3
print('Floor division rounds the result to the lower whole number')
print('x:', x)
# Squaring x or giving the power of 2 to x
print('\nSquaring x to become 4')
x = x ** 2
print('x:', x)
# Getting the remainder of dividing x by 3
print('\nRemainder of dividing x by 3')
x = x % 3
print('x:', x)
```
# Lists
Lists are a built-in data structure in Python that are able to store data of different datatypes. What is great about lists is that they are expandable, changeable, and ordered.
Below show examples of how to use a list, but there are many other great uses so [here is a resource](https://www.w3schools.com/python/python_lists.asp).
```
# Creating a list named a with items with different datatypes
a = [1, 10, 'book', 1.3, 'chair']
# Selecting individual items
print('You are able to get single items from list using their index')
print('Grabbing the first item:', a[0])
print('Grabbing the third item:', a[2])
print('Grabbing the last item:', a[-1])
print('The index is a number that is given to match the item in the list, it starts from 0 and ends at 4 because there are 5 items in list a')
# Slicing list
print('\nYou are able to select multiple items in the list using slicing')
print('Grabbing the first three items:', a[0:3])
print('Grabbing the last three items:', a[-3:])
print('This works by using a[x(inclusive):y(exclusive)] where item at index x is included and item at index y is not included, so it goes from x to (y-1)')
# Adding items to a list, using the append method
print('\nAdding 101.33 to the list a')
a.append(101.33)
print('a:', a)
# Replacing items by index
print("\nReplacing item at index 4; replacing 'chair' with 'couch'")
print('a[4] before replacement:', a[4])
a[4] = 'couch'
print('a[4] after replacement:', a[4])
print('a after replacement:', a)
# Removing an item in the list using the remove method
print('\nRemoving the number 10 from the list a')
a.remove(10)
print('a:', a)
# Getting length of a
print('\nYou can get the length of list a using the len() function')
print('Length of a:', len(a))
```
# Tuples
Tuples are a built-in data structure in Python that are able to store data of different datatypes. They are very similar to lists in that they are ordered, but they are NOT changeable, so you cannot add more items and cannot change individual items by index.
Below show examples of how to use a tuple, but there are many other great uses so [here is a resource](https://www.w3schools.com/python/python_tuples.asp).
```
# Creating tuple
tuple_b = ('backpack', 2, 'desk', 'pencil', 10.5)
# Selecting individual items
print('You are able to get single items from tuples using their index')
print('Grabbing the first item:', tuple_b[0])
print('Grabbing the third item:', tuple_b[2])
print('Grabbing the last item:', tuple_b[-1])
print('The index is a number that is given to match the item in the tuple, it starts from 0 and ends at 4 because there are 5 items in tuple_b')
# Slicing list
print('\nYou are able to select multiple items in the tuple using slicing')
print('Grabbing the first three items:', tuple_b[0:3])
print('Grabbing the last three items:', tuple_b[-3:])
print('This works by using a[x(inclusive):y(exclusive)] where item at index x is included and item at index y is not included, so it goes from x to (y-1)')
```
# Sets

Sets are another built-in Python data structure that comes from [mathematical set theory](https://en.wikipedia.org/wiki/Set_theory), which holds objects of different datatypes just like list and tuples but they cannot hold replicates. Sets are used to find objects that overlap between sets (intersection), join objects from both sets (union), or find differences between sets (difference). Sets are both unordered and unindexed, so you cannot access items by an index.
[Click here for more information on sets.](https://www.w3schools.com/python/python_sets.asp)
```
# Creating a set of fruits with repeated fruits.
# Defect fixed: the produce names were misspelled ('broccli', 'spanich') in
# both the set literals and the matching remove()/print calls.
fruits = {"apple", "banana", "banana", "guava"}
print("Fruit Set:", fruits)
print("Banana was placed twice, but you can see it only kept 1 because there cannot be replicates in sets")

# Creating a set that mixes fruits and veggies
fruits_and_veggies = {'apple', 'broccoli', 'spinach'}
print("\nFruits and Veggies Set:", fruits_and_veggies)

# Finding the intersection between both sets (items present in BOTH sets)
print("\nFinding intersection between the two sets")
print("Intersection:", fruits.intersection(fruits_and_veggies))

# Finding the difference between the sets (items in the first set but not the second)
print("\nFinding difference between the two sets")
print("Difference between 'fruits' set and 'fruits_and_veggies':", fruits.difference(fruits_and_veggies))
print("Difference between 'fruits_and_veggies' set and 'fruits':", fruits_and_veggies.difference(fruits))

# Adding more items to a set
fruits.add('starfruit')
print("\nAdded starfruit to 'fruits' set:", fruits)

# Removing items in a set
fruits_and_veggies.remove('spinach')
print("Removed spinach from 'fruits_and_veggies'", fruits_and_veggies)
```
# Dictionaries
Dictionaries are a built-in data structure in Python that allows you to store values in key:value pairs. The reason why dictionaries are really useful is that you can use keys to access items in a dictionary rather than an index in lists.
[Click here for more information on dictionaries.](https://www.w3schools.com/python/python_dictionaries.asp)
```
# Creating a dictionary with information about me
adam_dict = {'first_name':'Adam',
'last_name':'Villarreal',
'fav_ice_cream':'Mint Chocolate Chip'}
print('The keys are what go left of the colon(:) and the values are to the right of the colon')
# Accessing items
print('\nYou can access values by their keys')
print("Getting my first name with 'first_name' key:", adam_dict['first_name'])
# Changing items
print('\nYou can also change values by reassignment')
new_fav_flavor = "Rocky Road"
adam_dict['fav_ice_cream'] = new_fav_flavor # Reassinging my new value
print('New Flavor:', adam_dict['fav_ice_cream'])
# Deleting items
print('\nYou can delete entire key:item pairs (items) using .pop() method')
adam_dict.pop('fav_ice_cream')
print(adam_dict)
# Adding items
print('\nYou can add items by assignment or using the .update() method')
adam_dict['fav_sport'] = 'Soccer'
adam_dict.update({'fav_coffee_drink':'Latte'})
print(adam_dict)
```
# Looping over Lists, Sets, and Dictionaries
## Looping Lists
There are two ways to loop over lists:
1. Loop by element
2. Loop by index
```
list_a = [1, 10, 'book', 1.3, 'chair']
# Looping by element
print('Looping by element:')
for elem in list_a:
print(elem)
# Looping by index
print("\nLooping by index:")
for i in range(len(list_a)):
print(list_a[i])
print("range(len(list_a)) gives you a 'range' object that has ordered numbers from 0 to (length of list_a-1)")
print("If range(len(list_a)) is converted to a list, it looks like this:", list(range(len(list_a))))
```
## Looping Tuples
Looping tuples is exactly the same as looping lists.
```
tuple_b = ('backpack', 2, 'desk', 'pencil', 10.5)
# Looping by element
print('Looping by element:')
for elem in tuple_b:
print(elem)
# Looping by index
print("\nLooping by index:")
for i in range(len(tuple_b)):
print(tuple_b[i])
```
## Looping Sets
You can only loop through sets by element since sets are naturally unindexed, so you cannot access items by indices.
```
fruits = {"apple", "banana", "banana", "guava"}
for fruit in fruits:
print(fruit)
```
## Looping Dictionaries
To loop dictionaries, there are three ways:
1. Loop by keys
2. Loop by values
3. Loop by key:value item pairs
```
adam_dict = {'first_name':'Adam',
'last_name':'Villarreal',
'fav_ice_cream':'Mint Chocolate Chip'}
# Looping by keys
print('Looping by keys')
for key in adam_dict:
print('key:', key)
print('value:', adam_dict[key])
# Looping by Values
print('\nLooping by values')
for value in adam_dict.values():
print("Value:", value)
# Looping by items
print('\nLooping by items')
for key, value in adam_dict.items():
print('key:', key)
print('value:', value)
```
# Python is an Object Oriented Programming (OOP) Language
I highly recommend you to read this introduction of [Object Oriented Programming](https://www.educative.io/blog/object-oriented-programming).
At a high level, OOP Programming is a programming style where you are able to create data types that are abstract and very malleable. `Classes` are what allows us to do this, classes are essentially blueprints of the datatype you want to create, and you create `objects` from the `class`!
## Creating Classes
There are two parts of classes:
1. Constructor - This creates the object of the class, it also uses parameters!
- You save data from the parameters in the constructor by defining the class's attributes.
2. Methods - These are functions you create that uses the class's attributes.
```
# Creating a class called Car
class Car():
    """A simple car owned by one person.

    Attributes:
        wheels: how many wheels the car has
        color: the car's color
        name: the owner's name
    """

    def __init__(self, num_wheels, color, owners_name):
        """Store the wheel count, color, and owner's name on the instance."""
        self.wheels = num_wheels
        self.color = color
        self.name = owners_name

    # Accessor methods: read the attributes without touching them directly.
    def get_owners_name(self):
        """Return the owner's name."""
        return self.name

    def get_color(self):
        """Return the car's color."""
        return self.color

    def get_num_wheels(self):
        """Return the number of wheels."""
        return self.wheels

    def drive(self, num_miles):
        """Print a message saying whose car drove how many miles."""
        print(f"{self.name}'s car drives {num_miles} miles.")
# Creating Object
my_car = Car(num_wheels=4, color='Blue', owners_name='Adam')
# Printing attributes
print('Wheels:', my_car.wheels)
print('Color:', my_car.color)
print('Name:', my_car.name)
# Using accessor methods
print('\nNow using accessor methods:')
print('Wheels:', my_car.get_num_wheels())
print('Color:', my_car.get_color())
print('Name:', my_car.get_owners_name())
# Using 'drive' method
miles = 5
my_car.drive(miles)
```
## Inheritance
Inheritance allows a class to use the same attributes and methods of another class; usually the class that inherits is called the `child class` or `subclass` while the class that is being inherited from is called the `parent class`.
We will be creating a Pickup Truck class that will inherit from the Car class, since a Pickup Truck also has a set number of wheels, a certain color, and a specific owner, and a Pickup Truck also drives! But only Pickup Trucks have a trunk bed, so it's not just a normal car.
When defining the constructor of the subclass, you need to call the constructor of the parent class as well.
[Please read more about inheritance here](https://www.w3schools.com/python/python_inheritance.asp).
```
# Creating Pickup Truck class by inheriting from the Car class
class Pickup_Truck(Car):
    """A pickup truck: a Car (inherited) plus a trunk bed that holds items."""

    def __init__(self, color, owners_name):
        """Build a 4-wheeled truck via the Car constructor, with an empty trunk."""
        super().__init__(num_wheels=4, color=color, owners_name=owners_name)
        # State that only pickup trucks have.
        self.trunk_bed = True
        self.items_in_trunk = []

    def add_item_to_trunk(self, item):
        """Put one item into the trunk bed and announce it."""
        self.items_in_trunk.append(item)
        print('Added {} to the trunk'.format(item))

    def list_items_in_trunk(self):
        """Print the trunk contents, or say it is empty."""
        if not self.items_in_trunk:
            print('Trunk is Empty')
            return
        print('This is what is in the trunk:')
        for thing in self.items_in_trunk:
            print(thing)

    def drive(self, num_miles):
        """Override Car.drive so the message says 'truck' instead of 'car'."""
        print(f"{self.name}'s truck drives {num_miles} miles.")
# Creating Object
my_truck = Pickup_Truck(color='White', owners_name='Adam')
# Printing attributes
print('Wheels:', my_truck.wheels)
print('Color:', my_truck.color)
print('Name:', my_truck.name)
print('Has Truck Bed:', my_truck.trunk_bed)
# Using accessor methods
print('\nNow using accessor methods:')
print('Wheels:', my_truck.get_num_wheels())
print('Color:', my_truck.get_color())
print('Name:', my_truck.get_owners_name())
# Using 'drive' method
miles = 8
my_truck.drive(miles)
print('\nNow adding items to list!')
my_truck.list_items_in_trunk()
my_truck.add_item_to_trunk('Shovel')
my_truck.add_item_to_trunk('Lawn Mower')
my_truck.list_items_in_trunk()
```
| github_jupyter |
# Random Forests
Random Forests are a popular form of "ensembling" — the strategy of combining multiple different kinds of ML models to make a single decision. In ensembling in general any number of models might be combined, many different types of models might be used, and their votes might be weighted or unweighted.
A Random Forest is a specific strategy for applying the concept of ensembling to a series of Decision Trees. Two techniques are used in order to ensure that each Decision Tree is different from the other trees in the forest:
1. Bagging (short for bootstrap aggregation), and
2. Random feature selection.
Bagging is a fancy term for sampling with replacement. For us, it means that for every underlying decision tree we randomly sample the items in our training data, with replacement, typically up to the size of the training data (but this is a hyperparameter you can change).
In a standard decision tree we consider EVERY feature and EVERY possible split point per feature. With random feature selection we instead specify a number of features to consider for split points when we first build the model. Every time we make a new split, we randomly select that number of features to consider. Among the selected features every split point will still be considered, and the optimum split will still be chosen, but the model will not have access to every possible feature at every possible split point.
These two changes generally make RF's a bit more robust than DT's. In particular an RF is less prone to overfitting than a DT. Conversely, DTs are generally faster to train and use, since you're only building one tree as opposed to many.
Anything that you can control via hyperparameters in a DT can be applied in an RF, as well as a few unique hyperparameters such as the number of trees to build.
```
# Lets look at the same examples from the DT lessons.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split, KFold
from sklearn.ensemble import RandomForestClassifier
# Load the data
heart_dataset = pd.read_csv('../../datasets/uci-heart-disease/heart.csv')
# Split the data into input and labels
labels = heart_dataset['target']
input_data = heart_dataset.drop(columns=['target'])
# Split the data into training and test
training_data, test_data, training_labels, test_labels = train_test_split(
input_data,
labels,
test_size=0.20
)
model = RandomForestClassifier()
model.fit(training_data, training_labels)
model.score(test_data, test_labels)
# We can still get the feature importances:
feat_importances = pd.Series(model.feature_importances_, index=training_data.columns)
feat_importances.sort_values().plot(kind='barh', figsize=(10,10))
from sklearn.ensemble import RandomForestRegressor
# Load the data
fish_dataset = pd.read_csv('../../datasets/fish/Fish.csv')
# Split the data into input and labels — we're trying to predict fish weight based on
# its size and species
labels = fish_dataset['Weight']
input_data = fish_dataset.drop(columns=['Weight'])
# We have one categorical parameter, so lets tell pandas to one-hot encode this value.
input_data = pd.get_dummies(input_data, columns=['Species'])
# Split the data into training and test
training_data, test_data, training_labels, test_labels = train_test_split(
input_data,
labels,
test_size=0.20
)
model = RandomForestRegressor()
model.fit(training_data, training_labels)
model.score(test_data, test_labels)
feat_importances = pd.Series(model.feature_importances_, index=training_data.columns)
feat_importances.sort_values().plot(kind='barh', figsize=(10,10))
```
| github_jupyter |
## Dependencies
```
import glob
import numpy as np
import pandas as pd
from transformers import TFDistilBertModel
from tokenizers import BertWordPieceTokenizer
import tensorflow as tf
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense, Input, Dropout, GlobalAveragePooling1D, Concatenate
# Auxiliary functions
# Transformer inputs
def preprocess_test(text, context, tokenizer, max_seq_len):
    """Build model inputs of length `max_seq_len` for one (text, context) pair.

    Prepends [CLS] + context + [SEP] to the tokenized text, then trims the
    combined sequence back to `max_seq_len` and adjusts the attention mask
    and segment ids to match.

    NOTE(review): the hard-coded 3 assumes the context encodes to exactly ONE
    wordpiece, so the prefix [CLS] + context + [SEP] is 3 tokens long —
    confirm this holds for every context value fed in.

    Returns a list of three int32 numpy arrays:
    [input_ids, attention_mask, token_type_ids].
    """
    # Context token ids without the surrounding [CLS]/[SEP] specials.
    ctx_ids = tokenizer.encode(context).ids[1:-1]

    # Tokenize the text and force it to exactly max_seq_len positions.
    enc = tokenizer.encode(text)
    enc.pad(max_seq_len)
    enc.truncate(max_seq_len)

    # Prefix the context, then drop 3 trailing tokens to keep the length fixed.
    ids = ([101] + ctx_ids + [102] + enc.ids)[:-3]
    mask = [1] * 3 + enc.attention_mask[:-3]
    # Segment 0 = the 3-token context prefix, segment 1 = the text.
    segments = [0] * 3 + [1] * (max_seq_len - 3)

    return [np.asarray(ids, dtype=np.int32),
            np.asarray(mask, dtype=np.int32),
            np.asarray(segments, dtype=np.int32)]
def get_data_test(df, tokenizer, MAX_LEN):
    """Vectorize every row of `df` into batched model inputs.

    Each row's 'text' and 'sentiment' columns are run through
    preprocess_test, and the per-row arrays are stacked.

    Returns a list of three numpy arrays (one batch each):
    [input_ids, attention_masks, token_type_ids].
    """
    ids_batch, mask_batch, segment_batch = [], [], []
    for row in df.itertuples():
        ids, mask, segments = preprocess_test(
            getattr(row, "text"), getattr(row, "sentiment"), tokenizer, MAX_LEN)
        ids_batch.append(ids)
        mask_batch.append(mask)
        segment_batch.append(segments)
    return [np.asarray(ids_batch), np.asarray(mask_batch), np.asarray(segment_batch)]
def decode(pred_start, pred_end, text, tokenizer):
    """Map predicted token positions back to the matching substring of `text`.

    Uses the tokenizer's character offsets to slice `text` from token
    `pred_start` through token `pred_end` (inclusive), re-inserting a single
    space wherever adjacent tokens are separated in the original text.
    """
    offsets = tokenizer.encode(text).offsets
    # Clamp an out-of-range end prediction to the last real token.
    if pred_end >= len(offsets):
        pred_end = len(offsets) - 1
    pieces = []
    for i in range(pred_start, pred_end + 1):
        start, end = offsets[i]
        pieces.append(text[start:end])
        if (i + 1) < len(offsets) and end < offsets[i + 1][0]:
            pieces.append(" ")
    return "".join(pieces)
```
# Load data
```
test = pd.read_csv('/kaggle/input/tweet-sentiment-extraction/test.csv')
print('Test samples: %s' % len(test))
display(test.head())
```
# Model parameters
```
MAX_LEN = 128
base_path = '/kaggle/input/qa-transformers/distilbert/'
base_model_path = base_path + 'distilbert-base-uncased-distilled-squad-tf_model.h5'
config_path = base_path + 'distilbert-base-uncased-distilled-squad-config.json'
tokenizer_path = base_path + 'bert-large-uncased-vocab.txt'
input_base_path = '/kaggle/input/1-tweet-train-distilbert/'
# tokenizer_path = glob.glob(input_base_path + '*.txt')
model_path_list = glob.glob(input_base_path + '*.h5')
model_path_list.sort()
print('Models to predict:')
print(*model_path_list, sep = "\n")
```
# Tokenizer
```
tokenizer = BertWordPieceTokenizer(tokenizer_path , lowercase=False)
```
# Pre process
```
test['text'].fillna('', inplace=True)
x_test = get_data_test(test, tokenizer, MAX_LEN)
```
# Model
```
def model_fn():
    """Build the span-prediction model: a DistilBERT encoder with two heads.

    Returns an uncompiled Keras Model that maps (input_ids, attention_mask,
    token_type_ids) to two MAX_LEN-wide sigmoid outputs: per-position scores
    for the answer-span start and end. Reads the module-level MAX_LEN,
    base_model_path and config_path.
    """
    input_ids = Input(shape=(MAX_LEN,), dtype=tf.int32, name='input_ids')
    attention_mask = Input(shape=(MAX_LEN,), dtype=tf.int32, name='attention_mask')
    token_type_ids = Input(shape=(MAX_LEN,), dtype=tf.int32, name='token_type_ids')

    base_model = TFDistilBertModel.from_pretrained(base_model_path, config=config_path, name="base_model")
    # NOTE(review): DistilBERT has no token-type embeddings; depending on the
    # transformers version, passing 'token_type_ids' may be ignored or raise —
    # confirm against the pinned library version.
    sequence_output = base_model({'input_ids': input_ids, 'attention_mask': attention_mask, 'token_type_ids': token_type_ids})
    last_state = sequence_output[0]  # last hidden states, shape (batch, MAX_LEN, hidden)

    # Pool over the sequence axis, then score every position for start/end.
    x = GlobalAveragePooling1D()(last_state)
    y_start = Dense(MAX_LEN, activation='sigmoid', name='y_start')(x)
    y_end = Dense(MAX_LEN, activation='sigmoid', name='y_end')(x)

    model = Model(inputs=[input_ids, attention_mask, token_type_ids], outputs=[y_start, y_end])
    return model
```
# Make predictions
```
# NOTE(review): NUM_TEST_IMAGES looks like a leftover name from an image
# competition — these are tweet rows, not images.
NUM_TEST_IMAGES = len(test)
# Accumulators for the ensemble-averaged start/end probabilities.
test_start_preds = np.zeros((NUM_TEST_IMAGES, MAX_LEN))
test_end_preds = np.zeros((NUM_TEST_IMAGES, MAX_LEN))

# Each fold checkpoint contributes an equal 1/len(model_path_list) share,
# so the accumulated arrays end up as the mean over all models.
for model_path in model_path_list:
    print(model_path)
    model = model_fn()
    model.load_weights(model_path)
    test_preds = model.predict(x_test)
    test_start_preds += test_preds[0] / len(model_path_list)
    test_end_preds += test_preds[1] / len(model_path_list)
```
# Post process
```
# Turn the per-position scores into token indices via argmax.
test['start'] = test_start_preds.argmax(axis=-1)
test['end'] = test_end_preds.argmax(axis=-1)
# Decode the predicted token span back into a substring of the tweet.
test['selected_text'] = test.apply(lambda x: decode(x['start'], x['end'], x['text'], tokenizer), axis=1)
# Replace empty predictions with '.' — presumably to avoid empty submission
# fields; confirm against the competition's scoring rules.
test["selected_text"] = test["selected_text"].apply(lambda x: '.' if x.strip() == '' else x)
# selected_text = test for neutral sentiment
test['selected_text'] = test.apply(lambda x: x['text'] if x['sentiment'] == 'neutral' else x['selected_text'], axis=1)
```
# Test set predictions
```
submission = pd.read_csv('/kaggle/input/tweet-sentiment-extraction/sample_submission.csv')
submission['selected_text'] = test["selected_text"]
submission.to_csv('submission.csv', index=False)
submission.head(10)
```
| github_jupyter |
```
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.feature_selection import RFECV,RFE
from sklearn.model_selection import train_test_split, GridSearchCV, KFold,RandomizedSearchCV
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from sklearn import metrics
from xgboost import XGBClassifier
from sklearn.metrics import accuracy_score,f1_score
import numpy as np
from sklearn.metrics import make_scorer
f1_score = make_scorer(f1_score)
#import data
Data=pd.read_csv("Kitchener-Transfomed-Data-BS-NoBreak.csv")
X = Data.iloc[:,:-1]
y = Data.iloc[:,-1]
#split test and training set.
np.random.seed(60)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.15,
random_state = 1000)
#Define estimator and model
classifiers = {}
classifiers.update({"XGBoost": XGBClassifier(random_state=1000,eval_metric=f1_score,use_label_encoder=False)})
#Define range of hyperparameters for estimator
np.random.seed(60)
parameters = {}
parameters.update({"XGBoost": {"classifier__eta":[0.001,0.01,0.02,0.1,0.25,0.5,1],
"classifier__alpha":[0.001,0.01,0.02,0.1],
"classifier__min_child_weight" : [0.001,0.01,0.02,0.1],
"classifier__lambda" :[0.001,0.01,0.02,0.1],
"classifier__gamma" :[0.001,0.01,0.02,0.1,0.25,0.5,1],
#"classifier__max_depth": [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,1920]
}})
# Make correlation matrix
corr_matrix = X_train.corr(method = "spearman").abs()
# Draw the heatmap
sns.set(font_scale = 1.0)
f, ax = plt.subplots(figsize=(11, 9))
sns.heatmap(corr_matrix, cmap= "YlGnBu", square=True, ax = ax)
f.tight_layout()
plt.savefig("correlation_matrix.png", dpi = 1080)
# Select upper triangle of matrix
upper = corr_matrix.where(np.triu(np.ones(corr_matrix.shape), k = 1).astype(np.bool))
# Find index of feature columns with correlation greater than 0.8
to_drop = [column for column in upper.columns if any(upper[column] > 0.8)]
# Drop features
X_train = X_train.drop(to_drop, axis = 1)
X_test = X_test.drop(to_drop, axis = 1)
X_train
FEATURE_IMPORTANCE = {"XGBoost"}
selected_classifier = "XGBoost"
classifier = classifiers[selected_classifier]
scaler = StandardScaler()
steps = [("scaler", scaler), ("classifier", classifier)]
pipeline = Pipeline(steps = steps)
#Define parameters that we want to use in gridsearch cv
param_grid = parameters[selected_classifier]
# Initialize gridsearchCV object for estimator
gscv =RandomizedSearchCV(pipeline, param_grid, cv = 3, n_jobs=-1, verbose = 3, scoring = f1_score, n_iter =10)
# Fit gscv (Tunes estimator)
print(f"Now tuning {selected_classifier}. Go grab a beer or something.")
gscv.fit(X_train, np.ravel(y_train))
#Getting the best hyperparameters
best_params = gscv.best_params_
best_params
#Getting the best score of model
best_score = gscv.best_score_
best_score
#Check overfitting of the estimator
from sklearn.model_selection import cross_val_score
mod = XGBClassifier(alpha=0.02,
eta= 0.5,
gamma= 0.01,
reg_lambda=0.001,
#max_Depth=7,
min_child_weight=0.1,
eval_metric='mlogloss',
random_state=10000)
scores_test = cross_val_score(mod, X_test, y_test, scoring='f1', cv=5)
scores_test
tuned_params = {item[12:]: best_params[item] for item in best_params}
classifier.set_params(**tuned_params)
#Find f1 score of the model with all features (Model is tuned for all features)
results={}
model=classifier.set_params(alpha=0.02,
eta= 0.5,
gamma= 0.01,
reg_lambda=0.001,
#max_Depth=7,
min_child_weight=0.1,
eval_metric='mlogloss',
random_state=10000)
model.fit(X_train,y_train)
y_pred = model.predict(X_test)
F1 = metrics.f1_score(y_test, y_pred)
results = {"classifier": model,
"Best Parameters": best_params,
"Training f1": best_score*100,
"Test f1": F1*100}
results
# Select Features using RFECV
class PipelineRFE(Pipeline):
    # Source: https://ramhiser.com/post/2018-03-25-feature-selection-with-scikit-learn-pipeline/
    """Pipeline subclass that surfaces feature_importances_ for use with RFE/RFECV."""

    def fit(self, X, y=None, **fit_params):
        """Fit the pipeline, then copy the final estimator's feature importances."""
        super(PipelineRFE, self).fit(X, y, **fit_params)
        # RFECV requires feature_importances_ (or coef_) on the estimator it
        # wraps; expose the last pipeline step's importances at this level.
        self.feature_importances_ = self.steps[-1][-1].feature_importances_
        return self
steps = [("scaler", scaler), ("classifier", classifier)]
pipe = PipelineRFE(steps = steps)
np.random.seed(60)
# Initialize RFECV object
feature_selector = RFECV(pipe, cv = 5, step = 1, verbose = 3)
# Fit RFECV
feature_selector.fit(X_train, np.ravel(y_train))
# Get selected features
feature_names = X_train.columns
selected_features = feature_names[feature_selector.support_].tolist()
performance_curve = {"Number of Features": list(range(1, len(feature_names) + 1)),
"F1": feature_selector.grid_scores_}
performance_curve = pd.DataFrame(performance_curve)
# Performance vs Number of Features
# Set graph style
sns.set(font_scale = 1.75)
sns.set_style({"axes.facecolor": "1.0", "axes.edgecolor": "0.85", "grid.color": "0.85",
"grid.linestyle": "-", 'axes.labelcolor': '0.4', "xtick.color": "0.4",
'ytick.color': '0.4'})
colors = sns.color_palette("RdYlGn", 20)
line_color = colors[3]
marker_colors = colors[-1]
# Plot
f, ax = plt.subplots(figsize=(13, 6.5))
sns.lineplot(x = "Number of Features", y = "F1", data = performance_curve,
color = line_color, lw = 4, ax = ax)
sns.regplot(x = performance_curve["Number of Features"], y = performance_curve["F1"],
color = marker_colors, fit_reg = False, scatter_kws = {"s": 200}, ax = ax)
# Axes limits
plt.xlim(0.5, len(feature_names)+0.5)
plt.ylim(0.60, 1)
# Generate a bolded horizontal line at y = 0
ax.axhline(y = 0.625, color = 'black', linewidth = 1.3, alpha = .7)
# Turn frame off
ax.set_frame_on(False)
# Tight layout
plt.tight_layout()
#Define new training and test sets based on the features selected by RFECV
X_train_rfecv = X_train[selected_features]
X_test_rfecv= X_test[selected_features]
np.random.seed(60)
# Refit the classifier on the reduced feature set
classifier.fit(X_train_rfecv, np.ravel(y_train))
#Finding important features
np.random.seed(60)
feature_importance = pd.DataFrame(selected_features, columns = ["Feature Label"])
feature_importance["Feature Importance"] = classifier.feature_importances_
# Rank features from most to least important
feature_importance = feature_importance.sort_values(by="Feature Importance", ascending=False)
feature_importance
# Initialize RandomizedSearchCV for the model restricted to the RFECV-selected features.
np.random.seed(60)
# FIX: `scoring` must be a scorer or scorer name, not a plain metric function.
# metrics.f1_score takes (y_true, y_pred), while a scorer is called as
# (estimator, X, y), so passing f1_score directly fails at fit time.
# The built-in 'f1' string is equivalent to make_scorer(f1_score).
gscv = RandomizedSearchCV(pipeline, param_grid, cv = 3, n_jobs= -1, verbose = 3, scoring = 'f1', n_iter=50)
#Tuning random forest classifier with selected features
np.random.seed(60)
gscv.fit(X_train_rfecv,y_train)
#Getting the best parameters of model with selected features
best_params = gscv.best_params_
best_params
#Getting the score of model with selected features
best_score = gscv.best_score_
best_score
#Check overfitting of the tuned model with selected features
from sklearn.model_selection import cross_val_score
# XGBoost model configured with the tuned hyperparameters
mod = XGBClassifier(alpha=0.01,
eta=0.25,
gamma=0.01,
reg_lambda=0.001,
#max_depth=4,
min_child_weight=0.1,
eval_metric='mlogloss',
random_state=10000)
# NOTE(review): this cross-validates (re-fits) on the TEST split, not the
# training split — confirm this is intended purely as an overfitting check
scores_test = cross_val_score(mod, X_test_rfecv, y_test, scoring='f1', cv=5)
scores_test
# Final evaluation: apply the tuned hyperparameters to the classifier, fit on
# the RFECV-selected training features, and score on the matching test features.
results={}
model=classifier.set_params(alpha=0.01,
eta=0.25,
gamma=0.01,
reg_lambda=0.001,
#max_depth=4,
min_child_weight=0.1,
eval_metric='mlogloss',
random_state=10000)
model.fit(X_train_rfecv,y_train)
y_pred = model.predict(X_test_rfecv)
F1 = metrics.f1_score(y_test, y_pred)
# Scores reported as percentages
results = {"classifier": model,
"Best Parameters": best_params,
"Training f1": best_score*100,
"Test f1": F1*100}
results
```
| github_jupyter |
# Web Data Scraping
[Spring 2019 ITSS Mini-Course](https://www.colorado.edu/cartss/programs/interdisciplinary-training-social-sciences-itss/mini-course-web-data-scraping) — ARSC 5040
[Brian C. Keegan, Ph.D.](http://brianckeegan.com/)
[Assistant Professor, Department of Information Science](https://www.colorado.edu/cmci/people/information-science/brian-c-keegan)
University of Colorado Boulder
Copyright and distributed under an [MIT License](https://opensource.org/licenses/MIT)
## Class outline
* **Week 1**: Introduction to Jupyter, browser console, structured data, ethical considerations
* **Week 2**: Scraping HTML with `requests` and `BeautifulSoup`
* **Week 3**: Scraping web data with Selenium, other tools for automated scraping
* **Week 4**: Scraping an API with `requests` and `json`, Wikipedia
* **Week 5**: Authenticating through an API, Twitter & Reddit
## Acknowledgements
This course will draw on resources built by myself and [Allison Morgan](https://allisonmorgan.github.io/) for the [2018 Summer Institute for Computational Social Science](https://github.com/allisonmorgan/sicss_boulder), which were in turn derived from [other resources](https://github.com/simonmunzert/web-scraping-with-r-extended-edition) developed by [Simon Munzert](http://simonmunzert.github.io/) and [Chris Bail](http://www.chrisbail.net/).
Thank you also to Professors [Bhuvana Narasimhan](https://www.colorado.edu/linguistics/bhuvana-narasimhan) and [Stefanie Mollborn](https://behavioralscience.colorado.edu/person/stefanie-mollborn) for coordinating the ITSS seminars.
## Class 2 goals
* Sharing accomplishments and challenges with last week's material
* Parsing HTML data into tabular data
* Writing your own parser
* Traversing directories vs. parsing targets to retrieve data
* Applying techniques and debugging for individual projects
## Sharing accomplishments and challenges
* Using the inspect tool
* Counting numbers of members of U.S. House with XML
* Parsing information out from Twitter's JSON payload
## Parsing HTML data into tabular data
The overall goal we have as researchers in scraping data from the web is converting data from one structured format (HTML's tree-like structures) into another structured format (probably a tabular structure with rows and columns).
This could involve simply reading tables out of a webpage all the way up to taking irregularly-structured HTML elements into a tabular format.
We are going to make some use of the [`pandas`](https://pandas.pydata.org/) library ("**pan**el **da**ta", not the cute animal), which is Python's implementation of a data frame concept. This is a very powerful and complex library that I typically spend more than 12 hours of lecture teaching in intermediate programming classes. I hope to convey some important elements as we work through material, but it is far beyond the scope of this class to be able to cover all the fundamentals and syntax.
Let's begin by importing the libraries we'll need in this notebook: requests, BeautifulSoup, and pandas
```
# Most straight-forward way to import a library in Python
import requests
# BeautifulSoup is a module inside the "bs4" library, we only import the BeautifulSoup module
from bs4 import BeautifulSoup
# We import pandas but give the library a shortcut alias "pd" since we will call its functions so much
import pandas as pd
```
### Reading an HTML table into Python
[The Numbers](http://www.the-numbers.com) is a popular source of data about movies' box office revenue numbers. Their daily domestic charts are HTML tables with the top-grossing movies for each day of the year, going back for several years. This [table](https://www.the-numbers.com/box-office-chart/daily/2018/12/25) for Christmas day in 2018 has columns for the current week's ranking, previous week's ranking, name of movie, distributor, gross, change over the previous week, number of theaters, revenue per theater, total gross, and number of days since release. This looks like a fairly straightforward table that could be read directly into data frame-like structure.
Using the Inspect tool, we can see the table exists as a `<table border="0" ... align="CENTER">` element with child tags like `<tbody>` and `<tr>` (table row). Each `<tr>` has `<td>` which defines each of the cells and their content. For more on how HTML defines tables, check out [this tutorial](https://www.w3schools.com/html/html_tables.asp).
Using `requests` and `BeautifulSoup` we would get this webpage's HTML, turn it into soup, and then find the table (`<table>`) or the table rows (`<tr>`) and pull out their content.
```
# Make the request; .text gives the response body as a string of HTML
xmas_bo_raw = requests.get('https://www.the-numbers.com/box-office-chart/daily/2018/12/25').text
# Turn into soup, specify the HTML parser
xmas_bo_soup = BeautifulSoup(xmas_bo_raw,'html.parser')
# Use .find_all to retrieve all the tables in the page
xmas_bo_tables = xmas_bo_soup.find_all('table')
```
It turns out there are two tables on the page, the first is a baby table consisting of the "Previous Chart", "Chart Index", and "Next Chart" at the top. We want the second table with all the data: `xmas_bo_tables[1]` returns the second chart (remember that Python is 0-indexed, so the first chart is at `xmas_bo_tables[0]`). With this table identified, we can do a second `find_all` to get the table rows inside it and we save it as `xmas_bo_trs`.
```
# Extract all the table rows (<tr>) from the data table (the second table on the page)
xmas_bo_trs = xmas_bo_tables[1].find_all('tr')
```
Let's inspect a few of these rows. The first row in our list of rows under `xmas_bo_trs` should be the header with the names of the columns.
```
# The first row should be the header with the column names
xmas_bo_trs[0]
```
The next table row should be for Aquaman.
```
# The second row is the first data row (Aquaman)
xmas_bo_trs[1]
```
If we wanted to access the contents of this table row, we could use the `.contents` method to get a list of each of the `<td>` table cells, which (frustratingly) intersperses newline characters.
```
# .contents lists the child <td> cells, interspersed with newline strings
xmas_bo_trs[1].contents
```
Another alternative is to use the `.text` method to get the text content of all the cells in this row.
```
# .text concatenates the text content of every cell in the row
xmas_bo_trs[1].text
```
The `\n` characters re-appear here, but if we `print` out this statement, we see their newline functionality.
```
# Printing renders the embedded \n characters as actual line breaks
print(xmas_bo_trs[1].text)
```
We could use string processing to take this text string and convert it into a simple list of data. `.split('\n')` will split the string on the newline characters and return a list of what exists in between.
```
# Split on newlines to get a plain list of the cell values
xmas_bo_trs[1].text.split('\n')
```
We'll write a `for` loop to go through all the table rows in `xmas_bo_trs`, get the list of data from the row, and add it back to a list of all the rows.
```
# Convert every data row (skipping the header in position 0) into a list of
# cell strings by splitting each row's text on the newline characters.
cleaned_xmas_bo_rows = [row.text.split('\n') for row in xmas_bo_trs[1:]]
# Inspect the first few rows of data
cleaned_xmas_bo_rows[:2]
```
Now we can pass this list of lists in `cleaned_xmas_bo_rows` to pandas's `DataFrame` function and hopefully get a nice table out.
```
# Build a DataFrame from the list of row lists
xmas_bo_df = pd.DataFrame(cleaned_xmas_bo_rows)
# Inspect the first five rows
xmas_bo_df.head()
```
We need to do a bit of cleanup on this data:
* Columns 0 and 11 are all empty
* Add column names
```
# Drop columns 0 and 11 and overwrite the xmas_box_df variable
# (these hold the empty strings produced by the leading/trailing newlines in each row)
xmas_bo_df = xmas_bo_df.drop(columns=[0,11])
# Rename the columns
xmas_bo_df.columns = ['Rank','Last rank','Movie','Distributor','Gross',
'Change','Theaters','Per theater','Total gross',
'Days']
# Write to disk (commented out; uncomment to save a CSV copy)
# xmas_bo_df.to_csv('christmas_2018_box_office.csv',encoding='utf8')
# Inspect
xmas_bo_df.head()
```
### `pandas`'s `read_html`
That was a good amount of work just to get this simple HTML table into Python. But it was important to cover how table elements moved from a string in `requests`, into a soup object from `BeautifulSoup`, into a list of data, and finally into `pandas`.
`pandas` also has powerful functionality for reading tables directly from HTML. If we convert the soup of the first table (`xmas_bo_tables[1]`) back into a string, `pandas` can read it directly into a table.
There are a few idiosyncrasies here, the result is a list of dataframes—even if there's only a single table/dataframe—so we need to return the first (and only) element of this list. This is why there's a `[0]` at the end and the `.head()` is just to show the first five rows.
```
# Convert the data table's soup back to a string and let pandas parse it;
# read_html returns a list of DataFrames, hence the [0]
xmas_bo_table_as_string = str(xmas_bo_tables[1])
pd.read_html(xmas_bo_table_as_string)[0].head()
```
The column names got lumped in as rows, but we can fix this as well with the `read_html` function by passing the row index where the column lives. In this case, it is the first row, so we pass `header=0`.
```
# header=0 promotes the first row to column names instead of treating it as data
pd.read_html(xmas_bo_table_as_string,header=0)[0].head()
```
Finally, you can point `read_html` at a URL without any `requests` or `BeautifulSoup` and get all the tables on the page as a list of DataFrames. `pandas` is simply doing the `requests` and `BeautifulSoup` on the inside. Interestingly, I'm getting a [HTTP 403](https://en.wikipedia.org/wiki/HTTP_403) error indicating the server (The Numbers) is forbidding the client (us) from accessing their data using this strategy. We will discuss next week whether and how to handle situations where web servers refuse connections from non-human clients. In this case, you cannot use the off-the-shelf `read_html` approach and would need to revert to using the `requests`+`BeautifulSoup` approach above.
```
# Point read_html directly at the URL (no requests/BeautifulSoup needed).
# NOTE: this site may respond with HTTP 403 to non-browser clients, in which
# case this cell raises and you must fall back to requests + BeautifulSoup.
simple_tables = pd.read_html('https://www.the-numbers.com/box-office-chart/daily/2018/12/25')
simple_tables
```
If we point it at Wikipedia's [2018 in film](https://en.wikipedia.org/wiki/2018_in_film), it will pull all of the tables present on the page.
```
# Pull every table from the Wikipedia "2018 in film" article
simple_tables = pd.read_html('https://en.wikipedia.org/wiki/2018_in_film')
```
The first three correspond to the "Year in film" navigation box on the side and are poorly-formatted by default.
```
# The first few tables are navigation boxes and parse poorly
simple_tables[0]
```
The fourth table in the `simple_tables` list we got from parsing the Wikipedia page with `read_html` is the table under the "Highest-grossing films" section.
```
# The fourth table is the "Highest-grossing films" section
simple_tables[3]
```
You can pass the "header" option in `read_html` to make sure the column names from a particular row (in this case the first row) do not accidentally become rows of data.
```
# Re-read with header=0 so the first row becomes the column names,
# keeping only the fourth table (index 3)
wiki_top_grossing_t = pd.read_html('https://en.wikipedia.org/wiki/2018_in_film',header=0)[3]
wiki_top_grossing_t
```
Note that there are still a few errors in this table because the "Disney" value in the Wikipedia table spans two rows and `read_html` thus skips the "Distributor" value for Black Panther.
```
# The "Disney" cell spans two rows in the source table, which shifted Black
# Panther's values left by one column; repair the row by hand.
# Copy the value at index position 1, column position Distributor to Worldwide gross
wiki_top_grossing_t.loc[1,'Worldwide gross'] = wiki_top_grossing_t.loc[1,'Distributor']
# Change the value at 1, Distributor to Disney
wiki_top_grossing_t.loc[1,'Distributor'] = 'Disney'
wiki_top_grossing_t
```
## Writing your own parser
We will return to the historical Oscars data. Even though data as prominent as this is likely to already exist in tabular format somewhere, we will maintain the illusion that we are the first to both scrape it and parse it into a tabular format. Our goal here is to write a parser that will (ideally) work across multiple pages; in this case, each of the award years.
One of the first things we should do before writing any code is come up with a model of what we want our data to look like at the end of this. This is an intuitive and "tidy" format, but you might come up with alternatives based on your analysis and modeling needs.
| *Year* | *Category* | *Nominee* | *Movie* | *Won* |
| --- | --- | --- | --- | --- |
| 2019 | Actor in a leading role | Christian Bale | Vice | NA |
| 2019 | Actor in a leading role | Bradley Cooper | A Star Is Born | NA |
| 2019 | Actor in a leading role | Willem Dafoe | At Eternity's Gate | NA |
| 2019 | Actor in a leading role | Rami Malek | Bohemian Rhapsody | NA |
| 2019 | Actor in a leading role | Viggo Mortensen | Green Book | NA |
We will begin with writing a parser for a (hopefully!) representative year, then scrape the data for all the years, then apply the scraper to each of those years, and finally combine all the years' data together into a large data set.
Let's begin with writing a parser for a (hopefully!) representative year: in this case, 2019 is actually not a great case because it is missing information about who won and lost since (at the time of my writing this notebook) the winners had not been announced. We will use 2018 instead and make the profoundly naïve assumption it should work the same going back in time.
Start off with using `requests` to get the data and then use `BeautifulSoup` to turn it into soup we can parse through.
```
# Request the 2018 ceremony page and parse it into soup.
oscars2018_raw = requests.get('https://www.oscars.org/oscars/ceremonies/2018').text
# FIX: name the parser explicitly (as in the earlier box-office cell).
# Without it, BeautifulSoup emits a GuessedAtParserWarning and picks whichever
# parser is installed, which can change how malformed HTML is interpreted.
oscars2018_soup = BeautifulSoup(oscars2018_raw, 'html.parser')
```
Using the Inspect tool exercise from Class 1, the `<div class="view-grouping">` seems to be the most promising tag for us to extract. Use `.find_all('div',{'class':'view-grouping'})` to (hopefully!) get all of these award groups. Inspect the first and last ones to make sure they looks coherent.
```
# Get all the groups that have a <div class="view-grouping"> tag
oscars2018_groups = oscars2018_soup.find_all('div',{'class':'view-grouping'})
# Inspect the first one
oscars2018_groups[0]
```
The last group is something besides "Writing (Original Screenplay)" and it's not clear to me where this tag's content renders on the page.
```
# Inspect the last one
# Inspect the last one — it is not an awards category, which signals the
# selector matched more tags than we wanted
oscars2018_groups[-1]
```
This puts us into something of a bind going forward: if the `.find_all` returns more groupings than we expected, then it's not sufficiently precise to identify *only* groupings of nominees. However, there do not appear to be any child tags in the `oscars2018_groups[0]` grouping that uniquely differentiate them from the child tags present in the `oscars2018_groups[-1]` grouping. Another alternative is to simply parse the first 24 groupings, but this is a very brittle solution since other years' awards might have more or fewer groupings.
```
# Count how many groupings matched across the whole page (more than the 24 categories)
len(oscars2018_groups)
```
### Navigating the HTML tree to find more specific parent elements
A third alternative is to leverage the tree structure of HTML and get the parent element in the hopes it is more unique than its children. In this case something like `<div id="quicktabs-tabpage-honorees-0"...>` is a promising lead. Use `find_all` to search for this tag and confirm there is only one the one `<div>` element (with its children) rather than multiple `<div>` elements matching "quicktabs-container-honorees".
```
# Get the new tag group
oscars2018_parent_group = oscars2018_soup.find_all('div',{'id':'quicktabs-tabpage-honorees-0'})
# Hopefully there is only one group matching this pattern
len(oscars2018_parent_group)
```
So far so good, now we can use `find_all` on the soup for this `<div class="view-grouping">` to search *within* this specific parent group, which should return only the 24 awards groupings rather than the 105 we found when searching the whole page. (The reason for those extra groupings is explained below.)
```
# Note the addition of the [0] since the _parent_group is a list with 1 element in it
# We just extract that single element (which is a soup) and then we can use find_all on it
oscars2018_true_groups = oscars2018_parent_group[0].find_all('div',{'class':'view-grouping'})
len(oscars2018_true_groups)
```
Hallelujah! The award names for each group live inside a `<div class="view-grouping-header">`, so we can `find_all` for those, loop through each, and print out the name.
```
# Print each category name from the <div class="view-grouping-header"> tags
for group in oscars2018_parent_group[0].find_all('div',{'class':'view-grouping-header'}):
    print(group.text)
```
It turns out that the Oscars site loads a bunch of extra data that it does not render that lives underneath a `<div id="quicktabs-tabpage-honorees-1">` which is where the 81 extra "awards" come from. This appears to be an attempt to organize the page by film, rather than by category.
### Navigating the HTML tree from a specific child to find specific generic parents
Now bear with me through some additional and presently unnecessary pain. Above, we were able to isolate the 24 category groupings we wanted through finding an appropriate *parent* tag and then working *down*. But I also want to show how we could identify the same 24 category groups by finding an appropriate *child* tag and working back up. This could be helpful in other situations where the elements are hard to disambiguate.
Let's start by finding the tag for the "Actor in a Leading Role" from the soup containing all the tags.
```
# The grouping for "Actor in a Leading Role"
oscars2018_groups[0]
```
Rather than finding *all* the `<div class="view-grouping">` present in the page, we only want the 23 *siblings* of this specific tag. We can use the `find_next_siblings()` to get these 23 siblings. I do not like this method very much because you have to find the "eldest" sibling and then combine it with its siblings later on if you want all the children. In this case, you'd need to keep track of the `<div class="view-grouping">` corresponding to Best Actor and then combine it with its 23 siblings, rather than an approach that simply returns all 24 in a single list.
```
# find_next_siblings() returns the later siblings of this tag (not the tag itself)
oscars2018_group0_next_siblings = oscars2018_groups[0].find_next_siblings()
len(oscars2018_group0_next_siblings)
```
We could also go up to get the parent and then find all 24 of the `<div class='view-grouping'>` among the children.
```
# Alternative: climb up one level, then search back down among the children
# From the child we like, get its parent
oscars2018_group0_parent = oscars2018_groups[0].parent
# Now with the parent, find all the relevant children
oscars2018_group0_parent_children = oscars2018_group0_parent.find_all('div',{'class':'view-grouping'})
# Confirm
len(oscars2018_group0_parent_children)
```
### Checking the relevant fields
That seemed like a major digression away from the core task of writing a parser, but it is critical that we write a parser that parses *only* the data we want and nothing else. Now that we have our 24 awards groups in `oscars2018_true_groups`, let's break one open and extract all the yummy data waiting inside.
There are a few `<div>` sub-classes that are helpfully named that should make extracting this data a bit easier.
* `<div class="view-grouping-header">` - name of the category
* `<span class="golden-text">` - winner
* `<div class="views-field views-field-field-actor-name">` - name of actor
* `<div class="views-field views-field-title">` - title of movie
```
# Inspect the full first (Best Actor) grouping
oscars2018_true_groups[0]
```
"Zoom in" to the `views-field-field-actor-name`.
```
# "Zoom in" to the divs holding the nominee names
oscars2018_true_groups[0].find_all('div',{'class':"views-field views-field-field-actor-name"})
```
These `<h4>` tags may be more specific and helpful.
```
# The <h4> tags may be a more specific way to reach the names
oscars2018_true_groups[0].find_all('h4')
```
Zoom into the `views-field-title`.
```
# "Zoom in" to the divs holding the movie titles
oscars2018_true_groups[0].find_all('div',{'class':"views-field views-field-title"})
```
These `<span>` tags may be more specific and helpful, but there are also empty tags here clogging things up.
```
# The <span> tags hold the titles, though some spans are empty
oscars2018_true_groups[0].find_all('span')
```
As a battle-scarred web scraper, let me continue to emphasize the importance of quick-checking your assumptions before committing to writing code. Are these fields still appropriate for other awards categories? Let's check the last category for original screenplay. Are the `<div>`s for "field-actor-name" still people and for "field-title" still movies? Nope.
Looking back at the web page, it's now obvious that the movie title and person who gets the award are flipped between actors/actresses and the other awards categories. We're going to have to keep this in mind going forward!
```
# Spot-check the last category: the actor-name/title divs are swapped for
# non-acting categories. NOTE: in a notebook only the last expression's value
# is displayed, so the first line's output is not shown here.
oscars2018_true_groups[-1].find_all('div',{'class':"views-field views-field-field-actor-name"})
oscars2018_true_groups[-1].find_all('div',{'class':"views-field views-field-title"})
```
### Writing the core parser functionality
How will we map the contents of the HTML to the
* **Year**: All the awards are from the same year, also in the URL
* **Category**: `<h2>`
* **Nominee**: `<h4>` for actors, `<span>` for non-actors
* **Movie**: `<span>` for actors, `<h4>` for non-actors
* **Won**: `<h3>` for sibling, 0 for everyone else; alternatively just the top nominee
```
# First draft of the parser, run against the Best Actor grouping only:
# pull the category from the <h2>, nominee names from the <h4>s, and movie
# titles from the non-empty <span>s.
oscars2018_true_groups[0]
category = oscars2018_true_groups[0].find_all('h2')[0].text
print("The name of the category is:",category)
names = []
for name_tag in oscars2018_true_groups[0].find_all('h4'):
    person = name_tag.text
    names.append(person)
    print("The name of a nominee is:",person)
movies = []
for span_tag in oscars2018_true_groups[0].find_all('span'):
    # Skip the empty spans that clog up the results
    if len(span_tag.text) > 0:
        title = span_tag.text.strip()
        movies.append(title)
        print("The name of a movie is:",title)
```
One strategy is to use Python's [`zip`](https://docs.python.org/3.7/library/functions.html#zip) library to combine elements from different lists together. But `zip` is a bit too slick and abstract for my tastes.
```
# The elements of each list being combined need to be the same size
# So we make a list of the category name and multiply it by 5 to make it the same size as the others
# zip pairs up the i-th element of each list into one tuple per nominee
list(zip([category]*5,names,movies))
```
Another strategy is to use the `<div class="views-row">`s for each nominee and extract the relevant information from its subdivs. This is a bit more intuitive in the sense of reading from top to bottom and also makes it easier to capture the winner and losers based on position.
```
# Walk the nominee rows for Best Actor; per the page layout, the winner is
# always the first row within a category.
actor_nominees = oscars2018_true_groups[0].find_all('div',{'class':'views-row'})
for position, row in enumerate(actor_nominees):
    # Only the top row won; everyone else lost
    outcome = 'Won' if position == 0 else 'Lost'
    # Collect all the sub-divs for this nominee row
    cells = row.find_all('div')
    # For acting categories the first cell is the person, the second the film
    person = cells[0].text.strip()
    film = cells[1].text.strip()
    print("{0} was nominated for \"{1}\" and {2}.".format(person,film,outcome))
```
Check that reversing "movie" and "name" works for another award category like original screenplay (`oscars2018_true_groups[-1]`). There's some weirdness with "Written by" and "Story by" filtering in here rather than simply names that may need to get fixed in the final calculation, but I would want to talk to a domain expert about the differences between these labels.
```
# Repeat for the last category (Original Screenplay), where the cell order is
# flipped: the first cell is the film and the second is the awardee.
original_screenplay_nominees = oscars2018_true_groups[-1].find_all('div',{'class':'views-row'})
for position, row in enumerate(original_screenplay_nominees):
    outcome = 'Won' if position == 0 else 'Lost'
    cells = row.find_all('div')
    # movie and name reversed relative to the acting categories
    film = cells[0].text.strip()
    awardee = cells[1].text.strip()
    print("{0} was nominated for \"{1}\" and {2}.".format(awardee,film,outcome))
```
This was just for Best Actors, now lets add another layer for all the different awards categories. We can see the movie name and awardee switch is important now since most of the categories are reversed.
```
# Loop over every award category, not just Best Actor. The name/movie cell
# order matters now, since most categories list the film first.
for group in oscars2018_true_groups:
    category = group.find_all('h2')[0].text
    for i,nominee in enumerate(group.find_all('div',{'class':'views-row'})):
        # The winner is always listed first within a category
        if i == 0:
            winner = 'Won'
        else:
            winner = 'Lost'
        subdivs = nominee.find_all('div')
        name = subdivs[0].text.strip()
        movie = subdivs[1].text.strip()
        # FIX: the original format string had an unmatched quote ('for {2}\"');
        # quote the movie title on both sides
        print("{0} was nominated in {1} for \"{2}\" and {3}.".format(name,category,movie,winner))
```
Include some flow control: if the word "actor" or "actress" appears in the category title, then do nominee name first and movie name second; otherwise do movie name first and nominee name second.
```
# Branch on the category type: acting categories list the person first,
# every other category lists the film first.
for group in oscars2018_true_groups:
    category = group.find_all('h2')[0].text
    if 'Actor' in category or 'Actress' in category:
        for i,nominee in enumerate(group.find_all('div',{'class':'views-row'})):
            # The winner is always listed first within a category
            if i == 0:
                winner = 'Won'
            else:
                winner = 'Lost'
            subdivs = nominee.find_all('div')
            name = subdivs[0].text.strip()
            movie = subdivs[1].text.strip()
            print("{0} was nominated in {1} for \"{2}\" and {3}.".format(name,category,movie,winner))
    else:
        for i,nominee in enumerate(group.find_all('div',{'class':'views-row'})):
            if i == 0:
                winner = 'Won'
            else:
                winner = 'Lost'
            subdivs = nominee.find_all('div')
            # movie and name reversed relative to the acting branch
            movie = subdivs[0].text.strip()
            name = subdivs[1].text.strip()
            # FIX: quote the movie title (the original quoted the nominee's
            # name), so the output matches the acting branch above
            print("{0} was nominated in {1} for \"{2}\" and {3}.".format(name,category,movie,winner))
```
Rather than printing out the information, store it in `nominees_2018` so that we can turn it into a DataFrame.
```
# Collect every nominee as a dict payload so the list can later become a DataFrame.
nominees_2018 = []
for group in oscars2018_true_groups:
    category = group.find_all('h2')[0].text
    # Acting categories list the person first; every other category lists the film first
    acting_category = 'Actor' in category or 'Actress' in category
    for position, row in enumerate(group.find_all('div',{'class':'views-row'})):
        # The winner is always listed first within a category
        outcome = 'Won' if position == 0 else 'Lost'
        cells = row.find_all('div')
        first_text = cells[0].text.strip()
        second_text = cells[1].text.strip()
        if acting_category:
            name, movie = first_text, second_text
        else:
            movie, name = first_text, second_text
        # Build the payload for this nominee and add it to the list at top
        nominees_2018.append({'Category': category,
                              'Name': name,
                              'Movie': movie,
                              'Year': 2018,  # We're only looking at 2018 right now
                              'Winner': outcome})
```
Moment of truth!
```
# Build the tabular dataset from the list of nominee dicts
nominees_df = pd.DataFrame(nominees_2018)
nominees_df
```
Now let's turn this hulking beast of a parser into a function so we can apply it to other years' nominees in the next step.
```
def parse_nominees(true_groups,year):
    """Parse award-category soup groupings into a list of nominee records.

    Parameters
    ----------
    true_groups : iterable of bs4 Tag-like objects, one per award category.
        Each must support ``find_all``: the category name lives in the first
        <h2>, each nominee is a <div class="views-row">, and the nominee's
        first two child <div>s hold the person/film text.
    year : ceremony year stamped onto every record.

    Returns
    -------
    list of dicts with keys Category, Name, Movie, Year, Winner.
    """
    nominees_list = []
    for group in true_groups:
        category = group.find_all('h2')[0].text
        # Acting categories list the person first; all others list the film first
        acting = 'Actor' in category or 'Actress' in category
        for i,nominee in enumerate(group.find_all('div',{'class':'views-row'})):
            # The winner is always the first row within a category
            winner = 'Won' if i == 0 else 'Lost'
            subdivs = nominee.find_all('div')
            first = subdivs[0].text.strip()
            second = subdivs[1].text.strip()
            # Single loop body with a swap replaces the original's two
            # duplicated branches
            name, movie = (first, second) if acting else (second, first)
            nominees_list.append({'Category':category,
                                  'Name':name,
                                  'Movie':movie,
                                  'Year':year,
                                  'Winner':winner})
    return nominees_list
```
## Iterating vs. parsing to retrieve data
Often the data you are interested in is spread across multiple web pages. In an ideal world, the naming conventions would let you retrieve the data from these pages systematically. In the case of the Oscars, the URLs appear to be consistently formatted: `https://www.oscars.org/oscars/ceremonies/2019` suggests that we could change the 2019 to any other date going back to the start of the Oscars and get that year as well: `https://www.oscars.org/oscars/ceremonies/2018` should get us the page for 2018, and so on. Let's demonstrate each of these strategies with the Oscars data: iterating from 2019 back to 1929 in the URL versus parsing the list of links from the header.
### Iterating strategies for retrieving data
The fundamental assumption with this strategy is that the data are stored at URLs in a consistent way that we can access sequentially. In the case of the Oscars, we *should* be able to simply pass each year to the URL in requests. Here we want to practice responsible data scraping by including a sleep between each request so that we do not overwhelm the Oscars server with requests. We can use the `sleep` function within `time`.
```
from time import sleep
```
The `sleep(3)` below prevents any more code from progressing for 3 seconds.
```
print("The start of something.")
sleep(3)
print("The end of something.")
```
The core part of the iterating strategy is simply using Python's [`range`](https://docs.python.org/3.7/library/functions.html#func-range) function to generate a sequence of values. Here, we can use `range` to print out a sequence of URLs that should correspond to awards pages from 2010 through 2019. We can also incorporate the `sleep` functionality and wait a second between each `print` statement—it should now take 10 seconds for this code to finish printing. This simulates how we can use `sleep` to slow down and spread out requests so that we do not overwhelm the servers whose data we are trying to scrape.
```
# Print the presumed ceremony-page URL for each year in the 2010s,
# pausing a second between lines to simulate polite request pacing.
for ceremony_year in range(2010, 2020):
    sleep(1)
    print(f'https://www.oscars.org/oscars/ceremonies/{ceremony_year}')
```
We defined a function `parse_nominees` above that takes the "true groups" of nominees. Let's try to tie these pieces together for all the nominees in the 2010s.
```
# Dict mapping each year (int) to that year's DataFrame of nominees
all_years_nominees = dict()
# For each year starting in 2010 until 2019
for year in range(2010,2020):
    # Pause for a second between each request so we don't hammer the server
    sleep(1)
    # Get the raw HTML
    year_raw_html = requests.get('https://www.oscars.org/oscars/ceremonies/{0}'.format(year)).text
    # Soup-ify. NOTE(review): no parser is passed to BeautifulSoup, so it
    # falls back to the "best available" installed one -- parse results can
    # differ across machines; consider passing 'html.parser' explicitly.
    year_souped_html = BeautifulSoup(year_raw_html)
    # Get the parent group that wraps all award categories
    year_parent_group = year_souped_html.find_all('div',{'id':'quicktabs-tabpage-honorees-0'})
    # Get the true groups (one per category) under the parent group
    year_true_groups = year_parent_group[0].find_all('div',{'class':'view-grouping'})
    # Use our parsing function, passing the year from above
    year_nominees = parse_nominees(year_true_groups,year)
    # Convert the year_nominees to a DataFrame and add them to all_years_nominees
    all_years_nominees[year] = pd.DataFrame(year_nominees)
```
Combine each of the DataFrames in `all_years_nominees` into a giant DataFrame of all the nominees from 2010-2019.
```
all_years_nominees_df = pd.concat(all_years_nominees)
all_years_nominees_df.reset_index(drop=True).head(10)
```
### Parsing strategy for retrieving data
Frustratingly, this iterating strategy may not always hold: maybe some years are skipped or the naming convention changes at some point. We will cover some basics of [error-handling in Python](https://realpython.com/python-exceptions/) that could let us work around errors as they pop up, but this may still result in an incomplete collection if the deviations from the naming convention are systematic. What we would want to do instead is to identify all the links ahead of time by parsing them from the list, and then work through that list to get the complete data collection.
What this means in the context of our Oscars example is assuming that we cannot trust that the sequential numbering of the years is a reliable guide to get all the data. Instead, we should get a list of the URLs for each of the awards pages from the "ceremonies-decade-scroller" (from Inspect) at the top. This scroller *should* be consistent across all the pages, but start with the nominees for 2019 just to be safe:
```
oscars2019_raw = requests.get('https://www.oscars.org/oscars/ceremonies/2019').text
oscars2019_soup = BeautifulSoup(oscars2019_raw)
```
Using the Inspect tool, there is a `<div class="years">` that contains the links to each of the years. Run a `.find_all` to get all these href locations.
```
# Get the <div class="years"> as a parent tag first, just in case there are <a class="years"> elsewhere
oscars2019_years_div = oscars2019_soup.find_all('div',{'class':'years'})[0]
# Now get the <a class="years"> underneath only the oscars2019_years_div
oscars2019_years_a = oscars2019_years_div.find_all('a',{'class':'year'})
# Inspect the first 10
oscars2019_years_a[:10]
```
Each of these `<a>` tags contains an "href", or the URL element where the page lives, and a text element for what's displayed.
```
oscars2019_years_a[0]['href']
oscars2019_years_a[0].text
```
Now we can write a loop to print out the URL locations for all the other award years based on the "official" links in the "ceremonies-decade-scroller" navigation rather than assuming the years are sequential—I promise this will pay dividends in the future when inconsistent design wreaks havoc on your sequential data strategies!
```
# Show the absolute URL for the 10 most recent ceremony links.
for year_link in oscars2019_years_a[-10:]:
    # The hrefs are site-relative, so prepend the site root.
    print('https://www.oscars.org' + year_link['href'])
```
We can now use the `parse_nominees` function for these pages as well.
```
# Dict mapping each year to that year's DataFrame of nominees
all_years_nominees = dict()
# For the 10 most recent years' links taken from the decade scroller
for a in oscars2019_years_a[-10:]:
    # Pause for a second between each request so we don't hammer the server
    sleep(1)
    # Get the href (site-relative URL) of this ceremony page
    href = a['href']
    # Get the year. NOTE(review): a.text is a *string* here, unlike the int
    # years used in the range()-based approach above -- the dict keys differ.
    year = a.text
    # Get the raw HTML
    url = 'https://www.oscars.org' + href
    year_raw_html = requests.get(url).text
    # Soup-ify (no explicit parser: BeautifulSoup picks the best available)
    year_souped_html = BeautifulSoup(year_raw_html)
    # Get the parent group that wraps all award categories
    year_parent_group = year_souped_html.find_all('div',{'id':'quicktabs-tabpage-honorees-0'})
    # Get the true groups (one per category) under the parent group
    year_true_groups = year_parent_group[0].find_all('div',{'class':'view-grouping'})
    # Use our parsing function, passing the year from above
    year_nominees = parse_nominees(year_true_groups,year)
    # Convert the year_nominees to a DataFrame and add them to all_years_nominees
    all_years_nominees[year] = pd.DataFrame(year_nominees)
```
Combine each of the DataFrames in `all_years_nominees` into a giant DataFrame of all the nominees from 2010-2019.
```
all_years_nominees_df = pd.concat(all_years_nominees)
all_years_nominees_df.reset_index(drop=True).head(10)
```
## Project time
Let's take a look at websites you're interested in scraping and see what kinds of challenges and opportunities exist for scraping their data.
| github_jupyter |
<a id="title_ID"></a>
# Plotting a Catalog over a Kepler Full Frame Image File
<br> This tutorial demonstrates how to access the WCS (World Coordinate System) from a full frame image file and use this data to plot a catalog of objects over the FFI.
<br>
<img style="float: right;" src="./ffi_tic_plot.png" alt="ffi_tic_plot" width="500"/>
### Table of Contents
<div style="text-align: left"> <br> [Introduction](#intro_ID) <br> [Imports](#imports_ID) <br> [Getting the Data](#data_ID) <br> [File Information](#fileinfo_ID) <br> [Displaying Image Data](#image_ID) <br> [Overplotting Objects](#overplot_ID) <br> [Additional Resources](#resources_ID) <br> [About this Notebook](#about_ID) </div>
***
<a id="intro_ID"></a>
## Introduction
**Full Frame Image file background:** A Full Frame Image (FFI) contains values for every pixel in each of the 84 channels. Standard calibrations, such as flat fields, blacks, and smears have been applied to the calibrated FFIs. These files also contain a World Coordinate System (WCS) that attaches RA and Dec coordinates to pixel x and y values.
**Some notes about the file:** kplr2009170043915_ffi-cal.fits
<br>The filename contains phrases for identification, where
- kplr = Kepler
- 2009170043915 = year 2009, day 170, time 04:39:15
- ffi-cal = calibrated FFI image
**Defining some terms:**
- **HDU:** Header Data Unit; a FITS file is made up of Header or Data units that contain information, data, and metadata relating to the file. The first HDU is called the primary, and anything that follows is considered an extension.
- **TIC:** TESS Input Catalog; a catalog of luminous sources on the sky to be used by the TESS mission. We will use the TIC in this notebook to query a catalog of objects that we will then plot over an image from Kepler.
- **WCS:** World Coordinate System; coordinates attached to each pixel of an N-dimensional image of a FITS file. For example, a specified celestial RA and Dec associated with pixel location in the image.
For more information about the Kepler mission and collected data, visit the [Kepler archive page](https://archive.stsci.edu/kepler/). To read more details about light curves and important data terms, look in the [Kepler archive manual](https://archive.stsci.edu/kepler/manuals/archive_manual.pdf#page=25).
[Top of Page](#title_ID)
***
<a id="imports_ID"></a>
## Imports
Let's start by importing some libraries to the environment:
- *numpy* to handle array functions
- *astropy.io fits* for accessing fits files
- *astropy.wcs WCS* to project the World Coordinate System on the plot
- *astropy.table Table* for creating tidy tables of the data
- *matplotlib.pyplot* for plotting data
```
%matplotlib inline
import numpy as np
from astropy.io import fits
from astropy.wcs import WCS
from astropy.table import Table
import matplotlib.pyplot as plt
```
[Top of Page](#title_ID)
***
<a id="data_ID"></a>
## Getting the Data
Start by importing libraries from Astroquery. For a longer, more detailed description using of Astroquery, please visit this [tutorial](https://github.com/spacetelescope/MAST-API-Notebooks/blob/master/MUG2018_APITutorial_Astroquery.ipynb) or read the Astroquery [documentation](https://astroquery.readthedocs.io/en/latest/#).
```
from astroquery.mast import Mast
from astroquery.mast import Observations
```
<br>Next, we need to find the data file. This is similar to searching for the data using the [MAST Portal](https://mast.stsci.edu/portal/Mashup/Clients/Mast/Portal.html) in that we will be using certain keywords to find the file. The object we are looking for is kplr2009170043915, collected by the Kepler spacecraft. We are searching for an FFI file of this object:
```
kplrObs = Observations.query_criteria(obs_id="kplr2009170043915_84", obs_collection="KeplerFFI")
kplrProds = Observations.get_product_list(kplrObs[0])
yourProd = Observations.filter_products(kplrProds, extension='kplr2009170043915_ffi-cal.fits',
mrp_only=False)
yourProd
```
<br>Now that we've found the data file, we can download it using the results shown in the table above:
```
Observations.download_products(yourProd, mrp_only=False, cache=False)
```
[Top of Page](#title_ID)
***
<a id="extension_ID"></a>
## Reading FITS Extensions
<br>Now that we have the file, we can start working with the data. We will begin by assigning a shorter name to the file to make it easier to use. Then, using the info function from astropy.io.fits, we can see some information about the FITS Header Data Units:
```
filename = "./mastDownload/KeplerFFI/kplr2009170043915_84/kplr2009170043915_ffi-cal.fits"
fits.info(filename)
```
- **No. 0 (Primary): **
<br>This HDU contains meta-data related to the entire file.
- **No. 1-84 (Image): **
<br>Each of the 84 image extensions contains an array that can be plotted as an image. We will plot one in this tutorial along with catalog data.
<br>Let's say we wanted to see more information about the header and extensions than what the fits.info command gave us. For example, we can access information stored in the header of any of the Image extensions (No.1 - 84, MOD.OUT). The following line opens the FITS file, writes the first HDU extension into header1, and then closes the file. Only 24 rows of data are displayed here but you can view them all by adjusting the range:
```
with fits.open(filename) as hdulist:
header1 = hdulist[1].header
print(repr(header1[1:25]))
```
[Top of Page](#title_ID)
***
<a id="image_ID"></a>
## Displaying Image Data
<br>First, let's find the WCS information associated with the FITS file we are using. One way to do this is to access the header and print the rows containing the relevant data (54 - 65). This gives us the reference coordinates (CRVAL1, CRVAL2) that correspond to the reference pixels:
```
with fits.open(filename) as hdulist:
header1 = hdulist[1].header
print(repr(header1[54:61]))
```
<br>Let's pick an image HDU and display its array. We can also choose to print the length of the array to get an idea of the dimensions of the image:
```
with fits.open(filename) as hdulist:
imgdata = hdulist[1].data
print(len(imgdata))
print(imgdata)
```
We can now plot this array as an image:
```
fig = plt.figure(figsize=(16,8))
plt.imshow(imgdata, cmap=plt.cm.gray)
plt.colorbar()
plt.clim(0,20000)
```
Now that we've seen the image and the WCS information, we can plot FFI with a WCS projection. To do this, first we will access the file header and assign a WCS object. Then we will plot the image with the projection, and add labels and a grid for usability:
```
# Re-open the image HDU and build a WCS object from its header so the plot
# axes can be labeled in sky coordinates.
# NOTE(review): the HDUList returned by fits.open is never closed here, so
# the file handle stays open for the rest of the session -- confirm intended.
hdu = fits.open(filename)[1]
wcs = WCS(hdu.header)
fig = plt.figure(figsize=(16,8))
# Attach the WCS projection to the axes: pixels are drawn as-is, but the
# ticks and grid are expressed in RA/Dec.
ax = plt.subplot(projection=wcs)
im = ax.imshow(hdu.data, cmap=plt.cm.gray, origin='lower', clim=(0,20000))
fig.colorbar(im)
plt.title('FFI with WCS Projection')
ax.set_xlabel('RA [deg]')
ax.set_ylabel('Dec [deg]')
ax.grid(color='white', ls='solid')
```
[Top of Page](#title_ID)
***
<a id="catalog_ID"></a>
## Getting the Catalog Data
Now that we have an image, we can use astroquery to retrieve a catalog of objects and overlay it onto the image. First, we will start with importing catalog data from astroquery:
```
from astroquery.mast import Catalogs
```
We will query a catalog of objects from TIC (TESS Input Catalog). For more information about TIC, follow this [link](https://archive.stsci.edu/missions/tess/doc/tic_v5_drn.pdf). Our search will be centered on the same RA and Declination listed in the header of the FFI image and will list objects within a 1 degree radius of that location. It might take a couple seconds longer than usual for this cell to run:
**why tic??? explain...**
```
catalogData = Catalogs.query_region("290.4620065226813 38.32946356799192", radius="0.7 deg", catalog="TIC")
dattab = Table(catalogData)
dattab
```
<br>Let's isolate the RA and Dec columns into a separate table for creating a plot. We can also filter our results to include only sources brighter than 15 magnitudes in B, which will give us a more manageable amount of sources for plotting:
```
# Keep just the columns needed for plotting
radec = (catalogData['ra','dec','Bmag'])
# Boolean mask: keep sources brighter than magnitude 15 in B
# (smaller magnitude = brighter)
mask = radec['Bmag'] < 15.0
mag_radec = radec[mask]
print(mag_radec)
```
<br>We can plot this table to get an idea of what the catalog looks like visually:
```
fig = plt.figure(figsize=(8,8))
ax = fig.add_subplot(111)
plt.scatter(mag_radec['ra'], mag_radec['dec'], facecolors='none', edgecolors='c', linewidths=0.5)
```
[Top of Page](#title_ID)
***
<a id="overplot_ID"></a>
## Overplotting Objects
Now that we have a way to display an FFI file and a catalog of objects, we can put the two pieces of data on the same plot. To do this, we will project the World Coordinate System (WCS) as a grid in units of degrees, minutes, and seconds onto the image. Then, we will create a scatter plot of the catalog, similar to the one above, although here we will transform its coordinate values into ICRS (International Celestial Reference System) to be compatible with the WCS projection:
```
hdu = fits.open(filename)[1]
wcs = WCS(hdu.header)
fig = plt.figure(figsize=(20,10))
ax = plt.subplot(projection=wcs)
im = ax.imshow(hdu.data, cmap=plt.cm.gray, origin='lower', clim=(0,20000))
fig.colorbar(im)
plt.title('FFI with TIC Catalog Objects')
ax.set_xlabel('RA [deg]')
ax.set_ylabel('Dec [deg]')
ax.grid(color='white', ls='solid')
ax.autoscale(False)
ax.scatter(mag_radec['ra'], mag_radec['dec'],
facecolors='none', edgecolors='c', linewidths=0.5,
transform=ax.get_transform('icrs')) # This is needed when projecting onto axes with WCS info
```
The catalog is displayed here as blue circles that highlight certain objects common in both the Kepler FFI and the TIC search. The image remains in x, y pixel values while the grid is projected in degrees based on the WCS. The projection works off of WCS data in the FFI header to create an accurate grid displaying RA and Dec coordinates that correspond to the original pixel values. The catalog data is transformed into ICRS coordinates in order to work compatibly with the other plotted data.
[Top of Page](#title_ID)
***
<a id="resources_ID"></a>
## Additional Resources
For more information about the MAST archive and details about mission data:
<br>
<br>[MAST API](https://mast.stsci.edu/api/v0/index.html)
<br>[Kepler Archive Page (MAST)](https://archive.stsci.edu/kepler/)
<br>[Kepler Archive Manual](https://archive.stsci.edu/kepler/manuals/archive_manual.pdf)
<br>[Exo.MAST website](https://exo.mast.stsci.edu/exo/ExoMast/html/exomast.html)
<br>[TESS Archive Page (MAST)](https://archive.stsci.edu/tess/)
***
<a id="about_ID"></a>
## About this Notebook
**Author:** Josie Bunnell, STScI SASP Intern
<br>**Updated On:** 08/13/2018
***
[Top of Page](#title_ID)
<img style="float: right;" src="./stsci_pri_combo_mark_horizonal_white_bkgd.png" alt="stsci_pri_combo_mark_horizonal_white_bkgd" width="200px"/>
| github_jupyter |
```
#hide
#skip
! [ -e /content ] && pip install -Uqq fastai # upgrade fastai on colab
#default_exp vision.utils
#export
from fastai.torch_basics import *
from fastai.data.all import *
from fastai.vision.core import *
#hide
from nbdev.showdoc import *
# path = untar_data(URLs.IMAGENETTE)
# path
```
# Vision utils
> Some utils function to quickly download a bunch of images, check them and pre-resize them
```
#export
def _download_image_inner(dest, inp, timeout=4):
    "Download one image: `inp` is `(index, url)`; saved as `dest/{index:08d}{suffix}`."
    i,url = inp
    # Keep the URL's file extension when present (ignoring any ?query part),
    # otherwise default to .jpg.
    suffix = re.findall(r'\.\w+?(?=(?:\?|$))', url)
    suffix = suffix[0] if len(suffix)>0 else '.jpg'
    try:
        download_url(url, dest/f"{i:08d}{suffix}", overwrite=True, show_progress=False, timeout=timeout)
    except Exception as e:
        # BUG FIX: the original built this f-string and discarded it (an
        # expression statement), silently swallowing every failure.
        print(f"Couldn't download {url}: {e}")
with tempfile.TemporaryDirectory() as d:
d = Path(d)
url = "https://www.fast.ai/images/jh-head.jpg"
_download_image_inner(d, (125,url))
assert (d/'00000125.jpg').is_file()
#export
def download_images(dest, url_file=None, urls=None, max_pics=1000, n_workers=8, timeout=4):
    "Download images listed in text file `url_file` to path `dest`, at most `max_pics`"
    if urls is None:
        urls = url_file.read_text().strip().split("\n")[:max_pics]
    dest = Path(dest)
    dest.mkdir(exist_ok=True)
    # One worker task per (index, url) pair; the index fixes the file name.
    download_one = partial(_download_image_inner, dest, timeout=timeout)
    parallel(download_one, list(enumerate(urls)), n_workers=n_workers)
with tempfile.TemporaryDirectory() as d:
d = Path(d)
url_file = d/'urls.txt'
url_file.write_text("\n".join([f"https://www.fast.ai/images/{n}" for n in "jh-head.jpg thomas.JPG sg-head.jpg".split()]))
download_images(d, url_file)
for i in [0,2]: assert (d/f'0000000{i}.jpg').is_file()
assert (d/f'00000001.JPG').is_file()
#export
def resize_to(img, targ_sz, use_min=False):
    "Size to resize to, to hit `targ_sz` at same aspect ratio, in PIL coords (i.e w*h)"
    w, h = img.size
    # Scale so that the longer side (or the shorter one if use_min) hits targ_sz.
    ref_side = min(w, h) if use_min else max(w, h)
    scale = targ_sz / ref_side
    return int(w * scale), int(h * scale)
class _FakeImg():
def __init__(self, size): self.size=size
img = _FakeImg((200,500))
test_eq(resize_to(img, 400), [160,400])
test_eq(resize_to(img, 400, use_min=True), [400,1000])
#export
def verify_image(fn):
    "Confirm that `fn` can be opened as an image; return True/False."
    try:
        im = Image.open(fn)
        # draft() asks the decoder for a cheap reduced-size decode where
        # supported, so verification stays fast on large images.
        im.draft(im.mode, (32,32))
        im.load()
        return True
    # BUG FIX: was a bare `except:`, which also swallowed KeyboardInterrupt
    # and SystemExit; narrow to Exception.
    except Exception:
        return False
#export
def verify_images(fns):
    "Find images in `fns` that can't be opened"
    results = parallel(verify_image, fns)
    return L(fn for fn, ok in zip(fns, results) if not ok)
#export
def resize_image(file, dest, max_size=None, n_channels=3, ext=None,
                 img_format=None, resample=Image.BILINEAR, resume=False, **kwargs ):
    "Resize file to dest to max_size"
    # NOTE(review): the image is written to `dest` only when a resize or a
    # channel conversion is actually needed; otherwise nothing is saved --
    # confirm callers don't expect an unconditional copy.
    dest = Path(dest)
    dest_fname = dest/file.name
    # Skip work if resuming and the output already exists.
    if resume and dest_fname.exists(): return
    if verify_image(file):
        img = Image.open(file)
        imgarr = np.array(img)
        # Grayscale arrays are 2-D; color arrays carry channels in axis 2.
        img_channels = 1 if len(imgarr.shape) == 2 else imgarr.shape[2]
        if (max_size is not None and (img.height > max_size or img.width > max_size)) or img_channels != n_channels:
            if ext is not None: dest_fname=dest_fname.with_suffix(ext)
            if max_size is not None:
                new_sz = resize_to(img, max_size)
                img = img.resize(new_sz, resample=resample)
            # NOTE(review): only the 3-channel case is converted (to RGB);
            # other n_channels values trigger a save without conversion.
            if n_channels == 3: img = img.convert("RGB")
            img.save(dest_fname, img_format, **kwargs)
file = Path('images/puppy.jpg')
dest = Path('.')
resize_image(file, max_size=400, dest=dest)
im = Image.open(dest/file.name)
test_eq(im.shape[1],400)
(dest/file.name).unlink()
#export
def resize_images(path, max_workers=defaults.cpus, max_size=None, recurse=False,
                  dest=Path('.'), n_channels=3, ext=None, img_format=None, resample=Image.BILINEAR,
                  resume=None, **kwargs):
    "Resize files on path recursively to dest to max_size"
    path = Path(path)
    # When writing to a separate destination, default resume to False so a
    # stale partial output is not silently kept (None and False behave the
    # same downstream in resize_image's truthiness test).
    if resume is None and dest != Path('.'): resume=False
    os.makedirs(dest, exist_ok=True)
    files = get_image_files(path, recurse=recurse)
    # Fan the per-file work out across workers; keyword args are forwarded
    # to resize_image for every file.
    parallel(resize_image, files, max_workers=max_workers, max_size=max_size, dest=dest, n_channels=n_channels, ext=ext,
             img_format=img_format, resample=resample, resume=resume, **kwargs)
with tempfile.TemporaryDirectory() as d:
dest = Path(d)/'resized_images'
resize_images('images', max_size=100, dest=dest)
```
# Export -
```
#hide
from nbdev.export import notebook2script
notebook2script()
```
| github_jupyter |
```
import numpy as np
import numba
import matplotlib.pyplot as plt
import sympy as sym
plt.style.use('presentation.mplstyle')
%matplotlib notebook
def d2np(d):
    """Pack a flat parameter dict into a one-record numpy structured array.

    Supported value types are float, int and numpy.ndarray (stored as a
    float64 sub-array field of the same shape).  Values of any other type
    are silently skipped.  Field order follows the dict's insertion order.

    Note: exact ``type(...) ==`` checks are kept on purpose (not
    ``isinstance``) so that e.g. bool values are skipped exactly as before.
    """
    # BUG FIX: removed the dead `names` list; it was built with
    # `names += item`, which extends a list with the *characters* of each
    # key string, and was never used anyway.
    numbers = ()
    dtypes = []
    for item in d:
        if type(d[item]) == float:
            numbers += (d[item],)
            dtypes += [(item,float)]
        if type(d[item]) == int:
            numbers += (d[item],)
            dtypes += [(item,int)]
        if type(d[item]) == np.ndarray:
            numbers += (d[item],)
            dtypes += [(item,np.float64,d[item].shape)]
    return np.array([numbers],dtype=dtypes)
i_1d,i_1q,i_2d,i_2q,v_cd,v_cq = sym.symbols('i_1d,i_1q,i_2d,i_2q,v_cd,v_cq')
di_1d,di_1q,di_2d,di_2q,dv_cd,dv_cq = sym.symbols('di_1d,di_1q,di_2d,di_2q,dv_cd,dv_cq')
L_1,L_2,C_ac,C_dc = sym.symbols('L_1,L_2,C_ac,C_dc')
R_1,R_2 = sym.symbols('R_1,R_2')
omega,dummy = sym.symbols('omega,dummy')
v_sd,v_sq,v_dc= sym.symbols('v_sd,v_sq,v_dc')
eta_d,eta_q = sym.symbols('eta_d,eta_q')
p_ref,q_ref = sym.symbols('p_ref,q_ref')
i_2d_ref,i_2q_ref = sym.symbols('i_2d_ref,i_2q_ref')
#i_2d = i_2d_ref
#i_2q = i_2q_ref
di_1d = 1/L_1*(0.5*eta_d*v_dc - R_1*i_1d + L_1*omega*i_1q - v_cd)
di_1q = 1/L_1*(0.5*eta_q*v_dc - R_1*i_1q - L_1*omega*i_1d - v_cq)
dv_cd = 1/C_ac*( i_1d + C_ac*omega*v_cq - i_2d)
dv_cq = 1/C_ac*( i_1q - C_ac*omega*v_cd - i_2q)
di_2d = 1/L_2*(v_cd - R_2*i_2d + L_2*omega*i_2q - v_sd)
di_2q = 1/L_2*(v_cq - R_2*i_2q - L_2*omega*i_2d - v_sq)
'''
'''
s = sym.solve([ di_1d, di_1q, dv_cd, dv_cq, di_2d, di_2q],
[ i_1d, i_1q, v_cd, v_cq, i_2d, i_2q])
for item in s:
print(item, '=', sym.simplify(s[item]))
s = sym.solve([ di_1d, di_1q, dv_cd, dv_cq, di_2d, di_2q],
[ eta_d, eta_q, i_1d, i_1q, v_cd, v_cq])
for item in s:
print(item, '=', sym.simplify(s[item]))
s = sym.solve([ dv_cd, dv_cq, di_2d, di_2q],
[ v_cd, v_cq, i_2d, i_2q])
for item in s:
print(item, '=', sym.simplify(s[item]))
s = sym.solve([ di_1d, di_1q],
[ i_1d, i_1q])
for item in s:
print(item, '=', sym.simplify(s[item]))
eq_p = p_ref - 3.0/2.0*(v_sd*i_2d + v_sq*i_2q)
eq_q = q_ref - 3.0/2.0*(v_sd*i_2q - v_sq*i_2d)
s = sym.solve([ eq_p, eq_q],
[i_2d, i_2q])
for item in s:
print(item, '=', sym.simplify(s[item]))
v_dc = 700.0
v_sq = 325.0
v_sd = 0.0
p_ref = 10.0e3
q_ref = 900.0e3
omega = 2.0*np.pi*50.0
L_1 = 200.0e-6
L_2 = 200.0e-6
R_1 = 0.1
R_2 = 0.1
C_ac = 100e-6
i_2d_ref = 2.0/3.0*(p_ref*v_sd - q_ref*v_sq)/(v_sd**2 + v_sq**2)
i_2q_ref = 2.0/3.0*(p_ref*v_sq + q_ref*v_sd)/(v_sd**2 + v_sq**2)
eta_d = 2.0*(-C_ac*R_1*omega*(L_2*i_2d_ref*omega + R_2*i_2q_ref + v_sq) - L_1*i_2q_ref*omega + R_1*i_2d_ref - (C_ac*L_1*omega**2 - 1.0)*(-L_2*i_2q_ref*omega + R_2*i_2d_ref + v_sd))/v_dc
eta_q = 2.0*(C_ac*R_1*omega*(-L_2*i_2q_ref*omega + R_2*i_2d_ref + v_sd) + L_1*i_2d_ref*omega + R_1*i_2q_ref - (C_ac*L_1*omega**2 - 1.0)*(L_2*i_2d_ref*omega + R_2*i_2q_ref + v_sq))/v_dc
print(i_2d_ref)
print(i_2q_ref)
print('eta_d = ',eta_d)
print(eta_q)
eta = eta_d + 1j*eta_q
angle = np.angle(eta)
eta_m = np.abs(eta)
if eta_m > 1.0:
eta_m = 1.0
eta = eta_m*np.exp(1j*angle)
eta_d = eta.real
eta_q = eta.imag
i_2d = (-0.5*eta_d*v_dc*(C_ac*L_1*R_2*omega**2 + C_ac*L_2*R_1*omega**2 - R_1 - R_2) + 0.5*eta_q*omega*v_dc*(-C_ac*L_1*L_2*omega**2 + C_ac*R_1*R_2 + L_1 + L_2) - omega*v_sq*(C_ac**2*L_1**2*L_2*omega**4 + C_ac**2*L_2*R_1**2*omega**2 - C_ac*L_1**2*omega**2 - 2.0*C_ac*L_1*L_2*omega**2 - C_ac*R_1**2 + L_1 + L_2) - v_sd*(C_ac**2*L_1**2*R_2*omega**4 + C_ac**2*R_1**2*R_2*omega**2 - 2.0*C_ac*L_1*R_2*omega**2 + R_1 + R_2))/(C_ac**2*L_1**2*L_2**2*omega**6 + C_ac**2*L_1**2*R_2**2*omega**4 + C_ac**2*L_2**2*R_1**2*omega**4 + C_ac**2*R_1**2*R_2**2*omega**2 - 2.0*C_ac*L_1**2*L_2*omega**4 - 2.0*C_ac*L_1*L_2**2*omega**4 - 2.0*C_ac*L_1*R_2**2*omega**2 - 2.0*C_ac*L_2*R_1**2*omega**2 + L_1**2*omega**2 + 2.0*L_1*L_2*omega**2 + L_2**2*omega**2 + R_1**2 + 2.0*R_1*R_2 + R_2**2)
i_2q = (-0.5*eta_d*omega*v_dc*(-C_ac*L_1*L_2*omega**2 + C_ac*R_1*R_2 + L_1 + L_2) - 0.5*eta_q*v_dc*(C_ac*L_1*R_2*omega**2 + C_ac*L_2*R_1*omega**2 - R_1 - R_2) + omega*v_sd*(C_ac**2*L_1**2*L_2*omega**4 + C_ac**2*L_2*R_1**2*omega**2 - C_ac*L_1**2*omega**2 - 2.0*C_ac*L_1*L_2*omega**2 - C_ac*R_1**2 + L_1 + L_2) - v_sq*(C_ac**2*L_1**2*R_2*omega**4 + C_ac**2*R_1**2*R_2*omega**2 - 2.0*C_ac*L_1*R_2*omega**2 + R_1 + R_2))/(C_ac**2*L_1**2*L_2**2*omega**6 + C_ac**2*L_1**2*R_2**2*omega**4 + C_ac**2*L_2**2*R_1**2*omega**4 + C_ac**2*R_1**2*R_2**2*omega**2 - 2.0*C_ac*L_1**2*L_2*omega**4 - 2.0*C_ac*L_1*L_2**2*omega**4 - 2.0*C_ac*L_1*R_2**2*omega**2 - 2.0*C_ac*L_2*R_1**2*omega**2 + L_1**2*omega**2 + 2.0*L_1*L_2*omega**2 + L_2**2*omega**2 + R_1**2 + 2.0*R_1*R_2 + R_2**2)
print(i_2d)
print(i_2q)
p = 3.0/2.0*(v_sd*i_2d + v_sq*i_2q)
q = 3.0/2.0*(v_sd*i_2q - v_sq*i_2d)
print('p = ', p/1000)
print('q = ', q/1000)
print('p = ', p/1000)
print('q = ', q/1000)
@numba.jit(nopython=True, cache=True)
def b2b_ctrl1(struct,i,m):
    '''
    Control level 1 for a back-to-back (AC/DC/AC) converter with the
    current dynamics neglected: each side's dq currents track their
    references instantly (i_xd = i_xd_ref, i_xq = i_xq_ref).

    The only state integrated here is the DC-bus voltage, stored at index
    ``b2b_idx`` of ``struct[i]['x']``; its derivative is written into
    ``struct[i]['f']``.  ``m`` is unused (kept for a uniform signature).

    NOTE(review): the original docstring described a "Doubly Fed Induction
    Machine"; this function models only the converter.
    '''
    x_idx = struct[i]['b2b_idx']
    # DC-bus voltage state
    v_dc = float(struct[i]['x'][x_idx+0,0])
    # Filter parameters of both converter sides
    L_1 = struct[i]['L_1']
    L_2 = struct[i]['L_2']
    R_1 = struct[i]['R_1']
    R_2 = struct[i]['R_2']
    C_dc = struct[i]['C_dc']
    omega_1 = struct[i]['omega_1']
    omega_2 = struct[i]['omega_2']
    i_1d_ref = struct[i]['i_1d_ref']
    i_1q_ref = struct[i]['i_1q_ref']
    i_2d_ref = struct[i]['i_2d_ref']
    i_2q_ref = struct[i]['i_2q_ref']
    # Neglected current dynamics: currents equal their references.
    i_1d = i_1d_ref
    i_1q = i_1q_ref
    i_2d = i_2d_ref
    i_2q = i_2q_ref
    v_1d = struct[i]['v_1d']
    v_1q = struct[i]['v_1q']
    v_2d = struct[i]['v_2d']
    v_2q = struct[i]['v_2q']
    # Modulation (duty-cycle) references from the steady-state RL equations.
    eta_1d = 2.0*(R_1*i_1d - L_1*i_1q*omega_1 + v_1d)/v_dc
    eta_1q = 2.0*(R_1*i_1q + L_1*i_1d*omega_1 + v_1q)/v_dc
    eta_2d = 2.0*(R_2*i_2d - L_2*i_2q*omega_2 + v_2d)/v_dc
    eta_2q = 2.0*(R_2*i_2q + L_2*i_2d*omega_2 + v_2q)/v_dc
    # DC-side current injected by each bridge.
    i_dc_1 = 3.0/4.0*(eta_1d*i_1d + eta_1q*i_1q)
    i_dc_2 = 3.0/4.0*(eta_2d*i_2d + eta_2q*i_2q)
    # DC-bus capacitor dynamics.
    dv_dc = 1.0/C_dc*(-i_dc_1 - i_dc_2)
    struct[i]['eta_1d'] = eta_1d
    struct[i]['eta_1q'] = eta_1q
    struct[i]['eta_2d'] = eta_2d
    struct[i]['eta_2q'] = eta_2q
    struct[i]['i_dc_1'] = i_dc_1
    struct[i]['i_dc_2'] = i_dc_2
    # Active/reactive power on each AC side (note the 3/2 factor, matching
    # the 2/3 in the current-reference formulas elsewhere in this notebook).
    struct[i]['p_1'] = 3.0/2.0*(v_1d*i_1d + v_1q*i_1q)
    struct[i]['q_1'] = 3.0/2.0*(v_1d*i_1q - v_1q*i_1d)
    struct[i]['p_2'] = 3.0/2.0*(v_2d*i_2d + v_2q*i_2q)
    struct[i]['q_2'] = 3.0/2.0*(v_2d*i_2q - v_2q*i_2d)
    struct[i]['f'][x_idx+0,0] = dv_dc
    return 0
@numba.jit(nopython=True, cache=True)
def b2b_ctrl2(struct,i,m):
    '''
    Control level 2 for the back-to-back converter DC-bus voltage.

    Builds the d/q current references of both converter sides from the
    active/reactive power references, with a PI correction on the DC-bus
    voltage error applied to the side selected by ``vdc_ctrl``.  The PI
    integrator state ``xi_v_dc`` lives at index ``b2b_ctrl_idx`` of
    ``struct[i]['x']``; its derivative is written into ``struct[i]['f']``.
    ``m`` is unused (kept for a uniform signature).
    '''
    x_idx = struct[i]['b2b_ctrl_idx']
    xi_v_dc = float(struct[i]['x'][x_idx+0,0])
    S_b = struct[i]['S_b']          # power base (W)
    V_dc_b = struct[i]['V_dc_b']    # DC voltage base (V)
    K_v_p = struct[i]['K_v_p']
    K_v_i = struct[i]['K_v_i']
    v_dc = struct[i]['v_dc']
    v_dc_ref = struct[i]['v_dc_ref']
    p_1_ref = struct[i]['p_1_ref']
    q_1_ref = struct[i]['q_1_ref']
    p_2_ref = struct[i]['p_2_ref']
    q_2_ref = struct[i]['q_2_ref']
    v_1d = struct[i]['v_1d']
    v_1q = struct[i]['v_1q']
    v_2d = struct[i]['v_2d']
    v_2q = struct[i]['v_2q']
    # Per-unit DC voltage error and PI output (converted back to W).
    error_v_dc = (v_dc - v_dc_ref)/V_dc_b
    p_ref = (K_v_p * error_v_dc + K_v_i*xi_v_dc)*S_b
    # BUG FIX: the PI output was assigned to unused locals p_ref_1/p_ref_2,
    # so the DC-voltage loop had no effect; it now overrides the power
    # reference of the selected side.
    if struct[i]['vdc_ctrl'] == 1:
        p_1_ref = p_ref
    if struct[i]['vdc_ctrl'] == 2:
        p_2_ref = p_ref
    # BUG FIX: guard against division by zero on a dead AC side.  The
    # original guards tested the 0.001 placeholder itself (always > 0), so
    # they never protected anything.
    den_1 = v_1d**2 + v_1q**2
    if den_1 < 1.0e-6:
        den_1 = 0.001
    den_2 = v_2d**2 + v_2q**2
    if den_2 < 1.0e-6:
        den_2 = 0.001
    # p/q -> dq current references (the 2/3 factor matches the 3/2 used in
    # the power expressions of b2b_ctrl1).
    i_1d_ref = 2.0/3.0*(p_1_ref*v_1d - q_1_ref*v_1q)/den_1
    i_1q_ref = 2.0/3.0*(p_1_ref*v_1q + q_1_ref*v_1d)/den_1
    # BUG FIX: side-2 references mistakenly used v_1q/v_1d (copy-paste typo
    # vs. the side-1 formulas and the symbolic solution derived above).
    i_2d_ref = 2.0/3.0*(p_2_ref*v_2d - q_2_ref*v_2q)/den_2
    i_2q_ref = 2.0/3.0*(p_2_ref*v_2q + q_2_ref*v_2d)/den_2
    struct[i]['i_1d_ref'] = i_1d_ref
    struct[i]['i_1q_ref'] = i_1q_ref
    struct[i]['i_2d_ref'] = i_2d_ref
    struct[i]['i_2q_ref'] = i_2q_ref
    # Integrator state derivative.
    dxi_v_dc = error_v_dc
    struct[i]['f'][x_idx+0,0] = dxi_v_dc
    return 0
R_1 = R_2 = 0.1
L_1 = L_2 = 0.5e-3
Omega_b = 2.0*np.pi*50.0
C_dc = 2200.0e-6
omega_1 = omega_2 = Omega_b
d =dict(R_1 = R_1,
R_2 = R_2,
L_1 = L_1,
L_2 = L_2,
C_dc = C_dc,
b2b_idx = 0,
b2b_ctrl_idx = 1,
v_dc = 800.0,
omega_1 = omega_1,
omega_2 = omega_2,
i_1d_ref = 0.0,
i_1q_ref = 100.0,
i_2d_ref = 0.0,
i_2q_ref = -100.0,
i_dc_1 = 0.0,
i_dc_2 = 0.0,
eta_1d = 0.0,
eta_1q = 0.0,
eta_2d = 0.0,
eta_2q = 0.0,
v_1d = 0.0,
v_1q = 325.0,
v_2d = 0.0,
v_2q = 325.0,
p_1 = 0.0,
q_1 = 0.0,
p_2 = 0.0,
q_2 = 0.0,
x_idx = 0,
xi_v_dc = 0.0,
S_b = 0.5e6,
V_dc_b = 800.0,
K_v_p = 0.1,
K_v_i = 0.0,
v_dc_ref = 750.0,
p_1_ref = 0.0,
q_1_ref = 0.0,
p_2_ref = 0.0,
q_2_ref = 0.0,
vdc_ctrl = 1,
x = np.array([[800.0],[0.0]]),
f = np.array([[0.0],[0.0]])
)
struct = d2np(d)
i=0
m=2
b2b_ctrl1(struct,i,m)
b2b_ctrl2(struct,i,m)
print(struct[i]['p_1'])
print(struct[i]['p_2'])
print(struct[i]['i_dc_1'])
print(struct[i]['i_dc_2'])
print(struct[i]['f'])
struct = d2np(d)
sys_d = dict(x = np.array([[800.0],[0.0]]),
f = np.zeros((2,1)))
sys_struct = d2np(sys_d)
@numba.jit(nopython=True, cache=True)
def f_eval(sys_struct,struct):
    '''
    Scatter the global state vector into each device struct, evaluate the
    device models, and gather their derivatives back into the global f.
    '''
    N_states = 2            # states per device (v_dc and xi_v_dc)
    for i in range(1):      # single device in this example
        struct[i]['x'][:,0] = sys_struct[0]['x'][N_states*i:N_states*(i+1),0]
        # NOTE(review): `m` is read from the enclosing module scope here,
        # not passed in -- confirm this is intended (numba freezes globals
        # at compile time in nopython mode).
        b2b_ctrl1(struct,i,m)
        b2b_ctrl2(struct,i,m)
        sys_struct[0]['f'][N_states*i:N_states*(i+1),:] = struct[i]['f']
    return 0
@numba.jit(nopython=True, cache=True)
def run(sys_struct,struct):
    '''
    Fixed-step time-domain simulation of the back-to-back converter.

    Integrates the global state with Heun's method (explicit predictor
    plus trapezoidal corrector) for N_steps steps of Dt seconds, applying
    step changes to the power references at t = 2 s and t = 3 s.

    Returns (T, X, V_dc): time vector, state trajectory and the logged
    'v_dc' field.
    '''
    N_steps = 1000
    N_states = 2
    Dt = 10.0e-3
    # Preallocated logs.  NOTE(review): most of these (and p_ref/q_ref/
    # xi_p/xi_q below) are never written or returned here -- they look like
    # leftovers from a larger machine model.
    Omega_r = np.zeros((N_steps,1))
    Omega_t = np.zeros((N_steps,1))
    P_1 = np.zeros((N_steps,1))
    Q_1 = np.zeros((N_steps,1))
    P_2 = np.zeros((N_steps,1))
    Q_2 = np.zeros((N_steps,1))
    V_dr = np.zeros((N_steps,1))
    V_qr = np.zeros((N_steps,1))
    I_dr = np.zeros((N_steps,1))
    I_qr = np.zeros((N_steps,1))
    Tau_e = np.zeros((N_steps,1))
    T = np.zeros((N_steps,1))
    X = np.zeros((N_steps,N_states))
    V_dc = np.zeros((N_steps,1))
    p_ref = 0.0
    q_ref = 0.0
    xi_p = 0.0
    xi_q = 0.0
    # Seed the device struct from the global initial state.
    struct[0]['x'][:,0] = np.copy(sys_struct[0]['x'][0:2,0])
    for it in range(N_steps):
        t = Dt*float(it)
        # perturbations and references
        struct[0]['p_1_ref'] = 0.0
        struct[0]['p_2_ref'] = 0.0
        struct[0]['q_1_ref'] = 0.0
        struct[0]['q_2_ref'] = 0.0
        if t>2.0:
            struct[0]['p_1_ref'] = 1.0e6
        if t>3.0:
            struct[0]['p_2_ref'] = 0.1e6
        ## solver: Heun's method (predict with Euler, correct with the
        ## average of the two slopes)
        f_eval(sys_struct,struct)
        f1 = np.copy(sys_struct[0]['f'])
        x1 = np.copy(sys_struct[0]['x'])
        sys_struct[0]['x'][:]= np.copy(x1 + Dt*f1)
        f_eval(sys_struct,struct)
        f2 = np.copy(sys_struct[0]['f'])
        sys_struct[0]['x'][:]= np.copy(x1 + 0.5*Dt*(f1 + f2))
        # Push the corrected state back into the device struct.
        for i in range(1):
            struct[i]['x'][:,0] = sys_struct[0]['x'][2*i:2*(i+1),0]
        T[it,0] = t
        # NOTE(review): this logs the static 'v_dc' field (never updated by
        # the control functions), not the integrated state x[0] -- confirm
        # which one should be recorded.
        V_dc[it,0] = float(struct[0]['v_dc'])
        X[it,:] = sys_struct[0]['x'][:].T
    return T,X,V_dc
# IPython cell magic — benchmarks the jitted simulation. Only valid inside
# a Jupyter/IPython cell, not in a plain Python module.
%timeit run(sys_struct, struct)
run(sys_struct, struct)
# NOTE(review): the lines below appear to belong to a DIFFERENT notebook
# cell/model (a doubly-fed induction machine): they reference fields
# ('v_qs', 'v_ds', 'tau_t', 'N_tr', 'N_pp') that do not exist in the dict
# `d` built above, index a 6-state vector though sys_struct has 2 states,
# and unpack 17 return values from `run`, which returns 3. They will fail
# against the structs defined in this chunk — confirm against the original
# notebook before running.
sys_struct['x'][:]= np.zeros((6,1))
struct['v_qs'] = 0.0
struct['v_ds'] = 690.0*np.sqrt(2.0/3.0)
struct['tau_t'] = 0.0
sys_struct[0]['x'][0,0] = Omega_b*0.9/struct[0]['N_tr']/struct[0]['N_pp']
sys_struct[0]['x'][3,0] = Omega_b*1.1/struct[1]['N_tr']/struct[0]['N_pp']
T,X,Tau_e,P_s_1,Q_s_1,P_r_1,Q_r_1,P_s_2,Q_s_2,P_r_2,Q_r_2,V_dr,V_qr,Omega_r,Omega_t,I_dr,I_qr = run(sys_struct, struct)
# Electromagnetic torque plot.
fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(8, 5), sharex = True)
axes.plot(T,Tau_e)
fig.savefig('dfim_tau_e.svg', bbox_inches='tight')
# Stator (top) and rotor (bottom) active/reactive powers, in MW/Mvar.
fig, axes = plt.subplots(nrows=2, ncols=1, figsize=(8, 8), sharex = True)
axes[0].plot(T,P_s_1/1e6, label='$\sf p_{s1}$')
axes[0].plot(T,Q_s_1/1e6, label='$\sf q_{s1}$')
axes[0].plot(T,P_s_2/1e6, label='$\sf p_{s2}$')
axes[0].plot(T,Q_s_2/1e6, label='$\sf q_{s2}$')
axes[1].plot(T,P_r_1/1e6, label='$\sf p_{r1}$')
axes[1].plot(T,Q_r_1/1e6, label='$\sf q_{r1}$')
axes[1].plot(T,P_r_2/1e6, label='$\sf p_{r2}$')
axes[1].plot(T,Q_r_2/1e6, label='$\sf q_{r2}$')
axes[0].legend()
axes[1].legend()
# NOTE(review): this overwrites the torque figure saved above with the
# power figure (same filename) — presumably unintended.
fig.savefig('dfim_tau_e.svg', bbox_inches='tight')
# Remaining exploratory plots: turbine speed, rotor voltages and currents,
# and one raw state component.
fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(8, 5), sharex = True)
axes.plot(T,Omega_t)
fig, axes = plt.subplots(nrows=2, ncols=1, figsize=(8, 8), sharex = True)
axes[0].plot(T,V_dr, label='$\sf v_{dr}$')
axes[0].plot(T,V_qr, label='$\sf v_{qr}$')
fig, axes = plt.subplots(nrows=2, ncols=1, figsize=(8, 8), sharex = True)
axes[0].plot(T,Omega_t, label='$\sf v_{dr}$')
Omega_t[0]
fig, axes = plt.subplots(nrows=2, ncols=1, figsize=(8, 8), sharex = True)
axes[0].plot(T,I_dr, label='$\sf i_{dr}$')
axes[0].plot(T,I_qr, label='$\sf i_{qr}$')
fig, axes = plt.subplots(nrows=2, ncols=1, figsize=(8, 8), sharex = True)
axes[0].plot(T,X[:,5], label='$\sf x$')
# Stray expression — draws one sample, result discarded.
np.random.normal(500e3,100e3)
```
| github_jupyter |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.