text stringlengths 2.5k 6.39M | kind stringclasses 3
values |
|---|---|
```
import os
# Root directories on NERSC CFS for the misaligned-event study.
store_dir = '/global/cfs/projectdirs/m3443/usr/caditi97/iml2020/misaligned/new_mis/'
og_evts = '/global/cfs/projectdirs/m3443/data/trackml-kaggle/train_all/'
# Set before the exatrkx imports below -- presumably read by the exatrkx
# pipeline to locate its input/output data (TODO confirm).
os.environ['TRKXINPUTDIR']=f"{store_dir}shift_x/"
os.environ['TRKXOUTPUTDIR']= f"{store_dir}shift_x_pre/"
import pkg_resources
import yaml
import pprint
import random
import numpy as np
import pandas as pd
import itertools
import matplotlib.pyplot as plt
import tqdm
from os import listdir
from os.path import isfile, join
import matplotlib.cm as cm
import sys
import csv
import time
import pickle
import tqdm
from tqdm import tqdm
import tqdm.notebook as tq
from pathlib import Path
# %matplotlib widget
sys.path.append('/global/homes/c/caditi97/exatrkx-iml2020/exatrkx/src/')
# 3rd party
import torch
import torch.nn.functional as F
from torch_geometric.data import Data
from trackml.dataset import load_event
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint
# local import
from exatrkx import config_dict # for accessing predefined configuration files
from exatrkx import outdir_dict # for accessing predefined output directories
from exatrkx.src import utils_dir
from exatrkx.src import utils_robust
from utils_robust import *
from exatrkx.src import utils_current
from utils_current import *
# for preprocessing
from exatrkx import FeatureStore
from exatrkx.src import utils_torch
# for embedding
from exatrkx import LayerlessEmbedding
from exatrkx.src import utils_torch
from torch_cluster import radius_graph
from utils_torch import build_edges
from embedding.embedding_base import *
# for filtering
from exatrkx import VanillaFilter
# for GNN
import tensorflow as tf
from graph_nets import utils_tf
from exatrkx import SegmentClassifier
import sonnet as snt
# for labeling
from exatrkx.scripts.tracks_from_gnn import prepare as prepare_labeling
from exatrkx.scripts.tracks_from_gnn import clustering as dbscan_clustering
# track efficiency
from trackml.score import _analyze_tracks
from exatrkx.scripts.eval_reco_trkx import make_cmp_plot, pt_configs, eta_configs
from functools import partial
# Original (aligned) TrackML events and the misalignment-scan parameters.
event_path = '/global/cfs/cdirs/m3443/data/trackml-kaggle/train_all/'
eventid = 'event000001005'
# Misalignment magnitudes to scan (units presumably mm -- the commented code
# below mentions "1000 micron = 1 milimeter"; TODO confirm).
misl = [0,0.0025,0.005,0.0075,0.01,0.012,0.015,0.017,0.02,0.1,0.4,0.6,0.8,1]
# Event ids 1000..1049.
evts = np.arange(1000,1050,1)
# NOTE: store_dir/og_evts are re-bound here, overriding the values assigned
# at the top of the notebook.
store_dir = '/global/cfs/projectdirs/m3443/usr/caditi97/iml2020/misaligned/volumes_shifted/'
og_evts = '/global/cfs/projectdirs/m3443/data/trackml-kaggle/train_10evts/'
def save_pickle(data, store_dir, store_path, eventid):
    """Serialize *data* to ``<store_dir>/<store_path>/<eventid>.pkl``.

    Parameters
    ----------
    data : object
        Any picklable object to store.
    store_dir, store_path : str
        Base directory and sub-directory for the output file; the
        directory is created if it does not exist.
    eventid : str
        Used as the output file's stem.

    Fix: the previous body wrote CSV rows from an undefined variable
    (``csvstff``) to a hard-coded path ``'pathtofile'``, ignoring every
    argument and raising NameError on each call; it now pickles *data*
    to a path derived from its arguments, as the function name implies.
    """
    out_dir = Path(store_dir) / store_path
    out_dir.mkdir(parents=True, exist_ok=True)
    with open(out_dir / f"{eventid}.pkl", 'wb') as f:
        pickle.dump(data, f, pickle.HIGHEST_PROTOCOL)
def preprocess():
    """Run the exatrkx 'build' preprocessing stage.

    Loads the packaged YAML config for the 'build' action, overrides a
    handful of options (no pt cut, include endcaps, keep noise off),
    prints the resulting config, and runs FeatureStore.prepare_data().
    """
    config_path = pkg_resources.resource_filename(
        "exatrkx", os.path.join('configs', config_dict['build']))
    with open(config_path) as cfg_file:
        build_cfg = yaml.load(cfg_file, Loader=yaml.FullLoader)
    build_cfg.update({
        'pt_min': 0,
        'endcaps': True,
        'n_workers': 2,
        'n_files': 10,
        'noise': 0,
    })
    pprint.PrettyPrinter(indent=4).pprint(build_cfg)
    data_module = FeatureStore(build_cfg)
    data_module.prepare_data()
```
# Remove Volume 8 Layer 6
```
# def remove_v8l6(hits,cells,particles,truth,store_dir,ev):
# nhits = hits[(hits['volume_id']==8) & (hits['layer_id']==6)]
# fhits = hits[~hits['hit_id'].isin(nhits['hit_id'])]
# fcells = cells[~cells['hit_id'].isin(fhits['hit_id'])]
# ftruth = truth[~truth['hit_id'].isin(fhits['hit_id'])]
# fparticles = particles[particles['particle_id'].isin(ftruth['particle_id'])]
# name = f"{store_dir}remove_v8l6/event00000{ev}-"
# fhits.to_csv(name+"hits.csv", index=False)
# fcells.to_csv(name+"cells.csv", index=False)
# ftruth.to_csv(name+"truth.csv", index=False)
# fparticles.to_csv(name+"particles.csv", index=False)
# # select 10 events, remove v8l6
# for evtid in evts:
# hits, cells, particles, truth = trackml.dataset.load_event(f"{og_evts}event00000{str(evtid)}")
# remove_v8l6(hits,cells,particles,truth,store_dir,evtid)
# v,_,_,_ = trackml.dataset.load_event(f"/global/cfs/projectdirs/m3443/usr/caditi97/iml2020/misaligned/new_mis/remove_v8l6/{eventid}")
# # check inference
# import os
# preprocess()
# v[(v['volume_id']==8) & (v['layer_id']==6)]
```
# Shift only x coordinate of every hit
```
# def shift_x(hits,cells,particles,truth,store_dir,ev,mis):
# # 1000 micron = 1 milimeter
# # one direction
# hits.loc[:, 'x'] = hits['x']+mis
# name = f"{store_dir}shift_x_all/{mis}"
# Path(name).mkdir(parents=True, exist_ok=True)
# name = name + f'/event00000{ev}-'
# hits.to_csv(name+"hits.csv", index=False)
# cells.to_csv(name+"cells.csv", index=False)
# truth.to_csv(name+"truth.csv", index=False)
# particles.to_csv(name+"particles.csv", index=False)
# for mis in tqdm(misl):
# print(f"------ {mis} ------")
# for evtid in tq.tqdm(evts):
# hits, cells, particles, truth = trackml.dataset.load_event(f"{event_path}event00000{str(evtid)}")
# shift_x(hits,cells,particles,truth,store_dir,evtid,mis)
# oh,oc,op,ot = trackml.dataset.load_event(f"{og_evts}{eventid}")
# oh
# h,c,p,t = trackml.dataset.load_event(f"/global/cfs/projectdirs/m3443/usr/caditi97/iml2020/misaligned/new_mis/shift_x/1/{eventid}")
# h
```
# Shift only x coordinate of Volume 8 all layers
```
# def shift_x_v8(hits,cells,particles,truth,store_dir,ev,mis):
# # 1000 micron = 1 milimeter
# #one direction
# hits.loc[(hits['volume_id']==8), 'x'] = hits[hits['volume_id']==8]['x']+mis
# # hits.loc[(hits['volume_id']==8) & (hits['x']<0), 'x'] = hits[hits['volume_id']==8]['x']-mis
# # hits.loc[(hits['volume_id']==8) & (hits['x']>0), 'x'] = hits[hits['volume_id']==8]['x']+mis
# name = f"{store_dir}shift_x_v8/{mis}/event00000{ev}-"
# hits.to_csv(name+"hits.csv", index=False)
# cells.to_csv(name+"cells.csv", index=False)
# truth.to_csv(name+"truth.csv", index=False)
# particles.to_csv(name+"particles.csv", index=False)
# for mis in misl:
# for evtid in evts:
# hits, cells, particles, truth = trackml.dataset.load_event(f"{og_evts}event00000{str(evtid)}")
# shift_x_v8(hits,cells,particles,truth,store_dir,evtid,mis)
# oh,oc,op,ot = trackml.dataset.load_event(f"{og_evts}{eventid}")
# oh[oh['volume_id'] == 8]
# h,c,p,t = trackml.dataset.load_event(f"/global/cfs/projectdirs/m3443/usr/caditi97/iml2020/misaligned/new_mis/shift_x_v8/1/{eventid}")
# h[h['volume_id'] == 8]
```
# Shift x and y coordinates of all layers in volume 8
```
# def shift_xy_v8(hits,cells,particles,truth,store_dir,ev,mis):
# # 1000 micron = 1 milimeter
# # two directions
# # hits.loc[(hits['volume_id']==8) & (hits['x']<0), 'x'] = hits[hits['volume_id']==8]['x']-mis
# # hits.loc[(hits['volume_id']==8) & (hits['x']>0), 'x'] = hits[hits['volume_id']==8]['x']+mis
# # hits.loc[(hits['volume_id']==8) & (hits['y']<0), 'x'] = hits[hits['volume_id']==8]['y']-mis
# # hits.loc[(hits['volume_id']==8) & (hits['y']>0), 'x'] = hits[hits['volume_id']==8]['y']+mis
# # one direction
# hits.loc[(hits['volume_id']==8), 'x'] = hits[hits['volume_id']==8]['x']+mis
# hits.loc[(hits['volume_id']==8), 'y'] = hits[hits['volume_id']==8]['y']+mis
# name = f"{store_dir}shift_xy_v8/{mis}/event00000{ev}-"
# hits.to_csv(name+"hits.csv", index=False)
# cells.to_csv(name+"cells.csv", index=False)
# truth.to_csv(name+"truth.csv", index=False)
# particles.to_csv(name+"particles.csv", index=False)
# for mis in misl:
# for evtid in evts:
# hits, cells, particles, truth = trackml.dataset.load_event(f"{og_evts}event00000{str(evtid)}")
# shift_xy_v8(hits,cells,particles,truth,store_dir,evtid,mis)
# oh,oc,op,ot = trackml.dataset.load_event(f"{og_evts}{eventid}")
# oh[oh['volume_id'] == 8]
# h,c,p,t = trackml.dataset.load_event(f"/global/cfs/projectdirs/m3443/usr/caditi97/iml2020/misaligned/new_mis/shift_xy_v8/1/{eventid}")
# h[h['volume_id'] == 8]
```
# Shift x coordinate of endcap volume 12
```
# def shift_x_v12(hits,cells,particles,truth,store_dir,ev,mis):
# # 1000 micron = 1 milimeter
# # two directions
# # hits.loc[(hits['volume_id']==8) & (hits['x']<0), 'x'] = hits[hits['volume_id']==8]['x']-mis
# # hits.loc[(hits['volume_id']==8) & (hits['x']>0), 'x'] = hits[hits['volume_id']==8]['x']+mis
# # hits.loc[(hits['volume_id']==8) & (hits['y']<0), 'x'] = hits[hits['volume_id']==8]['y']-mis
# # hits.loc[(hits['volume_id']==8) & (hits['y']>0), 'x'] = hits[hits['volume_id']==8]['y']+mis
# # one direction
# hits.loc[(hits['volume_id']==12), 'x'] = hits[hits['volume_id']==12]['x']+mis
# hits.loc[(hits['volume_id']==12), 'y'] = hits[hits['volume_id']==12]['y']+mis
# name = f"{store_dir}shift_x_v12/{mis}/event00000{ev}-"
# hits.to_csv(name+"hits.csv", index=False)
# cells.to_csv(name+"cells.csv", index=False)
# truth.to_csv(name+"truth.csv", index=False)
# particles.to_csv(name+"particles.csv", index=False)
# for mis in misl:
# for evtid in evts:
# hits, cells, particles, truth = trackml.dataset.load_event(f"{og_evts}event00000{str(evtid)}")
# shift_x_v12(hits,cells,particles,truth,store_dir,evtid,mis)
# oh,oc,op,ot = trackml.dataset.load_event(f"{og_evts}{eventid}")
# oh[oh['volume_id'] == 12]
# h,c,p,t = trackml.dataset.load_event(f"/global/cfs/projectdirs/m3443/usr/caditi97/iml2020/misaligned/new_mis/shift_x_v12/1/{eventid}")
# h[h['volume_id'] == 12]
def shift(hits, cells, particles, truth, store_dir, ev, mis, vol):
    """Shift the x coordinate of every hit in volume *vol* by *mis*
    (in place) and write the four event tables as CSV files under
    ``<store_dir>shift_x_<vol>/<mis>/event00000<ev>-*.csv``."""
    in_volume = hits['volume_id'] == vol
    hits.loc[in_volume, 'x'] = hits.loc[in_volume, 'x'] + mis
    base = f"{store_dir}shift_x_{vol}/{mis}"
    Path(base).mkdir(parents=True, exist_ok=True)
    prefix = base + f'/event00000{ev}-'
    for frame, label in ((hits, 'hits'), (cells, 'cells'),
                         (truth, 'truth'), (particles, 'particles')):
        frame.to_csv(prefix + label + ".csv", index=False)
# Detector volumes to shift; includes barrel volume 8 and endcap volume 12
# studied in the cells above (the remaining ids presumably cover the other
# barrel/endcap volumes -- TODO confirm against the TrackML geometry).
vols = [7,8,9,12,13,14,16,17,18]
# NOTE: misl and evts are narrowed here (a single 1-micron shift, 10 events),
# overriding the earlier definitions.
misl = [0.001]
evts = np.arange(1000,1010,1)
```
# Shift x of all volumes by 0 to 1 mm
```
# Apply every misalignment to every volume of every event.
for mis in tqdm(misl):
    print(f"------ {mis} ------")
    for evtid in tq.tqdm(evts):
        for vol in vols:
            # Reload the event for each volume so every shift starts from
            # the original (unshifted) hit positions -- `shift` mutates
            # `hits` in place.
            # Fix: only `load_event` is imported from trackml.dataset at
            # the top of the file; the bare name `trackml` is unbound, so
            # `trackml.dataset.load_event(...)` raised NameError.
            hits, cells, particles, truth = load_event(f"{event_path}event00000{str(evtid)}")
            shift(hits, cells, particles, truth, store_dir, evtid, mis, vol)
```
| github_jupyter |
# Dictionaries and Sets
**CS1302 Introduction to Computer Programming**
___
```
%reload_ext mytutor
```
## Motivation for associative container
The following code simulates the outcomes from rolling a dice multiple times.
```
import random
dice_rolls = [random.randint(1,6) for i in range(10)]
print(*dice_rolls)
```
**What is the distribution, i.e., fractional counts?**
```
distribution = [dice_rolls.count(i) / len(dice_rolls) for i in range(7)]
import matplotlib.pyplot as plt
plt.stem(range(7), distribution, use_line_collection=True)
plt.xlabel('Outcomes')
plt.title('Distribution')
plt.ylim(0, 1)
```
In the above code, `distribution[i]` stores the fractional count of outcome `i`.
However, `distribution[0]` is `0` because a dice does not have outcome `0`. Can we avoid such redundancy?
```
distinct_outcomes = [
outcome for outcome in range(1, 7) if dice_rolls.count(outcome) > 0
]
distribution = [
dice_rolls.count(distinct_outcomes[i]) / len(dice_rolls)
for i in range(len(distinct_outcomes))
]
import matplotlib.pyplot as plt
plt.stem(distinct_outcomes, distribution, use_line_collection=True)
plt.xlabel('Outcomes')
plt.title('Distribution')
plt.ylim(0, 1)
```
In the above code,
- `distinct_outcomes` stores the list of distinct outcomes, and
- `distribution[distinct_outcomes[i]]` stores the fractional count of the `i`-th distinct outcome.
What about finding the distribution of characters in an article?
There are 1,112,064 unicode characters.
- How to obtain the distribution efficiently without creating an entry for each unicode character?
- How to compute the set of distinct characters efficiently without iterating over the set of all unicode characters?
- Can we index `distribution` directly by the set of distinct characters?
What we need is a composite data type that
- can keep a set of *unique keys of different types* (such as the characters in our example), and
- associate with different keys possibly different *values of any types* (such as the fractional counts of the characters).
Such data structure is called an [associative container](https://en.wikipedia.org/wiki/Associative_containers).
**How to use associative containers in Python?**
There are two built-in classes for associative containers:
- `set` can store a set of unique keys of possibly different types.
- `dict`ionary can store a set of key-value pairs.
We have already used sets and dictionaries before.
```
%%mytutor -h 400
a = (lambda **kwargs: kwargs)(start=0, stop=5, step=1)
b = set([1,1,2,3,3,3])
assert len(a) == len(b)
```
Both `set` and `dict`
- implement `len` method that returns the number of keys, and
- are mutable, so we can mutate their keys and values.
## Constructing associative containers
**How to create set/dictionary?**
Similar to tuple/list, we can use enclosure, constructors, and comprehension.
**How to create a set/dict by enumerating its keys/values?**
For `dict`, enclose a comma-separated sequence of `key : value` pairs by braces `{` and `}`.
```
%%mytutor -h 350
empty_dictionary = {}
a = {'a': 0, 'b': 1}
b = {**a, 'c': 0, 'd': 1}
```
For `set`, omit `: value`.
```
%%mytutor -h 300
a = {(1, 2.0), print, *range(2), *'23'}
empty_set = {*()} # Why not use {}?
```
We can also create a set/dictionary from other objects using their constructors `set`/`dict`.
```
%%mytutor -h 550
empty_set = set()
string2set = set('abc')
range2set = set(range(2))
list2set = set(['abc',range(2)])
set2set = set(list2set)
%%mytutor -h 650
empty_dict = dict()
enumerate2dict = dict(enumerate('abc'))
zip2dict = dict(zip('abc','123'))
kwargs2dict = dict(one=1,two=2)
dict2dict = dict(kwargs2dict)
```
**Exercise** `dict` also has a [*class method* `fromkeys`](https://docs.python.org/3/library/stdtypes.html#dict.fromkeys) to construct a dictionary with keys from iterable pointing to a default value. Create a dictionary using `fromkeys` with keys being the non-negative integers smaller than `100` and values being `0`.
*Hint:* Use `dict.fromkeys` since a class method is bound to the class rather than an object of the class.
```
dict.fromkeys?
### BEGIN SOLUTION
fromkeys_dict = dict.fromkeys(range(100),0)
### END SOLUTION
# test
assert all(fromkeys_dict[k] == 0 for k in fromkeys_dict)
```
**How to use a rule to construct a set/dictionary?**
The following function uses a one-line dictionary comprehension to return the distribution of items in a sequence:
```
def distribute(seq):
    """Map each distinct item of *seq* to its fractional count
    (relative frequency) in *seq*."""
    total = len(seq)
    return {item: seq.count(item) / total for item in set(seq)}
import matplotlib.pyplot as plt
def plot_distribution(seq):
    """Stem-plot the distribution of the items in *seq*."""
    dist = distribute(seq)
    # keys()/values() are dynamic views of the dict, not copies.
    items, fractions = dist.keys(), dist.values()
    plt.stem(items, fractions, use_line_collection=True)
    plt.xlabel('Items')
    plt.title('Distribution')
    plt.ylim(0, 1)
plot_distribution('What is the distribution of different characters?')
```
- The object methods `keys` and `values` provide a dynamic [view](https://docs.python.org/3/glossary.html#term-dictionary-view) of the keys.
- Unlike a copy, subsequent changes to the dictionary are also reflected in a previously returned view.
- `items` provides a set-like view of the key-value pairs.
```
%%mytutor -h 500
a = dict(enumerate('abc'))
views = a.keys(), a.values(), a.items()
a.pop(1) # remove the key 1 and its associated value
a.popitem() # remove and return a key-value pair
a.clear() # clear the dictionary
```
`set` has `pop` and `clear` but not `popitem`. However, `set.pop` behaves like `dict.popitem` instead of `dict.pop`. (Why?)
```
%%mytutor -h 250
a = set('abc')
a.pop() # remove and return an element
a.clear() # clear the set
```
**Exercise** Use one-line comprehension to return a set of composite numbers smaller than `stop`.
*Hint:* You do not need to worry about duplicate elements for `set`.
```
def composite_set(stop):
    """Return the set of all composite numbers smaller than *stop*."""
    ### BEGIN SOLUTION
    composites = set()
    for divisor in range(2, stop):
        # every proper multiple of `divisor` below `stop` is composite;
        # duplicates are absorbed by the set
        composites.update(range(divisor * 2, stop, divisor))
    return composites
    ### END SOLUTION
print(*sorted(composite_set(100)))
```
## Hashability
For `set` and `dict`,
- identical keys are merged to the same entry even though
- values associated with different keys can be the same.
```
%%mytutor -h 350
a = {0: 'a', 0.0: 'b', 2: 'b'}
b = {0j, 0, 0.0, '', False}
assert 0 == 0.0 == 0j == False != ''
```
This is implemented efficiently by [*hashing*](https://docs.python.org/3/glossary.html#term-hashable). A key must be a hashable object which:
- has a hash value (returned by `__hash__` method) that never changes during its lifetime, and
- can be compared (using `__eq__` method) to other objects.
*Hashable objects which compare equal must have the same hash value.*
```
import collections
for i in 0, 0.0, 0j, '', False, (), [], {}, set(), frozenset():
if isinstance(i, collections.abc.Hashable):
print('{} is hashable. E.g., hash({!r}) == {}'.format(type(i),i,hash(i)))
else:
print('{} is NOT hashable.'.format(type(i)))
```
**Why the key should be hashable?**
**What is the use of a hash value?**
Associative containers are implemented as *hash tables* for efficient lookup of key values.
```
%%html
<iframe width="912" height="513" src="https://www.youtube.com/embed/LPzN8jgbnvA" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
```
Most mutable objects are not hashable. Why?
Mutating a key makes it a different key, which is [hard to track](https://hynek.me/articles/hashes-and-equality/).
`set` has an immutable counterpart called `frozenset`, but `dict` does not have any immutable counterpart. Why?
While elements of a set must be hashable and therefore mostly immutable, dictionary values may be of mutable types.
Python also uses dictionary for its global/local frames.
Indeed, [hash collisions can slow down the lookup process](https://stackoverflow.com/questions/8271139/why-is-early-return-slower-than-else).
**Exercise** Why equal objects must have the same hash but different objects may have the same hash? An example is given below:
```
assert hash(0) == hash(0.0) == hash(0j) == hash(False) == hash('') and False != ''
```
1. To avoid duplicate keys occupying different entries in a hash table.
2. Hash collision can be detected by `==` and handled by [collision resolution](https://en.wikipedia.org/wiki/Hash_table#Collision_resolution) techniques. To keep the hash table small, hash collision is unavoidable.
## Accessing keys/values
**How to traverse a set/dictionary?**
Set and dictionaries are iterable.
The for loop iterates over the keys.
```
a = set('abcde')
b = dict(enumerate('abcde'))
print(*(element for element in a))
print(*((key,b[key]) for key in b))
a[0] # TypeError
```
- For the dictionary `b`, we used subscription `b[key]` to access the value associated with `key`.
- Unlike dictionary, set does not implement [`__getitem__`](https://docs.python.org/3/reference/datamodel.html#object.__getitem__) and is therefore not subscriptable.
Unlike tuple/list, `b[-1]` does not refer to the value of the last entry. (Dictionary is not ordered.)
```
b[-1] # KeyError
```
The above raises a key error because `-1` is not a key in the dictionary `b`.
Dictionary implements the [`__setitem__`](https://docs.python.org/3/reference/simple_stmts.html#assignment-statements) method so we can enter a key value pair to a dictionary using the assignment operator.
```
b[-1] = 'f'
b[-1]
```
To delete a key, we can use the function `del`.
```
del b[-1]
b[-1]
```
To avoid key error, we can check if a key is in a dictionary efficiently (due to hashing) using the `in` operator.
The following is a different implementation of `distribute`.
```
def distribute(seq):
    """Build {item: fractional count} incrementally, adding 1/len(seq)
    to an item's entry each time the item is seen."""
    dist = {}
    for item in seq:
        # EAFP: missing keys are rarer than hits once the dict fills up.
        try:
            dist[item] += 1 / len(seq)
        except KeyError:
            dist[item] = 1 / len(seq)
    return dist
plot_distribution('What is the distribution of different characters?')
```
**Exercise** Unlike the previous implementation using one-line dictionary comprehension, the above alternative implementation uses multiple lines of code to build the dictionary incrementally starting from an empty dictionary.
```Python
def distribute(seq):
return {k : seq.count(k)/len(seq) for k in set(seq)}
```
Explain whether the alternative is more efficient.
It is more efficient because
- the alternative implementation traverses `seq` once with near constant time lookup of the key, but
- the list comprehension can traverse `seq` a multiple times linear in `len(seq)`, since every call to `seq.count` has to traverse `seq` once.
Shorter code needs not be more efficient.
**Exercise** `dict` also has a getter method `get` that conveniently returns a default value if the key does not exist. Rewrite the alternative implementation of `distribute` to use `get` instead of `in`.
```
dict.get?
def distribute(seq):
    """Build {item: fractional count} using dict.get with a default of 0
    to avoid key errors on first sight of an item."""
    counts = {}
    for item in seq:
        ### BEGIN SOLUTION
        previous = counts.get(item, 0)
        counts[item] = previous + 1 / len(seq)
        ### END SOLUTION
    return counts
plot_distribution('What is the distribution of different characters?')
```
**How to traverse in ascending order of the keys?**
We can apply the function `sorted` to a set/dictionary to return a sorted list of the keys.
```
%%mytutor -h 600
a = set(reversed('abcde'))
b = dict(reversed([*enumerate('abcde')]))
sorted_elements = sorted(a)
sorted_keys = sorted(b)
```
**Exercise** Re-implement `plot_distribution` to plot the distribution in ascending order of the keys.
```
def plot_distribution(seq):
    """Stem-plot the distribution of items in *seq* in ascending key order.

    Fix: this cell referenced an undefined name ``pyplot``; matplotlib is
    imported in this notebook as ``plt`` (``import matplotlib.pyplot as
    plt``), so all calls now go through ``plt``.
    """
    dist = distribute(seq)
    # plt.stem(dist.keys(), dist.values(), use_line_collection=True)
    ### BEGIN SOLUTION
    # sort the (key, value) pairs by key; keys are unique so the value
    # never participates in the comparison
    pairs = sorted(dist.items())
    plt.stem([k for k, _ in pairs], [v for _, v in pairs],
             use_line_collection=True)
    ### END SOLUTION
    plt.xlabel('Items')
    plt.title('Distribution')
    plt.ylim(0, 1)
plot_distribution('What is the distribution of different characters?')
```
**How to add an element to a set and remove an element from it?**
Instead of subscription, `set` has the `add`/`discard`/`remove` methods for adding/removing elements.
```
%%mytutor -h 400
a = set('abc')
a.add('d')
a.discard('a')
a.remove('b')
a.clear()
a.discard('a') # no error
a.remove('b') # KeyError
```
## Other operators and methods
Unlike `str`/`tuple`/`list`, `set` and `dict` do not implement addition `+` and multiplication `*`:
```
any(hasattr(container, attr) for attr in ('__add__', '__mult__')
for container in (dict, set, frozenset))
```
**Exercise** Use the unpacking operators `*` and `**` to concatenate two sets/dictionaries below into a new set/dictionary.
```
set1 = set('abc')
set2 = set('cde')
### BEGIN SOLUTION
concatenated_set = {*set1,*set2}
### END SOLUTION
concatenated_set
dict1 = dict(enumerate('abc'))
dict2 = dict(enumerate('def',start=2))
### BEGIN SOLUTION
concatenated_dict = {**dict1,**dict2}
### END SOLUTION
concatenated_dict
```
`set` overloads many other operators:
```
%%mytutor -h 550
a, b = {1,2}, {2,3}
union = a | b
assert all(i in union for i in a) and all(i in union for i in b)
intersection = a & b
assert all(i in a and i in b for i in intersection)
assert intersection <= a <= union # subset
assert union > b > intersection # proper superset
assert len(a) + len(b) == len(intersection) + len(union)
symmetric_difference = a ^ b
assert all((i in a or i in b) and not (i in a and i in b)
for i in symmetric_difference)
assert symmetric_difference == union - intersection
assert set.isdisjoint(intersection, symmetric_difference)
assert len(union) == len(intersection) + len(symmetric_difference)
```
The following uses `&` and `-` to compare the sets of public attributes for `set` and `dict`:
```
set_attributes = {attr for attr in dir(set) if attr[0] != '_'}
dict_attributes = {attr for attr in dir(dict) if attr[0] != '_'}
print('Common attributes:',', '.join(set_attributes & dict_attributes))
print('dict-specific attributes:',', '.join(dict_attributes - set_attributes))
print('set-specific attributes:',', '.join(set_attributes - dict_attributes))
```
For `set`, the intersection operation `&` can also be performed by
- the class method `intersection` which returns the intersection of its arguments, and
- the object method `intersection_update` which mutates a set object by intersecting the set with the arguments.
```
%%mytutor -h 300
a = {0,1,2}
b = {1,2,3}
c = set.intersection(a,b,{2,3,4})
a.intersection_update(b,c)
```
- All other set-specific methods have an associated operator except `isdisjoint` as shown below.
- The object method for `union` is `update` not `union_update`.
| class method | object method | operator |
| ---------------------- | ----------------------------- | ------------ |
| `union` | `update` | `\| ` |
| `intersection` | `intersection_update` | `&` |
| `symmetric_difference` | `symmetric_difference_update` | `^` |
| `issubset` | | `<=` |
| `issuperset` | | `>=` |
| `isdisjoint` | | |
`dict` also has an `update` method that can update a dictionary using dictionary, iterables and keyword arguments:
```
%%mytutor -h 300
a = {}
a.update(enumerate('a'),b=2)
b = a.copy()
a.update(b,c=3)
```
**Exercise** For `dict`, there is also a method called [`setdefault`](https://stackoverflow.com/questions/3483520/use-cases-for-the-setdefault-dict-method). Use it to define a function `group_by_type` that
- takes a sequence `seq` of objects and
- returns a dictionary `d` such that `d[repr(t)]` returns the list of objects in `seq` of type `t`
If there are no objects of type `t`, a key error is raised.
```
def group_by_type(seq):
    """Group the objects in *seq* by the repr of their type.

    Returns a dict mapping ``repr(type(obj))`` to the list of objects of
    that type, so indexing with a type that never occurs raises KeyError.
    """
    groups = {}
    for obj in seq:
        ### BEGIN SOLUTION
        key = repr(type(obj))
        groups.setdefault(key, []).append(obj)
        ### END SOLUTION
    return groups
group_by_type([*range(3),
*'abc',
*[i/2 for i in range(3)],
*[(i,) for i in range(3)],
*[[i] for i in range(3)],
*[{i} for i in range(3)],
*[{i:i} for i in range(3)],
print,hash,
int,str,float,set,dict,
(i for i in range(10)),
enumerate('abc'),
range(3),
zip(),
set.add,
dict.copy])
```
| github_jupyter |
```
# default_exp models.MINIROCKET
```
# MINIROCKET
> A Very Fast (Almost) Deterministic Transform for Time Series Classification.
```
#export
from tsai.imports import *
from tsai.utils import *
from tsai.data.external import *
from tsai.models.layers import *
#export
from sktime.transformations.panel.rocket import MiniRocketMultivariate
from sklearn.linear_model import RidgeCV, RidgeClassifierCV
from sklearn.ensemble import VotingClassifier, VotingRegressor
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
#export
class MiniRocketClassifier(sklearn.pipeline.Pipeline):
    """Time series classification using MINIROCKET features and a linear classifier"""
    def __init__(self, num_features=10_000, max_dilations_per_kernel=32, random_state=None,
                 alphas=np.logspace(-3, 3, 7), normalize_features=True, memory=None, verbose=False, scoring=None, class_weight=None, **kwargs):
        """ MiniRocketClassifier is recommended for up to 10k time series.
        For a larger dataset, you can use MINIROCKET (in Pytorch).
        scoring = None --> defaults to accuracy.
        """
        # sktime 0.9.0 renamed num_features to num_kernels; resolved by
        # Siva Sai (SivaAndMe on GitHub), https://github.com/timeseriesAI/tsai/pull/306
        self.steps = [('minirocketmultivariate', MiniRocketMultivariate(num_kernels=num_features,
                                                                        max_dilations_per_kernel=max_dilations_per_kernel,
                                                                        random_state=random_state)),
                      ('ridgeclassifiercv', RidgeClassifierCV(alphas=alphas,
                                                              normalize=normalize_features,
                                                              scoring=scoring,
                                                              class_weight=class_weight,
                                                              **kwargs))]
        store_attr()
        self._validate_steps()

    def __repr__(self):
        return f'Pipeline(steps={self.steps.copy()})'

    def save(self, fname=None, path='./models'):
        """Pickle this classifier to ``<path>/<fname>.pkl`` (directories
        are created as needed)."""
        fname = ifnone(fname, 'MiniRocketClassifier')
        path = Path(path)
        filename = path/fname
        filename.parent.mkdir(parents=True, exist_ok=True)
        # Fix: the file was previously opened at a literal placeholder path,
        # leaving the computed `filename` unused; use it so fname/path are
        # honored.
        with open(f'{filename}.pkl', 'wb') as output:
            pickle.dump(self, output, pickle.HIGHEST_PROTOCOL)
#export
def load_minirocket(fname, path='./models'):
    """Unpickle a model previously written by a ``save`` method above,
    i.e. load ``<path>/<fname>.pkl``.

    Fix: the file was previously opened at a literal placeholder path,
    leaving the computed `filename` unused; the `input` local also
    shadowed the builtin and is renamed.
    """
    filename = Path(path)/fname
    with open(f'{filename}.pkl', 'rb') as pkl_file:
        return pickle.load(pkl_file)
#export
class MiniRocketRegressor(sklearn.pipeline.Pipeline):
    """Time series regression using MINIROCKET features and a linear regressor"""
    def __init__(self, num_features=10000, max_dilations_per_kernel=32, random_state=None,
                 alphas=np.logspace(-3, 3, 7), *, normalize_features=True, memory=None, verbose=False, scoring=None, **kwargs):
        """ MiniRocketRegressor is recommended for up to 10k time series.
        For a larger dataset, you can use MINIROCKET (in Pytorch).
        scoring = None --> defaults to r2.
        """
        # sktime 0.9.0 renamed num_features to num_kernels; resolved by
        # Siva Sai (SivaAndMe on GitHub), https://github.com/timeseriesAI/tsai/pull/306
        self.steps = [('minirocketmultivariate', MiniRocketMultivariate(num_kernels=num_features,
                                                                        max_dilations_per_kernel=max_dilations_per_kernel,
                                                                        random_state=random_state)),
                      ('ridgecv', RidgeCV(alphas=alphas, normalize=normalize_features, scoring=scoring, **kwargs))]
        store_attr()
        self._validate_steps()

    def __repr__(self):
        return f'Pipeline(steps={self.steps.copy()})'

    def save(self, fname=None, path='./models'):
        """Pickle this regressor to ``<path>/<fname>.pkl`` (directories
        are created as needed)."""
        fname = ifnone(fname, 'MiniRocketRegressor')
        path = Path(path)
        filename = path/fname
        filename.parent.mkdir(parents=True, exist_ok=True)
        # Fix: use the computed `filename` instead of a hard-coded
        # placeholder path so fname/path are honored.
        with open(f'{filename}.pkl', 'wb') as output:
            pickle.dump(self, output, pickle.HIGHEST_PROTOCOL)
#export
def load_minirocket(fname, path='./models'):
    """Unpickle a model previously written by a ``save`` method above,
    i.e. load ``<path>/<fname>.pkl``.

    (Duplicate of the definition earlier in this file; both are fixed
    identically.) Fix: the file was previously opened at a literal
    placeholder path, leaving the computed `filename` unused; the
    `input` local also shadowed the builtin and is renamed.
    """
    filename = Path(path)/fname
    with open(f'{filename}.pkl', 'rb') as pkl_file:
        return pickle.load(pkl_file)
#export
class MiniRocketVotingClassifier(VotingClassifier):
    """Time series classification ensemble using MINIROCKET features, a linear classifier and majority voting"""
    def __init__(self, n_estimators=5, weights=None, n_jobs=-1, num_features=10_000, max_dilations_per_kernel=32, random_state=None,
                 alphas=np.logspace(-3, 3, 7), normalize_features=True, memory=None, verbose=False, scoring=None, class_weight=None, **kwargs):
        store_attr()
        # n_estimators identically configured MiniRocketClassifier members.
        estimators = [(f'est_{i}', MiniRocketClassifier(num_features=num_features, max_dilations_per_kernel=max_dilations_per_kernel,
                                                        random_state=random_state, alphas=alphas, normalize_features=normalize_features, memory=memory,
                                                        verbose=verbose, scoring=scoring, class_weight=class_weight, **kwargs))
                      for i in range(n_estimators)]
        super().__init__(estimators, voting='hard', weights=weights, n_jobs=n_jobs, verbose=verbose)

    def __repr__(self):
        return f'MiniRocketVotingClassifier(n_estimators={self.n_estimators}, \nsteps={self.estimators[0][1].steps})'

    def save(self, fname=None, path='./models'):
        """Pickle this ensemble to ``<path>/<fname>.pkl`` (directories
        are created as needed)."""
        fname = ifnone(fname, 'MiniRocketVotingClassifier')
        path = Path(path)
        filename = path/fname
        filename.parent.mkdir(parents=True, exist_ok=True)
        # Fix: use the computed `filename` instead of a hard-coded
        # placeholder path so fname/path are honored.
        with open(f'{filename}.pkl', 'wb') as output:
            pickle.dump(self, output, pickle.HIGHEST_PROTOCOL)
#export
def get_minirocket_preds(X, fname, path='./models', model=None):
if X.ndim == 1: X = X[np.newaxis][np.newaxis]
elif X.ndim == 2: X = X[np.newaxis]
if model is None:
model = load_minirocket(fname=fname, path=path)
return model.predict(X)
#export
class MiniRocketVotingRegressor(VotingRegressor):
    """Time series regression ensemble using MINIROCKET features, a linear regressor and a voting regressor"""
    def __init__(self, n_estimators=5, weights=None, n_jobs=-1, num_features=10_000, max_dilations_per_kernel=32, random_state=None,
                 alphas=np.logspace(-3, 3, 7), normalize_features=True, memory=None, verbose=False, scoring=None, **kwargs):
        store_attr()
        # n_estimators identically configured MiniRocketRegressor members.
        estimators = [(f'est_{i}', MiniRocketRegressor(num_features=num_features, max_dilations_per_kernel=max_dilations_per_kernel,
                                                       random_state=random_state, alphas=alphas, normalize_features=normalize_features, memory=memory,
                                                       verbose=verbose, scoring=scoring, **kwargs))
                      for i in range(n_estimators)]
        super().__init__(estimators, weights=weights, n_jobs=n_jobs, verbose=verbose)

    def __repr__(self):
        return f'MiniRocketVotingRegressor(n_estimators={self.n_estimators}, \nsteps={self.estimators[0][1].steps})'

    def save(self, fname=None, path='./models'):
        """Pickle this ensemble to ``<path>/<fname>.pkl`` (directories
        are created as needed)."""
        fname = ifnone(fname, 'MiniRocketVotingRegressor')
        path = Path(path)
        filename = path/fname
        filename.parent.mkdir(parents=True, exist_ok=True)
        # Fix: use the computed `filename` instead of a hard-coded
        # placeholder path so fname/path are honored.
        with open(f'{filename}.pkl', 'wb') as output:
            pickle.dump(self, output, pickle.HIGHEST_PROTOCOL)
# Univariate classification with sklearn-type API
dsid = 'OliveOil'
fname = 'MiniRocketClassifier'
X_train, y_train, X_test, y_test = get_UCR_data(dsid)
cls = MiniRocketClassifier()
cls.fit(X_train, y_train)
cls.save(fname)
pred = cls.score(X_test, y_test)
del cls
# Reload from disk and confirm the saved model reproduces the same score.
cls = load_minirocket(fname)
test_eq(cls.score(X_test, y_test), pred)
# Multivariate classification with sklearn-type API
dsid = 'NATOPS'
X_train, y_train, X_test, y_test = get_UCR_data(dsid)
cls = MiniRocketClassifier()
cls.fit(X_train, y_train)
cls.score(X_test, y_test)
# Multivariate classification with a 5-member voting ensemble
dsid = 'NATOPS'
X_train, y_train, X_test, y_test = get_UCR_data(dsid)
cls = MiniRocketVotingClassifier(5)
cls.fit(X_train, y_train)
cls.score(X_test, y_test)
# Univariate regression with sklearn-type API
from sklearn.metrics import mean_squared_error
dsid = 'Covid3Month'
fname = 'MiniRocketRegressor'
X_train, y_train, X_test, y_test = get_Monash_regression_data(dsid)
if X_train is not None:  # dataset download may fail; skip gracefully in that case
    rmse_scorer = make_scorer(mean_squared_error, greater_is_better=False)
    reg = MiniRocketRegressor(scoring=rmse_scorer)
    reg.fit(X_train, y_train)
    reg.save(fname)
    del reg
    # Reload and report test RMSE (squared=False -> root mean squared error)
    reg = load_minirocket(fname)
    y_pred = reg.predict(X_test)
    print(mean_squared_error(y_test, y_pred, squared=False))
# Multivariate regression with sklearn-type API
from sklearn.metrics import mean_squared_error
dsid = 'AppliancesEnergy'
X_train, y_train, X_test, y_test = get_Monash_regression_data(dsid)
if X_train is not None:
    rmse_scorer = make_scorer(mean_squared_error, greater_is_better=False)
    reg = MiniRocketRegressor(scoring=rmse_scorer)
    reg.fit(X_train, y_train)
    # NOTE(review): fname still holds 'MiniRocketRegressor' from the cell
    # above, so this overwrites the univariate model on disk — confirm intended.
    reg.save(fname)
    del reg
    reg = load_minirocket(fname)
    y_pred = reg.predict(X_test)
    print(mean_squared_error(y_test, y_pred, squared=False))
# Multivariate regression ensemble with sklearn-type API
if X_train is not None:
    reg = MiniRocketVotingRegressor(5, scoring=rmse_scorer)
    reg.fit(X_train, y_train)
    y_pred = reg.predict(X_test)
    print(mean_squared_error(y_test, y_pred, squared=False))
#hide
from tsai.imports import create_scripts
from tsai.export import get_nb_name
nb_name = get_nb_name()
create_scripts(nb_name);
```
| github_jupyter |
Conditional Generative Adversarial Network
----------------------------------------
A Generative Adversarial Network (GAN) is a type of generative model. It consists of two parts called the "generator" and the "discriminator". The generator takes random values as input and transforms them into an output that (hopefully) resembles the training data. The discriminator takes a set of samples as input and tries to distinguish the real training samples from the ones created by the generator. Both of them are trained together. The discriminator tries to get better and better at telling real from false data, while the generator tries to get better and better at fooling the discriminator.
A Conditional GAN (CGAN) allows additional inputs to the generator and discriminator that their output is conditioned on. For example, this might be a class label, and the GAN tries to learn how the data distribution varies between classes.
For this example, we will create a data distribution consisting of a set of ellipses in 2D, each with a random position, shape, and orientation. Each class corresponds to a different ellipse. Let's randomly generate the ellipses.
```
import deepchem as dc
import numpy as np
import tensorflow as tf

# Build a synthetic data distribution: n_classes ellipses in 2D, each with a
# random centre and a random linear transform (per-axis scaling + rotation).
n_classes = 4
class_centers = np.random.uniform(-4, 4, (n_classes, 2))
class_transforms = []
for i in range(n_classes):
    xscale = np.random.uniform(0.5, 2)
    yscale = np.random.uniform(0.5, 2)
    angle = np.random.uniform(0, np.pi)
    # 2x2 matrix combining the scaling with a rotation by `angle`
    m = [[xscale*np.cos(angle), -yscale*np.sin(angle)],
         [xscale*np.sin(angle), yscale*np.cos(angle)]]
    class_transforms.append(m)
class_transforms = np.array(class_transforms)
```
This function generates random data from the distribution. For each point it chooses a random class, then a random position in that class' ellipse.
```
def generate_data(n_points):
    """Draw *n_points* random samples from the mixture of ellipses.

    Returns ``(classes, points)`` where ``classes`` holds each point's
    integer class label and ``points`` is an (n_points, 2) float array.
    """
    # Same RNG call order as before: class labels, then radii, then angles.
    labels = np.random.randint(n_classes, size=n_points)
    radii = np.random.random(n_points)
    theta = 2 * np.pi * np.random.random(n_points)
    # Points on the unit disc (radius not sqrt-corrected, so samples cluster
    # toward the centre — matching the original behaviour).
    disc = np.stack([np.cos(theta), np.sin(theta)], axis=1) * radii[:, np.newaxis]
    # Apply each class' linear transform, then shift to its centre.
    pts = np.einsum('ijk,ik->ij', class_transforms[labels], disc)
    pts += class_centers[labels]
    return labels, pts
```
Let's plot a bunch of random points drawn from this distribution to see what it looks like. Points are colored based on their class label.
```
%matplotlib inline
import matplotlib.pyplot as plot
# Sample 1000 points and colour them by class to visualise the distribution.
classes, points = generate_data(1000)
plot.scatter(x=points[:,0], y=points[:,1], c=classes)
```
Now let's create the model for our CGAN.
```
import deepchem.models.tensorgraph.layers as layers
model = dc.models.TensorGraph(learning_rate=1e-4, use_queue=False)

# Inputs to the model
random_in = layers.Feature(shape=(None, 10))  # Random input to the generator
generator_classes = layers.Feature(shape=(None, n_classes))  # The classes of the generated samples
real_data_points = layers.Feature(shape=(None, 2))  # The training samples
real_data_classes = layers.Feature(shape=(None, n_classes))  # The classes of the training samples
is_real = layers.Weights(shape=(None, 1))  # Flags to distinguish real from generated samples

# The generator: random noise concatenated with the one-hot class label,
# through two hidden ReLU layers, producing a 2D point.
gen_in = layers.Concat([random_in, generator_classes])
gen_dense1 = layers.Dense(30, in_layers=gen_in, activation_fn=tf.nn.relu)
gen_dense2 = layers.Dense(30, in_layers=gen_dense1, activation_fn=tf.nn.relu)
generator_points = layers.Dense(2, in_layers=gen_dense2)
model.add_output(generator_points)

# The discriminator: generated and real samples are stacked along the batch
# axis (axis=0) together with their class labels; the sigmoid output is the
# probability that each sample is real.
all_points = layers.Concat([generator_points, real_data_points], axis=0)
all_classes = layers.Concat([generator_classes, real_data_classes], axis=0)
discrim_in = layers.Concat([all_points, all_classes])
discrim_dense1 = layers.Dense(30, in_layers=discrim_in, activation_fn=tf.nn.relu)
discrim_dense2 = layers.Dense(30, in_layers=discrim_dense1, activation_fn=tf.nn.relu)
discrim_prob = layers.Dense(1, in_layers=discrim_dense2, activation_fn=tf.sigmoid)
```
We'll use different loss functions for training the generator and discriminator. The discriminator outputs its predictions in the form of a probability that each sample is a real sample (that is, that it came from the training set rather than the generator). Its loss consists of two terms. The first term tries to maximize the output probability for real data, and the second term tries to minimize the output probability for generated samples. The loss function for the generator is just a single term: it tries to maximize the discriminator's output probability for generated samples.
For each one, we create a "submodel" specifying a set of layers that will be optimized based on a loss function.
```
# Discriminator loss: maximise log-probability on real samples and
# log(1 - p) on generated ones; the 1e-10 guards against log(0).
discrim_real_data_loss = -layers.Log(discrim_prob+1e-10) * is_real
discrim_gen_data_loss = -layers.Log(1-discrim_prob+1e-10) * (1-is_real)
discrim_loss = layers.ReduceMean(discrim_real_data_loss + discrim_gen_data_loss)
discrim_submodel = model.create_submodel(layers=[discrim_dense1, discrim_dense2, discrim_prob], loss=discrim_loss)

# Generator loss: maximise the discriminator's output on generated samples.
# Only the generator's layers are optimised by this submodel.
gen_loss = -layers.ReduceMean(layers.Log(discrim_prob+1e-10) * (1-is_real))
gen_submodel = model.create_submodel(layers=[gen_dense1, gen_dense2, generator_points], loss=gen_loss)
```
Now to fit the model. Here are some important points to notice about the code.
- We use `fit_generator()` to train only a single batch at a time, and we alternate between the discriminator and the generator. That way, both parts of the model improve together.
- We only train the generator half as often as the discriminator. On this particular model, that gives much better results. You will often need to adjust `(# of discriminator steps)/(# of generator steps)` to get good results on a given problem.
- We disable checkpointing by specifying `checkpoint_interval=0`. Since each call to `fit_generator()` includes only a single batch, it would otherwise save a checkpoint to disk after every batch, which would be very slow. If this were a real project and not just an example, we would want to occasionally call `model.save_checkpoint()` to write checkpoints at a reasonable interval.
```
batch_size = model.batch_size
discrim_error = []
gen_error = []
for step in range(20000):
    classes, points = generate_data(batch_size)
    class_flags = dc.metrics.to_one_hot(classes, n_classes)
    # The first batch_size rows are generated samples (is_real=0); the next
    # batch_size rows are real training samples (is_real=1).
    feed_dict={random_in: np.random.random((batch_size, 10)),
               generator_classes: class_flags,
               real_data_points: points,
               real_data_classes: class_flags,
               is_real: np.concatenate([np.zeros((batch_size,1)), np.ones((batch_size,1))])}
    # Train the discriminator on every step.
    discrim_error.append(model.fit_generator([feed_dict],
                                             submodel=discrim_submodel,
                                             checkpoint_interval=0))
    # Train the generator only every other step (half as often).
    if step%2 == 0:
        gen_error.append(model.fit_generator([feed_dict],
                                             submodel=gen_submodel,
                                             checkpoint_interval=0))
    # Report mean losses every 1000 steps, then reset the accumulators.
    if step%1000 == 999:
        print(step, np.mean(discrim_error), np.mean(gen_error))
        discrim_error = []
        gen_error = []
```
Have the trained model generate some data, and see how well it matches the training distribution we plotted before.
```
# Sample class labels, then let the trained generator produce points for
# them so we can compare visually against the true distribution.
classes, points = generate_data(1000)
feed_dict = {random_in: np.random.random((1000, 10)),
             generator_classes: dc.metrics.to_one_hot(classes, n_classes)}
gen_points = model.predict_on_generator([feed_dict])
plot.scatter(x=gen_points[:,0], y=gen_points[:,1], c=classes)
```
| github_jupyter |
# GATE Worker
The GATE Worker is a module that allows you to run anything in a Java GATE process from Python and to interchange documents between Python and Java.
One possible use of this is to run an existing GATE pipeline on a Python GateNLP document.
This is done by the python module communicating with a Java process over a socket connection.
Java calls on the Python side are sent over to Java, executed, and the result is sent back to Python.
For this to work, GATE and Java have to be installed on the machine that runs the GATE Worker.
The easiest way to run this is by first manually starting the GATE Worker in the Java GATE GUI and then
connecting to it from the Python side.
## Manually starting the GATE Worker from GATE
1. Start GATE
2. Load the Python plugin using the CREOLE Plugin Manager
3. Create a new Language Resource (NOTE: not a Processing Resource!): "PythonWorkerLr"
When creating the PythonWorkerLr, the following initialization parameters can be specified:
* `authToken`: this is used to prevent other processes from connecting to the worker. You can either specify
some string here or with `useAuthToken` set to `true` let GATE choose a random one and display it in the
message pane after the resource has been created.
* for testing this, enter "verysecretauthtoken"
* `host`: The host name or address to bind to. The default 127.0.0.1 makes the worker only visible on the same
machine. In order to make it visible on other machines, use the host name or IP address on the network
or use 0.0.0.0
* for testing, keep the default of 127.0.0.1
* `logActions`: if this is set to true, the actions requested by the Python process are logged to the message pane.
* for testing, change to "true"
* `port`: the port number to use. Each worker requires their own port number so if more than one worker is running
on a machine, they need to use different, unused port numbers.
* for testing, keep the default
* `useAuthToken`: if this is set to false, no auth token is generated and used, and the connection can be
established by any process connecting to that port number.
* for testing, keep the default
A GATE Worker started via the PythonWorkerLr keeps running until the resource is deleted or GATE is ended.
## Using the GATE Worker from Python
Once the PythonWorkerLr resource has been created it is ready to get used by a Python program:
```
from gatenlp.gateworker import GateWorker
```
To connect to an already running worker process, the parameter `start=False` must be specified.
In addition the auth token must be provided and the port and host, if they differ from the default.
```
# Connect to the already-running worker (start=False); host/port use defaults.
gs = GateWorker(start=False, auth_token="verysecretauthtoken")
```
The gate worker instance can now be used to run arbitrary Java methods on the Java side.
The gate worker instance provides a number of useful methods directly (see [PythonDoc for gateworker](https://gatenlp.github.io/python-gatenlp/pythondoc/gatenlp/gateworker.html) )
* `gs.load_gdoc(filepath, mimetype=None`: load a GATE document on the Java side and return it to Python
* `gs.save_gdoc(gatedocument, filepath, mimetype=None)`: save a GATE document on the Java side
* `gs.gdoc2pdoc(gatedocument)`: convert the Java GATE document as a Python GateNLP document and return it
* `gs.pdoc2gdoc(doc)`: convert the Python GateNLP document to a Java GATE document and return it
* `gs.del_resource(gatedocument)`: remove a Java GATE document on the Java side (this is necessary to release memory)
This can also be used to remove other kinds of GATE resources like ProcessingResource, Corpus, LanguageResource
etc.
* `gs.load_pdoc(filepath, mimetype=None)`: load a document on the Java side using the file format specified via the mime type and return it as a Python GateNLP document
* `gs.log_actions(trueorfalse)`: switch logging of actions on the worker side off/on
In addition, there is a larger number of utility methods which are available through `gs.worker` (see
[PythonWorker Source code](https://github.com/GateNLP/gateplugin-Python/blob/master/src/main/java/gate/plugin/python/PythonWorker.java), here are a few examples:
* `loadMavenPlugin(group, artifact, version)`: make the plugin identified by the given Maven coordinates available
* `loadPipelineFromFile(filepath)`: load the pipeline/controller from the given file path and return it
* `loadDocumentFromFile(filepath)`: load a GATE document from the file and return it
* `loadDocumentFromFile(filepath, mimetype)`: load a GATE document from the file using the format corresponding to the given mime type and return it
* `saveDocumentToFile(gatedocument, filepath, mimetype)`: save the document to the file, using the format corresponding to the mime type
* `createDocument(content)`: create a new document from the given String content and return it
* `run4Document(pipeline, document)`: run the given pipeline on the given document
```
# Create a new Java document from a string.
# You should see how the document gets created in the GATE GUI.
gdoc1 = gs.worker.createDocument("This is a 💩 document. It mentions Barack Obama and George Bush and New York.")
gdoc1
# You can call the Java API methods for the document directly from Python.
print(gdoc1.getName())
print(gdoc1.getFeatures())
# So far the document only "lives" in the Java process. In order to copy it
# to Python, it has to be converted to a Python GateNLP document:
pdoc1 = gs.gdoc2pdoc(gdoc1)
pdoc1.text
# Let's load ANNIE on the Java side and run it on that document.
# First load the ANNIE plugin (Maven coordinates: group, artifact, version):
gs.worker.loadMavenPlugin("uk.ac.gate.plugins", "annie", "8.6")
# Now load the prepared ANNIE pipeline shipped inside the plugin:
pipeline = gs.worker.loadPipelineFromPlugin("uk.ac.gate.plugins","annie", "/resources/ANNIE_with_defaults.gapp")
pipeline.getName()
# Run the pipeline on the document (annotations are added on the Java side),
# then convert it to a GateNLP Python document and display it.
gs.worker.run4Document(pipeline, gdoc1)
pdoc1 = gs.gdoc2pdoc(gdoc1)
pdoc1
```
## Manually starting the GATE Worker from Python
After installation of Python `gatenlp`, the command `gatenlp-gate-worker` is available.
You can run `gatenlp-gate-worker --help` to get help information:
```
usage: gatenlp-gate-worker [-h] [--port PORT] [--host HOST] [--auth AUTH]
[--noauth] [--gatehome GATEHOME]
[--platform PLATFORM] [--log_actions] [--keep]
Start Java GATE Worker
optional arguments:
-h, --help show this help message and exit
--port PORT Port (25333)
--host HOST Host to bind to (127.0.0.1)
--auth AUTH Auth token to use (generate random)
--noauth Do not use auth token
--gatehome GATEHOME Location of GATE (environment variable GATE_HOME)
--platform PLATFORM OS/Platform: windows or linux (autodetect)
--log_actions If worker actions should be logged
--keep Prevent shutting down the worker
```
For example to start a gate worker as with the PythonWorkerLr above, but this time re-using the exact same
auth token and switching on logging of the actions:
```
gatenlp-gate-worker --auth 841e634a-d1f0-4768-b763-a7738ddee003 --log_actions
```
Again the Python program can connect to the server as before:
```
# Connect using the same auth token that was passed to gatenlp-gate-worker.
gs = GateWorker(start=False, auth_token="841e634a-d1f0-4768-b763-a7738ddee003")
gs
```
The GATE worker started that way keeps running until it is interrupted from the keyboard using "Ctrl-C" or
until the GATE worker sends the "close" request:
```
# Send the "close" request, shutting down the worker on the Java side.
gs.close()
```
## Automatically starting the GATE Worker from Python
When using the GateWorker class from Python, it is possible to just start the worker process automatically in the background by setting the parameter `start` to `True`:
```
# start=True launches the Java GATE worker process in the background.
gs = GateWorker(start=True, auth_token="my-super-secret-auth-token")
gdoc1 = gs.worker.createDocument("This is a 💩 document. It mentions Barack Obama and George Bush and New York.")
gdoc1
# When done, the gate worker should get closed:
gs.close()
```
## A better way to close the GATE Worker
```
# Using the GateWorker as a context manager closes it automatically when
# the with-block exits (even on error):
with GateWorker(start=True) as gw:
    print(gw.gate_version)
```
## Using the GateWorkerAnnotator
The GateWorkerAnnotator is an annotator that simplifies the common task of letting a GATE Java annotation pipeline annotate a bunch of Python gatenlp documents. It can be used like other annotators (see [Processing](https://gatenlp.github.io/python-gatenlp/processing))
To run the GateWorkerAnnotator, Java must be installed and the `java` command must be on the path. Currently only Java version 8 has been tested.
A simple way to install Java on Linux and choose from various Java versions is [SDKMan](https://sdkman.io/)
Also, the GATE_HOME environment variable must be set, or the path to an
installed Java GATE must get passed on using the `gatehome` parameter.
An installed Java GATE can be one of:
* a GATE release downloaded from https://github.com/GateNLP/gate-core/releases/ and installed
* the GATE release will get installed into some directory
* the `GATE_HOME` environment variable or the `gatehome` parameter should point to that directory
* the [gate-core](https://github.com/GateNLP/gate-core) repository checked out locally and installed using Maven (`mvn install`)
* the `GATE_HOME` environment variable or the `gatehome` parameter should point to the `distro` subdirectory of that repository directory
```
from gatenlp import Document

# Create a small corpus of documents to process.
texts = [
    "A very simple document.",
    "Another document, this one mentions New York and Washington. It also mentions the person Barack Obama.",
    "One more document for this little test."
]
corpus = [Document(t) for t in texts]

from gatenlp.gateworker import GateWorkerAnnotator
from gatenlp.processing.executor import SerialCorpusExecutor

# Use the path of your own GATE pipeline instead of annie.xgapp.
# To create the GateWorkerAnnotator a GateWorker must first be created.
# To run the pipeline on a corpus, first initialize the pipeline using start(),
# then annotate all documents, then finish the pipeline using finish().
# At this point the same annotator can be used in the same way again on another corpus.
# If the GateWorkerAnnotator is not used any more, use close() to stop the
# GateWorker (it is also stopped automatically when the Python process ends).
# If an executor is used, only the final close() is necessary, as the executor
# takes care of everything else.
with GateWorker() as gw:
    pipeline = GateWorkerAnnotator("annie.xgapp", gw)
    executor = SerialCorpusExecutor(pipeline, corpus=corpus)
    executor()

# Show the second document (index 1) with its new annotations.
corpus[1]
```
| github_jupyter |
```
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
# File name: test.py
# First Edit: 2020-02-13
# Last Change: 13-Feb-2020.
"""
adb kill-server
adb start-server
adb device -l
adb shell dumpsys display
"""
import io
import os
import subprocess
import cv2
import numpy as np
import pytesseract
import sys
from adbutils import adb
from matplotlib import pyplot as plt
from PIL import Image
from ppadb.client import Client as AdbClient
from scipy import misc
from IPython.display import display
from ipywidgets import interact, IntRangeSlider
import datetime
from time import time, sleep
# Connect to the local ADB server (default port 5037) and select the target
# device by its serial number.
# Bug fix: the client was constructed twice in a row; one instance suffices.
client = AdbClient(host="127.0.0.1", port=5037)
device = client.device("8AFY0K7DR")
def get_screenshot(file_name="test.png"):
    """Capture the device screen via adb and save it to *file_name*.

    Returns the path written, so the call can be chained (e.g. into imread).
    """
    raw_png = device.screencap()
    with open(file_name, "wb") as out:
        out.write(raw_png)
    return file_name
def show_screenshot():
    # Grab a screenshot into a temporary file, display it inline, then clean up.
    display(Image.open(get_screenshot("tmp")))
    os.remove("tmp")
def data2array(data):
    """Return *data* as a NumPy ndarray (no copy when it already is one)."""
    return np.asarray(data)
def read_png(name):
    # Load an image file via matplotlib and return it as a NumPy array
    # (imread yields floats in [0, 1] for PNG files).
    return data2array(plt.imread(name))
# Reference screenshots used for template matching in the main loop below.
# start.png is cropped to rows 200+ — presumably to skip a status-bar region
# that varies between captures; TODO confirm.
map_array = read_png('map.png')
start_array = read_png('start.png')[200:, :, :]
plt.imshow(start_array)
plt.imshow(map_array)
d = adb.device()
START_TIME = time()
# Automation loop: repeatedly screenshot the device, compare against the
# reference images, and tap the matching UI element via adb.
for i in range(1000):
    print(i)
    loop_time = time()
    flag = 1
    while True:
        screen = read_png( get_screenshot(file_name="now.png"))
        os.remove("./now.png")
        if time() - loop_time > 300:
            # Safety valve: abort the whole script if one iteration
            # takes more than 5 minutes.
            print(str(time() - loop_time) + " has passed")
            sys.exit()
        elif abs(start_array - screen[200:, :, :]).mean() < 0.01:
            # Screen matches the "start" template: tap through the start
            # flow, wait out the (~155 s) run, then return and break out
            # to begin the next iteration.
            d.click(2555,1333)
            flag += 1
            print("start_find")
            print(time() - loop_time)
            print('='*20)
            sleep(1)
            d.click(2430,1000)
            print('sleeping')
            sleep(155)
            print("sleep finished")
            print(time() - loop_time)
            d.click(1500,200)
            sleep(3)
            break
        elif flag == 1 and abs((map_array - screen)).mean() < 0.01:
            # Screen matches the "map" template; only handled before the
            # start screen has been seen (hence flag == 1).
            #d.click(1920,746)
            d.click(2100,560)
            flag += 1
            print("map_find")
            print(time() - loop_time)
            print('='*20)
            print()
        else:
            # No template matched: tap a neutral spot and retry shortly.
            d.click(1500,200)
            sleep(4)
            print("pass")
# Return to the home screen once all iterations are done.
d.keyevent("HOME")
def makeRangeSlider(max_n):
    # Build a range slider in pixel units (rather than percentages) so the
    # crop can be adjusted down to single-pixel precision.
    return IntRangeSlider(
        value=[0., max_n],
        min=0., max= max_n-1, step=1,
        description='xlim:',
        readout_format='.1d',
    )
def cropImage(heightRange, widthRange):
    # Show the selected crop of the captured screenshot at a large figure size.
    # heightRange/widthRange are (lo, hi) pixel tuples from the range sliders.
    plt.figure(figsize=(20, 20))
    plt.imshow(capture_array[heightRange[0]:heightRange[1], widthRange[0]:widthRange[1]])
# NOTE(review): `screen_capture` is not defined in this cell — presumably a
# PIL image captured earlier in the session; confirm before running standalone.
capture_array = np.asarray(screen_capture)
#capture_array = np.array(test_image.getdata())
height, width = capture_array.shape[0], capture_array.shape[1]
# Interactively crop the screenshot with two pixel-range sliders.
interact(cropImage, heightRange = makeRangeSlider(height), widthRange = makeRangeSlider(width))
"""
d.swipe(10, 10, 200, 2000, 0.5)
# swipe from(10, 10) to(200, 200) 500ms
d.send_keys("hello world$%^&*") #
d.click(100, 100)
# swipe from(10, 10) to(200, 200) 500ms
d.swipe(10, 10, 200, 200, 0.5)
d.list_packages()
# example output: ["com.example.hello"]
d.window_size()
# example output: (1080, 1920)
d.rotation()
# example output: 1
# other possible valus: 0, 1, 2, 3
d.package_info("com.github.uiautomator")
# example output: {"version_name": "1.1.7", "version_code": "1007"}
d.keyevent("HOME")
d.send_keys("hello world$%^&*") # simulate: adb shell input text "hello%sworld\%\^\&\*"
d.open_browser("https://www.baidu.com")
"""
```
| github_jupyter |
[Table of Contents](http://nbviewer.ipython.org/github/rlabbe/Kalman-and-Bayesian-Filters-in-Python/blob/master/table_of_contents.ipynb)
# Smoothing
```
#format the book
%matplotlib inline
from __future__ import division, print_function
from book_format import load_style
# Apply the book's notebook CSS styling.
load_style()
```
## Introduction
The performance of the Kalman filter is not optimal when you consider future data. For example, suppose we are tracking an aircraft, and the latest measurement deviates far from the current track, like so (I'll only consider 1 dimension for simplicity):
```
import matplotlib.pyplot as plt

# A near-constant track around 10 followed by one large outlier (11.4).
data = [10.1, 10.2, 9.8, 10.1, 10.2, 10.3,
        10.1, 9.9, 10.2, 10.0, 9.9, 11.4]
plt.plot(data)
plt.xlabel('time')
plt.ylabel('position');
```
After a period of near steady state, we have a very large change. Assume the change is past the limit of the aircraft's flight envelope. Nonetheless the Kalman filter incorporates that new measurement into the filter based on the current Kalman gain. It cannot reject the noise because the measurement could reflect the initiation of a turn. Granted it is unlikely that we are turning so abruptly, but it is impossible to say whether
* The aircraft started a turn awhile ago, but the previous measurements were noisy and didn't show the change.
* The aircraft is turning, and this measurement is very noisy
* The measurement is very noisy and the aircraft has not turned
* The aircraft is turning in the opposite direction, and the measurement is extremely noisy
Now, suppose the following measurements are:
11.3 12.1 13.3 13.9 14.5 15.2
```
# Subsequent measurements keep climbing: the aircraft really did turn.
data2 = [11.3, 12.1, 13.3, 13.9, 14.5, 15.2]
plt.plot(data + data2);
```
Given these future measurements we can infer that yes, the aircraft initiated a turn.
On the other hand, suppose these are the following measurements.
```
# Subsequent measurements return to ~10: the outlier was just noise.
data3 = [9.8, 10.2, 9.9, 10.1, 10.0, 10.3, 9.9, 10.1]
plt.plot(data + data3);
```
In this case we are led to conclude that the aircraft did not turn and that the outlying measurement was merely very noisy.
## An Overview of How Smoothers Work
The Kalman filter is a *recursive* filter with the Markov property - it's estimate at step `k` is based only on the estimate from step `k-1` and the measurement at step `k`. But this means that the estimate from step `k-1` is based on step `k-2`, and so on back to the first epoch. Hence, the estimate at step `k` depends on all of the previous measurements, though to varying degrees. `k-1` has the most influence, `k-2` has the next most, and so on.
Smoothing filters incorporate future measurements into the estimate for step `k`. The measurement from `k+1` will have the most effect, `k+2` will have less effect, `k+3` less yet, and so on.
This topic is called *smoothing*, but I think that is a misleading name. I could smooth the data above by passing it through a low pass filter. The result would be smooth, but not necessarily accurate because a low pass filter will remove real variations just as much as it removes noise. In contrast, Kalman smoothers are *optimal* - they incorporate all available information to make the best estimate that is mathematically achievable.
## Types of Smoothers
There are three classes of Kalman smoothers that produce better tracking in these situations.
* Fixed-Interval Smoothing
This is a batch processing based filter. This filter waits for all of the data to be collected before making any estimates. For example, you may be a scientist collecting data for an experiment, and don't need to know the result until the experiment is complete. A fixed-interval smoother will collect all the data, then estimate the state at each measurement using all available previous and future measurements. If it is possible for you to run your Kalman filter in batch mode it is always recommended to use one of these filters, as it will provide much better results than the recursive forms of the filter from the previous chapters.
* Fixed-Lag Smoothing
Fixed-lag smoothers introduce latency into the output. Suppose we choose a lag of 4 steps. The filter will ingest the first 3 measurements but not output a filtered result. Then, when the 4th measurement comes in the filter will produce the output for measurement 1, taking measurements 1 through 4 into account. When the 5th measurement comes in, the filter will produce the result for measurement 2, taking measurements 2 through 5 into account. This is useful when you need recent data but can afford a bit of lag. For example, perhaps you are using machine vision to monitor a manufacturing process. If you can afford a few seconds delay in the estimate a fixed-lag smoother will allow you to produce very accurate and smooth results.
* Fixed-Point Smoothing
A fixed-point filter operates as a normal Kalman filter, but also produces an estimate for the state at some fixed time $j$. Before the time $k$ reaches $j$ the filter operates as a normal filter. Once $k>j$ the filter estimates $x_k$ and then also updates its estimate for $x_j$ using all of the measurements between $j\dots k$. This can be useful to estimate initial parameters for a system, or for producing the best estimate for an event that happened at a specific time. For example, you may have a robot that took a photograph at time $j$. You can use a fixed-point smoother to get the best possible pose information for the camera at time $j$ as the robot continues moving.
## Choice of Filters
The choice of these filters depends on your needs and how much memory and processing time you can spare. Fixed-point smoothing requires storage of all measurements, and is very costly to compute because the output is for every time step is recomputed for every measurement. On the other hand, the filter does produce a decent output for the current measurement, so this filter can be used for real time applications.
Fixed-lag smoothing only requires you to store a window of data, and processing requirements are modest because only that window is processed for each new measurement. The drawback is that the filter's output always lags the input, and the smoothing is not as pronounced as is possible with fixed-interval smoothing.
Fixed-interval smoothing produces the most smoothed output at the cost of having to be batch processed. Most algorithms use some sort of forwards/backwards algorithm that is only twice as slow as a recursive Kalman filter.
## Fixed-Interval Smoothing
There are many fixed-lag smoothers available in the literature. I have chosen to implement the smoother invented by Rauch, Tung, and Striebel because of its ease of implementation and efficiency of computation. It is also the smoother I have seen used most often in real applications. This smoother is commonly known as an RTS smoother.
Derivation of the RTS smoother runs to several pages of densely packed math. I'm not going to inflict it on you. Instead I will briefly present the algorithm, equations, and then move directly to implementation and demonstration of the smoother.
The RTS smoother works by first running the Kalman filter in a batch mode, computing the filter output for each step. Given the filter output for each measurement along with the covariance matrix corresponding to each output the RTS runs over the data backwards, incorporating its knowledge of the future into the past measurements. When it reaches the first measurement it is done, and the filtered output incorporates all of the information in a maximally optimal form.
The equations for the RTS smoother are very straightforward and easy to implement. This derivation is for the linear Kalman filter. Similar derivations exist for the EKF and UKF. These steps are performed on the output of the batch processing, going backwards from the most recent in time back to the first estimate. Each iteration incorporates the knowledge of the future into the state estimate. Since the state estimate already incorporates all of the past measurements the result will be that each estimate will contain knowledge of all measurements in the past and future. Here is it very important to distinguish between past, present, and future so I have used subscripts to denote whether the data is from the future or not.
Predict Step
$$\begin{aligned}
\mathbf{P} &= \mathbf{FP}_k\mathbf{F}^\mathsf{T} + \mathbf{Q }
\end{aligned}$$
Update Step
$$\begin{aligned}
\mathbf{K}_k &= \mathbf{P}_k\mathbf{F}^\mathsf{T}\mathbf{P}^{-1} \\
\mathbf{x}_k &= \mathbf{x}_k + \mathbf{K}_k(\mathbf{x}_{k+1} - \mathbf{Fx}_k) \\
\mathbf{P}_k &= \mathbf{P}_k + \mathbf{K}_k(\mathbf{P}_{k+1} - \mathbf{P})\mathbf{K}_k^\mathsf{T}
\end{aligned}$$
As always, the hardest part of the implementation is correctly accounting for the subscripts. A basic implementation without comments or error checking would be:
```python
def rts_smoother(Xs, Ps, F, Q):
    """Rauch-Tung-Striebel smoother over batch Kalman-filter output.

    Runs backwards over the filtered estimates, folding knowledge of the
    future into each past estimate.

    Parameters
    ----------
    Xs : ndarray, shape (n, dim_x, 1)
        State estimates from the Kalman filter, one per time step.
    Ps : ndarray, shape (n, dim_x, dim_x)
        State covariances from the Kalman filter.
    F : ndarray, shape (dim_x, dim_x)
        State transition matrix.
    Q : ndarray, shape (dim_x, dim_x)
        Process noise covariance.

    Returns
    -------
    (x, P, K)
        Smoothed states, smoothed covariances, and smoother gains.
    """
    n, dim_x, _ = Xs.shape

    # smoother gain, one per step; K[n-1] stays zero because the last
    # estimate has no future information to incorporate
    K = np.zeros((n, dim_x, dim_x))
    x, P = Xs.copy(), Ps.copy()

    # iterate backwards from the next-to-last estimate to the first;
    # the original used bare zeros/dot/inv, undefined under `import numpy as np`
    for k in range(n - 2, -1, -1):
        P_pred = np.dot(F, P[k]).dot(F.T) + Q                 # predicted covariance
        K[k] = np.dot(P[k], F.T).dot(np.linalg.inv(P_pred))   # smoother gain
        x[k] += np.dot(K[k], x[k + 1] - np.dot(F, x[k]))
        P[k] += np.dot(K[k], P[k + 1] - P_pred).dot(K[k].T)
    return (x, P, K)
```
This implementation mirrors the implementation provided in FilterPy. It assumes that the Kalman filter is being run externally in batch mode, and the results of the state and covariances are passed in via the `Xs` and `Ps` variables.
Here is an example.
```
import numpy as np
from numpy import random
from numpy.random import randn
import matplotlib.pyplot as plt
from filterpy.kalman import KalmanFilter
import code.book_plots as bp
def plot_rts(noise, Q=0.001, show_velocity=False):
    """Filter a noisy constant-velocity track, RTS-smooth it, and plot both.

    Parameters
    ----------
    noise : float
        Measurement noise scale, used both to generate the synthetic
        measurements and as the filter's R.
    Q : float
        Process noise used by the filter.
    show_velocity : bool
        If True, plot the (hidden) velocity estimates instead of position.
    """
    random.seed(123)

    fk = KalmanFilter(dim_x=2, dim_z=1)

    fk.x = np.array([0., 1.])      # state (x and dx)
    fk.F = np.array([[1., 1.],
                     [0., 1.]])    # state transition matrix
    fk.H = np.array([[1., 0.]])    # Measurement function
    fk.P = 10.                     # covariance matrix
    fk.R = noise                   # state uncertainty
    fk.Q = Q                       # process uncertainty

    # create noisy data: true track is x(t) = t
    zs = np.asarray([t + randn()*noise for t in range(40)])

    # filter data with Kalman filter, then run smoother on it
    mu, cov, _, _ = fk.batch_filter(zs)
    M, P, C = fk.rts_smoother(mu, cov)

    # plot data
    if show_velocity:
        index = 1
        print('gu')  # NOTE(review): leftover debug output -- confirm and remove
    else:
        index = 0
    if not show_velocity:
        bp.plot_measurements(zs, lw=1)
    plt.plot(M[:, index], c='b', label='RTS')
    plt.plot(mu[:, index], c='g', ls='--', label='KF output')
    if not show_velocity:
        N = len(zs)
        plt.plot([0, N], [0, N], 'k', lw=2, label='track')
    plt.legend(loc=4)
    plt.show()

plot_rts(7.)
```
I've injected a lot of noise into the signal to allow you to visually distinguish the RTS output from the ideal output. In the graph above we can see that the Kalman filter, drawn as the green dotted line, is reasonably smooth compared to the input, but it still wanders from the ideal line when several measurements in a row are biased towards one side of the line. In contrast, the RTS output is both extremely smooth and very close to the ideal output.
With a perhaps more reasonable amount of noise we can see that the RTS output nearly lies on the ideal output. The Kalman filter output, while much better, still varies by a far greater amount.
```
plot_rts(noise=1.)
```
However, we must understand that this smoothing is predicated on the system model. We have told the filter that what we are tracking follows a constant velocity model with very low process error. When the filter *looks ahead* it sees that the future behavior closely matches a constant velocity so it is able to reject most of the noise in the signal. Suppose instead our system has a lot of process noise. For example, if we are tracking a light aircraft in gusty winds its velocity will change often, and the filter will be less able to distinguish between noise and erratic movement due to the wind. We can see this in the next graph.
```
plot_rts(noise=7., Q=.1)
```
This underscores the fact that these filters are not *smoothing* the data in colloquial sense of the term. The filter is making an optimal estimate based on previous measurements, future measurements, and what you tell it about the behavior of the system and the noise in the system and measurements.
Let's wrap this up by looking at the velocity estimates of Kalman filter vs the RTS smoother.
```
plot_rts(7.,show_velocity=True)
```
The improvement in the velocity, which is a hidden variable, is even more dramatic.
## Fixed-Lag Smoothing
The RTS smoother presented above should always be your choice of algorithm if you can run in batch mode because it incorporates all available data into each estimate. Not all problems allow you to do that, but you may still be interested in receiving smoothed values for previous estimates. The number line below illustrates this concept.
```
from book_format import figsize
from code.smoothing_internal import *
# Draw the book's fixed-lag number-line illustration (display-only helper).
with figsize(y=2):
show_fixed_lag_numberline()
```
At step $k$ we can estimate $x_k$ using the normal Kalman filter equations. However, we can make a better estimate for $x_{k-1}$ by using the measurement received for $x_k$. Likewise, we can make a better estimate for $x_{k-2}$ by using the measurements received for $x_{k-1}$ and $x_{k}$. We can extend this computation back for an arbitrary $N$ steps.
Derivation for this math is beyond the scope of this book; Dan Simon's *Optimal State Estimation* [2] has a very good exposition if you are interested. The essence of the idea is that instead of having a state vector $\mathbf{x}$ we make an augmented state containing
$$\mathbf{x} = \begin{bmatrix}\mathbf{x}_k \\ \mathbf{x}_{k-1} \\ \vdots\\ \mathbf{x}_{k-N+1}\end{bmatrix}$$
This yields a very large covariance matrix that contains the covariance between states at different steps. FilterPy's class `FixedLagSmoother` takes care of all of this computation for you, including creation of the augmented matrices. All you need to do is compose it as if you are using the `KalmanFilter` class and then call `smooth()`, which implements the predict and update steps of the algorithm.
Each call of `smooth` computes the estimate for the current measurement, but it also goes back and adjusts the previous `N-1` points as well. The smoothed values are contained in the list `FixedLagSmoother.xSmooth`. If you use `FixedLagSmoother.x` you will get the most recent estimate, but it is not smoothed and is no different from a standard Kalman filter output.
```
from filterpy.kalman import FixedLagSmoother, KalmanFilter
import numpy.random as random
# Compare a fixed-lag smoother (N=8 future measurements) against a plain
# Kalman filter with an identical model on the same noisy ramp data.
fls = FixedLagSmoother(dim_x=2, dim_z=1, N=8)

fls.x = np.array([0., .5])
fls.F = np.array([[1., 1.],
                  [0., 1.]])
fls.H = np.array([[1., 0.]])
fls.P *= 200
fls.R *= 5.
fls.Q *= 0.001

# reference Kalman filter with the same model
kf = KalmanFilter(dim_x=2, dim_z=1)
kf.x = np.array([0., .5])
kf.F = np.array([[1., 1.],
                 [0., 1.]])
kf.H = np.array([[1., 0.]])
kf.P *= 200
kf.R *= 5.
kf.Q *= 0.001

# noisy measurements of a ramp with slope 0.5
# (the unused `N = 4  # size of lag` line was removed: the smoother's lag is
# the N=8 passed to FixedLagSmoother above)
nom = np.array([t/2. for t in range(0, 40)])
zs = np.array([t + random.randn()*5.1 for t in nom])

for z in zs:
    fls.smooth(z)

kf_x, _, _, _ = kf.batch_filter(zs)

x_smooth = np.array(fls.xSmooth)[:, 0]

# absolute residuals against the noise-free ramp
fls_res = abs(x_smooth - nom)
kf_res = abs(kf_x[:, 0] - nom)

plt.plot(zs,'o', alpha=0.5, marker='o', label='zs')
plt.plot(x_smooth, label='FLS')
plt.plot(kf_x[:, 0], label='KF', ls='--')
plt.legend(loc=4)

# these are mean absolute residuals, not standard deviations; the original
# labels incorrectly said 'standard deviation'
print('mean residual fixed-lag:', np.mean(fls_res))
print('mean residual kalman:', np.mean(kf_res))
```
Here I have set `N=8` which means that we will incorporate 8 future measurements into our estimates. This provides us with a very smooth estimate once the filter converges, at the cost of roughly 8x the amount of computation of the standard Kalman filter. Feel free to experiment with larger and smaller values of `N`. I chose 8 somewhat at random, not due to any theoretical concerns.
## References
[1] H. Rauch, F. Tung, and C. Striebel. "Maximum likelihood estimates of linear dynamic systems," *AIAA Journal*, **3**(8), pp. 1445-1450 (August 1965).
[2] Dan Simon. "Optimal State Estimation," John Wiley & Sons, 2006.
http://arc.aiaa.org/doi/abs/10.2514/3.3166
| github_jupyter |
```
import os
import numpy as np
import matplotlib.pyplot as plt
plt.rcParams['mathtext.fontset'] = 'stix'
```
# Calculate $\kappa$ sampled from the first training
In the first training, we let 200 independent LSTMs predict 200 trajectories of $200\,ns$ each. Since we are using LSTM as a generative model, we can also train just one LSTM and use it to generate 200 predictions, starting from either the same initial condition or different initial conditions.
Data location: `./Output/`
```
output_dir='./Output'

def _kappa_from_prediction(prediction):
    """Return kappa = N0/N1 for one predicted trajectory.

    N0 counts samples with value <= 15 (state 0); N1 counts samples with
    value >= 16 (state 1).  Values strictly between 15 and 16 fall in
    neither count -- assumes predictions are (near-)integer state
    indices; TODO confirm.  Raises ZeroDivisionError if N1 == 0.
    """
    N0 = len(np.where(prediction <= 15)[0])
    N1 = len(np.where(prediction >= 16)[0])
    return N0 / N1

# kappa for each of the 200 independently predicted trajectories
kappa_list = []
for i in range(200):
    pred_dir = os.path.join(output_dir, '{}/prediction.npy'.format(i))
    prediction = np.load(pred_dir)
    kappa_list.append(_kappa_from_prediction(prediction))
kappa_arr = np.array(kappa_list)
Plot distribution of $\kappa$
```
# Histogram the 200 kappa estimates and plot the counts on a log scale.
counts, edges = np.histogram(kappa_arr, bins=50)
prob = counts.T
# bin centres: midpoints of consecutive histogram edges
mids = 0.5 * (edges[1:] + edges[:-1])

fig, ax = plt.subplots(figsize=(5, 4))
ax.set_title('Distribution', size=20)
ax.plot(mids, prob)
ax.tick_params(axis='both', which='both', direction='in', labelsize=14)
ax.set_xlabel('$\kappa$', size=16)
ax.set_ylabel('Counts', size=16)
ax.set_yscale('log')
plt.show()
```
# Determine $\Delta\lambda$
Following the reference, we want to solve the following equation for $\Delta\lambda$
\begin{align}
\bar{s}^{(j)}_2&=\sum_{\Gamma}P^{(2)}_{\Gamma}s^{(j)}_{\Gamma} \nonumber \\
&=\frac{\sum_{k\in\Omega} s^{(j)}_k e^{-\Delta\lambda_j s^{(j)}_k} }{\sum_{k\in\Omega} e^{-\Delta\lambda_j s^{(j)}_k}} \\
&=f(\Delta\lambda)
\label{eq:lambda_solver}
\end{align}
To determine the $\Delta\lambda$ value, we can calculate the above equation and plot it versus $\Delta\lambda$, and find $\Delta\lambda=\Delta\lambda_{\ast}$ which gives
\begin{align}
\bar{s}^{(j)}_2=f(\Delta\lambda_{\ast})=s^{\rm target}
\end{align}
### $s=\kappa$
```
def f(lm):
    """Biased mean of kappa under the exponential reweighting exp(-lm*kappa)."""
    weights = np.exp(-lm * kappa_arr)
    return np.sum(kappa_arr * weights) / np.sum(weights)

# Evaluate f over a grid of Delta-lambda values and plot against the target.
lm_arr = np.linspace(0, 5)
f_arr = [f(lm_i) for lm_i in lm_arr]

fig, ax = plt.subplots(figsize=(5, 3))
ax.plot(lm_arr, f_arr, label='$\kappa_f$')
ax.plot(lm_arr, [1]*len(lm_arr), '--', label='$\kappa^{\mathrm{target}}$')
ax.tick_params(axis='both', which='both', direction='in', labelsize=14)
ax.set_xlabel('$\lambda$', size=16)
ax.set_ylabel('$f(\lambda)$', size=16)
ax.legend(fontsize=16)
plt.show()

# spot-check the chosen Delta-lambda
lm = 0.317
print( 'f({:.3f}) = {:.3f}'.format(lm, f(lm)) )
```
Let's see if selecting 10 predictions to build the subset is enough.
```
# Bias-sampling check: draw 200 subsets of 10 trajectories each with
# weights p ~ exp(-lambda* x kappa), and inspect the spread of subset means.
lm_ast=0.317 # Delta_lambda we used for bias sampling
p=np.exp(-lm_ast*(kappa_arr))
p/=np.sum(p)  # normalise to a probability distribution
subset_mean_arr = []
# NOTE(review): np.random.choice samples WITH replacement by default --
# confirm that is intended for subset selection.
for i in range(200):
idx = np.random.choice(len(kappa_arr), 10, p=p)
selected = kappa_arr[idx]
mean=np.mean(selected)
subset_mean_arr.append(mean)
fig, ax = plt.subplots(figsize=(6,5), nrows=1, ncols=1)
ax.plot(subset_mean_arr)
ax.plot(np.arange(len(subset_mean_arr)), [1.0]*len(subset_mean_arr), label="constraint $\kappa$")
ax.tick_params(axis='both', which='both', direction='in', labelsize=16)
ax.set_xlabel('indices', size=16)
ax.set_ylabel('$\langle\kappa\\rangle$', size=16)
ax.set_ylim(0.0,3.0)
plt.show()
```
So we will constrain our $\kappa$ to 1 with standard error 0.081. Even though we believe from the above test the subset size=10 is sufficient, there is still some variance in mean constraint. Therefore, we will also constrain the standard deviation of $\kappa$ in the subset.
```
# Rejection-sample one 10-trajectory subset until its mean is within 0.01
# of 1 and its standard error is within 0.01 of 0.09.
# NOTE(review): this loops forever if the tolerances are unreachable --
# consider an iteration cap.
lm_ast=0.317
p=np.exp(-lm_ast*(kappa_arr))
p/=np.sum(p)
mean=np.inf
stdv=np.inf
while abs(mean-1)>0.01 or abs(stdv-0.09)>0.01:
idx = np.random.choice(len(kappa_arr), 10, p=p)
selected = kappa_arr[idx]
mean=np.mean(selected)
stdv=np.std(selected)/np.sqrt(len(selected))
print( 'mean of selected sample = {:.3f}'.format(np.mean(selected)) )
print( 'Standard error stderr[selected sample] = {:.3f}'.format(np.std(selected)/np.sqrt(len(selected))) )
```
Concatenate the subset to a single trajectory, this concatenated trajectory is then used later to re-train a new LSTM.
# Concatenate subset as a new training set
```
# Re-compute and print kappa for each selected trajectory (indices `idx`
# from the previous cell), then concatenate the subset into one long
# trajectory used to re-train a new LSTM.
conc=[]
output_dir='./Output'
for i in idx:
pred_dir=os.path.join(output_dir, '{}/prediction.npy'.format(i))
prediction=np.load(pred_dir)
N0=len(np.where(prediction<=15)[0])
N1=len(np.where(prediction>=16)[0])
kappa=N0/N1
print(kappa)
conc.extend(prediction)
conc = np.array(conc)
# kappa of the concatenated trajectory
N0=len(np.where(conc<=15)[0])
N1=len(np.where(conc>=16)[0])
kappa_conc = N0/N1
print('kappa_conc:{:.3f}'.format(kappa_conc))
```
| github_jupyter |
## Dependencies
```
!pip install --quiet /kaggle/input/kerasapplications
!pip install --quiet /kaggle/input/efficientnet-git
import warnings, glob
from tensorflow.keras import Sequential, Model
import efficientnet.tfkeras as efn
from cassava_scripts import *
# Fix the random seed for reproducibility and silence warnings.
seed = 0
seed_everything(seed)
warnings.filterwarnings('ignore')
```
### Hardware configuration
```
# TPU or GPU detection
# Detect hardware, return appropriate distribution strategy
# (set_up_strategy comes from cassava_scripts -- presumably wraps
# tf.distribute detection; confirm)
strategy, tpu = set_up_strategy()
AUTO = tf.data.experimental.AUTOTUNE
REPLICAS = strategy.num_replicas_in_sync  # number of devices in sync training
print(f'REPLICAS: {REPLICAS}')
```
# Model parameters
```
# Global batch: 8 images per replica.
BATCH_SIZE = 8 * REPLICAS
HEIGHT = 512   # model input height
WIDTH = 512    # model input width
CHANNELS = 3   # RGB
N_CLASSES = 5  # cassava leaf disease classes
TTA_STEPS = 0 # Do TTA if > 0
```
# Augmentation
```
def data_augment(image, label):
"""Randomly augment one image; the label passes through unchanged.

Independent uniform draws gate each augmentation family: flips/transpose,
90-degree rotations, saturation/contrast/brightness jitter, and
central/random crops.
NOTE(review): this runs under tf.data map; the Python `if`s on tensor
probabilities rely on AutoGraph conversion -- confirm.
"""
# one uniform draw per augmentation family
p_spatial = tf.random.uniform([], 0, 1.0, dtype=tf.float32)
p_rotate = tf.random.uniform([], 0, 1.0, dtype=tf.float32)
p_pixel_1 = tf.random.uniform([], 0, 1.0, dtype=tf.float32)
p_pixel_2 = tf.random.uniform([], 0, 1.0, dtype=tf.float32)
p_pixel_3 = tf.random.uniform([], 0, 1.0, dtype=tf.float32)
p_crop = tf.random.uniform([], 0, 1.0, dtype=tf.float32)
# Flips
image = tf.image.random_flip_left_right(image)
image = tf.image.random_flip_up_down(image)
if p_spatial > .75:
image = tf.image.transpose(image)
# Rotates
if p_rotate > .75:
image = tf.image.rot90(image, k=3) # rotate 270º
elif p_rotate > .5:
image = tf.image.rot90(image, k=2) # rotate 180º
elif p_rotate > .25:
image = tf.image.rot90(image, k=1) # rotate 90º
# Pixel-level transforms
if p_pixel_1 >= .4:
image = tf.image.random_saturation(image, lower=.7, upper=1.3)
if p_pixel_2 >= .4:
image = tf.image.random_contrast(image, lower=.8, upper=1.2)
if p_pixel_3 >= .4:
image = tf.image.random_brightness(image, max_delta=.1)
# Crops
if p_crop > .7:
if p_crop > .9:
image = tf.image.central_crop(image, central_fraction=.7)
elif p_crop > .8:
image = tf.image.central_crop(image, central_fraction=.8)
else:
image = tf.image.central_crop(image, central_fraction=.9)
elif p_crop > .4:
crop_size = tf.random.uniform([], int(HEIGHT*.8), HEIGHT, dtype=tf.int32)
image = tf.image.random_crop(image, size=[crop_size, crop_size, CHANNELS])
# # Crops
# if p_crop > .6:
# if p_crop > .9:
# image = tf.image.central_crop(image, central_fraction=.5)
# elif p_crop > .8:
# image = tf.image.central_crop(image, central_fraction=.6)
# elif p_crop > .7:
# image = tf.image.central_crop(image, central_fraction=.7)
# else:
# image = tf.image.central_crop(image, central_fraction=.8)
# elif p_crop > .3:
# crop_size = tf.random.uniform([], int(HEIGHT*.6), HEIGHT, dtype=tf.int32)
# image = tf.image.random_crop(image, size=[crop_size, crop_size, CHANNELS])
return image, label
```
## Auxiliary functions
```
# Datasets utility functions
def resize_image(image, label):
    """Resize to (HEIGHT, WIDTH) and pin a static (H, W, C) shape."""
    resized = tf.image.resize(image, [HEIGHT, WIDTH])
    return tf.reshape(resized, [HEIGHT, WIDTH, CHANNELS]), label
def process_path(file_path):
"""Read one image file and return (decoded_image, file_name).

Uses helpers from cassava_scripts (get_name, decode_image, scale_image);
scale_image presumably normalises pixel values -- confirm.
"""
name = get_name(file_path)
img = tf.io.read_file(file_path)
img = decode_image(img)
img, _ = scale_image(img, None)
# img = center_crop(img, HEIGHT, WIDTH)
return img, name
def get_dataset(files_path, shuffled=False, tta=False, extension='jpg'):
    """Build a batched, prefetched dataset of (image, name) pairs."""
    ds = tf.data.Dataset.list_files(f'{files_path}*{extension}', shuffle=shuffled)
    ds = ds.map(process_path, num_parallel_calls=AUTO)
    if tta:
        # apply the training augmentations for test-time augmentation runs
        ds = ds.map(data_augment, num_parallel_calls=AUTO)
    return (ds.map(resize_image, num_parallel_calls=AUTO)
              .batch(BATCH_SIZE)
              .prefetch(AUTO))
```
# Load data
```
# Paths, sample submission, TFRecord listing, and model checkpoints to ensemble.
database_base_path = '/kaggle/input/cassava-leaf-disease-classification/'
submission = pd.read_csv(f'{database_base_path}sample_submission.csv')
display(submission.head())
TEST_FILENAMES = tf.io.gfile.glob(f'{database_base_path}test_tfrecords/ld_test*.tfrec')
NUM_TEST_IMAGES = count_data_items(TEST_FILENAMES)
print(f'GCS: test: {NUM_TEST_IMAGES}')
model_path_list = glob.glob('/kaggle/input/58-cassava-leaf-effnetb5-no-dropout-512/*.h5')
model_path_list.sort()
print('Models to predict:')
print(*model_path_list, sep='\n')
```
# Model
```
def model_fn(input_shape, N_CLASSES):
"""Build an EfficientNetB5 classifier with a softmax head over N_CLASSES.

NOTE(review): `L` is presumably tf.keras.layers brought in by
`from cassava_scripts import *` -- confirm.
"""
inputs = L.Input(shape=input_shape, name='input_image')
base_model = efn.EfficientNetB5(input_tensor=inputs,
include_top=False,
weights=None,
pooling='avg')
x = L.Dropout(.25)(base_model.output)
output = L.Dense(N_CLASSES, activation='softmax', name='output')(x)
model = Model(inputs=inputs, outputs=output)
return model

# instantiate under the distribution strategy so weights are placed on TPU/GPU
with strategy.scope():
model = model_fn((None, None, CHANNELS), N_CLASSES)
model.summary()
```
# Test set predictions
```
# Ensemble prediction over all checkpoints, with optional test-time augmentation.
files_path = f'{database_base_path}test_images/'
test_size = len(os.listdir(files_path))
test_preds = np.zeros((test_size, N_CLASSES))
for model_path in model_path_list:
print(model_path)
K.clear_session()
model.load_weights(model_path)
if TTA_STEPS > 0:
# repeat the dataset so predict can take TTA_STEPS passes over it
test_ds = get_dataset(files_path, tta=True).repeat()
ct_steps = TTA_STEPS * ((test_size/BATCH_SIZE) + 1)
preds = model.predict(test_ds, steps=ct_steps, verbose=1)[:(test_size * TTA_STEPS)]
# average over the TTA passes; 'F' order presumably groups the repeated
# passes per image -- confirm against get_dataset's ordering
preds = np.mean(preds.reshape(test_size, TTA_STEPS, N_CLASSES, order='F'), axis=1)
test_preds += preds / len(model_path_list)
else:
test_ds = get_dataset(files_path, tta=False)
x_test = test_ds.map(lambda image, image_name: image)
test_preds += model.predict(x_test) / len(model_path_list)
# final label = argmax of the ensemble-averaged class probabilities
test_preds = np.argmax(test_preds, axis=-1)
test_names_ds = get_dataset(files_path)
image_names = [img_name.numpy().decode('utf-8') for img, img_name in iter(test_names_ds.unbatch())]
submission = pd.DataFrame({'image_id': image_names, 'label': test_preds})
submission.to_csv('submission.csv', index=False)
display(submission.head())
```
| github_jupyter |
## Trajectory equations:
```
%matplotlib inline
import matplotlib.pyplot as plt
from sympy import *
init_printing()
# Symbolic quantities: electric/magnetic field components, coordinates,
# velocity components, time, charge and mass, and physical constants.
Ex, Ey, Ez = symbols("E_x, E_y, E_z")
Bx, By, Bz, B = symbols("B_x, B_y, B_z, B")
x, y, z = symbols("x, y, z")
vx, vy, vz, v = symbols("v_x, v_y, v_z, v")
t = symbols("t")
q, m = symbols("q, m")
c, eps0 = symbols("c, epsilon_0")
```
The equation of motion:
$$
\begin{gather*}
m \frac{d^2 \vec{r} }{dt^2} =
q \vec{E} + \frac{q}{c} [ \vec{v} \times \vec{B} ]
\end{gather*}
$$
In Cartesian coordinates:
```
# Componentwise Newton-Lorentz equations (Gaussian units): the cross
# product v x B expanded per coordinate.
eq_x = Eq( diff(x(t), t, 2), q / m * Ex + q / c / m * (vy * Bz - vz * By) )
eq_y = Eq( diff(y(t), t, 2), q / m * Ey + q / c / m * (-vx * Bz + vz * Bx) )
eq_z = Eq( diff(z(t), t, 2), q / m * Ez + q / c / m * (vx * By - vy * Bx) )
display( eq_x, eq_y, eq_z )
```
For the case of a uniform magnetic field
along the $z$-axis:
$$ \vec{B} = B_z = B, \quad B_x = 0, \quad B_y = 0 $$
```
# Uniform magnetic field along z: substitute Bx = By = 0, Bz = B.
uni_mgn_subs = [ (Bx, 0), (By, 0), (Bz, B) ]
eq_x = eq_x.subs(uni_mgn_subs)
eq_y = eq_y.subs(uni_mgn_subs)
eq_z = eq_z.subs(uni_mgn_subs)
display( eq_x, eq_y, eq_z )
```
Assuming $E_z = 0$ and $E_y = 0$:
```
# Zero out the transverse electric field components (Ey = Ez = 0).
zero_EyEz_subs = [ (Ey, 0), (Ez, 0) ]
eq_x = eq_x.subs(zero_EyEz_subs)
eq_y = eq_y.subs(zero_EyEz_subs)
eq_z = eq_z.subs(zero_EyEz_subs)
display( eq_x, eq_y, eq_z )
```
Motion is uniform along the $z$-axis:
```
# Solve the (force-free) z equation; the velocity follows by differentiating.
z_eq = dsolve( eq_z, z(t) )
vz_eq = Eq( z_eq.lhs.diff(t), z_eq.rhs.diff(t) )
display( z_eq, vz_eq )
```
The constants of integration can be found from the initial conditions $z(0) = 0$ and $v_z(0) = v$:
```
# Fix the integration constants C1, C2 from z(0) = 0 and v_z(0) = v.
z_0 = 0
v_0 = v
c1_c2_system = []
initial_cond_subs = [(t, 0), (z(0), z_0), (diff(z(t),t).subs(t,0), v_0) ]
c1_c2_system.append( z_eq.subs( initial_cond_subs ) )
c1_c2_system.append( vz_eq.subs( initial_cond_subs ) )
c1, c2 = symbols("C1, C2")
c1_c2 = solve( c1_c2_system, [c1, c2] )
c1_c2
```
So that
```
# Substitute the constants back: uniform motion along z.
z_sol = z_eq.subs( c1_c2 )
vz_sol = vz_eq.subs( c1_c2 )
display( z_sol, vz_sol )
```
Now, the equation for $y$ can be integrated:
```
# Replace velocity symbols with time derivatives, then integrate the
# y equation once over [0, t].
v_as_diff = [ (vx, diff(x(t),t)), (vy, diff(y(t),t)), (vz, diff(z_sol.lhs,t)) ]
eq_y = eq_y.subs( v_as_diff )
eq_y = Eq( integrate( eq_y.lhs, (t, 0, t) ), integrate( eq_y.rhs, (t, 0, t) ) )
eq_y
```
For initial conditions $x(0) = x_0, y'(0) = 0$:
```
# Apply initial conditions x(0) = x_0 and y'(0) = 0 to obtain v_y(t).
x_0 = Symbol('x_0')
vy_0 = 0
initial_cond_subs = [(x(0), x_0), (diff(y(t),t).subs(t,0), vy_0) ]
vy_sol = eq_y.subs( initial_cond_subs )
vy_sol
```
This equation can be substituted into the equation for the $x$-coordinate:
```
# Substitute v_y into the x equation and collect terms in qB/(cm).
eq_x = eq_x.subs( vy, vy_sol.rhs )
eq_x = Eq( eq_x.lhs, collect( expand( eq_x.rhs ), B *q / c / m ) )
eq_x
```
An expression for $E_x$ can be taken from the example on ribbon beam in free space $E_x = \dfrac{ 2 \pi I_0 }{v}$:
```
# Substitute the ribbon-beam field E_x = 2*pi*I_0 / v into the x equation.
I0 = symbols('I_0')
Ex_subs = [ (Ex, 2 * pi * I0 / v) ]
# fixed: the original called eq_x.subs(ex_subs), a NameError -- the list
# defined above is Ex_subs
eq_x = eq_x.subs( Ex_subs )
eq_x
```
This is an oscillator-type equation
$$
x'' + a x + b = 0
$$
with $a$ and $b$ given by
```
# Coefficients of the oscillator form x'' + a x + b = 0 read off eq_x.
# fixed: `a` and `b` were used here before being defined (they were only
# introduced by symbols(...) in a later cell)
a, b = symbols("a, b")
eq_a = Eq(a, eq_x.rhs.expand().coeff(x(t), 1))
eq_b = Eq( b, eq_x.rhs.expand().coeff(x(t), 0) )
display( eq_a , eq_b )
```
It's solution is given by:
```
# Generic oscillator x'' = -|a| x + b and its general solution.
# NOTE(review): this rebinds `c`, which previously held the speed-of-light
# symbol -- confirm `c` is not needed as the speed of light again below.
a, b, c = symbols("a, b, c")
osc_eqn = Eq( diff(x(t),t,2), - abs(a)*x(t) + b)
display( osc_eqn )
osc_eqn_sol = dsolve( osc_eqn )
osc_eqn_sol
```
From initial conditions $x(0) = x_0, v_0 = 0$:
```
# Determine C1, C2 from the initial conditions x(0) = x_0 and x'(0) = 0.
x_0 = symbols( 'x_0' )
v_0 = 0
c1_c2_system = []
initial_cond_subs = [(t, 0), (x(0), x_0), (diff(x(t),t).subs(t,0), v_0) ]
c1_c2_system.append( osc_eqn_sol.subs( initial_cond_subs ) )
osc_eqn_sol_diff = Eq( osc_eqn_sol.lhs.diff(t), osc_eqn_sol.rhs.diff(t) )
c1_c2_system.append( osc_eqn_sol_diff.subs( initial_cond_subs ) )
c1, c2 = symbols("C1, C2")
c1_c2 = solve( c1_c2_system, [c1, c2] )
c1_c2
```
So that
```
# Particular solution with the integration constants substituted in.
x_sol = osc_eqn_sol.subs( c1_c2 )
x_sol
```
Taking into account that
$$ \sqrt{|a|} = \omega_g = \frac{ q B }{mc } $$
where $\omega_g$ is the gyrofrequency, and since
```
# Ratio b/|a|; |a| is rewritten via the substitution |a| -> -a, which
# assumes eq_a.rhs is negative here -- TODO confirm.
b_over_a = simplify( eq_b.rhs / abs( eq_a.rhs ).subs( abs( eq_a.rhs ), -eq_a.rhs ) )
Eq( b/abs(a), b_over_a )
```
It is possible to rewrite the solution as
```
# Rewrite the solution using the gyrofrequency omega_g = qB/(mc) and the
# oscillation amplitude A = b/|a| - x_0.
# NOTE(review): `c` was rebound to a generic symbol in an earlier cell; here
# it should denote the speed of light again -- verify.
omega_g = symbols('omega_g')
eq_omega_g = Eq( omega_g, q * B / m / c )
A = symbols('A')
eq_A = Eq( A, b_over_a - x_0 )
subs_list = [ (b/abs(a), b_over_a), ( sqrt( abs(a) ), omega_g ), ( eq_A.rhs, eq_A.lhs) ]
x_sol = x_sol.subs( subs_list )
display( x_sol, eq_A, eq_omega_g )
```
From the laws of motion for $x(t)$ and $z(t)$
```
display( x_sol, z_sol )
```
it is possible to obtain a trajectory equation:
```
# Eliminate t using the z(t) solution (expected t = z/v -- confirm) to get
# the trajectory x as a function of z.
t_from_z = solve( z_sol.subs(z(t),z), t )[0]
x_z_traj = Eq( x_sol.lhs.subs( t, z ), x_sol.rhs.subs( [(t, t_from_z)] ) )
display( x_z_traj, eq_A, eq_omega_g )
```
| github_jupyter |
# Intro to Pandas
Pandas is a Python package for data analysis and exposes two new
data structures: Dataframes and Series.
- [Dataframes](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.html) store tabular data consisting of rows and columns.
- [Series](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.Series.html) are similar to Python's built-in list or set data types.
In this notebook, we will explore the data structures that Pandas
provides, and learn how to interact with them.
### 1. Importing Pandas
To import an external Python library such as Pandas, use Python's
import function. To save yourself some typing later on, you can
give the library you import an alias. Here, we are importing Pandas
and giving it an alias of `pd`.
```
import pandas as pd
```
### 2. Creating A Dataframe and Basic Exploration
We will load a CSV file as a dataframe using Panda's `read_csv`
method. This will allow us to use Pandas' dataframe functions to
explore the data in the CSV.
```
df = pd.read_csv("../data/loans_full_africa.csv")
```
Once we have loaded the CSV as a dataframe, we can start to explore
the data. Here are a few useful methods:
- .head(): returns first 5 rows of the DataFrame
- .tail(): returns last 5 rows of the DataFrame
- .shape: returns tuple with first element indicating the number of rows and the second element indicating the number of columns
- .columns: returns list of all columns in DataFrame
- .index: returns DataFrame indices
- .dtypes: returns Series explaining the datatype of each column
```
df.dtypes
```
To get some basic stats of the columns you can either use .describe() for discrete data or .value_counts() for categorical data
```
df.describe()
```
Alternatively, if you want just the count or min / max of one column, you can use Pandas built in functions:
```
print(len(df['borrower_count']))  # number of rows
print(max(df['funded_amount']))   # largest funded amount
print(df['loan_amount'].mean())   # average loan amount
# NOTE: in a notebook only the last bare expression of a cell is displayed
df.head()
df['activity'].value_counts()
```
### 3. Selecting Data
To examine a specific column of the DataFrame:
```
df['activity'].head()  # a single column selects a Series
df[['activity','basket_amount']].tail()  # a list of columns selects a DataFrame
```
To examine specific rows and columns of a Dataframe, Pandas provides
the `iloc` and `loc` methods to do so. `iloc` is used when you want to specify a list or range of indices, and `.loc` is used when you want to specify a list or range of labels.
For both of these methods you need to specify two elements, with the first element indicating the rows that you want to select and the second element indicating the columns that you want to select.
```
# Get rows 1 through 3 and columns 0 through 5.
df.iloc[1:3,:5]
# Get rows with index values of 2-4 and the columns basket_amount and activity
# (note: .loc slices are inclusive of the end label, unlike .iloc)
df.loc[2:4, ["basket_amount", "activity"]]
```
What do you notice about the way the indices work for `iloc` versus `loc`?
```
# To see all the rows and columns:
df.iloc[:,:]
# You can also store a slice of the dataframe as a new dataframe!
# NOTE(review): column index 2 is presumably the loan title -- confirm
# against the CSV's column order.
titles_df = df.iloc[:,2]
titles_df.head()
```
### 4. Select subets of the DataFrame
A powerful feature of DataFrames is that you can view a subset of the DataFrame based on the values of the columns or rows. For example, lets say you only wanted to view loans with a status of "expired"
```
df[df['status']=='expired']
```
To view all loans with a status of "expired" `or` "fundraising":
```
df[(df['status']=='expired')|(df['status']=='fundraising')]
```
Select loans that have expired and with loan amounts greater than 1000
```
df[(df['status']=='expired')&(df['loan_amount']>1000)]
```
### 5. Merging and grouping data
You can group data by a column that has duplicates, like activity for the sector group.
```
df.groupby(['activity'])['loan_amount'].sum().reset_index()
```
You can also use SQL functions like inner join, outer join, left / right join using pd.merge(). Find documentation on this concept here: https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.merge.html
## Great Resources for further information:
- [10 minute introduction to pandas](http://pandas.pydata.org/pandas-docs/stable/10min.html)
- [Pandas in ipython notebooks](http://nbviewer.jupyter.org/github/jvns/pandas-cookbook/blob/master/cookbook/A%20quick%20tour%20of%20IPython%20Notebook.ipynb)
```
!ls
!pip install "name of the library missing"
```
| github_jupyter |
```
import cv2
import time
import h5py
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from tqdm import tqdm

# Preprocess the NYU Depth v2 labelled dataset: load the .mat archive,
# normalise and resize depth maps and RGB images, and save training arrays.
data_path = "/home/sid-pc/ashutosh/DDP/NYU Dataset and Toolbox/nyu_depth_v2_labeled.mat"
img_resize_X = 320
img_resize_Y = 240
depth_resize_X = 80
depth_resize_Y = 60

t1 = time.time()
f = h5py.File(data_path, 'r')  # explicit read mode (mode-less open is deprecated in h5py)

# HxWxN matrix of in-painted depth maps where H and W are the height and width,
# respectively and N is the number of images. The values of the depth elements are in meters.
depths = f['depths']
depths = np.einsum('ijk->ikj', depths)

# HxWx3xN matrix of RGB images where H and W are the height and width,
# respectively, and N is the number of images (stored as 3 * 640 * 480).
imgs = f['images']
imgs = np.einsum('ijkl->ilkj', imgs)

# HxWxN matrix of instance maps
instances = f['instances']
instances = np.einsum('ijk->ikj', instances)

# HxWxN matrix of object label masks; labels range 1..C, 0 means 'unlabeled'.
lbls = f['labels']
lbls = np.einsum('ijk->ikj', lbls)

# Cx1 cell array of the english names of each class.
names = np.array(f['names'])

# map from english label names to class IDs (with C key-value pairs)
names_to_ids = np.array(f['namesToIds'])

# HxWxN matrix of raw depth maps: projected onto the RGB image plane but with
# missing values not yet filled in; Kinect depth non-linearity removed; meters.
raw_depths = f['rawDepths']
raw_depths = np.einsum('ijk->ikj', raw_depths)

# Nx1 cell arrays with the scene type / scene name of each image
scenceTypes = f['sceneTypes']
scenes = f['scenes']

max_depth = np.amax(depths)
min_depth = np.amin(depths)
# fixed: the original used Python 2 `print` statements, a SyntaxError on Python 3
print("minimum depth {} m \nmaximum depth {} m".format(min_depth, max_depth))
print("Range of values of class labels ({},{})".format(np.amin(lbls), np.amax(lbls)))

# min-max normalise depths to [0, 1]
scale_depths = np.divide((depths - min_depth), (max_depth - min_depth))

depths_resized = []
for i in tqdm(range(depths.shape[0])):
    depths_resized.append(cv2.resize(scale_depths[i], (depth_resize_X, depth_resize_Y), interpolation=cv2.INTER_AREA))
depths_resized = np.array(depths_resized)

# Visual sanity check of the first sample.
plt.figure(figsize=(15, 30))
plt.subplot(1, 4, 1)
plt.title("RGB Image")
plt.imshow(imgs[0], cmap='gray', interpolation='bicubic')
plt.subplot(1, 4, 2)
plt.title("Kinect Output")
plt.imshow(raw_depths[0], cmap='gray', interpolation='bicubic')
plt.subplot(1, 4, 3)
plt.title("Depth Map")
plt.imshow(depths[0], cmap='gray', interpolation='bicubic')
plt.subplot(1, 4, 4)
plt.title("Labelled Depth Map")
plt.imshow(lbls[0], cmap='gray', interpolation='bicubic')
plt.show()

plt.title("Reszied Depth Map")  # NOTE(review): title typo 'Reszied' kept as-is
plt.imshow(depths_resized[0], cmap='gray', interpolation='bicubic')
plt.show()

imgs_resized = []
for i in tqdm(range(depths.shape[0])):
    imgs_resized.append(cv2.resize(imgs[i], (img_resize_X, img_resize_Y), interpolation=cv2.INTER_AREA))
imgs_resized = np.array(imgs_resized)

# scale RGB to [0, 1]
scale_imgs = np.divide(imgs_resized, 255.0)
X = scale_imgs

# flatten each resized depth map into a 1-D regression target
y = []
for i in tqdm(range(depths.shape[0])):
    y.append(depths_resized[i].flatten())
y = np.array(y)

np.save('train_X_{}_{}'.format(img_resize_Y, img_resize_X), X)
np.save('train_Y_{}_{}'.format(depth_resize_Y, depth_resize_X), y)

t2 = time.time()
print("Preprocessing time is {} minute".format((t2 - t1) / 60))
```
| github_jupyter |
**This notebook is an exercise in the [Intro to Game AI and Reinforcement Learning](https://www.kaggle.com/learn/intro-to-game-ai-and-reinforcement-learning) course. You can reference the tutorial at [this link](https://www.kaggle.com/alexisbcook/n-step-lookahead).**
---
# Introduction
In the tutorial, you learned how to build a reasonably intelligent agent with the minimax algorithm. In this exercise, you will check your understanding and submit your own agent to the competition.
```
from learntools.core import binder
binder.bind(globals())
from learntools.game_ai.ex3 import *
```
### 1) A closer look
The heuristic from the tutorial looks at all groups of four adjacent grid locations on the same row, column, or diagonal and assigns points for each occurrence of the following patterns:
<center>
<img src="https://i.imgur.com/3NvBEGL.png" width=70%><br/>
</center>
Is it really necessary to use so many numbers to define the heuristic? Consider simplifying it, as in the image below.
<center>
<img src="https://i.imgur.com/grViegG.png" width=70%><br/>
</center>
How would each heuristic score the potential moves in the example below (where, in this case, the agent looks only one step ahead)? Which heuristic would lead to the agent selecting the better move?
<center>
<img src="https://i.imgur.com/LWPLy7N.png" width=100%><br/>
</center>
```
#q_1.hint()
# Check your answer (Run this code cell to receive credit!)
#q_1.solution()
q_1.check()  # free-response question: check() displays the solution discussion
```
### 2) Count the leaves
In the tutorial, we worked with a small game tree.
<center>
<img src="https://i.imgur.com/BrRe7Bu.png" width=90%><br/>
</center>
The game tree above has 8 leaf nodes that appear at the bottom of the tree. By definition, "leaf nodes" in a game tree are nodes that don't have nodes below them.
In the ConnectX competition, the game trees will be much larger!
To see this, consider a minimax agent that is trying to plan its first move, where all columns in the game board are empty. Say the agent builds a game tree of depth 3. How many leaf nodes are in the game tree?
Use your answer to fill in the blank below.
```
# Fill in the blank
# With 7 columns available, each additional ply multiplies the leaf count
# by 7, so a depth-3 game tree has 7^3 = 343 leaf nodes.
num_leaves = 7 ** 3
# Check your answer
q_2.check()
# Lines below will give you a hint or solution code
#q_2.hint()
#q_2.solution()
```
### 3) Which move will the agent select?
In this question, you'll check your understanding of the minimax algorithm. Remember that with this algorithm,
> The agent chooses moves to get a score that is as high as possible, and it assumes the opponent will counteract this by choosing moves to force the score to be as low as possible.
Consider the toy example below of a game tree that the agent will use to select its next move.
<center>
<img src="https://i.imgur.com/QlfWGM9.png" width=80%><br/>
</center>
Which move will the agent select? Use your answer to set the value of the `selected_move` variable below. Your answer should be one of `1`, `2`, or `3`.
```
# Fill in the blank
selected_move = 3
# Check your answer
q_3.check()
# Lines below will give you a hint or solution code
#q_3.hint()
#q_3.solution()
```
### 4) Examine the assumptions
The minimax agent assumes that its opponent plays optimally (with respect to the heuristic, and using a game tree of limited depth). But this is almost never the case, in practice: it's far more likely for the agent to encounter a suboptimal (that is: worse than optimal) opponent.
Say the minimax agent encounters a suboptimal opponent. Should we expect the minimax agent to still play the game well, despite the contradiction with its assumptions? If so, why?
```
#q_4.hint()
# Check your answer (Run this code cell to receive credit!)
#q_4.solution()
q_4.check()
```
### 5) Submit to the competition
Now, it's time to submit an agent to the competition! Use the next code cell to define an agent. (You can see an example of how to write a valid agent in **[this notebook](https://www.kaggle.com/alexisbcook/create-a-connectx-agent)**.)
If you decide to use the minimax code from the tutorial, you might like to add [**alpha-beta pruning**](https://en.wikipedia.org/wiki/Alpha%E2%80%93beta_pruning) to decrease the computation time (i.e., get the minimax algorithm to run much faster!). In this case, "alpha" and "beta" refer to two values that are maintained while the algorithm is running, which help to identify early stopping conditions.
Without alpha-beta pruning, minimax evaluates each leaf node. With alpha-beta pruning, minimax only evaluates nodes that could provide information that affects the agent's choice of action. Put another way, it identifies nodes that could not possibly affect the final result and avoids evaluating them.
```
def my_agent(obs, config):
    """ConnectX agent: N-step minimax search with alpha-beta pruning.

    Parameters
    ----------
    obs : object
        Observation with ``board`` (flat row-major list, 0 = empty cell)
        and ``mark`` (this agent's piece, 1 or 2).
    config : object
        Environment configuration with ``rows``, ``columns`` and ``inarow``.

    Returns
    -------
    int
        0-based column index to drop the next piece into.
    """
    # Imports are local so the function is fully self-contained when the
    # source is written to submission.py.
    import random
    import numpy as np

    # How deep to make the game tree: higher values take longer to run!
    N_STEPS = 4

    # Gets board at next step if agent drops piece in selected column.
    def drop_piece(grid, col, mark, config):
        next_grid = grid.copy()
        # Scan bottom-up for the first empty cell in this column.
        for row in range(config.rows-1, -1, -1):
            if next_grid[row][col] == 0:
                break
        next_grid[row][col] = mark
        return next_grid

    # Helper function for get_heuristic: checks if window satisfies heuristic
    # conditions (exactly num_discs of `piece` and the rest empty).
    def check_window(window, num_discs, piece, config):
        return (window.count(piece) == num_discs and window.count(0) == config.inarow-num_discs)

    # Helper function for get_heuristic: counts the number of length-inarow
    # windows (in all four directions) satisfying the specified conditions.
    def count_windows(grid, num_discs, piece, config):
        num_windows = 0
        # horizontal
        for row in range(config.rows):
            for col in range(config.columns-(config.inarow-1)):
                window = list(grid[row, col:col+config.inarow])
                if check_window(window, num_discs, piece, config):
                    num_windows += 1
        # vertical
        for row in range(config.rows-(config.inarow-1)):
            for col in range(config.columns):
                window = list(grid[row:row+config.inarow, col])
                if check_window(window, num_discs, piece, config):
                    num_windows += 1
        # positive diagonal (down-right); fancy indexing picks the diagonal cells
        for row in range(config.rows-(config.inarow-1)):
            for col in range(config.columns-(config.inarow-1)):
                window = list(grid[range(row, row+config.inarow), range(col, col+config.inarow)])
                if check_window(window, num_discs, piece, config):
                    num_windows += 1
        # negative diagonal (up-right)
        for row in range(config.inarow-1, config.rows):
            for col in range(config.columns-(config.inarow-1)):
                window = list(grid[range(row, row-config.inarow, -1), range(col, col+config.inarow)])
                if check_window(window, num_discs, piece, config):
                    num_windows += 1
        return num_windows

    # Helper function for minimax: calculates value of heuristic for grid.
    def get_heuristic(grid, mark, config):
        num_threes = count_windows(grid, 3, mark, config)
        num_fours = count_windows(grid, 4, mark, config)
        num_threes_opp = count_windows(grid, 3, mark%2+1, config)
        num_fours_opp = count_windows(grid, 4, mark%2+1, config)
        # Large weights make wins/losses dominate positional threats.
        score = num_threes - 1e2*num_threes_opp - 1e4*num_fours_opp + 1e6*num_fours
        return score

    # Uses minimax to calculate value of dropping piece in selected column.
    def score_move(grid, col, mark, config, nsteps):
        next_grid = drop_piece(grid, col, mark, config)
        # float("-inf")/float("inf") instead of np.Inf: the np.Inf alias was
        # removed in NumPy 2.0 (use np.inf or plain float infinities).
        score = minimax(next_grid, nsteps-1, False, float("-inf"), float("inf"), mark, config)
        return score

    # Helper function for minimax: checks if agent or opponent has four in a
    # row in the window.
    def is_terminal_window(window, config):
        return window.count(1) == config.inarow or window.count(2) == config.inarow

    # Helper function for minimax: checks if game has ended (draw or win).
    def is_terminal_node(grid, config):
        # Check for draw: top row full means no legal moves remain.
        if list(grid[0, :]).count(0) == 0:
            return True
        # Check for win: horizontal, vertical, or diagonal
        # horizontal
        for row in range(config.rows):
            for col in range(config.columns-(config.inarow-1)):
                window = list(grid[row, col:col+config.inarow])
                if is_terminal_window(window, config):
                    return True
        # vertical
        for row in range(config.rows-(config.inarow-1)):
            for col in range(config.columns):
                window = list(grid[row:row+config.inarow, col])
                if is_terminal_window(window, config):
                    return True
        # positive diagonal
        for row in range(config.rows-(config.inarow-1)):
            for col in range(config.columns-(config.inarow-1)):
                window = list(grid[range(row, row+config.inarow), range(col, col+config.inarow)])
                if is_terminal_window(window, config):
                    return True
        # negative diagonal
        for row in range(config.inarow-1, config.rows):
            for col in range(config.columns-(config.inarow-1)):
                window = list(grid[range(row, row-config.inarow, -1), range(col, col+config.inarow)])
                if is_terminal_window(window, config):
                    return True
        return False

    # Minimax implementation with alpha-beta pruning. `mark` is always the
    # agent's piece; `maximizingPlayer` says whose turn it is at this node.
    def minimax(node, depth, maximizingPlayer, alpha, beta, mark, config):
        if depth == 0 or is_terminal_node(node, config):
            return get_heuristic(node, mark, config)
        valid_moves = [c for c in range(config.columns) if node[0][c] == 0]
        if maximizingPlayer:
            value = float("-inf")
            for col in valid_moves:
                child = drop_piece(node, col, mark, config)
                value = max(value, minimax(child, depth-1, False, alpha, beta, mark, config))
                alpha = max(value, alpha)
                if alpha >= beta:
                    break  # beta cutoff: the minimizer will avoid this line
            return value
        else:
            value = float("inf")
            for col in valid_moves:
                # Opponent drops the other mark.
                child = drop_piece(node, col, mark%2+1, config)
                value = min(value, minimax(child, depth-1, True, alpha, beta, mark, config))
                beta = min(value, beta)
                if alpha >= beta:
                    break  # alpha cutoff
            return value

    # Get list of valid moves (any column whose top cell is empty).
    valid_moves = [col for col in range(config.columns) if obs.board[col] == 0]
    # Convert the board to a 2D grid.
    grid = np.asarray(obs.board).reshape(config.rows, config.columns)
    # Use minimax to assign a score to each possible next move.
    scores = {col: score_move(grid, col, obs.mark, config, N_STEPS) for col in valid_moves}
    # Get a list of columns (moves) that maximize the heuristic.
    # (max() hoisted out of the comprehension: the original recomputed it
    # once per key, which is quadratic.)
    best_score = max(scores.values())
    max_cols = [key for key in scores.keys() if scores[key] == best_score]
    # Select at random from the maximizing columns.
    return random.choice(max_cols)
# Run this code cell to get credit for creating an agent
q_5.check()
import inspect
import os
def write_agent_to_file(function, file):
    """Write the source code of *function* to *file*.

    Appends when the file already exists, otherwise creates it, then
    reports what was written.
    """
    mode = "a" if os.path.exists(file) else "w"
    source = inspect.getsource(function)
    with open(file, mode) as handle:
        handle.write(source)
        print(function, "written to", file)
write_agent_to_file(my_agent, "submission.py")
```
Then, follow these steps to submit your agent to the competition:
1. Begin by clicking on the blue **Save Version** button in the top right corner of the window. This will generate a pop-up window.
2. Ensure that the **Save and Run All** option is selected, and then click on the blue **Save** button.
3. This generates a window in the bottom left corner of the notebook. After it has finished running, click on the number to the right of the **Save Version** button. This pulls up a list of versions on the right of the screen. Click on the ellipsis **(...)** to the right of the most recent version, and select **Open in Viewer**. This brings you into view mode of the same page. You will need to scroll down to get back to these instructions.
4. Click on the **Output** tab on the right of the screen. Then, click on the file you would like to submit, and click on the blue **Submit** button to submit your results to the leaderboard.
You have now successfully submitted to the competition!
If you want to keep working to improve your performance, select the blue **Edit** button in the top right of the screen. Then you can change your code and repeat the process. There's a lot of room to improve, and you will climb up the leaderboard as you work.
Go to **"My Submissions"** to view your score and episodes being played.
# Keep going
Move on to learn how to **[use deep reinforcement learning](https://www.kaggle.com/alexisbcook/deep-reinforcement-learning)** to develop an agent without a heuristic!
---
*Have questions or comments? Visit the [Learn Discussion forum](https://www.kaggle.com/learn-forum/161477) to chat with other Learners.*
| github_jupyter |
# Computer Vision Example: Image Classification with WMLA
https://developer.ibm.com/technologies/artificial-intelligence/tutorials/use-computer-vision-with-dli-watson-machine-learning-accelerator/
This workflow is documented here...
### Contents
- [Introduction](#Introduction)
- [Upload this notebook to your environment](#Upload-notebook)
- [Download dataset and model](#Download-dataset-model)
- [Import dataset](#Import-dataset)
- [Build the model](#Build-the-model)
- [Tune Hyper-parameter](#Tune-hyper-parameter)
- [Run Training](#Run-training)
- [Inspect Training Run](#Inspect-training-run)
- [Create an inference model](#Create-an-inference-model)
- [Test it out](#Test-it-out)
## Introduction
[Back to top](#Contents)
This notebook details the process of performing a basic computer vision image classification example using the Deep Learning Impact functionality within Watson Machine Learning Accelerator.
Please visit [Watson Machine Learning Accelerator Learning Path](https://developer.ibm.com/series/learning-path-get-started-with-watson-machine-learning-accelerator/) for further insight of Watson ML Accelerator
For this lab we will build a **custom image classifier** using WMLA
```
## Imports
import os,sys
def get_config(cfg_in=None):
    """Build the lab configuration dictionary.

    Parameters
    ----------
    cfg_in : dict, optional
        Overrides applied on top of the defaults. Keys derived below
        (``jpeginfo``, ``googliser``) are recomputed afterwards and
        therefore cannot be overridden.

    Returns
    -------
    dict
        The merged configuration.
    """
    # Fix: the original signature used a mutable default (cfg_in={}), which
    # is shared across calls; None + fallback is the safe equivalent.
    if cfg_in is None:
        cfg_in = {}
    cfg = {}
    cfg['userid'] = "b0p036aa"
    # location of git clone ....
    cfg['repo_dir'] = "/gpfs/home/s4s004/b0p036aa/wmla-learning-path"
    cfg['image_dir'] = "/gpfs/home/s4s004/b0p036aa/wmla-learning-path/images"
    cfg["classes"] = ["cars", "busses", "trucks"]
    cfg["num_images"] = {"train": 200, "valid": 20, "test": 20}
    cfg["d_partitions"] = ["train"]
    # overwrite configs if passed
    for (k, v) in cfg_in.items():
        npt("Overriding Config {}:{} with {}".format(k, cfg[k], v))
        cfg[k] = v
    # non overrideable ...
    cfg["jpeginfo"] = cfg['repo_dir'] + "/utils/jpeginfo"
    cfg["googliser"] = cfg['repo_dir'] + "/utils/googliser.sh"
    return cfg
# utility print function
def nprint(mystring):
    """Print *mystring* prefixed with the calling function's name,
    e.g. ``**caller** : message``."""
    caller = sys._getframe(1).f_code.co_name
    print(f"**{caller}** : {mystring}")

# Short alias used throughout the notebook.
npt = nprint
```
## Download dataset and model
Here you are going to define your own image classification project! We will use google to grab images, and will build custom models..
Now we are ready to go, lets get started and download the dataset from github!!!
First step is that we will change our working directory to your Spectrum Scale//GPFS Directory
## Define classes for our dataset
Here we are going to build our own dataset !! Think of 3 categories you would like to classify images. In this example, we will use
* busses
* trucks
* cars
We will use an open source tool called *googliser* to download our images from google images.
For a Covid-19 based example you could make your classes something like
* "people wearing masks"
* "people posing street"
```
#################################################################################################
# @@ Students : Customize this cell with your custom classes for image classification
################################################################################################
# Overrides for lab
mycfg = {
'repo_dir':"/gpfs/home/s4s004/vanstee/2020-05-wmla/wmla-learning-path",
'image_dir':"/gpfs/home/s4s004/vanstee/2020-05-wmla/images",
"classes":["people wearing masks","people posing street","people skateboarding","people on bikes"], ## <<- CLASS Enter your search terms here
"d_partitions":["train"],
}
cfg=get_config(mycfg)
# Helpers to make directories
def class_folder_name(base, d_part, cls):
    """Build the folder path <base>/<partition>/<class>, with spaces in
    the class name converted to underscores."""
    safe_cls = cls.replace(" ", "_")
    return "/".join((base, d_part, safe_cls))
def makeDirIfNotExist(directory):
    """Create *directory* (including parents) unless it already exists,
    logging what happened either way."""
    if os.path.exists(directory):
        npt("Directory {} already exists .. ".format(directory))
    else:
        npt("Making directory {}".format(directory))
        os.makedirs(directory)
# Build directory hierarchy
# [train|valid|test ]
# -----------------> [class1 | class2 | class...]
for d_part in cfg["d_partitions"] :
for cls in cfg["classes"] :
directory=class_folder_name(cfg['image_dir'],d_part,cls)
makeDirIfNotExist(directory)
# install googliser
def install_googliser():
    """Clone the googliser image-download tool into the repo directory (if
    it is not already there) and return the path to its googliser.sh
    entry script. Notebook-only: uses an IPython ``!`` shell escape."""
    googliser_directory = cfg['repo_dir']+"/googliser"
    if not os.path.exists(googliser_directory):
        npt("Installing Googliser here : {} ".format(googliser_directory))
        # chdir so the clone lands next to the lab repo contents
        os.chdir(cfg['repo_dir'])
        # IPython shell escape -- runs git in the notebook kernel's shell
        !git clone https://github.com/teracow/googliser
    else :
        npt("Googliser already installed here : {} ".format(googliser_directory))
    googliser = cfg['repo_dir']+"/googliser/googliser.sh"
    return googliser
googliser = install_googliser()
!ls {googliser}
# The code below will download files to train folder only to avoid duplicate downloads.
# We then move a few files over. This can be done manually or programatically. For our example
# we will let FastAI do the work for us!
def download_images(cfg):
    """Download images from Google for every (partition, class) pair in *cfg*.

    Invokes the googliser shell script (module-level ``googliser`` path)
    through an IPython ``!`` shell escape; images land in the per-class
    folders created earlier. Counts come from ``cfg['num_images']``.
    """
    utility_dir = cfg['repo_dir']
    for d_p in cfg["d_partitions"] : # train only for now ..
        for cls in cfg["classes"] :
            current_dir =class_folder_name(cfg['image_dir'],d_p,cls)
            #os.chdir(current_dir)
            os.chdir(utility_dir)
            # Build the googliser command line; --upper/--lower sizes are in bytes.
            command = googliser + \
            " --o {}".format(current_dir) +\
            " --phrase \"{}\"".format(cls) + \
            " --parallel 50 --upper-size 500000 --lower-size 2000 " + \
            " -n {}".format(cfg['num_images'][d_p]) + \
            " --format jpg --timeout 15 --safesearch-off "
            npt(command)
            # IPython shell escape: actually run the download (notebook-only syntax).
            !{command}
    npt("Downloads complete!")
download_images(cfg)
# clean with jpeginfo!
def clean_up_bad_jpegs(cfg,ext_list):
    """Find and delete corrupt JPEG files under cfg['image_dir'].

    For each extension, pipes matching files through jpeginfo and removes
    those whose check output contains ERROR. Notebook-only: executes the
    shell pipelines via IPython ``!`` escapes.
    """
    for extension in ext_list:
        os.chdir(cfg['image_dir'])
        nprint("Search for Error files in {}......".format(cfg['image_dir']))
        # handle both jpg //jpeg
        # NOTE: the "{}" after -c is the xargs -i placeholder, not a Python
        # format field -- only the first two {} are filled by .format().
        command = "find . -name \"*.{}\"".format(extension) + \
        " | xargs -i {}".format(cfg["jpeginfo"]) + \
        " -c {} | grep ERROR"
        nprint("Running command : {}".format(command))
        # First pass: just list the corrupt files.
        !{command}
        nprint("Removing any error files listed above")
        # Second pass: same pipeline, extended to rm each offending file.
        command = command + ' | cut -d " " -f1 | xargs -i rm {} '
        nprint("Running command : {}".format(command))
        !{command}
        nprint("Done")
def remove_non_jpg(cfg, ext_list):
    """Print a ``find`` command for each non-jpg extension in *ext_list*.

    NOTE(review): despite its name, this function only prints the command --
    nothing is executed or removed (the execution step appears to be
    missing; confirm intent before adding it). The original also built the
    pattern as a plain string literal, so "*.{extension}" was printed
    verbatim; fixed here with an f-string so the actual extension is
    substituted.

    Parameters
    ----------
    cfg : dict
        Lab configuration (currently unused here).
    ext_list : list of str
        File extensions to search for, e.g. ["png", "webpm"].
    """
    for extension in ext_list:
        command = f"find . -name \"*.{extension}\""
        nprint(command)
clean_up_bad_jpegs(cfg,["jpg","jpeg"])
remove_non_jpg(cfg,["png","webpm"])
cd ../test
testing_path = %pwd
```
#### Copy the Dataset Training and Testing folder
```
print ('training_path: ' + training_path)
print ('testing_path:' + testing_path)
```
### Download model
```
cd ../..
!git clone https://us-south.git.cloud.ibm.com/ibmconductor-deep-learning-impact/dli-1.2.3-tensorflow-samples.git
cd dli-1.2.3-tensorflow-samples/tensorflow-1.13.1/cifar10
model_path = %pwd
print ('model_path: '+ model_path)
```
## Import Dataset
<a id='Import-dataset'></a>
[Back to top](#Contents)
**Data Scientist could bring in their dataset and transform data set as common output format in Watson ML Accelerator. In this scenario raw images are converted into TensorflowRecord format.**
1. Let's switch back to the browser: https://IP_address:8443/platform
2. At the top Left select **Workload** > **Spark** > **Deep Learning**
3. Select the **Datasets** tab, and click **New**
4. Retrieve the dataset training_path and testing_path
```
print ('training_path: ' + training_path)
print ('testing_path:' + testing_path)
```
5. Click **Images for Object Classification**. When presented with a dialog box, provide a unique name (let's use "Cifar10"!) and select TFRecords for 'Dataset stores images in', then set the "Training folder" and "Testing folder" fields to the folders that contain the images obtained in the previous step ("**/tmp/CIFAR-10-images/train**" and "**/tmp/CIFAR-10-images/test**"). The other fields are fine to use with the default settings. When you're ready, click Create.
<br>

```
### Remove dataset from the file system.
### Before proceeding to this step please ensure the Import Dataset is in FINISHED state
!rm -rf /tmp/CIFAR-10-images
!rm /tmp/CIFAR10-images.zip
```
## Build the model
<a id='Build-the-model'></a>
[Back to top](#Contents)
1. Select the Models tab and click **New** > **Add Location**
2. Retrieve the model path
```
print ('model_path: '+ model_path)
```
3. When presented with a dialog box, enter the following attributes:

<br>
4. Select the **Tensorflow-cifar10** and click **Next**.
5. When presented with a dialog box, ensure that the Training engine is set to singlenode and that the data set points to the one you just created

<br>
6. Set the following parameters and click **Add**

<br>
7. The model is now ready to be trained.
```
## Clean up Model
### Before proceeding to this step please ensure the Model Creation is in FINISHED state
!rm -rf /tmp/dli-1.2.3-tensorflow-samples
```
## Tune Hyper-parameter
**Watson ML Accelerator automates the search for optimal hyperparameters by running tuning jobs in parallel with four out-of-the-box search algorithms — Random Search, Bayesian, TPE, and Hyperband — prior to the commencement of the training process.**
<a id='Tune-hyper-parameter'></a>
[Back to top](#Contents)
1. You could search optimal hyperparameter by leveraging automated Hyper-parameter Tuning.
1. Back at the **Models** tab, **click** on the model
1. Navigate from the **Overview panel** to the **Hyperparameter Tuning** panel
1. Click **New**
1. When presented with a dialog box, enter following value and click **Start Tuning**


1. Under the **Hyperparameter Tuning** panel, click on the hyperparameter search job

1. Navigate from the **Input panel** to the **Progress panel** and **Best panel** to review the optimal set of hyperparameter


## Run Training
<a id='Run-training'></a>
[Back to top](#Contents)
1. Back at the **Models** tab, select the model you created in previous step and click **Train**
1. When presented with a dialog box, keep default parameter and click **Start Training**

## Inspect Training Run
<a id='Inspect-training-run'></a>
[Back to top](#Contents)
**Spectrum Deep Learning Impact Insight offers Data Scientist the visualization to monitor training metric including loss rate and accuracy as epochs continue to execute. With this insight Data Scientist could decide to terminate the model training if there is no further gain in accuracy and no further drop in loss rate.**
1. From the **Train** submenu of the **Models** tab, select the model that is training by clicking the link.
1. Navigate from the **Overview panel** to the **Training** panel, and click the most recent link. You can watch as the results roll in.

## Create an inference model
**You are now ready to validate your training result by deploying your trained model as an inference service.
You can submit inference requests to the inference REST API endpoint.**
<a id='Create-an-inference-model'></a>
[Back to top](#Contents)
1. From the Training view, click Create Inference Model.

1. This creates a new model in the Models tab. You can view it by going to the Inference submenu.

## Test it out
<a id='Test-it-out'></a>
[Back to top](#Contents)
1. Download [inference test image](https://github.com/IBM/wmla-assets/raw/master/WMLA-learning-journey/image-classification-with-WMLA-UI/Shared-images/car.jpg) to your laptop
1. Go back to the Models tab, select the new inference model, and click Test. At the new Testing overview screen, select New Test.

1. When presented with a dialog box, click **Choose File** to load the inference test image. Click **Start Test**

1. Wait for the test state to change from RUNNING to FINISHED. Click the link to view the results of the test.

1. As you can see, the images are available as a thumbnail preview along with their classified label and probability.

#### This is version 1.0 and its content is copyright of IBM. All rights reserved.
| github_jupyter |
```
import pandas as pd
import scipy as sp
from scipy.sparse import diags
import numpy as np
from numpy import linalg as LA
import sys
import matplotlib.pyplot as plt
#importing seaborn for plotting
import seaborn as sns
#for plotting purposes
%pylab inline
sns.set_style('ticks')
sns.set_context('paper')
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
import matplotlib as mpl
from scipy.signal import find_peaks
# mpl.rcParams
mpl.rcParams['axes.labelsize'] = 14
mpl.rcParams['axes.titlesize'] = 16
mpl.rcParams['xtick.labelsize'] = 12
mpl.rcParams['ytick.labelsize'] = 12
mpl.rcParams['legend.fontsize'] = 12
mpl.rcParams['figure.figsize'] = [8, 16/3]
```
### converting ladder to nts
```
ls
filename = 'Eukaryote Total RNA Nano_2020-10-21_16-34-50'
# Containers for multi-file processing (only the single ladder file is used below).
ladder_dict = {}
ladder_times = {}
ladder_values = {}
# Bioanalyzer CSV export: 17 metadata rows before the header, one footer row at the end.
ladder_df = pd.read_csv(filename+'_Ladder.csv', skiprows=17)[:-1]
ladder_time = np.array(ladder_df['Time'].astype(float))
ladder_value = np.array(ladder_df['Value'].astype(float))
# Peak detection thresholds tuned for this trace -- TODO confirm for other chips/runs.
peaks,_ = find_peaks(ladder_value, height=7.5, distance=15)
# Keep the first six detected peaks (one per expected ladder fragment).
ladder_dict = list(peaks[:6])
# Plot the ladder trace with the detected peaks marked (pylab names from %pylab inline).
plot(ladder_time, ladder_value, label='ladder trace')
plot(ladder_time[peaks], ladder_value[peaks], 'x', label='peaks', markersize=15)
title('Ladder Peaks: '+filename, fontsize=16)
legend(fontsize=14)
xlabel('Time', fontsize=14)
ylabel('FU', fontsize=14)
xticks(fontsize=14)
yticks(fontsize=14)
tight_layout()
savefig(filename+'_ladder.png', dpi=300)
# Migration times of the six ladder peaks.
peak_times = ladder_time[ladder_dict]
# peak_times
# Known ladder fragment sizes -- presumably in nucleotides (despite the "fu" name);
# verify against the kit's ladder specification.
peak_fu = np.array([25, 200, 500, 1000, 2000, 4000])
# 4th-degree polynomial mapping migration time -> fragment size (nt).
ladder_fit = np.polyfit(x=peak_times, y = peak_fu, deg = 4)
lf = np.poly1d(ladder_fit)
ladder_nts = lf
# Sanity-check plot: fitted sizes at the peak times vs the known ladder sizes.
plot(peak_fu, lf(peak_times), 'o',label='calculated nts vs. ladder nts')
plot(peak_fu, peak_fu,label='perfect correlation')
ylabel('Calculated nts (from time)', fontsize=14)
xlabel('Ladder nts', fontsize=14)
yticks(fontsize=12)
xticks(fontsize=12)
title('Ladder polynomial fit: '+filename, fontsize=16)
legend(fontsize=12)
tight_layout()
savefig(filename+'_ladder_fit.png', dpi=300)
ls
```
### converting samples to nucleotides as well
```
samples = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12']
num_samples = len(samples)
# Subplot grid layout (used by the commented-out plotting code below).
ncolumns=3
nrows = math.ceil(num_samples/ncolumns)
figure(figsize=(ncolumns*6, nrows*4+3))
samples_dict = {}
for i, sample in enumerate(samples):
    # Same CSV layout as the ladder file: 17 metadata rows, one footer row.
    sample_df = pd.read_csv(filename+'_Sample'+sample+'.csv', skiprows=17)[:-1]
    samples_dict[sample] = sample_df
    times = np.array(sample_df['Time'].astype(float))
    # Convert migration time to nucleotides via the ladder polynomial fit above.
    sample_df['Nucleotides'] = ladder_nts(times)
    # Write out one converted CSV per sample.
    sample_df.to_csv(filename+'_'+sample+'_nts.csv')
# for i, sample in enumerate(ladder_nts):
# nt_array = ladder_nts[sample](ladder_times[sample])
# peaks_store = []
# # heights_store = []
# subplot(nrows,ncolumns,i+1)
# #read in dataframe for per sample here
# sample_df = samples_dict[sample]
# timepoints = [0,0.5,1.0,1.5,2,3,4,5,18,24]
# for i,time in enumerate(timepoints):
# data = np.array(sample_df[sample_df['Timepoint']==time]['Value'])
# peaks, _ = find_peaks(data, distance=50, height=2.5)
# # peaks_store.append(peaks)
# heights_store.append(data[peaks])
# plot(nt_array[:int(len(nt_array)/2)],data[:int(len(nt_array)/2)], label=time)
# plot(np.array(nt_array)[peaks], data[peaks], 'x',markersize=6)
# ylabel('Flourescence Units', fontsize=14)
# xlabel('Nucleotides', fontsize=14)
# legend()
# title(sample, fontweight='bold')
# tight_layout()
# for sample in samples:
```
| github_jupyter |
```
%reload_ext autoreload
%autoreload 2
%matplotlib inline
import IPython
import matplotlib.pyplot as plt
import numpy as np
import soundfile as sf
import time
from tqdm import tqdm
import tensorflow as tf
from nara_wpe.tf_wpe import wpe
from nara_wpe.tf_wpe import online_wpe_step, get_power_online
from nara_wpe.utils import stft, istft, get_stft_center_frequencies
from nara_wpe import project_root
stft_options = dict(
size=512,
shift=128,
window_length=None,
fading=True,
pad=True,
symmetric_window=False
)
```
# Example with real audio recordings
The iterations are dropped in contrast to the offline version. To use past observations the correlation matrix and the correlation vector are calculated recursively with a decaying window. $\alpha$ is the decay factor.
### Setup
```
channels = 8
sampling_rate = 16000
delay = 3
alpha=0.99
taps = 10
frequency_bins = stft_options['size'] // 2 + 1
```
### Audio data
```
file_template = 'AMI_WSJ20-Array1-{}_T10c0201.wav'
signal_list = [
sf.read(str(project_root / 'data' / file_template.format(d + 1)))[0]
for d in range(channels)
]
y = np.stack(signal_list, axis=0)
IPython.display.Audio(y[0], rate=sampling_rate)
```
### Online buffer
For simplicity the STFT is performed before providing the frames.
Shape: (frames, frequency bins, channels)
frames: K+delay+1
```
Y = stft(y, **stft_options).transpose(1, 2, 0)
T, _, _ = Y.shape
def aquire_framebuffer():
    """Yield sliding windows of taps+delay+1 consecutive STFT frames from Y.

    Each yielded array has shape (taps+delay+1, frequency_bins, channels),
    matching the Y placeholder fed to online_wpe_step.

    NOTE(review): the original seeded the buffer with only taps+delay
    frames and started appending at frame taps+delay+1, so frame
    taps+delay was silently skipped and the first windows were
    non-contiguous. Seeding with taps+delay+1 frames and yielding before
    appending fixes this (matching the upstream nara_wpe example).
    """
    buffer = list(Y[:taps + delay + 1, :, :])
    for t in range(taps + delay + 1, T):
        yield np.array(buffer)
        buffer.append(Y[t, :, :])
        buffer.pop(0)
```
### Non-iterative frame online approach
A frame online example requires that certain state variables be kept from frame to frame. That is the inverse correlation matrix $\text{R}_{t, f}^{-1}$, which is stored in Q and initialized with an identity matrix, as well as the filter coefficient matrix that is stored in G and initialized with zeros.
Again for simplicity the ISTFT is applied in Numpy afterwards.
```
# Dereverberated frames collected per step.
Z_list = []
# Q: inverse correlation matrix R^{-1}, one per frequency bin, initialised to identity.
Q = np.stack([np.identity(channels * taps) for a in range(frequency_bins)])
# G: WPE filter coefficient matrix per frequency bin, initialised to zeros.
G = np.zeros((frequency_bins, channels * taps, channels))
with tf.Session() as session:
    # TF1 graph mode: placeholders for one frame buffer plus the recursive state.
    Y_tf = tf.placeholder(tf.complex128, shape=(taps + delay + 1, frequency_bins, channels))
    Q_tf = tf.placeholder(tf.complex128, shape=(frequency_bins, channels * taps, channels * taps))
    G_tf = tf.placeholder(tf.complex128, shape=(frequency_bins, channels * taps, channels))
    # One online WPE update; get_power_online expects (frequency, frame, channel) order.
    results = online_wpe_step(Y_tf, get_power_online(tf.transpose(Y_tf, (1, 0, 2))), Q_tf, G_tf, alpha=alpha, taps=taps, delay=delay)
    for Y_step in tqdm(aquire_framebuffer()):
        # Feed the previous step's state (Q, G) back in on every iteration.
        feed_dict = {Y_tf: Y_step, Q_tf: Q, G_tf: G}
        Z, Q, G = session.run(results, feed_dict)
        Z_list.append(Z)
Z_stacked = np.stack(Z_list)
# Back to the time domain; the ISTFT runs in NumPy for simplicity.
z = istft(np.asarray(Z_stacked).transpose(2, 0, 1), size=stft_options['size'], shift=stft_options['shift'])
IPython.display.Audio(z[0], rate=sampling_rate)
```
# Power spectrum
Before and after applying WPE.
```
fig, [ax1, ax2] = plt.subplots(1, 2, figsize=(20, 8))
im1 = ax1.imshow(20 * np.log10(np.abs(Y[200:400, :, 0])).T, origin='lower')
ax1.set_xlabel('')
_ = ax1.set_title('reverberated')
im2 = ax2.imshow(20 * np.log10(np.abs(Z_stacked[200:400, :, 0])).T, origin='lower')
_ = ax2.set_title('dereverberated')
cb = fig.colorbar(im1)
```
| github_jupyter |

[](https://colab.research.google.com/github/JohnSnowLabs/spark-nlp-workshop/blob/master/tutorials/streamlit_notebooks/healthcare/NER_DIAG_PROC.ipynb)
# **Detect diagnosis and procedures**
To run this yourself, you will need to upload your license keys to the notebook. Just Run The Cell Below in order to do that. Also You can open the file explorer on the left side of the screen and upload `license_keys.json` to the folder that opens.
Otherwise, you can look at the example outputs at the bottom of the notebook.
## 1. Colab Setup
Import license keys
```
import os
import json
from google.colab import files
license_keys = files.upload()
with open(list(license_keys.keys())[0]) as f:
license_keys = json.load(f)
sparknlp_version = license_keys["PUBLIC_VERSION"]
jsl_version = license_keys["JSL_VERSION"]
print ('SparkNLP Version:', sparknlp_version)
print ('SparkNLP-JSL Version:', jsl_version)
```
Install dependencies
```
%%capture
for k,v in license_keys.items():
%set_env $k=$v
!wget https://raw.githubusercontent.com/JohnSnowLabs/spark-nlp-workshop/master/jsl_colab_setup.sh
!bash jsl_colab_setup.sh
# Install Spark NLP Display for visualization
!pip install --ignore-installed spark-nlp-display
```
Import dependencies into Python and start the Spark session
```
import pandas as pd
from pyspark.ml import Pipeline
from pyspark.sql import SparkSession
import pyspark.sql.functions as F
import sparknlp
from sparknlp.annotator import *
from sparknlp_jsl.annotator import *
from sparknlp.base import *
import sparknlp_jsl
spark = sparknlp_jsl.start(license_keys['SECRET'])
# manually start session
# params = {"spark.driver.memory" : "16G",
# "spark.kryoserializer.buffer.max" : "2000M",
# "spark.driver.maxResultSize" : "2000M"}
# spark = sparknlp_jsl.start(license_keys['SECRET'],params=params)
```
## 2. Select the NER model and construct the pipeline
Select the NER model - Diagnosis & Procedures models: **ner_diseases, ner_clinical, ner_jsl**
For more details: https://github.com/JohnSnowLabs/spark-nlp-models#pretrained-models---spark-nlp-for-healthcare
```
# You can change this to the model you want to use and re-run cells below.
# Diagnosis & Procedures models: ner_diseases, ner_clinical, ner_jsl
MODEL_NAME = "ner_diseases"
```
Create the pipeline
```
# Stage 1: wrap the raw text column into Spark NLP's document annotation.
document_assembler = DocumentAssembler() \
    .setInputCol('text')\
    .setOutputCol('document')
# Stage 2: split each document into sentences.
sentence_detector = SentenceDetector() \
    .setInputCols(['document'])\
    .setOutputCol('sentence')
# Stage 3: tokenize each sentence.
tokenizer = Tokenizer()\
    .setInputCols(['sentence']) \
    .setOutputCol('token')
# Stage 4: clinical word embeddings -- the input the clinical NER models were trained on.
word_embeddings = WordEmbeddingsModel.pretrained('embeddings_clinical', 'en', 'clinical/models') \
    .setInputCols(['sentence', 'token']) \
    .setOutputCol('embeddings')
# Stage 5: the selected pretrained NER model (MODEL_NAME is set in an earlier cell).
clinical_ner = MedicalNerModel.pretrained(MODEL_NAME, "en", "clinical/models") \
    .setInputCols(["sentence", "token", "embeddings"])\
    .setOutputCol("ner")
# Stage 6: merge per-token IOB tags into complete entity chunks.
ner_converter = NerConverter()\
    .setInputCols(['sentence', 'token', 'ner']) \
    .setOutputCol('ner_chunk')
# Assemble the stages, in execution order, into a single Spark ML pipeline.
nlp_pipeline = Pipeline(stages=[
    document_assembler,
    sentence_detector,
    tokenizer,
    word_embeddings,
    clinical_ner,
    ner_converter])
```
## 3. Create example inputs
```
# Enter examples as strings in this array
input_list = [
"""FINDINGS: The patient was found upon excision of the cyst that it contained a large Prolene suture, which is multiply knotted as it always is; beneath this was a very small incisional hernia, the hernia cavity, which contained omentum; the hernia was easily repaired.
DESCRIPTION OF PROCEDURE: The patient was identified, then taken into the operating room, where after induction of an LMA anesthetic, his abdomen was prepped with Betadine solution and draped in sterile fashion. The puncta of the wound lesion was infiltrated with methylene blue and peroxide. The lesion was excised and the existing scar was excised using an ellipse and using a tenotomy scissors, the cyst was excised down to its base. In doing so, we identified a large Prolene suture within the wound and followed this cyst down to its base at which time we found that it contained omentum and was in fact overlying a small incisional hernia. The cyst was removed in its entirety, divided from the omentum using a Metzenbaum and tying with 2-0 silk ties. The hernia repair was undertaken with interrupted 0 Vicryl suture with simple sutures. The wound was then irrigated and closed with 3-0 Vicryl subcutaneous and 4-0 Vicryl subcuticular and Steri-Strips. Patient tolerated the procedure well. Dressings were applied and he was taken to recovery room in stable condition. """
]
```
## 4. Use the pipeline to create outputs
```
empty_df = spark.createDataFrame([['']]).toDF('text')
pipeline_model = nlp_pipeline.fit(empty_df)
df = spark.createDataFrame(pd.DataFrame({'text': input_list}))
result = pipeline_model.transform(df)
```
## 5. Visualize results
```
from sparknlp_display import NerVisualizer
NerVisualizer().display(
result = result.collect()[0],
label_col = 'ner_chunk',
document_col = 'document'
)
```
Visualize outputs as data frame
```
exploded = F.explode(F.arrays_zip('ner_chunk.result', 'ner_chunk.metadata'))
select_expression_0 = F.expr("cols['0']").alias("chunk")
select_expression_1 = F.expr("cols['1']['entity']").alias("ner_label")
result.select(exploded.alias("cols")) \
.select(select_expression_0, select_expression_1).show(truncate=False)
result = result.toPandas()
```
| github_jupyter |
## Rover Lab Notebook
This notebook contains the functions from the lesson and provides the scaffolding you need to test out your mapping methods. The steps you need to complete in this notebook for the project are the following:
* First just run each of the cells in the notebook, examine the code and the results of each.
**Note: For the online lab, data has been collected and provided for you. If you would like to try locally please do so! Please continue instructions from the continue point.**
* Run the simulator in "Training Mode" and record some data. Note: the simulator may crash if you try to record a large (longer than a few minutes) dataset, but you don't need a ton of data, just some example images to work with.
* Change the data directory path (2 cells below) to be the directory where you saved data
* Test out the functions provided on your data
**Continue Point**
* Write new functions (or modify existing ones) to report and map out detections of obstacles and rock samples (yellow rocks)
* Populate the `process_image()` function with the appropriate steps/functions to go from a raw image to a worldmap.
* Run the cell that calls `process_image()` using `moviepy` functions to create video output
* Once you have mapping working, move on to modifying `perception.py` and `decision.py` in the project to allow your rover to navigate and map in autonomous mode!
**Note: If, at any point, you encounter frozen display windows or other confounding issues, you can always start again with a clean slate by going to the "Kernel" menu above and selecting "Restart & Clear Output".**
**Run the next cell to get code highlighting in the markdown cells.**
```
%%HTML
<style> code {background-color : orange !important;} </style>
%matplotlib inline
#%matplotlib qt # Choose %matplotlib qt to plot to an interactive window (note it may show up behind your browser)
# Make some of the relevant imports
import cv2 # OpenCV for perspective transform
import numpy as np
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import scipy.misc # For saving images as needed
import glob # For reading in a list of images from a folder
```
## Quick Look at the Data
There's some example data provided in the `test_dataset` folder. This basic dataset is enough to get you up and running but if you want to hone your methods more carefully you should record some data of your own to sample various scenarios in the simulator.
Next, read in and display a random image from the `test_dataset` folder
```
path = './Sim Data/IMG/*'
img_list = glob.glob(path)
# Grab a random image and display it
idx = np.random.randint(0, len(img_list)-1)
image = mpimg.imread(img_list[idx])
plt.imshow(image)
```
## Calibration Data
Read in and display example grid and rock sample calibration images. You'll use the grid for perspective transform and the rock image for creating a new color selection that identifies these samples of interest.
```
# In the simulator you can toggle on a grid on the ground for calibration
# You can also toggle on the rock samples with the 0 (zero) key.
# Here's an example of the grid and one of the rocks
example_grid = './calibration_images/example_grid1.jpg'
example_rock = './calibration_images/example_rock2.jpg'
grid_img = mpimg.imread(example_grid)
rock_img = mpimg.imread(example_rock)
fig = plt.figure(figsize=(12,3))
plt.subplot(121)
plt.imshow(grid_img)
plt.subplot(122)
plt.imshow(rock_img)
```
## Perspective Transform
Define the perspective transform function from the lesson and test it on an image.
```
# Define a function to perform a perspective transform
# I've used the example grid image above to choose source points for the
# grid cell in front of the rover (each grid cell is 1 square meter in the sim)
# Define a function to perform a perspective transform
def perspect_transform(img, src, dst):
    """Warp `img` so that the quadrilateral `src` maps onto `dst`.

    Computes the 3x3 perspective transform from the four `src` points to
    the four `dst` points and applies it, keeping the input image's
    width and height for the output.
    """
    transform = cv2.getPerspectiveTransform(src, dst)
    height, width = img.shape[0], img.shape[1]
    return cv2.warpPerspective(img, transform, (width, height))
# Define calibration box in source (actual) and destination (desired) coordinates
# These source and destination points are defined to warp the image
# to a grid where each 10x10 pixel square represents 1 square meter
# The destination box will be 2*dst_size on each side
dst_size = 5
# Set a bottom offset to account for the fact that the bottom of the image
# is not the position of the rover but a bit in front of it
# this is just a rough guess, feel free to change it!
bottom_offset = 6
source = np.float32([[14, 140], [301 ,140],[200, 96], [118, 96]])
destination = np.float32([[image.shape[1]/2 - dst_size, image.shape[0] - bottom_offset],
[image.shape[1]/2 + dst_size, image.shape[0] - bottom_offset],
[image.shape[1]/2 + dst_size, image.shape[0] - 2*dst_size - bottom_offset],
[image.shape[1]/2 - dst_size, image.shape[0] - 2*dst_size - bottom_offset],
])
warped = perspect_transform(grid_img, source, destination)
plt.imshow(warped)
# rock_warped = perspect_transform(rock_img, source, destination)
# plt.imshow(rock_warped)
#scipy.misc.imsave('../output/warped_example.jpg', warped)
```
## Color Thresholding
Define the color thresholding function from the lesson and apply it to the warped image
**TODO:** Ultimately, you want your map to not just include navigable terrain but also obstacles and the positions of the rock samples you're searching for. Modify this function or write a new function that returns the pixel locations of obstacles (areas below the threshold) and rock samples (yellow rocks in calibration images), such that you can map these areas into world coordinates as well.
**Suggestion:** Think about imposing a lower and upper boundary in your color selection to be more specific about choosing colors. Feel free to get creative and even bring in functions from other libraries. Here's an example of [color selection](http://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_imgproc/py_colorspaces/py_colorspaces.html) using OpenCV.
**Beware:** if you start manipulating images with OpenCV, keep in mind that it defaults to `BGR` instead of `RGB` color space when reading/writing images, so things can get confusing.
```
# Identify pixels above the threshold
# Threshold of RGB > 160 does a nice job of identifying ground pixels only
def color_thresh(img, rgb_thresh=(160, 160, 160)):
    """Binary-threshold an RGB image.

    An output pixel is 1 where every channel value of the input pixel is
    strictly greater than the corresponding entry of `rgb_thresh`, and 0
    everywhere else.  RGB > (160, 160, 160) identifies navigable ground
    well in the simulator imagery.
    """
    red_ok = img[:, :, 0] > rgb_thresh[0]
    green_ok = img[:, :, 1] > rgb_thresh[1]
    blue_ok = img[:, :, 2] > rgb_thresh[2]
    # Single-channel zero image of the same height/width as the input.
    binary = np.zeros_like(img[:, :, 0])
    binary[red_ok & green_ok & blue_ok] = 1
    return binary
def rock_thresh(img):
    """Threshold an RGB image in HLS space to pick out rock-sample pixels.

    Returns:
        res: single-channel mask, 1 where the pixel passes both the
            lightness (L) and saturation (S) band tests, else 0.
        ypos, xpos: row and column indices of the nonzero mask pixels.
    """
    # Convert to Hue / Lightness / Saturation space.
    hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
    H = hls[:,:,0]
    L = hls[:,:,1]
    S = hls[:,:,2]
    # Per-channel band limits, applied as (lower, upper]: value must be
    # strictly above the lower bound and at most the upper bound.
    thresh_H = (20, 255)
    thresh_L = (64, 93)
    thresh_S = (186, 255)
    binary_H = np.zeros_like(H)
    binary_H[(H > thresh_H[0]) & (H <= thresh_H[1])] = 1
    binary_L = np.zeros_like(L)
    binary_L[(L > thresh_L[0]) & (L <= thresh_L[1])] = 1
    binary_S = np.zeros_like(S)
    binary_S[(S > thresh_S[0]) & (S <= thresh_S[1])] = 1
    # NOTE(review): binary_H is computed but never used below -- confirm
    # whether the hue mask was meant to be ANDed into `res` as well.
    res = binary_L & binary_S
    ypos, xpos = res.nonzero()
    return res, ypos, xpos
res, ypos, xpos = rock_thresh(rock_img)
threshed = color_thresh(warped)
print(xpos.min(), xpos.max())
print(ypos.min(), ypos.max())
xl = (xpos.max() - xpos.min())
yl = (ypos.max() - ypos.min())
if xl >= yl:
major = xl
minor = yl
else:
major = yl
minor = xl
xcenter = xpos.min() + ((xl)//2)
ycenter = ypos.min() + ((yl)//2)
print(xcenter, ycenter)
cv2.ellipse(res,(xcenter,ycenter),(major//2, minor//2),0,0,360,255,-1)
# plt.imshow(threshed, cmap='gray')
plt.imshow(res, cmap='gray')
#scipy.misc.imsave('../output/warped_threshed.jpg', threshed*255)
plt.imshow(rock_img, cmap='gray')
```
## Coordinate Transformations
Define the functions used to do coordinate transforms and apply them to an image.
```
# Define a function to convert from image coords to rover coords
def rover_coords(binary_img):
    """Convert a binary image's nonzero pixels to rover-centric coordinates.

    The rover is taken to sit at the bottom-centre of the image, with x
    pointing up the image (forward) and y pointing left.

    Args:
        binary_img: 2-D array; nonzero entries are the pixels of interest.

    Returns:
        (x_pixel, y_pixel): float arrays of the rover-frame coordinates.
    """
    # Identify nonzero pixels
    ypos, xpos = binary_img.nonzero()
    # Calculate pixel positions with reference to the rover position being
    # at the center bottom of the image.
    # Bug fix: the `np.float` alias was removed in NumPy 1.24; the
    # builtin `float` is the documented replacement and is equivalent.
    x_pixel = -(ypos - binary_img.shape[0]).astype(float)
    y_pixel = -(xpos - binary_img.shape[1] / 2).astype(float)
    return x_pixel, y_pixel
# Define a function to convert to radial coords in rover space
def to_polar_coords(x_pixel, y_pixel):
    """Convert rover-frame cartesian pixel coords to polar form.

    Returns (dist, angles): the distance of each pixel from the rover and
    its angle from the forward (x) axis, positive toward positive y.
    """
    squared_radius = x_pixel ** 2 + y_pixel ** 2
    dist = np.sqrt(squared_radius)
    angles = np.arctan2(y_pixel, x_pixel)
    return dist, angles
# Define a function to map rover space pixels to world space
def rotate_pix(xpix, ypix, yaw):
    """Rotate rover-frame pixel coordinates by the rover's yaw.

    Args:
        xpix, ypix: rover-frame coordinates.
        yaw: heading in degrees, counter-clockwise positive.

    Returns:
        The rotated (x, y) coordinate arrays.
    """
    theta = np.deg2rad(yaw)
    cos_t = np.cos(theta)
    sin_t = np.sin(theta)
    rotated_x = xpix * cos_t - ypix * sin_t
    rotated_y = xpix * sin_t + ypix * cos_t
    return rotated_x, rotated_y
def translate_pix(xpix_rot, ypix_rot, xpos, ypos, scale):
    """Scale rotated rover-frame coords down to map resolution and shift
    them by the rover's world position (xpos, ypos)."""
    world_x = xpix_rot / scale + xpos
    world_y = ypix_rot / scale + ypos
    return world_x, world_y
# Define a function to apply rotation and translation (and clipping)
# Once you define the two functions above this function should work
def pix_to_world(xpix, ypix, xpos, ypos, yaw, world_size, scale):
    """Map rover-frame pixels into integer world-map coordinates.

    Applies yaw rotation, then translation/scaling to the rover's world
    position, then converts to int and clips to the world-map bounds.
    """
    rot_x, rot_y = rotate_pix(xpix, ypix, yaw)
    trans_x, trans_y = translate_pix(rot_x, rot_y, xpos, ypos, scale)
    upper = world_size - 1
    x_pix_world = np.clip(np.int_(trans_x), 0, upper)
    y_pix_world = np.clip(np.int_(trans_y), 0, upper)
    return x_pix_world, y_pix_world
# Grab another random image
idx = np.random.randint(0, len(img_list)-1)
image = mpimg.imread(img_list[idx])
warped = perspect_transform(image, source, destination)
threshed = color_thresh(warped)
# Calculate pixel values in rover-centric coords and distance/angle to all pixels
xpix, ypix = rover_coords(threshed)
dist, angles = to_polar_coords(xpix, ypix)
mean_dir = np.mean(angles)
# Do some plotting
fig = plt.figure(figsize=(12,9))
plt.subplot(221)
plt.imshow(image)
plt.subplot(222)
plt.imshow(warped)
plt.subplot(223)
plt.imshow(threshed, cmap='gray')
plt.subplot(224)
plt.plot(xpix, ypix, '.')
plt.ylim(-160, 160)
plt.xlim(0, 160)
arrow_length = 100
x_arrow = arrow_length * np.cos(mean_dir)
y_arrow = arrow_length * np.sin(mean_dir)
plt.arrow(0, 0, x_arrow, y_arrow, color='red', zorder=2, head_width=10, width=2)
```
## Read in saved data and ground truth map of the world
The next cell is all setup to read your saved data into a `pandas` dataframe. Here you'll also read in a "ground truth" map of the world, where white pixels (pixel value = 1) represent navigable terrain.
After that, we'll define a class to store telemetry data and pathnames to images. When you instantiate this class (`data = Databucket()`) you'll have a global variable called `data` that you can refer to for telemetry and map data within the `process_image()` function in the following cell.
```
# Import pandas and read in csv file as a dataframe
import pandas as pd
# Change this path to your data directory
df = pd.read_csv('./test_dataset/robot_log.csv')
csv_img_list = df["Path"].tolist() # Create list of image pathnames
# Read in ground truth map and create a 3-channel image with it:
# navigable terrain goes into the green channel only.
ground_truth = mpimg.imread('./calibration_images/map_bw.png')
# Bug fix: the `np.float` alias was removed in NumPy 1.24; the builtin
# `float` is the documented, equivalent replacement.
ground_truth_3d = np.dstack((ground_truth*0, ground_truth*255, ground_truth*0)).astype(float)
# Creating a class to be the data container
# Will read in saved data from csv file and populate this object
# Worldmap is instantiated as 200 x 200 grids corresponding
# to a 200m x 200m space (same size as the ground truth map: 200 x 200 pixels)
# This encompasses the full range of output position values in x and y from the sim
class Databucket():
    """Container for the saved telemetry run.

    Holds the image pathnames, rover telemetry (x, y, yaw) read from the
    module-level csv dataframe, a running frame index, the accumulated
    worldmap and the ground-truth map.  One instance is used as a global
    by `process_image()`.
    """
    def __init__(self):
        self.images = csv_img_list
        self.xpos = df["X_Position"].values
        self.ypos = df["Y_Position"].values
        self.yaw = df["Yaw"].values
        # Running frame index; -1 is a hack because moviepy (below)
        # seems to run one extra iteration.
        self.count = -1
        # 200x200 worldmap (1 pixel per metre).  Bug fix: `np.float`
        # was removed in NumPy 1.24; the builtin `float` is equivalent.
        self.worldmap = np.zeros((200, 200, 3)).astype(float)
        self.ground_truth = ground_truth_3d  # Ground truth worldmap
# Instantiate a Databucket().. this will be a global variable/object
# that you can refer to in the process_image() function below
data = Databucket()
```
---
## Write a function to process stored images
Modify the `process_image()` function below by adding in the perception step processes (functions defined above) to perform image analysis and mapping. The following cell is all set up to use this `process_image()` function in conjunction with the `moviepy` video processing package to create a video from the images you saved taking data in the simulator.
In short, you will be passing individual images into `process_image()` and building up an image called `output_image` that will be stored as one frame of video. You can make a mosaic of the various steps of your analysis process and add text as you like (example provided below).
To start with, you can simply run the next three cells to see what happens, but then go ahead and modify them such that the output video demonstrates your mapping process. Feel free to get creative!
```
# Define a function to pass stored images to
# reading rover position and yaw angle from csv file
# This function will be used by moviepy to create an output video
world_size = 200
scale = 10
def process_image(img):
    """Build one output video frame from a stored camera image.

    Warps the frame to a top-down view, thresholds navigable terrain,
    maps it into world coordinates using the telemetry in the global
    `data` Databucket, updates `data.worldmap`, and returns a mosaic
    image (camera view, warped view, and map overlay) for moviepy.

    Args:
        img: the RGB camera frame supplied by moviepy.

    Returns:
        output_image: the composed mosaic frame.
    """
    # Index into the telemetry arrays for this frame.
    # (data.count starts at -1 to absorb moviepy's extra first call.)
    count = data.count
    # 1) Source and destination points for the perspective transform.
    #    Bug fix: these must use the incoming frame `img` -- the original
    #    referenced the unrelated global `image` from an earlier cell.
    source = np.float32([[14, 140], [301, 140], [200, 96], [118, 96]])
    destination = np.float32([[img.shape[1]/2 - dst_size, img.shape[0] - bottom_offset],
                      [img.shape[1]/2 + dst_size, img.shape[0] - bottom_offset],
                      [img.shape[1]/2 + dst_size, img.shape[0] - 2*dst_size - bottom_offset],
                      [img.shape[1]/2 - dst_size, img.shape[0] - 2*dst_size - bottom_offset],
                      ])
    # 2) Perspective transform of the current frame.
    #    Bug fix: the original warped the static calibration image
    #    `grid_img` on every frame instead of the frame being processed.
    warped = perspect_transform(img, source, destination)
    # 3) Color threshold to identify navigable terrain.
    threshed = color_thresh(warped)
    # 4) Thresholded pixels in rover-centric coordinates.
    xpix, ypix = rover_coords(threshed)
    rover_xpos = data.xpos[count]
    rover_ypos = data.ypos[count]
    yaw = data.yaw[count]
    # 5) Rover-centric coords -> world coords.  world_size and scale are
    #    module-level constants; no `global` statement is needed to read
    #    them (the original's `global` declaration was a no-op).
    x_world, y_world = pix_to_world(xpix, ypix, rover_xpos, rover_ypos, yaw, world_size, scale)
    # 6) Update the worldmap: navigable terrain in the blue channel.
    data.worldmap[y_world, x_world, 2] += 1
    # 7) Build the mosaic frame.
    output_image = np.zeros((img.shape[0] + data.worldmap.shape[0], img.shape[1]*2, 3))
    # Original image in the upper-left corner.
    output_image[0:img.shape[0], 0:img.shape[1]] = img
    # Warped image in the upper-right corner.
    output_image[0:img.shape[0], img.shape[1]:] = warped
    # Overlay the accumulated worldmap on the ground-truth map.
    map_add = cv2.addWeighted(data.worldmap, 1, data.ground_truth, 0.5, 0)
    # Flip the overlay so the y-axis points upward in the display.
    output_image[img.shape[0]:, 0:data.worldmap.shape[1]] = np.flipud(map_add)
    cv2.putText(output_image,"Populate this image with your analyses to make a video!", (20, 20),
                cv2.FONT_HERSHEY_COMPLEX, 0.4, (255, 255, 255), 1)
    data.count += 1  # Keep track of the index in the Databucket()
    return output_image
```
## Make a video from processed image data
Use the [moviepy](https://zulko.github.io/moviepy/) library to process images and create a video.
```
# Import everything needed to edit/save/watch video clips
from moviepy.editor import VideoFileClip
from moviepy.editor import ImageSequenceClip
# Define pathname to save the output video
output = './output/test_mapping.mp4'
data = Databucket() # Re-initialize data in case you're running this cell multiple times
clip = ImageSequenceClip(data.images, fps=60) # Note: output video will be sped up because
# recording rate in simulator is fps=25
new_clip = clip.fl_image(process_image) #NOTE: this function expects color images!!
%time new_clip.write_videofile(output, audio=False)
```
### This next cell should function as an inline video player
If this fails to render the video, try running the following cell (alternative video rendering method). You can also simply have a look at the saved mp4 in your `/output` folder
```
output = './output/test_mapping.mp4'
from IPython.display import HTML
HTML("""
<video width="960" height="540" controls>
<source src="{0}">
</video>
""".format(output))
```
| github_jupyter |
# Prerequisites
Install Theano and Lasagne using the following commands:
```bash
pip install -r https://raw.githubusercontent.com/Lasagne/Lasagne/master/requirements.txt
pip install https://github.com/Lasagne/Lasagne/archive/master.zip
```
Working in a virtual environment is recommended.
# Data preparation
Current code allows to generate geodesic patches from a collection of shapes represented as triangular meshes.
To get started with the pre-processing:
```
git clone https://github.com/jonathanmasci/ShapeNet_data_preparation_toolbox.git
```
The usual processing pipeline is show in ```run_forrest_run.m```.
The usual processing pipeline is shown in ```run_forrest_run.m```.
## Prepared data
All it is required to train on the FAUST_registration dataset for this demo is available for download at
https://www.dropbox.com/s/aamd98nynkvbcop/EG16_tutorial.tar.bz2?dl=0
# ICNN Toolbox
```bash
git clone https://github.com/jonathanmasci/EG16_tutorial.git
```

```
import sys
import os
import numpy as np
import scipy.io
import time
import theano
import theano.tensor as T
import theano.sparse as Tsp
import lasagne as L
import lasagne.layers as LL
import lasagne.objectives as LO
from lasagne.layers.normalization import batch_norm
sys.path.append('..')
from icnn import aniso_utils_lasagne, dataset, snapshotter
```
## Data loading
```
base_path = '/home/shubham/Desktop/IndependentStudy/EG16_tutorial/dataset/FAUST_registrations/data/diam=200/'
# train_txt, test_txt, descs_path, patches_path, geods_path, labels_path, ...
# desc_field='desc', patch_field='M', geod_field='geods', label_field='labels', epoch_size=100
ds = dataset.ClassificationDatasetPatchesMinimal(
'FAUST_registrations_train.txt', 'FAUST_registrations_test.txt',
os.path.join(base_path, 'descs', 'shot'),
os.path.join(base_path, 'patch_aniso', 'alpha=100_nangles=016_ntvals=005_tmin=6.000_tmax=24.000_thresh=99.900_norm=L1'),
None,
os.path.join(base_path, 'labels'),
epoch_size=50)
# inp = LL.InputLayer(shape=(None, 544))
# print(inp.input_var)
# patch_op = LL.InputLayer(input_var=Tsp.csc_fmatrix('patch_op'), shape=(None, None))
# print(patch_op.shape)
# print(patch_op.input_var)
# icnn = LL.DenseLayer(inp, 16)
# print(icnn.output_shape)
# print(icnn.output_shape)
# desc_net = theano.dot(patch_op, icnn)
```
## Network definition
```
nin = 544
nclasses = 6890
l2_weight = 1e-5
def get_model(inp, patch_op):
    """Build the anisotropic CNN: a 16-unit dense lift, three ACNN
    blocks (16/32/64 channels, batch-normalized), a 512-unit dense
    hidden layer, and a log-softmax classifier over `nclasses`.

    Args:
        inp: Lasagne input layer with the per-vertex descriptors.
        patch_op: Lasagne input layer carrying the sparse patch operator.

    Returns:
        The output Lasagne layer producing log-probabilities.
    """
    icnn = LL.DenseLayer(inp, 16)
    icnn = batch_norm(aniso_utils_lasagne.ACNNLayer([icnn, patch_op], 16, nscale=5, nangl=16))
    icnn = batch_norm(aniso_utils_lasagne.ACNNLayer([icnn, patch_op], 32, nscale=5, nangl=16))
    icnn = batch_norm(aniso_utils_lasagne.ACNNLayer([icnn, patch_op], 64, nscale=5, nangl=16))
    ffn = batch_norm(LL.DenseLayer(icnn, 512))
    # Bug fix: feed the 512-unit hidden layer into the classifier; the
    # original passed `icnn` again, leaving `ffn` computed but unused.
    ffn = LL.DenseLayer(ffn, nclasses, nonlinearity=aniso_utils_lasagne.log_softmax)
    return ffn
inp = LL.InputLayer(shape=(None, nin))
patch_op = LL.InputLayer(input_var=Tsp.csc_fmatrix('patch_op'), shape=(None, None))
ffn = get_model(inp, patch_op)
# L.layers.get_output -> theano variable representing network
output = LL.get_output(ffn)
pred = LL.get_output(ffn, deterministic=True) # in case we use dropout
# target theano variable indicatind the index a vertex should be mapped to wrt the latent space
target = T.ivector('idxs')
# to work with logit predictions, better behaved numerically
cla = aniso_utils_lasagne.categorical_crossentropy_logdomain(output, target, nclasses).mean()
acc = LO.categorical_accuracy(pred, target).mean()
# a bit of regularization is commonly used
regL2 = L.regularization.regularize_network_params(ffn, L.regularization.l2)
cost = cla + l2_weight * regL2
```
## Define the update rule, how to train
```
params = LL.get_all_params(ffn, trainable=True)
grads = T.grad(cost, params)
# computes the L2 norm of the gradient to better inspect training
grads_norm = T.nlinalg.norm(T.concatenate([g.flatten() for g in grads]), 2)
# Adam turned out to be a very good choice for correspondence
updates = L.updates.adam(grads, params, learning_rate=0.001)
```
## Compile
```
funcs = dict()
funcs['train'] = theano.function([inp.input_var, patch_op.input_var, target],
[cost, cla, l2_weight * regL2, grads_norm, acc], updates=updates,
on_unused_input='warn')
funcs['acc_loss'] = theano.function([inp.input_var, patch_op.input_var, target],
[acc, cost], on_unused_input='warn')
funcs['predict'] = theano.function([inp.input_var, patch_op.input_var],
[pred], on_unused_input='warn')
```
# Training (a bit simplified)
```
n_epochs = 50
eval_freq = 1
start_time = time.time()
best_trn = 1e5
best_tst = 1e5
kvs = snapshotter.Snapshotter('demo_training.snap')
for it_count in xrange(n_epochs):
tic = time.time()
b_l, b_c, b_s, b_r, b_g, b_a = [], [], [], [], [], []
for x_ in ds.train_iter():
tmp = funcs['train'](*x_)
# do some book keeping (store stuff for training curves etc)
b_l.append(tmp[0])
b_c.append(tmp[1])
b_r.append(tmp[2])
b_g.append(tmp[3])
b_a.append(tmp[4])
epoch_cost = np.asarray([np.mean(b_l), np.mean(b_c), np.mean(b_r), np.mean(b_g), np.mean(b_a)])
print(('[Epoch %03i][trn] cost %9.6f (cla %6.4f, reg %6.4f), |grad| = %.06f, acc = %7.5f %% (%.2fsec)') %
(it_count, epoch_cost[0], epoch_cost[1], epoch_cost[2], epoch_cost[3], epoch_cost[4] * 100,
time.time() - tic))
if np.isnan(epoch_cost[0]):
print("NaN in the loss function...let's stop here")
break
if (it_count % eval_freq) == 0:
v_c, v_a = [], []
for x_ in ds.test_iter():
tmp = funcs['acc_loss'](*x_)
v_a.append(tmp[0])
v_c.append(tmp[1])
test_cost = [np.mean(v_c), np.mean(v_a)]
print((' [tst] cost %9.6f, acc = %7.5f %%') % (test_cost[0], test_cost[1] * 100))
if epoch_cost[0] < best_trn:
kvs.store('best_train_params', [it_count, LL.get_all_param_values(ffn)])
best_trn = epoch_cost[0]
if test_cost[0] < best_tst:
kvs.store('best_test_params', [it_count, LL.get_all_param_values(ffn)])
best_tst = test_cost[0]
print("...done training %f" % (time.time() - start_time))
```
# Test phase
Now that the model is trained, it is enough to take the forward function and apply it to new data.
```
rewrite = True
out_path = '/tmp/EG16_tutorial/dumps/'
print "Saving output to: %s" % out_path
if not os.path.isdir(out_path) or rewrite==True:
try:
os.makedirs(out_path)
except:
pass
a = []
for i,d in enumerate(ds.test_iter()):
fname = os.path.join(out_path, "%s" % ds.test_fnames[i])
print fname,
tmp = funcs['predict'](d[0], d[1])[0]
a.append(np.mean(np.argmax(tmp, axis=1).flatten() == d[2].flatten()))
scipy.io.savemat(fname, {'desc': tmp})
print ", Acc: %7.5f %%" % (a[-1] * 100.0)
print "\nAverage accuracy across all shapes: %7.5f %%" % (np.mean(a) * 100.0)
else:
print "Model predictions already produced."
```
# Results

| github_jupyter |
```
# import the important libraries
import pandas as pd
pd.set_option('display.max_columns', 500)
pd.set_option('display.max_rows', 1000)
# Check what's in this file
# The file is from NOAA for year 1950
df = pd.read_csv("StormEvents_details-ftp_v1.0_d1950_c20170120.csv")
ls
# the first 5 rows of the file
df.head()
```
### let's check the file for year 1996
```
df_1 = pd.read_csv("/home/mishraka/Documents/Manjula/Lambda_School/Lab_Project/StormEvents_details-ftp_v1.0_d1996_c20170717.csv")
df_1.shape
# check the first five rows
df_1.head()
```
### Check another file for storm data
```
df = pd.read_csv("/home/mishraka/Documents/Manjula/Lambda_School/Lab_Project/storm_data_search_results.csv")
df.head()
df.shape
```
### Earthquake data from Daniel
```
df_earthquake = pd.read_table("/home/mishraka/Documents/Manjula/Lambda_School/Lab_Project/Eathquake_Data/2014_pga10pct50yrs.dat", sep="\s+")
# since this data is a .dat format, it assigns any sentence words into random columns
df_earthquake.head()
# check the columns
df_earthquake.columns
# delete the unnecessary columns
df_earth = df_earthquake.drop(columns=['sums', '1', 'hazard', 'curves.',
'Site', 'Vs30', '760.0'])
df_earth.shape
df_earth.head()
# Assign row[0] as our header
header = df_earth.iloc[0]
# assigning the dataframe into a new variable row[1] onwards
df_earth_new = df_earth[1:]
# add the header
df = df_earth_new.rename(columns=header)
df.head()
# rename the columns
df_new = df.rename(columns={'#Long': 'lon', 'Lat':'lat', 'SA(g)':'sa_g'}, inplace=True)
df.head()
# check the data type of the columns
df.dtypes
df.columns
# Since they are all object type (str)
# convert them to floats to be able to use them
df['lon'] = df['lon'].astype('float64')
df['lat'] = df['lat'].astype('float64')
df['sa_g'] = df['sa_g'].astype('float64')
```
### create a new columns for zipcode
```
df['zipcode'] = 0
df.head()
```
### US zipcode
Since it's US geodata, we want to install the uszipcode library to extract information about the zipcode using longitude and latitude data
```
!pip install uszipcode
### Let's import the uszipcode library we just installed
import uszipcode
from uszipcode import SearchEngine, SimpleZipcode, Zipcode
search = SearchEngine()
```
### write a for loop
The for loop runs over the whole dataset to search for a zipcode based on the geo data. The search returns up to 5 zipcodes by default, and each zipcode is itself an object.
```
# declare the range
# this range is the total rowd of the df: len(df)
for i in range(1,611309):
# print(df['lat'][i], df['lon'][i])
result = search.by_coordinates(lat=df['lat'][i],
lng=df['lon'][i], radius=30)
if result:
df['zipcode'][i] = result[0].zipcode # save the first result
# result = search.by_coordinates(df['lat'][1],df['lon'][1])
len(zipcode)
result = search.by_coordinates(24.6,-91.45, radius=30)
result
search.by_coordinates?
result[0].zipcode
```
### Zip code file
source https://github.com/ajithranka/zipcode/blob/gh-pages/data/zipcodes.csv
```
df_zip = pd.read_csv("https://raw.githubusercontent.com/ajithranka/zipcode/gh-pages/data/zipcodes.csv")
df_zip.shape
df_zip.head()
df.tail()
```
### concatenate two dataframes
```
df_zip_concat = pd.concat([df_zip, df], axis=1)
df_zip_concat.tail()
df_zip_concat.sort_values(by=['latitude', 'longitude','lon', 'lat'])
df.columns, df_zip.columns
def match(df, df_zip):
    """Check whether the two dataframes share a row with equal coordinates.

    Compares `df`'s (lon, lat) columns against `df_zip`'s
    (longitude, latitude) columns row by row (by position) and returns
    True at the first row where both values are equal, False otherwise.

    Bug fixes vs. the original: the row range is derived from the frame
    lengths instead of a hard-coded 2000 (which raised KeyError on
    shorter frames), and a non-match now returns False explicitly
    instead of falling off the end and returning None.
    """
    n_rows = min(len(df), len(df_zip))
    for i in range(n_rows):
        if df['lon'][i] == df_zip['longitude'][i] and df['lat'][i] == df_zip['latitude'][i]:
            return True
    return False
```
| github_jupyter |

> **Copyright (c) 2021 CertifAI Sdn. Bhd.**<br>
<br>
This program is part of OSRFramework. You can redistribute it and/or modify
<br>it under the terms of the GNU Affero General Public License as published by
<br>the Free Software Foundation, either version 3 of the License, or
<br>(at your option) any later version.
<br>
<br>This program is distributed in the hope that it will be useful
<br>but WITHOUT ANY WARRANTY; without even the implied warranty of
<br>MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
<br>GNU Affero General Public License for more details.
<br>
<br>You should have received a copy of the GNU Affero General Public License
<br>along with this program. If not, see <http://www.gnu.org/licenses/>.
<br>
INSTRUCTION: Follow the steps in the commented line for each section and run the code.
```
"""
Handson for Day_03 Data Preparation and Text Cleaning
Part 1: NLTK
if you have not downloaded NLTK make sure to type in terminal - pip install nltk
"""
"""
1: LOWERCASE TEXT (uncomment block of code below)
complete the missing code (indicated by <missing code>) to lower a text
"""
# text = "This is a SAMPLE TEXT"
# lowered_text = text.<missing code>
# print(lowered_text)
"""
2: TOKENIZE TEXT (uncomment block of code below)
complete the missing code (indicated by <missing code>) to tokenize the text
"""
# import nltk
# text = "This is a sentence."
# tokenized_text = nltk.<missing code>
# print(tokenized_text)
"""
3: STEM TEXT (uncomment block of code below)
complete the missing code (indicated by <missing code>) to stem the text
"""
# from nltk.stem.porter import PorterStemmer
# stemmer = PorterStemmer()
# text = "runs"
# stemmed_text = stemmer.<missing code>
# print(stemmed_text)
"""
4: LEMMATIZE TEXT (uncomment block of code below)
complete the missing code (indicated by <missing code>) to lemmatize the text
if you have not downloaded wordnet, just uncomment nltk.download('wordnet'), you only need to download once
"""
# import nltk
# # nltk.download('wordnet')
# nltk.download('wordnet')
# from nltk.stem import WordNetLemmatizer
# lemmatizer = WordNetLemmatizer()
# text = "feet"
# lemmatized_text = lemmatizer.<missing code>
# print(lemmatized_text)
"""
5: REMOVE STOP WORDS (uncomment block of code below)
complete the missing code (indicated by <missing code>) to remove stop words from the text
if you have not downloaded stopwords, just uncomment nltk.download('stopwords'), you only need to download once
"""
# import nltk
# # nltk.download('stopwords')
# from nltk.corpus import stopwords
# stop_words = set(stopwords.words('english'))
# text = "This is a sample text for testing stop words"
# filtered_sentence = []
# for word in text.split():
# # lower case word and check if it is not a stop word
# if <missing code>:
# filtered_sentence.append(word)
# print(filtered_sentence)
"""
6: REMOVE PUNCTUATION (uncomment block of code below)
complete the missing code (indicated by <missing code>) to remove punctuation from the text
"""
# import nltk
# sentence = "This is, a sample text,! for .,testing purpose."
# tokenizer = nltk.RegexpTokenizer(r"\w+")
# filtered_sentence = tokenizer.<missing code>
# print(filtered_sentence)
```
| github_jupyter |
# Project - Seminar Computer Vision by Deep Learning (CS4245) 2020/2021
Group Number: 20
Student 1: Stan Zwinkels
Student 2: Ted de Vries Lentsch
Date: June 14, 2021
## Instruction
For correct functioning of this notebook, the dataset [morado_5may](https://www.kaggle.com/teddevrieslentsch/morado-5may) must be in the same directory as this notebook.
## Import necessary libraries
```
# standard libraries
import glob
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import shutil
import time
# widgets
from IPython.display import display, clear_output
import ipywidgets
```
## Relabel
Make folder with for the annotations with the new labels.
```
# Recreate the output folder for the relabeled annotations from scratch.
root_path = 'morado_5may'
relabel_path = '{}/annotations_relabel'.format(root_path)
if os.path.isdir(relabel_path):
    # drop any previous relabeling results so we always start clean
    shutil.rmtree(relabel_path)
    time.sleep(0.1)  # give the OS a moment to finish deleting before recreating
os.makedirs(relabel_path)
```
Below is the `ReLabelDataset` class for relabeling.
```
class ReLabelDataset(object):
    """Interactive tool for re-assigning class labels to bounding-box patches.

    Each button click (see `left_button_click` / `right_button_click`) labels the
    currently displayed patch and advances to the next one; once every row of an
    image's annotation file is labeled, the file is written to
    `<root>/annotations_relabel/`.

    NOTE(review): `plot_patch` draws into a module-level ipywidgets `Output`
    named `out`, which must be defined (in a later cell) before the first click.
    """
    def __init__(self, root):
        self.root = root # directory to dataset
        self.imgs = list(sorted(os.listdir('{}/images'.format(root)))) # load images
        self.annots = list(sorted(os.listdir('{}/annotations'.format(root)))) # load annotations
        self.classes = ['background', 'raw', 'ripe'] # classes
        self.idx = 0 # image/annotation index
        self.idx_last = -1 # last image/annotation index
        self.row_number = -1 # number of the current row (-1: nothing shown yet)
        self.start = True # initialize process
        self.img = None # image
        self.annot = None # annotation
        self.done = False # whether all images have been labeled
    def plot_patch(self):
        """Crop the current bounding box out of the image and show it in `out`."""
        with out:
            # columns 0-4 of the row: bbox coords (0-3) plus the old label (4);
            # pandas .loc slicing is inclusive, so this grabs all five values
            annot = self.annot.loc[self.row_number,0:4].to_numpy()
            # crop the patch; assumes columns are (xmin, ymin, xmax, ymax) -- TODO confirm
            img = self.img[int(annot[1]):(int(annot[3])+1),int(annot[0]):(int(annot[2])+1),:]
            clear_output(True)
            if not self.done:
                plt.figure(figsize=(5, 5))
                plt.imshow(img, zorder=-10)
                plt.title('Old label: {}'.format(self.annot.loc[self.row_number, 4]))
                plt.show()
            else:
                # all images processed: show an empty figure
                plt.figure()
                plt.show()
    def manage_ids(self):
        """Advance to the next row, saving and moving to the next image when done."""
        if self.row_number==len(self.annot)-1:
            # last row of this image: persist the relabeled file and advance
            self.save_annot()
            self.row_number = 0
            self.idx_last = self.idx
            self.idx += 1
            if self.idx==len(self.imgs):
                self.done = True
        else:
            self.idx_last = self.idx
            self.row_number += 1
    def get_data(self):
        """Load image and annotation for the current index (only when it changed)."""
        if self.idx!=self.idx_last:
            img_path = '{}/images/{}'.format(self.root, self.imgs[self.idx])
            annot_path = '{}/annotations/{}'.format(self.root, self.annots[self.idx])
            # rotate 90 degrees clockwise to match the annotation orientation
            self.img = np.rot90(plt.imread(img_path), -1)
            # header-less CSV: integer column positions 0..4
            self.annot = pd.read_csv(annot_path, sep=',', header=None)
    def save_annot(self):
        """Write the relabeled annotation, sorted by label, without index/header."""
        annot_re_path = '{}/annotations_relabel/{}'.format(self.root, self.annots[self.idx])
        self.annot.sort_values(by=[4], inplace=True)
        self.annot.reset_index(drop=True, inplace=True)
        self.annot.to_csv(annot_re_path, index=0, header=0)
        print('The file {} has been relabeled!'.format(self.annots[self.idx]))
    def button_click_action(self, label):
        """Record `label` for the current row and show the next patch.

        The very first click only starts the process (nothing is labeled yet),
        because no patch has been displayed before it.
        """
        if not self.done:
            self.get_data()
            if not self.start:
                self.annot.at[self.row_number,4] = label
            self.start = False
            self.manage_ids()
            self.plot_patch()
    def left_button_click(self, click):
        # callback for the 'Raw' button
        self.button_click_action('raw')
    def right_button_click(self, click):
        # callback for the 'Ripe' button
        self.button_click_action('ripe')
```
Below is the tool for relabeling. The process is started by clicking on one of the two buttons. The first annotation is then plotted. You can then indicate for each image to which class it belongs. If all the annotations for one image have been made, a new .csv file is saved in the `annotations_relabel` directory that was created above.
```
%matplotlib inline
relabeler = ReLabelDataset(root_path)
# Create one button per class; clicking assigns that label to the shown patch.
button_left = ipywidgets.Button(description='Raw')
button_right = ipywidgets.Button(description='Ripe')
# Wire each button press to the corresponding relabeling callback.
button_left.on_click(relabeler.left_button_click)
button_right.on_click(relabeler.right_button_click)
# Output widget that ReLabelDataset.plot_patch draws into (module-level `out`).
out = ipywidgets.Output()
# Lay out the two buttons side by side, with the plot area below them.
ipywidgets.VBox([ipywidgets.HBox([button_left, button_right]), out])
```
| github_jupyter |
## Reinforcement Learning for seq2seq
This time we'll solve the problem of transcribing Hebrew words in English, also known as g2p (grapheme2phoneme)
* word (sequence of letters in source language) -> translation (sequence of letters in target language)
Unlike what most deep learning researchers do, we won't only train it to maximize likelihood of correct translation, but also employ reinforcement learning to actually teach it to translate with as few errors as possible.
### About the task
One notable property of Hebrew is that it is a consonant language. That is, there are no vowels in the written language. One could represent vowels with diacritics above consonants, but you don't expect people to do that in everyday life.
Therefore, some Hebrew characters will correspond to several English letters and others to none, so we should use an encoder-decoder architecture to figure that out.

_(img: esciencegroup.files.wordpress.com)_
Encoder-decoder architectures are about converting anything to anything, including
* Machine translation and spoken dialogue systems
* [Image captioning](http://mscoco.org/dataset/#captions-challenge2015) and [image2latex](https://openai.com/requests-for-research/#im2latex) (convolutional encoder, recurrent decoder)
* Generating [images by captions](https://arxiv.org/abs/1511.02793) (recurrent encoder, convolutional decoder)
* Grapheme2phoneme - convert words to transcripts
We chose simplified __Hebrew->English__ machine translation for words and short phrases (character-level), as it is relatively quick to train even without a gpu cluster.
```
# Experiment configuration flags.
# If True, only translates phrases shorter than 20 characters (way easier).
# Keep it True until you're done debugging; switch to False for the homework
# assignment (works with all phrases).
EASY_MODE = True
# Translation direction. Either "he-to-en" or "en-to-he".
MODE = "he-to-en"
# Maximal length of _generated_ output; does not affect training.
MAX_OUTPUT_LENGTH = 50 if not EASY_MODE else 20
REPORT_FREQ = 100  # how often (in steps) to evaluate validation score
```
### Step 1: preprocessing
We shall store dataset as a dictionary
`{ word1:[translation1,translation2,...], word2:[...],...}`.
This is mostly due to the fact that many words have several correct translations.
We have implemented this thing for you so that you can focus on more interesting parts.
__Attention python2 users!__ You may want to cast everything to unicode later during homework phase, just make sure you do it _everywhere_.
```
import numpy as np
from collections import defaultdict
word_to_translation = defaultdict(list)  # word -> list of acceptable translations
bos = '_'
eos = ';'
with open("main_dataset.txt", encoding='utf8') as fin:
    for line in fin:
        # drop the trailing newline, lowercase, and make sure the special
        # bos/eos markers never occur inside the data itself
        cleaned = line[:-1].lower().replace(bos, ' ').replace(eos, ' ')
        en, he = cleaned.split('\t')
        if MODE == 'he-to-en':
            word, trans = he, en
        else:
            word, trans = en, he
        # skip very short source words
        if len(word) < 3:
            continue
        # in easy mode, keep only short word/translation pairs
        if EASY_MODE and max(len(word), len(trans)) > 20:
            continue
        word_to_translation[word].append(trans)
print("size = ", len(word_to_translation))
# all source-language words (dictionary keys, hence unique)
all_words = np.array(list(word_to_translation.keys()))
# every translation of every word, flattened
all_translations = np.array(
    [ts for all_ts in word_to_translation.values() for ts in all_ts])
```
### split the dataset
We hold out 10% of all words to be used for validation.
```
from sklearn.model_selection import train_test_split
train_words, test_words = train_test_split(
all_words, test_size=0.1, random_state=42)
```
### Building vocabularies
We now need to build vocabularies that map strings to token ids and vice versa. We're gonna need these fellas when we feed training data into model or convert output matrices into english words.
```
from voc import Vocab
inp_voc = Vocab.from_lines(''.join(all_words), bos=bos, eos=eos, sep='')
out_voc = Vocab.from_lines(''.join(all_translations), bos=bos, eos=eos, sep='')
# Here's how you cast lines into ids and backwards.
batch_lines = all_words[:5]
batch_ids = inp_voc.to_matrix(batch_lines)
batch_lines_restored = inp_voc.to_lines(batch_ids)
print("lines")
print(batch_lines)
print("\nwords to ids (0 = bos, 1 = eos):")
print(batch_ids)
print("\nback to words")
print(batch_lines_restored)
```
Draw word/translation length distributions to estimate the scope of the task.
```
import matplotlib.pyplot as plt
%matplotlib inline
plt.figure(figsize=[8, 4])
plt.subplot(1, 2, 1)
plt.title("words")
plt.hist(list(map(len, all_words)), bins=20)
plt.subplot(1, 2, 2)
plt.title('translations')
plt.hist(list(map(len, all_translations)), bins=20)
```
### Step 3: deploy encoder-decoder (1 point)
__assignment starts here__
Our architecture consists of two main blocks:
* Encoder reads words character by character and outputs code vector (usually a function of last RNN state)
* Decoder takes that code vector and produces translations character by character
Then it gets fed into a model that follows this simple interface:
* __`model.symbolic_translate(inp, **flags) -> out, logp`__ - takes symbolic int32 matrix of hebrew words, produces output tokens sampled from the model and output log-probabilities for all possible tokens at each tick.
* if given flag __`greedy=True`__, takes most likely next token at each iteration. Otherwise samples with next token probabilities predicted by model.
* __`model.symbolic_score(inp, out, **flags) -> logp`__ - takes symbolic int32 matrices of Hebrew words and their English translations. Computes the log-probabilities of all possible English characters given English prefixes and the Hebrew word.
That's all! It's as hard as it gets. With those two methods alone you can implement all kinds of prediction and training.
```
# set flags here if necessary
import theano
theano.config.floatX = 'float32'
import theano.tensor as T
import lasagne
from basic_model_theano import BasicTranslationModel
model = BasicTranslationModel(inp_voc, out_voc,
emb_size=64, hid_size=128)
# Play around with symbolic_translate and symbolic_score
inp = T.constant(np.random.randint(0, 10, [3, 5], dtype='int32'))
out = T.constant(np.random.randint(0, 10, [3, 5], dtype='int32'))
# translate inp (with untrained model)
sampled_out, logp = model.symbolic_translate(inp, greedy=False)
dummy_translate = theano.function([], sampled_out, updates=model.auto_updates)
print("\nSymbolic_translate output:\n", sampled_out, logp)
print("\nSample translations:\n", dummy_translate())
# score logp(out | inp) with untrained input
logp = model.symbolic_score(inp, out)
dummy_score = theano.function([], logp)
print("\nSymbolic_score output:\n", logp)
print("\nLog-probabilities (clipped):\n", dummy_score()[:, :2, :5])
# Prepare any operations you want here
inp = T.imatrix("input tokens [batch,time]")
trans, _ = <build symbolic translations with greedy = True >
translate_fun = theano.function([inp], trans, updates=model.auto_updates)
def translate(lines):
"""
You are given a list of input lines.
Make your neural network translate them.
:return: a list of output lines
"""
# Convert lines to a matrix of indices
lines_ix = <YOUR CODE >
# Compute translations in form of indices (call your function)
trans_ix = <YOUR CODE >
# Convert translations back into strings
return out_voc.to_lines(trans_ix)
print("Sample inputs:", all_words[:3])
print("Dummy translations:", translate(all_words[:3]))
assert trans.ndim == 2 and trans.dtype.startswith(
'int'), "trans must be a tensor of integers (token ids)"
assert translate(all_words[:3]) == translate(
all_words[:3]), "make sure translation is deterministic (use greedy=True and disable any noise layers)"
assert type(translate(all_words[:3])) is list and (type(translate(all_words[:1])[0]) is str or type(
translate(all_words[:1])[0]) is unicode), "translate(lines) must return a sequence of strings!"
print("Tests passed!")
```
### Scoring function
LogLikelihood is a poor estimator of model performance.
* If we predict zero probability once, it shouldn't ruin entire model.
* It is enough to learn just one translation if there are several correct ones.
* What matters is how many mistakes model's gonna make when it translates!
Therefore, we will use the minimal Levenshtein distance. It measures how many characters we need to add/remove/replace in the model's translation to make it perfect. Alternatively, one could use character-level BLEU/RougeL or other similar metrics.
The catch here is that Levenshtein distance is not differentiable: it isn't even continuous. We can't train our neural network to maximize it by gradient descent.
```
import editdistance # !pip install editdistance
def get_distance(word, trans):
    """Edit distance from `trans` to the closest correct translation of `word`.

    Looks up references in the module-level `word_to_translation` dictionary;
    asserts that the word has at least one known reference translation.
    """
    references = word_to_translation[word]
    assert len(references) != 0, "wrong/unknown word"
    # minimal Levenshtein distance over all acceptable references
    return min(editdistance.eval(trans, ref) for ref in references)
def score(words, bsize=100):
    """Array of edit distances for `bsize` random samples drawn from `words`."""
    assert isinstance(words, np.ndarray)
    # sample without replacement, translate with the model, score each pair
    batch_words = np.random.choice(words, size=bsize, replace=False)
    batch_trans = translate(batch_words)
    distances = list(map(get_distance, batch_words, batch_trans))
    return np.array(distances, dtype='float32')
# should be around 5-50 and decrease rapidly after training :)
[score(test_words, 10).mean() for _ in range(5)]
```
## Step 2: Supervised pre-training
Here we define a function that trains our model through maximizing log-likelihood a.k.a. minimizing crossentropy.
```
from agentnet.learning.generic import get_values_for_actions, get_mask_by_eos
class llh_trainer:
# variable for correct answers
input_sequence = T.imatrix("input sequence [batch,time]")
reference_answers = T.imatrix("reference translations [batch, time]")
# Compute log-probabilities of all possible tokens at each step. Use model interface.
logprobs_seq = <YOUR CODE >
# compute mean crossentropy
crossentropy = - get_values_for_actions(logprobs_seq, reference_answers)
mask = get_mask_by_eos(T.eq(reference_answers, out_voc.eos_ix))
loss = T.sum(crossentropy * mask)/T.sum(mask)
# Build weight updates. Use model.weights to get all trainable params.
updates = <YOUR CODE >
train_step = theano.function(
[input_sequence, reference_answers], loss, updates=updates)
```
Actually run training on minibatches
```
import random
def sample_batch(words, word_to_translation, batch_size):
    """Sample a random batch of words plus one random correct translation each.

    Returns a pair of token-id matrices (inputs, targets) built with the
    module-level vocabularies ``inp_voc`` / ``out_voc``.

    Example:
        batch_x, batch_y = sample_batch(train_words, word_to_translation, 10)
    """
    # pick source words (with replacement)
    chosen_words = np.random.choice(words, size=batch_size)
    # for every chosen word, pick one of its reference translations at random
    chosen_trans = [random.choice(word_to_translation.get(w)) for w in chosen_words]
    return inp_voc.to_matrix(chosen_words), out_voc.to_matrix(chosen_trans)
bx, by = sample_batch(train_words, word_to_translation, batch_size=3)
print("Source:")
print(bx)
print("Target:")
print(by)
from IPython.display import clear_output
from tqdm import tqdm, trange # or use tqdm_notebook,tnrange
loss_history = []
editdist_history = []
for i in trange(25000):
loss = llh_trainer.train_step(
*sample_batch(train_words, word_to_translation, 32))
loss_history.append(loss)
if (i+1) % REPORT_FREQ == 0:
clear_output(True)
current_scores = score(test_words)
editdist_history.append(current_scores.mean())
plt.figure(figsize=(12, 4))
plt.subplot(131)
plt.title('train loss / traning time')
plt.plot(loss_history)
plt.grid()
plt.subplot(132)
plt.title('val score distribution')
plt.hist(current_scores, bins=20)
plt.subplot(133)
plt.title('val score / traning time')
plt.plot(editdist_history)
plt.grid()
plt.show()
print("llh=%.3f, mean score=%.3f" %
(np.mean(loss_history[-10:]), np.mean(editdist_history[-10:])))
for word in train_words[:10]:
print("%s -> %s" % (word, translate([word])[0]))
test_scores = []
for start_i in trange(0, len(test_words), 32):
batch_words = test_words[start_i:start_i+32]
batch_trans = translate(batch_words)
distances = list(map(get_distance, batch_words, batch_trans))
test_scores.extend(distances)
print("Supervised test score:", np.mean(test_scores))
```
## Preparing for reinforcement learning (2 points)
First we need to define loss function as a custom theano operation.
The simple way to do so is
```
@theano.compile.as_op(input_types,output_type(s),infer_shape)
def my_super_function(inputs):
return outputs
```
__Your task__ is to implement `_compute_levenshtein` function that takes matrices of words and translations, along with input masks, then converts those to actual words and phonemes and computes min-levenshtein via __get_distance__ function above.
```
@theano.compile.as_op([T.imatrix]*2, [T.fvector], lambda _, shapes: [shapes[0][:1]])
def _compute_levenshtein(words_ix, trans_ix):
"""
A custom theano operation that computes levenshtein loss for predicted trans.
Params:
- words_ix - a matrix of letter indices, shape=[batch_size,word_length]
- words_mask - a matrix of zeros/ones,
1 means "word is still not finished"
0 means "word has already finished and this is padding"
- trans_mask - a matrix of output letter indices, shape=[batch_size,translation_length]
- trans_mask - a matrix of zeros/ones, similar to words_mask but for trans_ix
Please implement the function and make sure it passes tests from the next cell.
"""
# convert words to strings
words = <restore words(a list of strings) from words_ix >
assert type(words) is list and type(
words[0]) is str and len(words) == len(words_ix)
# convert translations to lists
translations = <restore trans(a list of lists of phonemes) from trans_ix
assert type(translations) is list and type(
translations[0]) is str and len(translations) == len(trans_ix)
# computes levenstein distances. can be arbitrary python code.
distances = <apply get_distance to each pair of[words, translations] >
assert type(distances) in (list, tuple, np.ndarray) and len(
distances) == len(words_ix)
distances = np.array(list(distances), dtype='float32')
return distances
# forbid gradient
from theano.gradient import disconnected_grad
def compute_levenshtein(*args):
return disconnected_grad(_compute_levenshtein(*[arg.astype('int32') for arg in args]))
```
Simple test suite to make sure your implementation is correct. Hint: if you run into any bugs, feel free to use print from inside _compute_levenshtein.
```
# test suite
# sample random batch of (words, correct trans, wrong trans)
batch_words = np.random.choice(train_words, size=100)
batch_trans = list(map(random.choice, map(
word_to_translation.get, batch_words)))
batch_trans_wrong = np.random.choice(all_translations, size=100)
batch_words_ix = T.constant(inp_voc.to_matrix(batch_words))
batch_trans_ix = T.constant(out_voc.to_matrix(batch_trans))
batch_trans_wrong_ix = T.constant(out_voc.to_matrix(batch_trans_wrong))
# assert compute_levenshtein is zero for ideal translations
correct_answers_score = compute_levenshtein(
batch_words_ix, batch_trans_ix).eval()
assert np.all(correct_answers_score ==
0), "a perfect translation got nonzero levenshtein score!"
print("Everything seems alright!")
# assert compute_levenshtein matches actual scoring function
wrong_answers_score = compute_levenshtein(
batch_words_ix, batch_trans_wrong_ix).eval()
true_wrong_answers_score = np.array(
list(map(get_distance, batch_words, batch_trans_wrong)))
assert np.all(wrong_answers_score ==
true_wrong_answers_score), "for some word symbolic levenshtein is different from actual levenshtein distance"
print("Everything seems alright!")
```
Once you got it working...
* You may now want to __remove/comment asserts__ from function code for a slight speed-up.
* There's a more detailed tutorial on custom theano ops here: [docs](http://deeplearning.net/software/theano/extending/extending_theano.html), [example](https://gist.github.com/justheuristic/9f4ffef6162a8089c3260fc3bbacbf46).
## Self-critical policy gradient (2 points)
In this section you'll implement algorithm called self-critical sequence training (here's an [article](https://arxiv.org/abs/1612.00563)).
The algorithm is a vanilla policy gradient with a special baseline.
$$ \nabla J = E_{x \sim p(s)} E_{y \sim \pi(y|x)} \nabla log \pi(y|x) \cdot (R(x,y) - b(x)) $$
Here reward R(x,y) is a __negative levenshtein distance__ (since we minimize it). The baseline __b(x)__ represents how well model fares on word __x__.
In practice, this means that we compute baseline as a score of greedy translation, $b(x) = R(x,y_{greedy}(x)) $.

Luckily, we already obtained the required outputs: `model.greedy_translations, model.greedy_mask` and we only need to compute levenshtein using `compute_levenshtein` function.
```
class trainer:
input_sequence = T.imatrix("input tokens [batch,time]")
# use model to __sample__ symbolic translations given input_sequence
sample_translations, sample_logp = <your code here >
auto_updates = model.auto_updates
# use model to __greedy__ symbolic translations given input_sequence
greedy_translations, greedy_logp = <your code here >
greedy_auto_updates = model.auto_updates
# Note: you can use model.symbolic_translate(...,unroll_scan=True,max_len=MAX_OUTPUT_LENGTH)
# to run much faster at a cost of longer compilation
rewards = - compute_levenshtein(input_sequence, sample_translations)
baseline = <compute __negative__ levenshtein for greedy mode >
# compute advantage using rewards and baseline
advantage = <your code - compute advantage >
# compute log_pi(a_t|s_t), shape = [batch, seq_length]
logprobs_phoneme = get_values_for_actions(sample_logp, sample_translations)
# policy gradient
J = logprobs_phoneme*advantage[:, None]
mask = get_mask_by_eos(T.eq(sample_translations, out_voc.eos_ix))
loss = - T.sum(J*mask) / T.sum(mask)
# regularize with negative entropy. Don't forget the sign!
# note: for entropy you need probabilities for all tokens (sample_logp), not just phoneme_logprobs
entropy = <compute entropy matrix of shape[batch, seq_length], H = -sum(p*log_p), don't forget the sign!>
assert entropy.ndim == 2, "please make sure elementwise entropy is of shape [batch,time]"
loss -= 0.01*T.sum(entropy*mask) / T.sum(mask)
# compute weight updates, clip by norm
grads = T.grad(loss, model.weights)
grads = lasagne.updates.total_norm_constraint(grads, 50)
updates = lasagne.updates.adam(grads, model.weights, learning_rate=1e-5)
train_step = theano.function([input_sequence], loss,
updates=auto_updates+greedy_auto_updates+updates)
```
# Policy gradient training
```
for i in trange(100000):
loss_history.append(
trainer.train_step(sample_batch(
train_words, word_to_translation, 32)[0])
)
if (i+1) % REPORT_FREQ == 0:
clear_output(True)
current_scores = score(test_words)
editdist_history.append(current_scores.mean())
plt.figure(figsize=(8, 4))
plt.subplot(121)
plt.title('val score distribution')
plt.hist(current_scores, bins=20)
plt.subplot(122)
plt.title('val score / traning time')
plt.plot(editdist_history)
plt.grid()
plt.show()
print("J=%.3f, mean score=%.3f" %
(np.mean(loss_history[-10:]), np.mean(editdist_history[-10:])))
model.translate("EXAMPLE;")
```
### Results
```
for word in train_words[:10]:
print("%s -> %s" % (word, translate([word])[0]))
test_scores = []
for start_i in trange(0, len(test_words), 32):
batch_words = test_words[start_i:start_i+32]
batch_trans = translate(batch_words)
distances = list(map(get_distance, batch_words, batch_trans))
test_scores.extend(distances)
print("Supervised test score:", np.mean(test_scores))
# ^^ If you get Out Of Memory, please replace this with batched computation
```
## Step 6: Make it actually work (5++ pts)
<img src=https://github.com/yandexdataschool/Practical_RL/raw/master/yet_another_week/_resource/do_something_scst.png width=400>
In this section we want you to finally __restart with EASY_MODE=False__ and experiment to find a good model/curriculum for that task.
We recommend you to start with the following architecture
```
encoder---decoder
P(y|h)
^
LSTM -> LSTM
^ ^
biLSTM -> LSTM
^ ^
input y_prev
```
__Note:__ you can fit all 4 state tensors of both LSTMs into a single state - just assume that it contains, for example, [h0, c0, h1, c1] - pack it in encode and update in decode.
Here are some cool ideas on what you can do then.
__General tips & tricks:__
* In some tensorflow versions and for some layers, it is required that each rnn/gru/lstm cell gets it's own `tf.variable_scope(unique_name, reuse=False)`.
* Otherwise it will complain about wrong tensor sizes because it tries to reuse weights from one rnn to the other.
* You will likely need to adjust pre-training time for such a network.
* Supervised pre-training may benefit from clipping gradients somehow.
* SCST may indulge a higher learning rate in some cases and changing entropy regularizer over time.
* It's often useful to save pre-trained model parameters to not re-train it every time you want new policy gradient parameters.
* When leaving training for nighttime, try setting REPORT_FREQ to a larger value (e.g. 500) not to waste time on it.
__Formal criteria:__
To get 5 points we want you to build an architecture that:
* _doesn't consist of single GRU_
* _works better_ than single GRU baseline.
* We also want you to provide either learning curve or trained model, preferably both
* ... and write a brief report or experiment log describing what you did and how it fared.
### Attention
There's more than one way to connect decoder to encoder
* __Vanilla:__ layer_i of encoder last state goes to layer_i of decoder initial state
* __Every tick:__ feed encoder last state _on every iteration_ of decoder.
* __Attention:__ allow decoder to "peek" at one (or several) positions of encoded sequence on every tick.
The most effective (and cool) of those is, of course, attention.
You can read more about attention [in this nice blog post](https://distill.pub/2016/augmented-rnns/). The easiest way to begin is to use "soft" attention with "additive" or "dot-product" intermediate layers.
__Tips__
* Model usually generalizes better if you no longer allow decoder to see final encoder state
* Once your model made it through several epochs, it is a good idea to visualize attention maps to understand what your model has actually learned
* There's more stuff [here](https://github.com/yandexdataschool/Practical_RL/blob/master/week8_scst/bonus.ipynb)
* If you opted for hard attention, we recommend [gumbel-softmax](https://blog.evjang.com/2016/11/tutorial-categorical-variational.html) instead of sampling. Also please make sure soft attention works fine before you switch to hard.
### UREX
* This is a way to improve exploration in policy-based settings. The main idea is that you find and upweight under-appreciated actions.
* Here's [video](https://www.youtube.com/watch?v=fZNyHoXgV7M&feature=youtu.be&t=3444)
and an [article](https://arxiv.org/abs/1611.09321).
* You may want to reduce batch size 'cuz UREX requires you to sample multiple times per source sentence.
* Once you got it working, try using experience replay with importance sampling instead of (in addition to) basic UREX.
### Some additional ideas:
* (advanced deep learning) It may be a good idea to first train on small phrases and then adapt to larger ones (a.k.a. training curriculum).
* (advanced nlp) You may want to switch from raw utf8 to something like unicode or even syllables to make task easier.
* (advanced nlp) Since hebrew words are written __with vowels omitted__, you may want to use a small Hebrew vowel markup dataset at `he-pron-wiktionary.txt`.
```
assert not EASY_MODE, "make sure you set EASY_MODE = False at the top of the notebook."
```
`[your report/log here or anywhere you please]`
__Contributions:__ This notebook is brought to you by
* Yandex [MT team](https://tech.yandex.com/translate/)
* Denis Mazur ([DeniskaMazur](https://github.com/DeniskaMazur)), Oleg Vasilev ([Omrigan](https://github.com/Omrigan/)), Dmitry Emelyanenko ([TixFeniks](https://github.com/tixfeniks)) and Fedor Ratnikov ([justheuristic](https://github.com/justheuristic/))
* Dataset is parsed from [Wiktionary](https://en.wiktionary.org), which is under CC-BY-SA and GFDL licenses.
| github_jupyter |
<a href="https://colab.research.google.com/github/joselvira/BiomecanicaPython/blob/master/Notebooks/Transformar_Bases_de_Datos.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# CÓMO TRANSFORMAR LA ORIENTACIÓN DE LAS BASES DE DATOS
Normalmente utilizamos bases de datos en las que vamos introduciendo resultados con una estructura de sujetos en filas y variables en columnas. Esta disposición podemos denominarla **horizontal** (en inglés se suele llamar *wide*) y es muy cómoda para trabajar con datos sencillos. Sin embargo, cuando empezamos a introducir distintos factores (como pre y postest, o distintos grupos de intervención) esta orientación se queda corta. Programas estadísticos como el SPSS requieren que los factores se apilen en **vertical** (en inglés *long*) cuando queremos hacer comparaciones con ANOVAS por ejemplo. Si tenemos pocos factores y pocos datos se puede hacer a mano directamente en la hoja de cálculo, pero si tenemos muchos y variados factores resulta más complicado y tedioso. Además, este formato es más práctico a la hora de hacer representaciones gráficas rápidas teniendo en cuenta los factores.
También hay otro formato mixto (en inglés *tidy*), que coloca las etiquetas de clasificación de los factores en columnas, y mantiene cada variable en una columna diferente.
En este apartado veremos cómo podemos hacer la adaptación de bases de datos con orientación horizontal a vertical y al revés. Seguramente no se contemplarán todas las opciones posibles que se puedan dar en todos los estudios, pero al menos se abarcan las que pueden ser más frecuentes, y pueden servir como base por donde empezar para adaptarlas a las necesidades propias.
## DE HORIZONTAL A VERTICAL (de wide a long)
### Ejemplo sencillo
Creamos una base de datos sencilla con orientación horizontal.
Tiene un factor tiempo con tres niveles y un factor grupo con dos niveles.
```
import numpy as np
import pandas as pd

n = 3
Tiempo = ['base', 'pre', 'post']
Grupo = ['GE', 'GC']

np.random.seed(456)  # fix randomness for reproducibility

# Mean of the normal distribution for each (group, time) combination;
# the draw order below matches the original cell exactly.
means = {'GE': (7.3, 7.4, 10.1), 'GC': (7.5, 7.5, 7.5)}
draws = {g: [np.random.normal(m, size=n) for m in means[g]] for g in Grupo}

# Assemble the wide-format dataframe: one column per time point.
df = pd.DataFrame({'SujID': np.arange(n * len(Grupo)) + 1,
                   'Grupo': np.repeat(Grupo, n),
                   'Medida_base': np.hstack([draws['GE'][0], draws['GC'][0]]),
                   'Medida_pre': np.hstack([draws['GE'][1], draws['GC'][1]]),
                   'Medida_post': np.hstack([draws['GE'][2], draws['GC'][2]]),
                   })
df
```
Ahora lo colocamos en disposición vertical.
```
# First force subject IDs to 2-character strings, otherwise sorting scrambles them.
df['SujID'] = ['{0:02d}'.format(int(x)) for x in df['SujID']]
nomVars='Medida'
# Stack every column matching 'Medida' into long format: one row per
# (subject, measurement); the original column name is kept in 'NomVariable'.
dfFactores = pd.melt(df, id_vars=['SujID', 'Grupo'], value_vars=df.filter(regex=nomVars).columns, var_name='NomVariable', value_name=nomVars)
dfFactores
```
Ahora se expande la variable creada NomVariable para que guarde en otra columna el factor tiempo.
```
dfFactores['Tiempo'] = dfFactores['NomVariable'].str.split('_', expand=True)[1]
```
Por último, se pueden ordenar las columnas.
```
dfFactores = dfFactores.reindex (columns=['SujID', 'NomVariable', 'Grupo', 'Tiempo', 'Medida'])
dfFactores
```
### Otro ejemplo con más variables dependientes
```
import numpy as np
import pandas as pd

n = 3
Tiempo = ['pre', 'post']
Grupo = ['GE', 'GC']

np.random.seed(456)  # fix randomness for reproducibility

# (variable, group, time, mean) — the draw order matches the original cell.
mean_table = [('Medida1', 'GE', 'pre', 7.4), ('Medida1', 'GE', 'post', 10.1),
              ('Medida1', 'GC', 'pre', 7.5), ('Medida1', 'GC', 'post', 7.5),
              ('Medida2', 'GE', 'pre', 27.4), ('Medida2', 'GE', 'post', 30.1),
              ('Medida2', 'GC', 'pre', 27.5), ('Medida2', 'GC', 'post', 27.5)]
samples = {(v, g, t): np.random.normal(mu, size=n) for v, g, t, mu in mean_table}

# Assemble the wide-format dataframe: one column per (variable, time) pair.
df2 = pd.DataFrame({'SujID': np.arange(n * len(Grupo)) + 1,
                    'Grupo': np.repeat(Grupo, n),
                    'Medida1_pre': np.hstack([samples[('Medida1', 'GE', 'pre')], samples[('Medida1', 'GC', 'pre')]]),
                    'Medida1_post': np.hstack([samples[('Medida1', 'GE', 'post')], samples[('Medida1', 'GC', 'post')]]),
                    'Medida2_pre': np.hstack([samples[('Medida2', 'GE', 'pre')], samples[('Medida2', 'GC', 'pre')]]),
                    'Medida2_post': np.hstack([samples[('Medida2', 'GE', 'post')], samples[('Medida2', 'GC', 'post')]]),
                    })
df2
```
Ahora se pasa a formato vertical por factores. Se puede poner separando cada variable dependiente en una columna, o todo en vertical incluidas las variables dependientes (facilita crear gráficos por agrupaciones).
#### 1- Separando cada variable dependiente en una columna
```
# Zero-pad subject IDs to two characters so sorting keeps them in order.
df2['SujID'] = ['{0:02d}'.format(int(x)) for x in df2['SujID']]
nomVars=['Medida1', 'Medida2']
dfFactores2 = pd.DataFrame()
# Melt each dependent variable separately and keep only its value column.
# NOTE(review): this relies on every melt producing rows in the same order
# (positional alignment) — verify if df2's column layout ever changes.
for var in nomVars:
    provis = pd.melt(df2, id_vars=['SujID', 'Grupo'], value_vars=df2.filter(regex=var).columns, var_name='Index', value_name=var)
    dfFactores2[var] = provis.iloc[:, -1]
# Copy the identifying variables from the last melt (same row order as above).
dfFactores2['SujID'] = provis['SujID']
#dfFactores2['NomVariable'] = provis['Index']
# "Medida2_pre" -> "pre": the factor level is the piece after "_".
dfFactores2['Tiempo'] = provis['Index'].str.split('_', expand=True)[1]
dfFactores2['Grupo'] = provis['Grupo']
# Reorder the columns.
dfFactores2 = dfFactores2.reindex (columns=['SujID', 'Grupo', 'Tiempo'] + nomVars)
dfFactores2
#provis
```
#### 2- Todo en vertical incluidas las variables dependientes
En value_vars introducir las variables que haya que ordenar, se puede hacer con las distintas opciones de Pandas.
```
# Melt every "Medida*" column at once: one row per (subject, variable, time).
dfFactores2_2 = pd.melt(df2, id_vars=['SujID', 'Grupo'], value_vars=df2.filter(regex='Medida').columns, var_name='Index', value_name='Medida')
# From the melted Index ("Medida1_pre", ...) derive the Tiempo and NomVariable columns.
dfFactores2_2['Tiempo'] = dfFactores2_2['Index'].str.split('_', expand=True)[1]
dfFactores2_2['NomVariable'] = dfFactores2_2['Index'].str.split('_', expand=True)[0]
# Reorder columns; reindexing with this list also drops the helper Index column.
dfFactores2_2 = dfFactores2_2.reindex (columns=['SujID', 'Grupo', 'Tiempo', 'NomVariable', 'Medida'])
dfFactores2_2
```
### Ejemplo más complejo con más variables
Ahora cargamos una base de datos de ejemplo, que contiene una muestra que realiza un experimento en el que se realiza un test de precisión en una diana, otro de control postural y otro test de salto vertical.
```
import pandas as pd
# Load the example spreadsheet (wide/horizontal layout) straight from GitHub.
# NOTE: requires network access and an Excel engine (openpyxl) for .xlsx files.
urlDatos='https://raw.githubusercontent.com/joselvira/BiomecanicaPython/master/Datos/BasesDatos/BaseDatosDianas_Fake.xlsx'
nombreHoja = 'DatosHorizontal'
dfDatos = pd.read_excel(urlDatos, sheet_name=nombreHoja, index_col=None)
dfDatos
```
Hacemos una primera transformación.
```
# Zero-pad subject IDs to two characters so sorting keeps them in order.
dfDatos['SujID'] = ['{0:02d}'.format(int(x)) for x in dfDatos['SujID']]
varDescrip = ['SujID', 'Grupo', 'Edad', 'Talla', 'Masa']
var=['Puntos', 'Equilibrio', 'Salto']
# Melt every measurement column; assumes the first 5 columns are descriptors.
dfFactores = pd.melt(dfDatos, id_vars=varDescrip, value_vars=dfDatos.columns[5:], var_name='Index', value_name='Medida')
# From Index ("Puntos_pre", ...) derive the Tiempo and NomVariable factors.
dfFactores['Tiempo'] = dfFactores['Index'].str.split('_', expand=True)[1]
dfFactores['NomVariable'] = dfFactores['Index'].str.split('_', expand=True)[0]
# Reorder columns; reindexing with this list also drops the helper Index column.
dfFactores = dfFactores.reindex (columns=varDescrip +['Tiempo', 'NomVariable', 'Medida'])
dfFactores
```
## DE VERTICAL A HORIZONTAL (de long a wide)
### Pasa de disposición vertical a horizontal mixta (tidy)
En este caso mantenemos en horizontal el factor tiempo en columnas y mantiene en vertical el factor grupo, como en los análisis del SPSS.
Empezamos con un ejemplo sencillo de base de datos en disposición vertical para pasarlo a horizontal.
Tiene un factor tiempo con tres niveles y un factor grupo con dos niveles. Los datos originales se ordenan alternando el grupo (GE, GE, GC, GC, GE, GE, GC,GC...) mientras que el factor tiempo se coloca seguido (pre, pre, pre, pre, pre, pre, post, post, post, post, post, ...)
```
import numpy as np
import pandas as pd

n = 3
Tiempo = ['base', 'pre', 'post']
Grupo = ['GE', 'GC']

np.random.seed(456)  # reproducible draws

# Synthetic scores; keep this draw order so the seed reproduces the numbers.
GEBase = np.random.normal(7.3, size=n)
GEPre = np.random.normal(7.4, size=n)
GEPost = np.random.normal(10.1, size=n)
GCBase = np.random.normal(7.5, size=n)
GCPre = np.random.normal(7.5, size=n)
GCPost = np.random.normal(7.5, size=n)

# Long-format frame: one row per (subject, time) measurement.
df = pd.DataFrame({
    'SujID': np.tile(np.arange(n * len(Grupo)) + 1, len(Tiempo)),
    'Grupo': np.tile(np.repeat(Grupo, len(Tiempo)), n),
    'Tiempo': np.repeat(Tiempo, n * len(Grupo)),
    'Medida': np.concatenate([GEBase, GCBase, GEPre, GCPre, GEPost, GCPost]),
})
df
```
Ahora pasamos el factor tiempo a una disposición horizontal, manteniendo el factor grupo en vertical, como se hace en SPSS.
```
# Zero-pad subject IDs to two characters so sorting keeps them in order.
df['SujID'] = ['{0:02d}'.format(int(x)) for x in df['SujID']]
# Pivot the Tiempo factor into columns (one column per time point).
dfHorizMixto = df.pivot(index='SujID', columns='Tiempo', values=['Medida'])
# Restore the chronological column order (pivot sorts levels alphabetically).
dfHorizMixto = dfHorizMixto.reindex(columns=['base', 'pre', 'post'], level='Tiempo')
# Flatten the two-level header into single "Medida_<tiempo>" names.
dfHorizMixto.columns = dfHorizMixto.columns.map('_'.join).str.strip()
nonVarNuevas = list(dfHorizMixto.columns)
# Move SujID from the index back to a regular column.
dfHorizMixto.reset_index(level=0, inplace=True)
varDescriptivas = ['Grupo']
# Copy the descriptive variables back from the long frame.
# NOTE(review): this pairs rows positionally (the first rows of df against the
# pivoted subjects) — it assumes df's first rows cover each subject exactly
# once in SujID order; verify if the source ordering changes.
for var in varDescriptivas:
    dfHorizMixto[var] = df.loc[:dfHorizMixto.shape[0]-1, var]
dfHorizMixto = dfHorizMixto.reindex (columns= ['SujID'] + varDescriptivas + nonVarNuevas)
dfHorizMixto
```
### Ejemplo con más variables
Ahora cargamos una base de datos de ejemplo, que contiene una muestra que realiza un experimento en el que se realiza un test de precisión en una diana, otro de control postural y otro test de salto vertical.
```
import pandas as pd
# Load the example spreadsheet (long/vertical layout) straight from GitHub.
urlDatos='https://raw.githubusercontent.com/joselvira/BiomecanicaPython/master/Datos/BasesDatos/BaseDatosDianas_Fake.xlsx'
nombreHoja = 'DatosVertical'
dfDatos = pd.read_excel(urlDatos, sheet_name=nombreHoja, index_col=None)
dfDatos
# Zero-pad subject IDs to two characters so sorting keeps them in order.
dfDatos['SujID'] = ['{0:02d}'.format(int(x)) for x in dfDatos['SujID']]
# Pivot the Tiempo factor into columns for each dependent variable.
dfHorizMixto = dfDatos.pivot(index='SujID', columns='Tiempo', values=['Puntos', 'Equilibrio', 'Salto'])
# Restore the chronological column order (pivot sorts levels alphabetically).
dfHorizMixto = dfHorizMixto.reindex(columns=['pre', 'post', 'retest'], level='Tiempo')
# Flatten the two-level header into single "<variable>_<tiempo>" names.
dfHorizMixto.columns = dfHorizMixto.columns.map('_'.join).str.strip()
nonVarNuevas = list(dfHorizMixto.columns)
# Move SujID from the index back to a regular column.
dfHorizMixto.reset_index(level=0, inplace=True)
varDescriptivas = ['SujGrupo', 'Grupo', 'Tiempo', 'Sexo', 'Edad', 'Talla', 'Masa']
# Copy descriptive variables back by position.
# NOTE(review): positional pairing assumes the first rows of dfDatos list each
# subject exactly once in SujID order — verify against the spreadsheet.
for var in varDescriptivas:
    dfHorizMixto[var] = dfDatos.loc[:dfHorizMixto.shape[0]-1, var]
dfHorizMixto = dfHorizMixto.reindex (columns= ['SujID'] + varDescriptivas + nonVarNuevas)
dfHorizMixto
```
| github_jupyter |
# Plot Entropy of Gaussian
```
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
# NOTE(review): scipy.integrate.quadrature is deprecated and removed in
# SciPy >= 1.14; it is imported here for use by a later cell — prefer
# scipy.integrate.quad there.
from scipy.integrate import quadrature

def exact_entropy(s):
    """Closed-form differential entropy of a Gaussian with standard deviation s."""
    return np.log(s*np.sqrt(2*np.pi*np.e))

sigmas = [0.4,0.8,1.2,2.0,3.5]
x_pts = np.linspace(-5,5,1000)
fig, axs = plt.subplots(1,2,figsize=(12,3))
axs[0].set_title("Entropy of Various Gaussian pdfs", fontsize=16)
# One pdf per sigma, labelled with its exact entropy.
for s in sigmas:
    h = exact_entropy(s)
    axs[0].plot(x_pts, norm.pdf(x_pts,loc=0,scale=s), label="$H={:0.2f}$".format(h), lw=3)
axs[0].legend(prop={'size': 14})
# FIX: raw strings so "\s" in "\sigma" is not treated as an (invalid,
# deprecation-warned) escape sequence; rendered text is unchanged.
axs[1].set_title(r"Gaussian Entropy as a Function of $\sigma$", fontsize=16)
axs[1].plot(np.linspace(0.1,5,1000), exact_entropy(np.linspace(0.1,5,1000)), lw=3)
axs[1].set_ylabel("Differential Entropy", fontsize=14)
axs[1].set_xlabel(r"standard deviation $\sigma$", fontsize=14)
```
# Plot Entropy Element as Function of Information
```
# Probabilities in (0, 1]; start just above 0 to keep log() finite.
x_pts = np.linspace(1e-9,1,1000)
fig, axs = plt.subplots(1,3,figsize=(9,2.5))
#axs[0].set_title("Information", fontsize=16)
# Information content I = -log_b(p) for several logarithm bases b.
for b in [1.2,1.3,1.6,2]:
    I = -np.log(x_pts)/np.log(b)
    axs[0].plot(x_pts, I, label=f"$b={b}$", lw=3)
    axs[1].plot(x_pts, x_pts*I, lw=3)
    axs[2].plot(I, x_pts*I, label=f"$b={b}$", lw=3)
axs[0].legend(prop={'size': 12})
axs[0].set_ylabel("Information", fontsize=14)
axs[0].set_xlabel("Probability", fontsize=14)
axs[0].legend(prop={'size': 12})
axs[0].set_ylim(-1,30)
# Dashed marker at p* = 1/e, where the entropy element -p*log_b(p) peaks.
axs[1].plot([np.e**(-1)]*2,[0,-(1/np.e)*(np.log(np.e**(-1))/np.log(1.2))],ls='dashed',c='k',label="$p^*$")
axs[1].set_ylabel("Entropy Element", fontsize=14)
axs[1].set_xlabel("Probability", fontsize=14)
axs[1].legend(prop={'size': 12})
axs[2].set_ylabel("Entropy Element", fontsize=14)
axs[2].set_xlabel("Information", fontsize=14)
#axs[2].legend(prop={'size': 11})
axs[2].set_xlim(-1,30)
plt.tight_layout()
```
# Compare Entropy of Gaussian to Entropy of GMM
```
class GMM():
    """Univariate Gaussian mixture model: a pi-weighted sum of Normal pdfs."""

    def __init__(self, pis, params, normed=False):
        # `params` rows are (mean, std) pairs, one row per mixture component.
        self.params = np.array(params)
        self.components = self.params.shape[0]
        self.pis = np.array(pis)
        self.max_val = 1
        self.normed = normed
        if self.normed:
            # Rescale so the density's peak on [0, 1] equals 1.
            grid_vals = self(np.linspace(0, 1, 10000))
            self.max_val = grid_vals.max()

    def __call__(self, x):
        """Evaluate the (optionally peak-normalized) mixture density at x."""
        density = sum(weight * norm.pdf(x, *component)
                      for weight, component in zip(self.pis, self.params))
        if self.normed:
            density = density / self.max_val
        return density

    def sample(self, n_samples=1):
        """Draw n_samples points: choose a component by pi, then sample from it."""
        chosen = np.random.choice(self.components, size=n_samples,
                                  replace=True, p=self.pis)
        return np.array([norm.rvs(*self.params[i]) for i in chosen])
def entropy(p,domain):
    """Numerical differential entropy of density p over [domain[0], domain[1]].

    Integrates -p(x)*log(p(x)) and returns (estimate, error_estimate),
    mirroring the old scipy.integrate.quadrature return value.
    """
    # FIX: scipy.integrate.quadrature was deprecated and removed in
    # SciPy >= 1.14; use the adaptive `quad` integrator instead.
    from scipy.integrate import quad

    def f_i(x):
        p_i = p(x)
        return -p_i*np.log(p_i)

    quad_rslt, quad_err = quad(f_i, domain[0], domain[1], epsabs=1e-8, limit=1000)
    return (quad_rslt, quad_err)
# Two-component mixture with modes at -3 and +3, each with std 0.5.
gmm_var = 0.5
num_modes = 2
gmm = GMM([1/num_modes]*num_modes, [[loc,gmm_var] for loc in np.linspace(-3,3,num_modes)])
# Reference: a single zero-mean Gaussian with std 1.
g_var = 1.0
p = lambda x: norm.pdf(x,loc=0,scale=g_var)
x_pts = np.linspace(-6,6,1000)
domain = [-6,6]
# Numerical entropy of the mixture vs. the closed-form Gaussian entropy.
plt.plot(x_pts, gmm(x_pts), label="$H={:0.5f}$".format(entropy(gmm,domain)[0]))
plt.plot(x_pts, p(x_pts), label="$H={:0.5f}$".format(exact_entropy(g_var)))
plt.title("Comparison of Differential Entropies", fontsize=14)
plt.legend()
# NOTE(review): stray arithmetic left over from an interactive session — safe to delete.
5285.57-3215.80
```
| github_jupyter |
<a href="https://colab.research.google.com/github/yuanqing-wang/AFEP/blob/master/test_inference.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
import sys
sys.path.append('..')
import warnings
if not sys.warnoptions:
warnings.simplefilter("ignore")
import numpy as np
np.warnings.filterwarnings('ignore')
import matplotlib
from matplotlib import pyplot as plt
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import time
from titration import *
from bindingmodels import *
```
## preparation
let's get a protein stock and a ligand stock, as well as an empty cell
```
# Stock solutions at 0.1 nominal concentration with small concentration error.
# NOTE(review): `Solution` comes from the project's `titration` module; the
# argument semantics (conc_* = concentration, d_conc_* = its uncertainty) are
# inferred from the names — confirm against that module.
protein_stock = Solution(conc_p=0.1, d_conc_p=1e-4)
ligand_stock = Solution(conc_l=0.1, d_conc_l=1e-4)
complex_stock = Solution(conc_l=0.1, d_conc_l=1e-4, conc_p=0.1, d_conc_p=1e-4)
```
we need two wells, one to put ligand solution, one for protein-ligand complex
```
# One well for the protein-ligand complex titration, one for ligand alone.
well_complex = SingleWell()
well_ligand = SingleWell()
```
## titration
This should model the real process of how the protein and ligand solutions were prepared.
```
# put ligand solution in both wells
# Titrate: ten successive injections of stock into each well.
# NOTE(review): inject()'s positional arguments (volume and its error?) are
# defined in the project's titration module — confirm their meaning there.
for idx in range(10):
    well_complex.inject(complex_stock, 1, 1e-3)
    well_ligand.inject(ligand_stock, 1, 1e-3)
well_complex.select_non_zero()
well_ligand.select_non_zero()
```
To make life easier, we pre-calculate what the fluorescence intensities should look like, roughly.
```
# let's say:
# Ground-truth binding free energy used to simulate the data.
delta_g_truth = -5.0 #kT
# Total protein / ligand concentrations per injection in the complex well.
well_complex_concs_p_truth = well_complex.concs[0]
well_complex_concs_l_truth = well_complex.concs[1]
# Species concentrations at equilibrium implied by the two-component binding model.
concs_p_truth, concs_l_truth, concs_pl_truth = TwoComponentBindingModel.equilibrium_concentrations(
    delta_g_truth, well_complex_concs_p_truth, well_complex_concs_l_truth)
plt.clf()
plt.plot(concs_p_truth, label='protein')
plt.plot(concs_l_truth, label='ligand')
plt.plot(concs_pl_truth, label='complex')
plt.legend()
```
Let's arbitrarily make some fluorescence constants.
```
# Per-species fluorescence coefficients plus plate and buffer background.
fi_p_truth = 0.2
fi_l_truth = 0.1
fi_pl_truth = 0.5
fi_plate_truth = 0.01
fi_buffer_truth = 0.01
cell_length = 1
# Total intensity: linear combination of species concentrations + background.
fi_complex_truth = fi_p_truth * concs_p_truth + fi_l_truth * concs_l_truth + fi_pl_truth * concs_pl_truth + fi_buffer_truth * cell_length + fi_plate_truth
fi_ligand_truth = fi_l_truth * concs_l_truth + fi_pl_truth * concs_pl_truth + fi_buffer_truth * cell_length + fi_plate_truth
```
The world isn't perfect, so let's add some noise.
```
# Add Gaussian measurement noise (sd 0.01) to the ideal intensities.
fi_complex = fi_complex_truth + np.random.normal(0, 0.01, fi_complex_truth.shape[0])
fi_ligand = fi_ligand_truth + np.random.normal(0, 0.01, fi_ligand_truth.shape[0])
```
Start to do some inference.
```
# Build the probabilistic model for inference from the simulated observations.
from inference import *
make_model(well_complex, well_ligand, fi_complex, fi_ligand)
```
| github_jupyter |
##### Copyright 2019 The TensorFlow Probability Authors.
Licensed under the Apache License, Version 2.0 (the "License");
```
#@title Licensed under the Apache License, Version 2.0 (the "License"); { display-mode: "form" }
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# 非ガウス型観測による STS モデルの近似推論
<table class="tfo-notebook-buttons" align="left">
<td><a target="_blank" href="https://www.tensorflow.org/probability/examples/STS_approximate_inference_for_models_with_non_Gaussian_observations"><img src="https://www.tensorflow.org/images/tf_logo_32px.png">TensorFlow.org で表示</a></td>
<td><a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/ja/probability/examples/STS_approximate_inference_for_models_with_non_Gaussian_observations.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png">Google Colab で実行</a></td>
<td><a target="_blank" href="https://github.com/tensorflow/docs-l10n/blob/master/site/ja/probability/examples/STS_approximate_inference_for_models_with_non_Gaussian_observations.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png">GitHub でソースを表示</a></td>
<td><a href="https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/ja/probability/examples/STS_approximate_inference_for_models_with_non_Gaussian_observations.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png">ノートブックをダウンロード</a></td>
</table>
このノートブックでは、構造時系列(STS)モデルで適合と予測を行う際に、(非ガウス型)観測モデルを取り込む上で TFP の近似推論ツールを使用する方法を説明します。この例では、ポワソン型観測モデルを使用して、離散カウントデータを操作します。
```
import time
import matplotlib.pyplot as plt
import numpy as np
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability import bijectors as tfb
from tensorflow_probability import distributions as tfd
tf.enable_v2_behavior()
```
## 合成データ
まず、合成カウントデータを生成します。
```
# Simulate 30 days of counts: a decaying lognormal trend, offset by 3, rounded.
# NOTE: no RNG seed is set, so each run produces different data.
num_timesteps = 30
observed_counts = np.round(3 + np.random.lognormal(np.log(np.linspace(
    num_timesteps, 5, num=num_timesteps)), 0.20, size=num_timesteps))
observed_counts = observed_counts.astype(np.float32)
plt.plot(observed_counts)
```
## モデル
ランダムウォーク線形トレンドを使用して、単純なモデルを指定します。
```
def build_model(approximate_unconstrained_rates):
    """STS model with a single local-linear-trend component.

    The observed series is used only to set heuristic priors/scales.
    """
    components = [
        tfp.sts.LocalLinearTrend(
            observed_time_series=approximate_unconstrained_rates)
    ]
    return tfp.sts.Sum(components,
                       observed_time_series=approximate_unconstrained_rates)
```
このモデルは、観測された時系列で演算する代わりに、観測を管理するポワソン型レートパラメータの系列で演算します。
ポワソン型レートは正である必要があるため、バイジェクターを使用して、正値に渡って実際値の STS モデルを分布に変換します。`Softplus` 変換の $y = \log(1 + \exp(x))$ は正の値がほぼ線形であるため、自然な選択と言えますが、ほかの `Exp`(通常のランダムウォークを対数正規ランダムウォークに変換します)といった選択肢でも可能です。
```
# Map the real-valued STS output to positive Poisson rates.
positive_bijector = tfb.Softplus() # Or tfb.Exp()

# Approximate the unconstrained Poisson rate just to set heuristic priors.
# We could avoid this by passing explicit priors on all model params.
approximate_unconstrained_rates = positive_bijector.inverse(
    tf.convert_to_tensor(observed_counts) + 0.01)  # +0.01 keeps the inverse finite at zero counts
sts_model = build_model(approximate_unconstrained_rates)
```
非ガウス型観測モデルに近似推論を使用するために、STS モデルを TFP JointDistribution としてエンコーディングします。この同時分布の確率変数は STS モデル、潜在ポワソン分布レートの時系列、および観測されたカウントのパラメータです。
```
def sts_with_poisson_likelihood_model():
    """Joint model: STS params -> latent unconstrained rates -> Poisson counts."""
    # Encode the parameters of the STS model as random variables.
    param_vals = []
    for param in sts_model.parameters:
        param_val = yield param.prior
        param_vals.append(param_val)
    # Use the STS model to encode the log- (or inverse-softplus)
    # rate of a Poisson.
    unconstrained_rate = yield sts_model.make_state_space_model(
        num_timesteps, param_vals)
    rate = positive_bijector.forward(unconstrained_rate[..., 0])
    observed_counts = yield tfd.Poisson(rate, name='observed_counts')

model = tfd.JointDistributionCoroutineAutoBatched(sts_with_poisson_likelihood_model)
```
### 推論の準備
指定された観測数で、モデル内の未観測の数量を推論します。まず、観測数に同時対数密度を条件づけます。
```
# Condition the joint distribution on the observed counts.
pinned_model = model.experimental_pin(observed_counts=observed_counts)
```
また、推論が STS モデルのパラメータへの制約(スケールが正であるなど)を考慮するように、制約付きのバイジェクターも必要です。
```
# Bijector mapping unconstrained reals onto the model's constrained support.
constraining_bijector = pinned_model.experimental_default_event_space_bijector()
```
## HMC による推論
HMC(特に NUTS)を使用して、モデルパラメータと潜在レートでの同時事後分布からサンプリングします。
これは、HMC で標準 STS モデルを適合するよりもはるかに低速になります。モデルの(相対的に少ない数の)パラメータのほかに、ポワソン分布レートの全系列も推論しなくてはいけないためです。そのため、相対的に少ないステップ数で実行することにします。推論の質が重要となるアプリケーションについては、これらの値を増やすか、複数のチェーンを実行するのが合理的です。
```
#@title Sampler configuration
# Allow external control of sampling to reduce test runtimes.
num_results = 500 # @param { isTemplate: true}
num_results = int(num_results)  # Colab params arrive as strings
num_burnin_steps = 100 # @param { isTemplate: true}
num_burnin_steps = int(num_burnin_steps)
```
まず、サンプラーを指定し、`sample_chain` を使用してサンプルを生成するサンプリングカーネルを実行します。
```
# NUTS in unconstrained space, wrapped so proposals map back into the
# model's constrained support.
sampler = tfp.mcmc.TransformedTransitionKernel(
    tfp.mcmc.NoUTurnSampler(
        target_log_prob_fn=pinned_model.unnormalized_log_prob,
        step_size=0.1),
    bijector=constraining_bijector)

# Adapt the step size during the first 80% of burn-in toward 75% acceptance.
adaptive_sampler = tfp.mcmc.DualAveragingStepSizeAdaptation(
    inner_kernel=sampler,
    num_adaptation_steps=int(0.8 * num_burnin_steps),
    target_accept_prob=0.75)

# Random initialization drawn in unconstrained space, then mapped forward
# into the constrained space, one tensor per event-shape part.
initial_state = constraining_bijector.forward(
    type(pinned_model.event_shape)(
        *(tf.random.normal(part_shape)
          for part_shape in constraining_bijector.inverse_event_shape(
              pinned_model.event_shape))))

# Speed up sampling by tracing with `tf.function`.
@tf.function(autograph=False, jit_compile=True)
def do_sampling():
    return tfp.mcmc.sample_chain(
        kernel=adaptive_sampler,
        current_state=initial_state,
        num_results=num_results,
        num_burnin_steps=num_burnin_steps,
        trace_fn=None)

t0 = time.time()
samples = do_sampling()
t1 = time.time()
print("Inference ran in {:.2f}s.".format(t1-t0))
```
パラメータのトレースを調べて、推論のサニティチェックを行います。この場合、データに対し複数の説明が探られたようです。これは良いことではありますが、サンプルが多いほどチェーンの混合状態がどれだけうまく行っているかを判定しやすくなります。
```
# Trace plot for each STS parameter: a sanity check on chain mixing.
f = plt.figure(figsize=(12, 4))
for i, param in enumerate(sts_model.parameters):
    ax = f.add_subplot(1, len(sts_model.parameters), i + 1)
    ax.plot(samples[i])
    ax.set_title("{} samples".format(param.name))
```
ようやく成果を見ることができます。ポワソン分布のレートに対する事後分布を見てみましょう!また、観測数に対して 80% の予測間隔もプロットし、この間隔に、実際に観測した数の約 80% が含まれているかを確認します。
```
# The last element of `samples` is the latent rate series; the rest are params.
param_samples = samples[:-1]
unconstrained_rate_samples = samples[-1][..., 0]
rate_samples = positive_bijector.forward(unconstrained_rate_samples)

plt.figure(figsize=(10, 4))
# 80% credible interval for the mean rate...
mean_lower, mean_upper = np.percentile(rate_samples, [10, 90], axis=0)
# ...and 80% posterior-predictive interval for the counts themselves.
pred_lower, pred_upper = np.percentile(np.random.poisson(rate_samples),
                                       [10, 90], axis=0)
_ = plt.plot(observed_counts, color="blue", ls='--', marker='o', label='observed', alpha=0.7)
_ = plt.plot(np.mean(rate_samples, axis=0), label='rate', color="green", ls='dashed', lw=2, alpha=0.7)
_ = plt.fill_between(np.arange(0, 30), mean_lower, mean_upper, color='green', alpha=0.2)
_ = plt.fill_between(np.arange(0, 30), pred_lower, pred_upper, color='grey', label='counts', alpha=0.2)
plt.xlabel("Day")
plt.ylabel("Daily Sample Size")
plt.title("Posterior Mean")
plt.legend()
```
## 予測
観測数を予測するために、標準 STS ツールを使用して、潜在レートに対する予測分布を構築し(STS は実際値データをモデル化するように設計されているため、制約なしの空間で)、サンプリングされた予測をポワソン型観測モデルに通します。
```
def sample_forecasted_counts(sts_model, posterior_latent_rates,
                             posterior_params, num_steps_forecast,
                             num_sampled_forecasts):
    """Sample forecasted counts from the fitted Poisson-STS model.

    Args:
        sts_model: the fitted tfp.sts model.
        posterior_latent_rates: posterior samples of the latent
            unconstrained rate series.
        posterior_params: posterior samples of the STS parameters.
        num_steps_forecast: number of future steps to forecast.
        num_sampled_forecasts: number of forecast trajectories to draw.

    Returns:
        (sampled_forecast_counts, sampled_latent_rates).
    """
    # Forecast the future latent unconstrained rates, given the inferred latent
    # unconstrained rates and parameters.
    # FIX: previously this read the global `unconstrained_rate_samples`
    # instead of the `posterior_latent_rates` argument, silently ignoring
    # the caller's input.
    unconstrained_rates_forecast_dist = tfp.sts.forecast(sts_model,
        observed_time_series=posterior_latent_rates,
        parameter_samples=posterior_params,
        num_steps_forecast=num_steps_forecast)
    # Transform the forecast to positive-valued Poisson rates.
    rates_forecast_dist = tfd.TransformedDistribution(
        unconstrained_rates_forecast_dist,
        positive_bijector)
    # Sample from the forecast model following the chain rule:
    # P(counts) = P(counts | latent_rates)P(latent_rates)
    sampled_latent_rates = rates_forecast_dist.sample(num_sampled_forecasts)
    sampled_forecast_counts = tfd.Poisson(rate=sampled_latent_rates).sample()
    return sampled_forecast_counts, sampled_latent_rates
# Draw 100 thirty-day forecast trajectories from the fitted model.
forecast_samples, rate_samples = sample_forecasted_counts(
    sts_model,
    posterior_latent_rates=unconstrained_rate_samples,
    posterior_params=param_samples,
    # Days to forecast:
    num_steps_forecast=30,
    num_sampled_forecasts=100)
forecast_samples = np.squeeze(forecast_samples)
def plot_forecast_helper(data, forecast_samples, CI=90):
    """Plot the observed time series alongside the forecast.

    Args:
        data: 1-D observed series.
        forecast_samples: sampled forecasts, shape [num_samples, num_steps_forecast].
        CI: width (percent) of the central forecast interval to shade.
    """
    plt.figure(figsize=(10, 4))
    forecast_median = np.median(forecast_samples, axis=0)

    num_steps = len(data)
    num_steps_forecast = forecast_median.shape[-1]

    plt.plot(np.arange(num_steps), data, lw=2, color='blue', linestyle='--', marker='o',
             label='Observed Data', alpha=0.7)

    forecast_steps = np.arange(num_steps, num_steps+num_steps_forecast)

    # Central CI% interval, e.g. CI=90 -> the [5th, 95th] percentiles.
    CI_interval = [(100 - CI)/2, 100 - (100 - CI)/2]
    lower, upper = np.percentile(forecast_samples, CI_interval, axis=0)

    plt.plot(forecast_steps, forecast_median, lw=2, ls='--', marker='o', color='orange',
             label=str(CI) + '% Forecast Interval', alpha=0.7)
    plt.fill_between(forecast_steps,
                     lower,
                     upper, color='orange', alpha=0.2)

    plt.xlim([0, num_steps+num_steps_forecast])
    # FIX: removed dead ymin/ymax/yrange computation that was never used.
    plt.title("{}".format('Observed time series with ' + str(num_steps_forecast) + ' Day Forecast'))
    plt.xlabel('Day')
    plt.ylabel('Daily Sample Size')
    plt.legend()

plot_forecast_helper(observed_counts, forecast_samples, CI=80)
```
## VI 推論
変分推論は、ここで行っている近似カウントのように(標準 STS モデルの一つの時系列の*パラメータ*だけとは異なり)、全時系列を推論する場合に困難をきたす可能性があります。各時間ステップは隣接する時間ステップに相関しており、不確実性を過小評価する可能性があるため、変数に独立した事後分布があるという標準的な仮定はまったく間違っています。このため、全時系列に対する近似推論には、HMC がより適した選択肢である可能性があります。ただし、VI ははるかに高速であるため、モデルのプロトタイピングや、そのパフォーマンスが「十分に良好」であることが経験的に示される場合に役立つことがあります。
モデルを VI に適合するために、代替(サロゲート)事後分布を構築し、最適化します。
```
# Mean-field (factored) surrogate posterior defined over the constrained space.
surrogate_posterior = tfp.experimental.vi.build_factored_surrogate_posterior(
    event_shape=pinned_model.event_shape,
    bijector=constraining_bijector)

# Allow external control of optimization to reduce test runtimes.
num_variational_steps = 1000 # @param { isTemplate: true}
num_variational_steps = int(num_variational_steps)

# Maximize the ELBO with Adam, tracking the loss curve.
t0 = time.time()
losses = tfp.vi.fit_surrogate_posterior(pinned_model.unnormalized_log_prob,
                                        surrogate_posterior,
                                        optimizer=tf.optimizers.Adam(0.1),
                                        num_steps=num_variational_steps)
t1 = time.time()
print("Inference ran in {:.2f}s.".format(t1-t0))
plt.plot(losses)
plt.title("Variational loss")
_ = plt.xlabel("Steps")

# Draw posterior samples; the last part is the latent rate series.
posterior_samples = surrogate_posterior.sample(50)
param_samples = posterior_samples[:-1]
unconstrained_rate_samples = posterior_samples[-1][..., 0]
rate_samples = positive_bijector.forward(unconstrained_rate_samples)

plt.figure(figsize=(10, 4))
# 80% credible interval for the rate and predictive interval for the counts.
mean_lower, mean_upper = np.percentile(rate_samples, [10, 90], axis=0)
pred_lower, pred_upper = np.percentile(
    np.random.poisson(rate_samples), [10, 90], axis=0)
_ = plt.plot(observed_counts, color='blue', ls='--', marker='o',
             label='observed', alpha=0.7)
_ = plt.plot(np.mean(rate_samples, axis=0), label='rate', color='green',
             ls='dashed', lw=2, alpha=0.7)
_ = plt.fill_between(
    np.arange(0, 30), mean_lower, mean_upper, color='green', alpha=0.2)
_ = plt.fill_between(np.arange(0, 30), pred_lower, pred_upper, color='grey',
                     label='counts', alpha=0.2)
plt.xlabel('Day')
plt.ylabel('Daily Sample Size')
plt.title('Posterior Mean')
plt.legend()

# Forecast from the VI posterior and plot it with an 80% interval.
forecast_samples, rate_samples = sample_forecasted_counts(
    sts_model,
    posterior_latent_rates=unconstrained_rate_samples,
    posterior_params=param_samples,
    # Days to forecast:
    num_steps_forecast=30,
    num_sampled_forecasts=100)
forecast_samples = np.squeeze(forecast_samples)
plot_forecast_helper(observed_counts, forecast_samples, CI=80)
```
| github_jupyter |
```
# NOTE(review): tensorflow.examples.tutorials was removed in TensorFlow 2.x;
# this cell (and the rest of this notebook section) only runs on TF 1.x.
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
```
### Helper Functions
```
import tensorflow as tf

# NOTE(review): TF 1.x graph-mode API throughout (tf.truncated_normal and the
# 4-argument tf.nn.max_pool signature); will not run unmodified on TF 2.x.

def weight_variable(shape):
    # Small random init to break symmetry between units.
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)

def bias_variable(shape):
    # Slightly positive bias to avoid dead ReLUs at the start.
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)

def conv2d(x, W):
    # Stride-1 convolution with padding that preserves spatial size.
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

def max_pool_2x2(x):
    # 2x2 max-pooling that halves each spatial dimension.
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1], padding='SAME')
```
### Model
```
#with tf.device("/cpu:0"):
# Input: flattened 28x28 grayscale images, reshaped to NHWC for conv layers.
x = tf.placeholder('float', shape=[None, 784])
x_image = tf.reshape(x, [-1, 28, 28, 1])
# conv layer 1: 5x5 kernels, 1 -> 32 channels
W_conv1 = weight_variable([5, 5, 1, 32])
b_conv1 = bias_variable([32])
z_conv1 = conv2d(x_image, W_conv1) + b_conv1
# relu -> pool (28x28 -> 14x14)
h_conv1 = tf.nn.relu(z_conv1)
h_pool1 = max_pool_2x2(h_conv1)
# conv layer 2: 5x5 kernels, 32 -> 64 channels
W_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])
z_conv2 = conv2d(h_pool1, W_conv2) + b_conv2
# relu -> pool (14x14 -> 7x7)
h_conv2 = tf.nn.relu(z_conv2)
h_pool2 = max_pool_2x2(h_conv2)
# flatten -> fully connected layer with 1024 units
h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])
W_fc1 = weight_variable([7 * 7 * 64, 1024])
b_fc1 = bias_variable([1024])
z_fc1 = tf.matmul(h_pool2_flat, W_fc1) + b_fc1
# relu
h_fc1 = tf.nn.relu(z_fc1)
# dropout (keep_prob fed at run time: 0.5 for training, 1.0 for eval)
keep_prob = tf.placeholder('float')
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
# softmax weights: 1024 -> 10 classes
W_fc2 = weight_variable([1024, 10])
b_fc2 = bias_variable([10])
scores = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
# softmax + cross-entropy
# NOTE(review): -sum(y*log(probs)) is numerically unsafe (log(0) -> NaN);
# tf.nn.softmax_cross_entropy_with_logits on `scores` would be stabler.
probs = tf.nn.softmax(scores)
y = tf.placeholder('float', shape=[None, 10])
cross_entropy = -tf.reduce_sum(y*tf.log(probs))
# evaluation: fraction of argmax predictions matching the one-hot labels
correct_predictions = tf.equal(tf.argmax(probs, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_predictions, 'float'))
```
### Train
```
sess = tf.InteractiveSession()
#with tf.device("/cpu:0"):
# Adam optimizer on the cross-entropy loss.
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
init = tf.initialize_all_variables()
sess.run(init)

def train(iters=2000):
    """Generator: run `iters` SGD steps, yielding train accuracy every 100 steps."""
    for i in range(iters):
        batch_xs, batch_ys = mnist.train.next_batch(50)
        if not i % 100:
            # Evaluate without dropout (keep_prob=1.0).
            train_accuracy = accuracy.eval(feed_dict={x: batch_xs, y: batch_ys, keep_prob: 1.0})
            # FIX: was a Python 2 print statement — a SyntaxError under Python 3.
            print('step {}: training accuracy = {}'.format(i, train_accuracy))
            yield train_accuracy
        train_step.run(feed_dict={x: batch_xs, y: batch_ys, keep_prob: 0.5})

train_accuracies = list(train())
```
### Evaluate on Test Set
```
# Final test-set accuracy (dropout disabled via keep_prob=1.0).
accuracy.eval(feed_dict={x: mnist.test.images, y: mnist.test.labels, keep_prob: 1.0})
```
| github_jupyter |
```
import numpy as np
import matplotlib.pyplot as plt
import scipy
from scipy import ndimage
from sklearn.datasets import fetch_openml

# Download MNIST: 70000 flattened 28x28 grayscale digits (shape (70000, 784)).
mnist = fetch_openml('mnist_784')
# NOTE(review): recent scikit-learn returns a DataFrame here by default; the
# integer indexing below assumes ndarrays — confirm, or pass as_frame=False.
x = mnist.data
y = mnist.target

# One response array per compass direction.
e_k = np.zeros_like(x)
s_k = np.zeros_like(x)
n_k = np.zeros_like(x)
nw_k = np.zeros_like(x)
ne_k = np.zeros_like(x)
sw_k = np.zeros_like(x)
se_k = np.zeros_like(x)
w_k = np.zeros_like(x)

# Kirsch compass edge masks (one 3x3 kernel per direction).
ka= np.array([[-3,-3,-3],[-3,0,-3],[5,5,5]])
na= np.array([[-3,-3,5],[-3,0,5],[-3,-3,5]])
wa= np.array([[5,5,5],[-3,0,-3],[-3,-3,-3]])
sa= np.array([[5,-3,-3],[5,0,-3],[5,-3,-3]])
nea= np.array([[-3,-3,-3],[-3,0,5],[-3,5,5]])
nwa= np.array([[-3,5,5],[-3,0,5],[-3,-3,-3]])
sea= np.array([[-3,-3,-3],[5,0,-3],[5,5,-3]])
swa= np.array([[5,5,-3],[5,0,-3],[-3,-3,-3]])

# Convolve every image with each mask (slow: 8 x 70000 convolutions).
# NOTE: with mode='nearest' the cval argument is ignored.
for i in range(70000):
    e_k[i]=ndimage.convolve(x[i].reshape((28, 28)),ka,mode='nearest',cval=0.0).reshape(784)
    s_k[i]=ndimage.convolve(x[i].reshape((28, 28)),sa,mode='nearest',cval=0.0).reshape(784)
    n_k[i]=ndimage.convolve(x[i].reshape((28, 28)),na,mode='nearest',cval=0.0).reshape(784)
    w_k[i]=ndimage.convolve(x[i].reshape((28, 28)),wa,mode='nearest',cval=0.0).reshape(784)
    nw_k[i]=ndimage.convolve(x[i].reshape((28, 28)),nwa,mode='nearest',cval=0.0).reshape(784)
    ne_k[i]=ndimage.convolve(x[i].reshape((28, 28)),nea,mode='nearest',cval=0.0).reshape(784)
    sw_k[i]=ndimage.convolve(x[i].reshape((28, 28)),swa,mode='nearest',cval=0.0).reshape(784)
    se_k[i]=ndimage.convolve(x[i].reshape((28, 28)),sea,mode='nearest',cval=0.0).reshape(784)
# Local Directional Pattern: per pixel, set bits for the 3 strongest of the
# 8 directional responses (by absolute value), then read the byte as a code.
ldp_mat=np.zeros_like(x)
ldp_hist=np.zeros((70000,256))  # NOTE(review): allocated but never filled below
for i in range(70000):
    e=e_k[i].reshape((28,28))
    s=s_k[i].reshape((28,28))
    n=n_k[i].reshape((28,28))
    w=w_k[i].reshape((28,28))
    nw=nw_k[i].reshape((28,28))
    ne=ne_k[i].reshape((28,28))
    sw=sw_k[i].reshape((28,28))
    se=se_k[i].reshape((28,28))
    ldp=ldp_mat[i].reshape((28,28))
    for k in range(28):
        for j in range(28):
            # The 8 directional responses at pixel (k, j).
            lst=[se[k][j],s[k][j],sw[k][j],w[k][j],nw[k][j],n[k][j],ne[k][j],e[k][j]]
            l=[abs(h) for h in lst]
            # Direction indices sorted by |response|, strongest first.
            marr=np.argsort(l)
            marr1=marr[::-1]
            # Set the bits of the 3 strongest directions.
            binary=np.zeros(8,dtype="uint8")
            binary[marr1[0]]=1
            binary[marr1[1]]=1
            binary[marr1[2]]=1
            # Pack the 8 bits (MSB first) into a decimal LDP code in [0, 255].
            d_no=binary[0]*2**7+binary[1]*2**6+binary[2]*2**5+binary[3]*2**4+binary[4]*2**3+binary[5]*2**2+binary[6]*2**1+binary[7]*2**0
            ldp[k][j]=d_no
    ldp_mat[i]=ldp.reshape(784)
from sklearn.model_selection import train_test_split
# 60k train / 10k test split of the LDP features.
train_img, test_img, train_lbl, test_lbl = train_test_split( ldp_mat, mnist.target, test_size=1/7.0, random_state=0)
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
# Fit the scaler on the training set only, then apply to both splits.
scaler.fit(train_img)
train_img = scaler.transform(train_img)
test_img = scaler.transform(test_img)
from sklearn.decomposition import PCA
# Keep enough principal components to explain 94% of the variance.
pca = PCA(.94)
pca.fit(train_img)
train_img = pca.transform(train_img)
test_img = pca.transform(test_img)
from sklearn.svm import SVC
svc_model=SVC()
import time
# Time the SVM fit (slow on 60k samples).
f=time.time()
svc_model.fit(train_img, train_lbl)
q=time.time()
print(q-f)
y_predict=svc_model.predict(test_img)
from sklearn import metrics
# Test and train accuracy, printed in percent.
d=svc_model.score(test_img,test_lbl)
print(d*100)
t=svc_model.score(train_img,train_lbl)
print(t*100)
```
| github_jupyter |
## 3.4 编辑段落
### 3.4.1 段落首行缩进调整
许多出版社要求文章段落必须首行缩进,若想调整段落首行缩进的距离,可以使用`\setlength{\parindent}{长度}`命令,在`{长度}`处填写需要设置的距离即可。
【**例3-10**】使用`\setlength{\parindent}{长度}`命令调整段落首行缩进为两字符。
```tex
\documentclass[12pt]{article}
\setlength{\parindent}{2em}
\begin{document}
In \LaTeX, We can use the setlength command to adjust the indentation distance of the first line. In this case, we set the indentation distance as 2em.
\end{document}
```
编译后效果如图3.4.1所示。
<p align="center">
<img align="middle" src="graphics/example3_4_1.png" width="600" />
</p>
<center><b>图3.4.1</b> 编译后效果</center>
当然,如果不想让段落自动首行缩进, 在段落前使用命令`\noindent`即可。
【**例3-11**】使用`\noindent`命令使第二段首行不缩进。
```tex
\documentclass[12pt]{article}
\setlength{\parindent}{2em}
\begin{document}
In \LaTeX, We can use the setlength command to adjust the indentation distance of the first line. In this case, we set the indentation distance as 2em.
\noindent In \LaTeX, We can use the setlength command to adjust the indentation distance of the first line. In this case, we set the indentation distance as 2em.
\end{document}
```
编译后效果如图3.4.2所示。
<p align="center">
<img align="middle" src="graphics/example3_4_2.png" width="600" />
</p>
<center><b>图3.4.2</b> 编译后效果</center>
需要注意的是,当段落设置在章节后面时,每一节后的第一段默认是不缩进的,为了使第一段像其他段一样缩进,可以在段落前使用`\hspace*{\parindent}`命令,也可以在源文件的前导代码中直接调用宏包`\usepackage{indentfirst}`。
【**例3-12**】使用`\hspace*{\parindent}`命令使章节后第一段首行缩进。
```tex
\documentclass[12pt]{article}
\setlength{\parindent}{2em}
\begin{document}
\section{Introduction}
\hspace*{\parindent}In \LaTeX, We can use the setlength command to adjust the indentation distance of the first line. In this case, we set the indentation distance as 2em.
In \LaTeX, We can use the setlength command to adjust the indentation distance of the first line. In this case, we set the indentation distance as 2em.
\end{document}
```
编译后效果如图3.4.3所示。
<p align="center">
<img align="middle" src="graphics/example3_4_3.png" width="600" />
</p>
<center><b>图3.4.3</b> 编译后效果</center>
【**例3-13**】使用`\usepackage{indentfirst}`命令使章节后第一段首行缩进。
```tex
\documentclass[12pt]{article}
\setlength{\parindent}{2em}
\usepackage{indentfirst}
\begin{document}
\section{Introduction}
In \LaTeX, We can use the setlength command to adjust the indentation distance of the first line. In this case, we set the indentation distance as 2em.
In \LaTeX, We can use the setlength command to adjust the indentation distance of the first line. In this case, we set the indentation distance as 2em.
\end{document}
```
编译后效果如图3.4.4所示。
<p align="center">
<img align="middle" src="graphics/example3_4_4.png" width="600" />
</p>
<center><b>图3.4.4</b> 编译后效果</center>
### 3.4.2 段落间距调整
在使用LaTeX排版时,有时为了使段落与段落之间的区别更加明显,我们可以在段落之间设置一定的间距,最简单的方式是使用`\smallskip`、`\medskip`和`\bigskip`等命令。
> 参考[How to insert a blank line between any two paragraphs???](https://latex.org/forum/viewtopic.php?f=44&t=6934)
【**例3-14**】使用`\smallskip`、`\medskip`和`\bigskip`等命令调整不同的段落间距。
```tex
\documentclass[12pt]{article}
\begin{document}
How to set space between any two paragraphs?
\smallskip
How to set space between any two paragraphs?
\medskip
How to set space between any two paragraphs?
\bigskip
How to set space between any two paragraphs?
\end{document}
```
编译后效果如图3.4.5所示。
<p align="center">
<img align="middle" src="graphics/example3_4_5.png" width="600" />
</p>
<center><b>图3.4.5</b> 编译后效果</center>
设置段落间距的几种方法:[https://tex.stackexchange.com/questions/41476](https://tex.stackexchange.com/questions/41476)
### 3.4.3 段落添加文本框
有时因为文档没有图全都是文字,版面显得极其单调。如果想让版面有所变化,可以通过给文字加边框来实现对段落文本新增边框。在LaTeX中,我们可以使用`\fbox{}`命令对文本新增边框。
> 参考[How to put a box around multiple lines](https://latex.org/forum/viewtopic.php?f=44&t=4117)。
【**例3-15**】使用`\fbox{}`创建文本边框。
```tex
\documentclass[12pt]{article}
\begin{document}
\fbox{
\parbox{0.8\linewidth}{
In \LaTeX, we can use fbox and parbox to put a box around multiple lines. In this case, we set the linewidth as 0.8.
}
}
\end{document}
```
编译后效果如图3.4.6所示。
<p align="center">
<img align="middle" src="graphics/example3_4_6.png" width="600" />
</p>
<center><b>图3.4.6</b> 编译后效果</center>
### 3.4.4 段落对齐方式调整
LaTeX默认的对齐方式是两端对齐,有时在进行文档排版的过程中,我们为了突出某一段落的内容,会选择将其居中显示,在LaTeX中,我们可以使用`center`环境对文本进行居中对齐。另外还有一些出版商要求文档是左对齐或者右对齐,这时我们同样可以使用`flushleft`环境和`flushright`环境将文档设置为左对齐或右对齐。
【**例3-16**】分别使用`center`、`flushleft`和`flushright`环境对文本进行居中对齐、左对齐和右对齐。
```tex
\documentclass[12pt]{article}
\begin{document}
\begin{center}
This is latex-cookbook
\end{center}
\begin{flushleft}
This is latex-cookbook
\end{flushleft}
\begin{flushright}
This is latex-cookbook
\end{flushright}
\end{document}
```
编译后效果如图3.4.7所示。
<p align="center">
<img align="middle" src="graphics/example3_4_7.png" width="600" />
</p>
<center><b>图3.4.7</b> 编译后效果</center>
【回放】[**3.3 生成目录**](https://nbviewer.jupyter.org/github/xinychen/latex-cookbook/blob/main/chapter-3/section3.ipynb)
【继续】[**3.5 编辑文字**](https://nbviewer.jupyter.org/github/xinychen/latex-cookbook/blob/main/chapter-3/section5.ipynb)
### License
<div class="alert alert-block alert-danger">
<b>This work is released under the MIT license.</b>
</div>
| github_jupyter |
# Qt Demo
This will launch various Qt compatible packages
Note: as of 2019-05-26, PySide2-5.13+ compatibility is
- Ok for Qtconsole, Qtpy, pyzo, wppm, PyQtgraph, rx
- ToDo for Spyder, guidata, guiqwt
## Qt4 & Qt5 Dedicated Graphic libraries: PyQtgraph, guidata, guiqwt
```
# PyQtgraph (Scientific Graphics and GUI Library for Python)
import pyqtgraph.examples; pyqtgraph.examples.run()
# Guidata (Python library generating graphical user interfaces for easy dataset editing and display)
from guidata import tests; tests.run()
# Guiqwt (Efficient 2D plotting Python library based on PythonQwt)
from guiqwt import tests; tests.run()
#QtDemo (if present)
!if exist "%WINPYDIR%\Lib\site-packages\PyQt5\examples\qtdemo\qtdemo.py" "%WINPYDIR%\python.exe" "%WINPYDIR%\Lib\site-packages\PyQt5\examples\qtdemo\qtdemo.py"
!if exist "%WINPYDIR%\Lib\site-packages\PyQt4\examples\demos\qtdemo\qtdemo.pyw" "%WINPYDIR%\pythonw.exe" "%WINPYDIR%\Lib\site-packages\PyQt4\examples\demos\qtdemo\qtdemo.pyw"
!if exist "%WINPYDIR%\Lib\site-packages\PySide2\examples\datavisualization" "%WINPYDIR%\python.exe" "%WINPYDIR%\Lib\site-packages\PySide2\examples\datavisualization\bars3d.py"
```
## Reactive programing: rx
```
# from https://github.com/ReactiveX/RxPY/blob/master/examples/timeflies/timeflies_qt.py
import sys
import rx
from rx import operators as ops
from rx.subjects import Subject
from rx.concurrency.mainloopscheduler import QtScheduler
try:
from PyQt5 import QtCore
from PyQt5.QtWidgets import QApplication, QWidget, QLabel
except ImportError:
try:
from PySide2 import QtCore
from PySide2.QtWidgets import QApplication, QLabel, QWidget
except ImportError:
raise ImportError('Please ensure either PyQT5 or PySide2 is available!')
class Window(QWidget):
    """Main widget that republishes mouse movement through an Rx Subject."""

    def __init__(self):
        super().__init__()
        self.setWindowTitle("Rx for Python rocks")
        self.resize(600, 600)
        self.setMouseTracking(True)
        # Observers subscribe to this Subject to receive (x, y) cursor positions.
        self.mousemove = Subject()

    def mouseMoveEvent(self, event):
        # Push the cursor position into the Rx stream on every move.
        self.mousemove.on_next((event.x(), event.y()))
def main():
    """Build the Qt application and wire up the 'time flies' Rx pipeline."""
    app = QApplication(sys.argv)
    window = Window()
    window.show()

    caption = 'TIME FLIES LIKE AN ARROW'

    def position_label(info):
        # Each event carries the label widget, the cursor position, and the
        # label's index within the caption.
        label, (x, y), i = info
        label.move(x + i * 12 + 15, y)
        label.show()

    def follow_mouse(label, i):
        # Every label trails the cursor with a delay proportional to its
        # position in the caption, producing the streaming-text effect.
        return window.mousemove.pipe(
            ops.delay(i * 0.100),
            ops.map(lambda xy: (label, xy, i)),
        )

    # One QLabel per character, each following the mouse with its own delay.
    rx.from_(caption).pipe(
        ops.map(lambda ch: QLabel(ch, window)),
        ops.flat_map_indexed(follow_mouse),
    ).subscribe(position_label, on_error=print,
                scheduler=QtScheduler(QtCore))
    sys.exit(app.exec_())


if __name__ == '__main__':
    main()
!pip download --dest C:\WinP\a QtPy
```
| github_jupyter |
```
"""
Distributed Proximal Policy Optimization (Distributed PPO or DPPO) continuous
version implementation with distributed Tensorflow and Python’s multiprocessing
package. This implementation uses normalized running rewards with GAE. The code
is tested with Gym’s continuous action space environment, Pendulum-v0 on Colab.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
#!pip install -q tf-nightly
import tensorflow as tf
tf.reset_default_graph()
import numpy as np
import matplotlib.pyplot as plt
import gym
import time
from multiprocessing import Process
# The following class is adapted from OpenAI's baseline:
# https://github.com/openai/baselines/blob/master/baselines/common/running_mean_std.py
# https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Parallel_algorithm
# This class is used for the normalization of rewards in this program before GAE computation.
class RunningStats(object):
    """Running mean/variance of a data stream, merged batch-by-batch.

    Uses the parallel-variance merge (Chan et al.), as in OpenAI Baselines'
    RunningMeanStd; used here to normalize rewards before GAE.
    """

    def __init__(self, epsilon=1e-4, shape=()):
        # epsilon acts as a tiny prior sample count so the first merge is
        # well-defined (no division by zero).
        self.mean = np.zeros(shape, 'float64')
        self.var = np.ones(shape, 'float64')
        self.std = np.ones(shape, 'float64')
        self.count = epsilon

    def update(self, x):
        """Fold a batch of samples (stacked along axis 0) into the stats."""
        self.update_from_moments(np.mean(x, axis=0), np.var(x, axis=0), x.shape[0])

    def update_from_moments(self, batch_mean, batch_var, batch_count):
        """Merge precomputed batch moments into the accumulated moments."""
        total = self.count + batch_count
        delta = batch_mean - self.mean
        merged_mean = self.mean + delta * batch_count / total
        # Combine second central moments of the two partitions.
        m2 = (self.var * self.count
              + batch_var * batch_count
              + np.square(delta) * self.count * batch_count / total)
        self.mean = merged_mean
        self.var = m2 / total
        # Floor the std so later normalization never divides by ~0.
        self.std = np.maximum(np.sqrt(self.var), 1e-6)
        self.count = total
class PPO(object):
    """PPO actor-critic graph (TF1), built once globally plus once per worker.

    When ``scope`` is not the global scope, extra push/pull ops are created
    that apply locally computed gradients to ``global_PPO``'s parameters and
    copy the global parameters back into this local copy.
    """

    def __init__(self, scope, sess, env, global_PPO=None):
        self.sess = sess
        self.env = env
        #OPT_A = tf.train.AdamOptimizer(A_LR, beta1=0.99, beta2=0.999, name='OPT_A')
        #OPT_C = tf.train.AdamOptimizer(C_LR, beta1=0.99, beta2=0.999, name='OPT_C')
        OPT_A = tf.train.AdamOptimizer(A_LR, name='OPT_A')  # actor optimizer
        OPT_C = tf.train.AdamOptimizer(C_LR, name='OPT_C')  # critic optimizer
        with tf.variable_scope(scope): # scope is either global or wid
            self.state = tf.placeholder(tf.float32, [None, S_DIM], 'state')
            # critic: one hidden layer -> scalar state-value estimate
            with tf.variable_scope('critic'):
                h1 = tf.layers.dense(self.state, hidden, tf.nn.relu, name='hidden', trainable=True)
                self.val = tf.layers.dense(h1, 1, name='val', trainable=True)
            self.critic_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/critic')
            self.discounted_r = tf.placeholder(tf.float32, [None, 1], 'discounted_r')
            # advantage here is the TD-lambda target minus the value estimate
            self.advantage = self.discounted_r - self.val
            self.closs = tf.reduce_mean(tf.square(self.advantage))
            self.ctrain_op = OPT_C.minimize(self.closs)
            with tf.variable_scope('cgrads'):
                self.critic_grad_op = tf.gradients(self.closs, self.critic_params)
            # actor: current policy 'pi' plus frozen snapshot 'oldpi' for the ratio
            self.pi, self.pi_params = self._build_anet(scope, 'pi', self.env, trainable=True)
            self.oldpi, self.oldpi_params = self._build_anet(scope, 'oldpi', self.env, trainable=True) # originally trainable=False
            with tf.variable_scope('sample_action'):
                self.sample_op = tf.squeeze(self.pi.sample(1), axis=0) # choosing action
            with tf.variable_scope('update_oldpi'):
                # Snapshot current policy weights into oldpi before each update round.
                self.update_oldpi_op = [oldp.assign(p) for p, oldp in zip(self.pi_params, self.oldpi_params)]
            self.act = tf.placeholder(tf.float32, [None, A_DIM], 'action')
            self.adv = tf.placeholder(tf.float32, [None, 1], 'advantage')
            with tf.variable_scope('loss'):
                with tf.variable_scope('surrogate'):
                    # PPO probability ratio pi(a|s) / oldpi(a|s)
                    ratio = self.pi.prob(self.act) / self.oldpi.prob(self.act)
                    surr = ratio * self.adv
                # clipped surrogate objective (negated: minimized by the optimizer)
                self.aloss = -tf.reduce_mean(tf.minimum(surr, tf.clip_by_value(ratio, 1.-epsilon, 1.+epsilon)*self.adv))
            with tf.variable_scope('atrain'):
                self.atrain_op = OPT_A.minimize(self.aloss)
            with tf.variable_scope('agrads'):
                self.pi_grad_op = tf.gradients(self.aloss, self.pi_params)
        if scope != net_scope: # not global
            with tf.name_scope('params'): # push/pull from local/worker perspective
                with tf.name_scope('push_to_global'):
                    # Apply local gradients directly to the global parameters.
                    self.push_actor_pi_params = OPT_A.apply_gradients(zip(self.pi_grad_op, global_PPO.pi_params))
                    self.push_critic_params = OPT_C.apply_gradients(zip(self.critic_grad_op, global_PPO.critic_params))
                with tf.name_scope('pull_fr_global'):
                    # Overwrite local parameters with the global copies.
                    self.pull_actor_pi_params = [local_params.assign(global_params) for local_params, global_params in zip(self.pi_params, global_PPO.pi_params)]
                    self.pull_critic_params = [local_params.assign(global_params) for local_params, global_params in zip(self.critic_params, global_PPO.critic_params)]

    def update(self, s, a, r, adv):
        """Refresh oldpi, train actor and critic locally, and sync with global."""
        self.sess.run(self.update_oldpi_op)
        for _ in range(A_EPOCH): # train actor
            self.sess.run(self.atrain_op, {self.state: s, self.act: a, self.adv: adv})
        # update actor
        self.sess.run([self.push_actor_pi_params,
                       self.pull_actor_pi_params],
                      {self.state: s, self.act: a, self.adv: adv})
        for _ in range(C_EPOCH): # train critic
            # update critic
            self.sess.run(self.ctrain_op, {self.state: s, self.discounted_r: r})
        self.sess.run([self.push_critic_params,
                       self.pull_critic_params],
                      {self.state: s, self.discounted_r: r})

    def _build_anet(self, scope, name, env, trainable):
        """Build one Gaussian policy head; return (distribution, trainable params)."""
        with tf.variable_scope(name):
            h1 = tf.layers.dense(self.state, hidden, tf.nn.relu, name='hidden', trainable=trainable)
            # mean is scaled to the action-space upper bound; softplus keeps sigma > 0
            mu = self.env.action_space.high * tf.layers.dense(h1, A_DIM, tf.nn.tanh, name='mu', trainable=trainable)
            sigma = tf.layers.dense(h1, A_DIM, tf.nn.softplus, name='sigma', trainable=trainable)
            norm_dist = tf.distributions.Normal(loc=mu, scale=sigma)
        params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/' + name)
        return norm_dist, params

    def choose_action(self, s):
        """Sample an action for state ``s``, clipped to the valid action range."""
        s = s[None, :]
        a = self.sess.run(self.sample_op, {self.state: s})[0]
        return np.clip(a, self.env.action_space.low, self.env.action_space.high)

    def get_val(self, s):
        """Return the critic's scalar value estimate for state ``s``."""
        if s.ndim < 2: s = s[None, :]
        return self.sess.run(self.val, {self.state: s})[0, 0]

    # This function is adapted from OpenAI's Baseline
    # GAE computation
    # returns TD lamda return & advantage
    def add_vtarg_and_adv(self, R, done, V, v_s_, gamma, lam):
        # Compute target value using TD(lambda) estimator, and advantage with GAE(lambda)
        # last element is only used for last vtarg, but we already zeroed it if last new = 1
        done = np.append(done, 0)
        V_plus = np.append(V, v_s_)  # append bootstrap value for the final state
        T = len(R)
        adv = gaelam = np.empty(T, 'float32')
        lastgaelam = 0
        # Backward recursion over the trajectory (standard GAE).
        for t in reversed(range(T)):
            nonterminal = 1-done[t+1]
            delta = R[t] + gamma * V_plus[t+1] * nonterminal - V_plus[t]
            gaelam[t] = lastgaelam = delta + gamma * lam * nonterminal * lastgaelam
        #print("adv=", adv.shape)
        #print("V=", V.shape)
        #print("V_plus=", V_plus.shape)
        tdlamret = np.vstack(adv) + V
        #print("tdlamret=", tdlamret.shape)
        return tdlamret, adv # tdlamret is critic_target or Qs
class Worker(object):
    """One rollout/training worker with its own gym env and local PPO network."""

    def __init__(self, wid, GLOBAL_PPO, GLOBAL_EP, GLOBAL_RUNNING_R, sess):
        self.wid = wid
        self.env = gym.make(GAME).unwrapped
        self.g_ppo = GLOBAL_PPO
        self.ppo = PPO(wid, sess, self.env, GLOBAL_PPO)  # local net tied to the global one
        self.running_stats_r = RunningStats()  # normalizes rewards before GAE
        self.sess = sess
        self.GLOBAL_EP = GLOBAL_EP
        self.GLOBAL_RUNNING_R = GLOBAL_RUNNING_R

    def work(self):
        """Collect episodes and update PPO until EP_MAX global episodes are done."""
        T = 0
        t = 0
        SESS = self.sess
        GLOBAL_EP = self.GLOBAL_EP
        GLOBAL_RUNNING_R = self.GLOBAL_RUNNING_R
        while SESS.run(GLOBAL_EP) < EP_MAX:
            s = self.env.reset()
            buffer_s, buffer_a, buffer_r, buffer_done, buffer_V = [], [], [], [], []
            ep_r = 0  # un-normalized episode return, used for reporting only
            for t in range(EP_LEN):
                a = self.ppo.choose_action(s)
                s_, r, done, _ = self.env.step(a)
                buffer_s.append(s)
                buffer_a.append(a)
                buffer_r.append(r)
                buffer_done.append(done)
                v = self.ppo.get_val(s)
                buffer_V.append(v)
                s = s_
                ep_r += r
                # update ppo every BATCH steps, or at the end of the episode
                if (t+1) % BATCH == 0 or t == EP_LEN-1:
                    # Normalize rewards with running statistics, then clip outliers.
                    self.running_stats_r.update(np.array(buffer_r))
                    buffer_r = np.clip( (np.array(buffer_r) - self.running_stats_r.mean) / self.running_stats_r.std, -stats_CLIP, stats_CLIP )
                    # Bootstrap value for the state after the batch, then GAE.
                    v_s_ = self.ppo.get_val(s_)
                    tdlamret, adv = self.ppo.add_vtarg_and_adv(np.vstack(buffer_r), np.vstack(buffer_done), np.vstack(buffer_V), v_s_, GAMMA, lamda)
                    bs, ba, br, b_adv = np.vstack(buffer_s), np.vstack(buffer_a), tdlamret, np.vstack(adv)
                    buffer_s, buffer_a, buffer_r, buffer_done, buffer_V = [], [], [], [], []
                    self.ppo.update(bs, ba, br, b_adv)
            SESS.run(GLOBAL_EP.assign_add(1.0))
            # Record the episode return in the shared FIFO queue for plotting.
            qe = GLOBAL_RUNNING_R.enqueue(ep_r)
            SESS.run(qe)
# ------- Hyperparameters and cluster layout for the DPPO run -------
GAME = 'Pendulum-v0'
env = gym.make(GAME).unwrapped  # template env, used when building the global net
net_scope = 'global'
EP_MAX = 500 #500 # max number of episodes
EP_LEN = 200 # episode length
GAMMA = 0.9  # discount factor
lamda = 0.95 #0.95  # GAE lambda
hidden = 50 #100  # hidden layer width
A_LR = 0.0001 # actor's learning rate
C_LR = 0.0002 # critic's learning rate
BATCH = 32 # minibatch size
A_EPOCH = 10 # number of epoch
C_EPOCH = 10 # number of epoch
S_DIM, A_DIM = 3, 1 # state, action dimension
stats_CLIP = 10 # upper bound of RunningStats
epsilon=0.2  # PPO surrogate clip range
# Local distributed-TF cluster: four workers plus one parameter server,
# all on localhost ports 3330-3334.
cluster = tf.train.ClusterSpec({
    "worker": ["localhost:3331",
               "localhost:3332",
               "localhost:3333",
               "localhost:3334"
              ],
    "ps": ["localhost:3330"]
})
def parameter_server():
    """Parameter-server process: hosts the shared variables/queue, then plots rewards.

    Blocks until the reward queue holds EP_MAX episode returns, then drains it
    into an exponentially smoothed curve and displays it.
    """
    #tf.reset_default_graph()
    server = tf.train.Server(cluster,
                             job_name="ps",
                             task_index=0)
    sess = tf.Session(target=server.target)
    # Shared state lives on the PS device so all workers see the same copies.
    with tf.device("/job:ps/task:0"):
        GLOBAL_PPO = PPO(net_scope, sess, env, global_PPO=None) # only need its params
        GLOBAL_EP = tf.Variable(0.0, name='GLOBAL_EP') # num of global episodes
        # a queue of ep_r
        GLOBAL_RUNNING_R = tf.FIFOQueue(EP_MAX, tf.float32, shared_name="GLOBAL_RUNNING_R")
    print("Parameter server: waiting for cluster connection...")
    sess.run(tf.report_uninitialized_variables())
    print("Parameter server: cluster ready!")
    print("Parameter server: initializing variables...")
    sess.run(tf.global_variables_initializer())
    print("Parameter server: variables initialized")
    while True:
        time.sleep(1.0)
        if sess.run(GLOBAL_RUNNING_R.size()) >= EP_MAX: # GLOBAL_EP starts from 0, hence +1 to max_global_episodes
            time.sleep(10.0)  # grace period so stragglers can finish enqueueing
            GLOBAL_RUNNING_R_list = []
            ep_r_prev = 0.0
            # Drain the queue into an exponentially smoothed reward curve.
            for i in range(sess.run(GLOBAL_RUNNING_R.size())):
                ep_r = sess.run(GLOBAL_RUNNING_R.dequeue())
                if i==0:
                    GLOBAL_RUNNING_R_list.append(ep_r) # for display
                else:
                    GLOBAL_RUNNING_R_list.append(GLOBAL_RUNNING_R_list[-1]*0.9 + ep_r*0.1) # for display
            break
    # display
    plt.plot(np.arange(len(GLOBAL_RUNNING_R_list)), GLOBAL_RUNNING_R_list)
    plt.xlabel('episode')
    plt.ylabel('reward')
    plt.show()
    #print("Parameter server: blocking...")
    #server.join() # currently blocks forever
    print("Parameter server: ended...")
def worker(worker_n):
    """Worker process: joins the cluster, builds a local net, and trains until done."""
    #tf.reset_default_graph()
    server = tf.train.Server(cluster,
                             job_name="worker",
                             task_index=worker_n)
    sess = tf.Session(target=server.target)
    # Shared variables are placed on the parameter server's device; the
    # shared_name on the queue makes every process see the same queue.
    with tf.device("/job:ps/task:0"):
        GLOBAL_PPO = PPO(net_scope, sess, env, global_PPO=None) # only need its params
        GLOBAL_EP = tf.Variable(0.0, name='GLOBAL_EP') # num of global episodes
        # a queue of ep_r
        GLOBAL_RUNNING_R = tf.FIFOQueue(EP_MAX, tf.float32, shared_name="GLOBAL_RUNNING_R")
    """
    with tf.device(tf.train.replica_device_setter(
        worker_device='/job:worker/task:' + str(worker_n),
        cluster=cluster)):
    """
    print("Worker %d: waiting for cluster connection..." % worker_n)
    sess.run(tf.report_uninitialized_variables())
    print("Worker %d: cluster ready!" % worker_n)
    #while sess.run(tf.report_uninitialized_variables()):
    # Spin until the PS has initialized all shared variables.
    while (sess.run(tf.report_uninitialized_variables())).any(): # ********** .any() .all() **********
        print("Worker %d: waiting for variable initialization..." % worker_n)
        time.sleep(1.0)
    print("Worker %d: variables initialized" % worker_n)
    w = Worker(str(worker_n), GLOBAL_PPO, GLOBAL_EP, GLOBAL_RUNNING_R, sess)
    print("Worker %d: created" % worker_n)
    sess.run(tf.global_variables_initializer()) # got to initialize after Worker creation
    w.work()
    print("Worker %d: w.work()" % worker_n)
    #print("Worker %d: blocking..." % worker_n)
    server.join() # currently blocks forever
    print("Worker %d: ended..." % worker_n)
# Launch the parameter server and four workers as daemon processes, wait for
# the PS to finish (it exits after plotting), then kill the workers.
start_time = time.time()
ps_proc = Process(target=parameter_server, daemon=True)
w1_proc = Process(target=worker, args=(0, ), daemon=True)
w2_proc = Process(target=worker, args=(1, ), daemon=True)
w3_proc = Process(target=worker, args=(2, ), daemon=True)
w4_proc = Process(target=worker, args=(3, ), daemon=True)
ps_proc.start()
w1_proc.start()
w2_proc.start()
w3_proc.start()
w4_proc.start()
# if not join, parent will terminate before children
# & children will terminate as well cuz children are daemon
ps_proc.join()
#w1_proc.join()
#w2_proc.join()
#w3_proc.join()
#w4_proc.join()
for proc in [w1_proc,
             w2_proc,
             w3_proc,
             w4_proc,
             ps_proc]:
    proc.terminate() # only way to kill server is to kill it's process
print('All done.')
print("--- %s seconds ---" % (time.time() - start_time))
```
| github_jupyter |
```
from IPython.display import Image
```
# CNTK 204: Sequence to Sequence Networks with Text Data
## Introduction and Background
This hands-on tutorial will take you through both the basics of sequence-to-sequence networks, and how to implement them in the Microsoft Cognitive Toolkit. In particular, we will implement a sequence-to-sequence model with attention to perform grapheme to phoneme translation. We will start with some basic theory and then explain the data in more detail, and how you can download it.
Andrej Karpathy has a [nice visualization](http://karpathy.github.io/2015/05/21/rnn-effectiveness/) of five common paradigms of neural network architectures:
```
# Figure 1
# Render the diagram of the five common RNN architecture paradigms.
Image(url="http://cntk.ai/jup/paradigms.jpg", width=750)
```
In this tutorial, we are going to be talking about the fourth paradigm: many-to-many where the length of the output does not necessarily equal the length of the input, also known as sequence-to-sequence networks. The input is a sequence with a dynamic length, and the output is also a sequence with some dynamic length. It is the logical extension of the many-to-one paradigm in that previously we were predicting some category (which could easily be one of `V` words where `V` is an entire vocabulary) and now we want to predict a whole sequence of those categories.
The applications of sequence-to-sequence networks are nearly limitless. It is a natural fit for machine translation (e.g. English input sequences, French output sequences); automatic text summarization (e.g. full document input sequence, summary output sequence); word to pronunciation models (e.g. character [grapheme] input sequence, pronunciation [phoneme] output sequence); and even parse tree generation (e.g. regular text input, flat parse tree output).
## Basic theory
A sequence-to-sequence model consists of two main pieces: (1) an encoder; and (2) a decoder. Both the encoder and the decoder are recurrent neural network (RNN) layers that can be implemented using a vanilla RNN, an LSTM, or GRU Blocks (here we will use LSTM). In the basic sequence-to-sequence model, the encoder processes the input sequence into a fixed representation that is fed into the decoder as a context. The decoder then uses some mechanism (discussed below) to decode the processed information into an output sequence. The decoder is a language model that is augmented with some "strong context" by the encoder, and so each symbol that it generates is fed back into the decoder for additional context (like a traditional LM). For an English to German translation task, the most basic setup might look something like this:
```
# Figure 2
# Render the basic encoder-decoder sequence-to-sequence diagram.
Image(url="http://cntk.ai/jup/s2s.png", width=700)
```
The basic sequence-to-sequence network passes the information from the encoder to the decoder by initializing the decoder RNN with the final hidden state of the encoder as its initial hidden state. The input is then a "sequence start" tag (`<s>` in the diagram above) which primes the decoder to start generating an output sequence. Then, whatever word (or note or image, etc.) it generates at that step is fed in as the input for the next step. The decoder keeps generating outputs until it hits the special "end sequence" tag (`</s>` above).
A more complex and powerful version of the basic sequence-to-sequence network uses an attention model. While the above setup works well, it can start to break down when the input sequences get long. At each step, the hidden state `h` is getting updated with the most recent information, and therefore `h` might be getting "diluted" in information as it processes each token. Further, even with a relatively short sequence, the last token will always get the last say and therefore the thought vector will be somewhat biased/weighted towards that last word. To deal with this problem, we use an "attention" mechanism that allows the decoder to look not only at all of the hidden states from the input, but it also learns which hidden states, for each step in decoding, to put the most weight on. In this tutorial we will implement a sequence-to-sequence network that can be run either with or without attention enabled.
```
# Figure 3
# Render the sequence-to-sequence-with-attention diagram.
Image(url="https://cntk.ai/jup/cntk204_s2s2.png", width=700)
```
The `Attention` layer above takes the current value of the hidden state in the Decoder, all of the hidden states in the Encoder, and calculates an augmented version of the hidden state to use. More specifically, the contribution from the Encoder's hidden states will represent a weighted sum of all of its hidden states where the highest weight corresponds both to the biggest contribution to the augmented hidden state and to the hidden state that will be most important for the Decoder to consider when generating the next word.
## Problem: Grapheme-to-Phoneme Conversion
The [grapheme](https://en.wikipedia.org/wiki/Grapheme) to [phoneme](https://en.wikipedia.org/wiki/Phoneme) problem is a translation task that takes the letters of a word as the input sequence (the graphemes are the smallest units of a writing system) and outputs the corresponding phonemes; that is, the units of sound that make up a language. In other words, the system aims to generate an unambiguous representation of how to pronounce a given input word.
### Example
The graphemes or the letters are translated into corresponding phonemes:
> **Grapheme** : **|** T **|** A **|** N **|** G **|** E **|** R **|**
**Phonemes** : **|** ~T **|** ~AE **|** ~NG **|** ~ER **|**
## Task and Model Structure
As discussed above, the task we are interested in solving is creating a model that takes some sequence as an input, and generates an output sequence based on the contents of the input. The model's job is to learn the mapping from the input sequence to the output sequence that it will generate. The job of the encoder is to come up with a good representation of the input that the decoder can use to generate a good output. For both the encoder and the decoder, the LSTM does a good job at this.
Note that the LSTM is simply one of a whole set of different types of Blocks that can be used to implement an RNN. This is the code that is run for each step in the recurrence. In the Layers library, there are three built-in recurrent Blocks: the (vanilla) `RNN`, the `GRU`, and the `LSTM`. Each processes its input slightly differently and each has its own benefits and drawbacks for different types of tasks and networks. To get these blocks to run for each of the elements recurrently in a network, we create a `Recurrence` over them. This "unrolls" the network to the number of steps that are in the given input for the RNN layer.
## Importing CNTK and other useful libraries
CNTK is a Python module that contains several submodules like `io`, `learner`, `graph`, etc. We make extensive use of numpy as well.
```
from __future__ import print_function
import numpy as np
import os
import cntk as C
```
In the block below, we check if we are running this notebook in the CNTK internal test machines by looking for environment variables defined there. We then select the right target device (GPU vs CPU) to test this notebook. In other cases, we use CNTK's default policy to use the best available device (GPU, if available, else CPU).
```
# Define a test environment
def isTest():
return ('TEST_DEVICE' in os.environ)
# Select the right target device when this notebook is being tested:
if 'TEST_DEVICE' in os.environ:
if os.environ['TEST_DEVICE'] == 'cpu':
C.device.try_set_default_device(C.device.cpu())
else:
C.device.try_set_default_device(C.device.gpu(0))
```
## Downloading the data
In this tutorial we will use a lightly pre-processed version of the CMUDict (version 0.7b) dataset from http://www.speech.cs.cmu.edu/cgi-bin/cmudict. The CMUDict data refers to the Carnegie Mellon University Pronouncing Dictionary and is an open-source machine-readable pronunciation dictionary for North American English. The data is in the CNTKTextFormatReader format. Here is an example sequence pair from the data, where the input sequence (S0) is in the left column, and the output sequence (S1) is on the right:
```
0 |S0 3:1 |# <s> |S1 3:1 |# <s>
0 |S0 4:1 |# A |S1 32:1 |# ~AH
0 |S0 5:1 |# B |S1 36:1 |# ~B
0 |S0 4:1 |# A |S1 31:1 |# ~AE
0 |S0 7:1 |# D |S1 38:1 |# ~D
0 |S0 12:1 |# I |S1 47:1 |# ~IY
0 |S0 1:1 |# </s> |S1 1:1 |# </s>
```
The code below will download the required files (training, testing, the single sequence above for visual validation, and a small vocab file) and put them in a local folder (the training file is ~34 MB, testing is ~4MB, and the validation file and vocab file are both less than 1KB).
```
import requests
def download(url, filename):
    """Download *url* to *filename*, streaming the body in chunks.

    Parameters
    ----------
    url : str
        Source URL.
    filename : str
        Destination path on disk.

    Raises
    ------
    requests.HTTPError
        When the server responds with a 4xx/5xx status; previously the error
        page itself was silently written to disk as if it were the data.
    """
    response = requests.get(url, stream=True)
    response.raise_for_status()
    with open(filename, "wb") as handle:
        # iter_content() without a chunk_size yields one byte at a time;
        # 64 KiB chunks keep the transfer fast without large memory use.
        for data in response.iter_content(chunk_size=65536):
            handle.write(data)
MODEL_DIR = "."
DATA_DIR = os.path.join('..', 'Examples', 'SequenceToSequence', 'CMUDict', 'Data')
# If above directory does not exist, just use current.
if not os.path.exists(DATA_DIR):
DATA_DIR = '.'
dataPath = {
'validation': 'tiny.ctf',
'training': 'cmudict-0.7b.train-dev-20-21.ctf',
'testing': 'cmudict-0.7b.test.ctf',
'vocab_file': 'cmudict-0.7b.mapping',
}
for k in sorted(dataPath.keys()):
path = os.path.join(DATA_DIR, dataPath[k])
if os.path.exists(path):
print("Reusing locally cached:", path)
else:
print("Starting download:", dataPath[k])
url = "https://github.com/Microsoft/CNTK/blob/v2.0/Examples/SequenceToSequence/CMUDict/Data/%s?raw=true"%dataPath[k]
download(url, path)
print("Download completed")
dataPath[k] = path
```
## Reader
To efficiently collect our data, randomize it for training, and pass it to the network, we use the CNTKTextFormat reader. We will create a small function that will be called when training (or testing) that defines the names of the streams in our data, and how they are referred to in the raw training data.
```
# Helper function to load the model vocabulary file
def get_vocab(path):
    """Load the vocabulary file and build index<->token mappings.

    Parameters
    ----------
    path : str
        Path to a text file with one vocabulary token per line.

    Returns
    -------
    tuple
        ``(vocab, i2w, w2i)`` where ``vocab`` is the token list in file
        order, ``i2w`` maps index -> token and ``w2i`` maps token -> index.
    """
    # Use a context manager so the file handle is closed deterministically
    # (the original bare open() left closing to the garbage collector).
    with open(path) as f:
        vocab = [w.strip() for w in f.readlines()]
    i2w = { i:w for i,w in enumerate(vocab) }
    w2i = { w:i for i,w in enumerate(vocab) }
    return (vocab, i2w, w2i)
# Read vocabulary data and generate their corresponding indices
vocab, i2w, w2i = get_vocab(dataPath['vocab_file'])

# NOTE(review): this first create_reader references MinibatchSource,
# CTFDeserializer, StreamDefs, StreamDef and INFINITELY_REPEAT without the
# C.io prefix and would raise NameError if called as-is; it is superseded by
# the C.io-qualified version defined in a later cell.
def create_reader(path, is_training):
    # Map CTF fields S0/S1 onto 'features' (graphemes) and 'labels' (phonemes).
    return MinibatchSource(CTFDeserializer(path, StreamDefs(
        features = StreamDef(field='S0', shape=input_vocab_dim, is_sparse=True),
        labels = StreamDef(field='S1', shape=label_vocab_dim, is_sparse=True)
    )), randomize = is_training, max_sweeps = INFINITELY_REPEAT if is_training else 1)
# Vocabulary sizes for the grapheme (input) and phoneme (label) streams.
input_vocab_dim = 69
label_vocab_dim = 69
# Print vocab and the corresponding mapping to the phonemes
print("Vocabulary size is", len(vocab))
print("First 15 letters are:")
print(vocab[:15])
print()
print("Print dictionary with the vocabulary mapping:")
print(i2w)
```
We will use the above to create a reader for our training data. Let's create it now:
```
def create_reader(path, is_training):
    """Build a CTF reader over *path* with 'features' (S0) and 'labels'
    (S1) streams; randomized and endlessly repeating when training."""
    stream_defs = C.io.StreamDefs(
        features=C.io.StreamDef(field='S0', shape=input_vocab_dim, is_sparse=True),
        labels=C.io.StreamDef(field='S1', shape=label_vocab_dim, is_sparse=True),
    )
    sweeps = C.io.INFINITELY_REPEAT if is_training else 1
    return C.io.MinibatchSource(C.io.CTFDeserializer(path, stream_defs),
                                randomize=is_training, max_sweeps=sweeps)
# Train data reader (randomized, repeats indefinitely)
train_reader = create_reader(dataPath['training'], True)

# Validation data reader (also randomized/repeating; sampled during training)
valid_reader = create_reader(dataPath['validation'], True)
```
### Now let's set our model hyperparameters...
We have a number of settings that control the complexity of our network, the shapes of our inputs, and other options such as whether we will use an embedding (and what size to use), and whether or not we will employ attention. We set them now as they will be made use of when we build the network graph in the following sections.
```
# Model hyperparameters.
hidden_dim = 512       # LSTM hidden-state size
num_layers = 2         # stacked LSTM layers in encoder and decoder
attention_dim = 128    # projection size inside the attention model
attention_span = 20    # number of encoder steps the attention window covers
attention_axis = -3    # static axis over which attention is computed
use_attention = True
use_embedding = True
embedding_dim = 200
# All lines of vocab_file in a list; use a context manager so the file
# handle is closed promptly instead of leaking until garbage collection.
with open(dataPath['vocab_file']) as vocab_f:
    vocab = [w.strip() for w in vocab_f.readlines()]
length_increase = 1.5  # max output length = 1.5x the input length
```
We will set two more parameters now: the symbols used to denote the start of a sequence (sometimes called 'BOS') and the end of a sequence (sometimes called 'EOS'). In this case, our sequence-start symbol is the tag $<s>$ and our sequence-end symbol is the end-tag $</s>$.
Sequence start and end tags are important in sequence-to-sequence networks for two reasons. The sequence start tag is a "primer" for the decoder; in other words, because we are generating an output sequence and RNNs require some input, the sequence start token "primes" the decoder to cause it to emit its first generated token. The sequence end token is important because the decoder will learn to output this token when the sequence is finished. Otherwise the network wouldn't know how long of a sequence to generate. For the code below, we setup the sequence start symbol as a `Constant` so that it can later be passed to the Decoder LSTM as its `initial_state`. Further, we get the sequence end symbol's index so that the Decoder can use it to know when to stop generating tokens.
```
# One-hot Constant marking the sequence-start token <s>; used as the
# decoder's initial_state ("primer") when generating output.
sentence_start = C.Constant(np.array([w=='<s>' for w in vocab], dtype=np.float32))
# Index of the end-of-sequence tag </s>; decoding stops once it is emitted.
sentence_end_index = vocab.index('</s>')
```
## Step 1: setup the input to the network
### Dynamic axes in CNTK (Key concept)
One of the important concepts in understanding CNTK is the idea of two types of axes:
- **static axes**, which are the traditional axes of a variable's shape, and
- **dynamic axes**, which have dimensions that are unknown until the variable is bound to real data at computation time.
The dynamic axes are particularly important in the world of recurrent neural networks. Instead of having to decide a maximum sequence length ahead of time, padding your sequences to that size, and wasting computation, CNTK's dynamic axes allow for variable sequence lengths that are automatically packed in minibatches to be as efficient as possible.
When setting up sequences, there are *two dynamic axes* that are important to consider. The first is the *batch axis*, which is the axis along which multiple sequences are batched. The second is the dynamic axis particular to that sequence. The latter is specific to a particular input because of variable sequence lengths in your data. For example, in sequence to sequence networks, we have two sequences: the **input sequence**, and the **output (or 'label') sequence**. One of the things that makes this type of network so powerful is that the length of the input sequence and the output sequence do not have to correspond to each other. Therefore, both the input sequence and the output sequence require their own unique dynamic axis.
We first create the `inputAxis` for the input sequence and the `labelAxis` for the output sequence. We then define the inputs to the model by creating sequences over these two unique dynamic axes. Note that `InputSequence` and `LabelSequence` are *type declarations*. This means that the `InputSequence` is a type that consists of a sequence over the `inputAxis` axis.
```
# Source and target inputs to the model.
# Each sequence gets its own dynamic axis because input and output
# lengths need not match in sequence-to-sequence models.
inputAxis = C.Axis('inputAxis')
labelAxis = C.Axis('labelAxis')
# Type declarations: sequences over the respective dynamic axes.
InputSequence = C.layers.SequenceOver[inputAxis]
LabelSequence = C.layers.SequenceOver[labelAxis]
```
## Step 2: define the network
As discussed before, the sequence-to-sequence network is, at its most basic, an RNN (LSTM) encoder followed by an RNN (LSTM) decoder, and a dense output layer. We will implement both the Encoder and the Decoder using the CNTK Layers library. Both of these will be created as CNTK Functions. Our `create_model()` Python function creates both the `encode` and `decode` CNTK Functions. The `decode` function directly makes use of the `encode` function and the return value of `create_model()` is the CNTK Function `decode` itself.
We start by passing the input through an embedding (learned as part of the training process). So that this function can be used in the `Sequential` block of the Encoder and the Decoder whether we want an embedding or not, we will use the `identity` function if the `use_embedding` parameter is `False`. We then declare the Encoder layers as follows:
First, we pass the input through our `embed` function and then we stabilize it. This adds an additional scalar parameter to the learning that can help our network converge more quickly during training. Then, for each of the number of LSTM layers that we want in our encoder, except the final one, we set up an LSTM recurrence. The final recurrence will be a `Fold` if we are not using attention because we only pass the final hidden state to the decoder. If we are using attention, however, then we use another normal LSTM `Recurrence` that the Decoder will put its attention over later on.
Below we see a diagram of how the layered version of the sequence-to-sequence network with attention works. As the code shows below, the output of each layer of the Encoder and Decoder is used as the input to the layer just above it. The Attention model focuses on the top layer of the Encoder and informs the first layer of the Decoder.
```
# Figure 4: diagram of the layered sequence-to-sequence network with attention.
Image(url="https://cntk.ai/jup/cntk204_s2s3.png", width=900)
```
For the decoder, we first define several sub-layers: the `Stabilizer` for the decoder input, the `Recurrence` blocks for each of the decoder's layers, the `Stabilizer` for the output of the stack of LSTMs, and the final `Dense` output layer. If we are using attention, then we also create an `AttentionModel` function `attention_model` which returns an augmented version of the decoder's hidden state with emphasis placed on the encoder hidden states that should be most used for the given step while generating the next output token.
We then build the CNTK Function `decode`. The decorator `@Function` turns a regular Python function into a proper CNTK Function with the given arguments and return value. The Decoder works differently during training than it does during test time. During training, the history (i.e. input) to the Decoder `Recurrence` consists of the ground-truth labels. This means that while generating $y^{(t=2)}$, for example, the input will be $y^{(t=1)}$. During evaluation, or "test time", however, the input to the Decoder will be the actual output of the model. For a greedy decoder -- which we are implementing here -- that input is therefore the `hardmax` of the final `Dense` layer.
The Decoder Function `decode` takes two arguments: (1) the `input` sequence; and (2) the Decoder `history`. First, it runs the `input` sequence through the Encoder function `encode` that we setup earlier. We then get the `history` and map it to its embedding if necessary. Then the embedded representation is stabilized before running it through the Decoder's `Recurrence`. For each layer of `Recurrence`, we run the embedded `history` (now represented as `r`) through the `Recurrence`'s LSTM. If we are not using attention, we run it through the `Recurrence` with its initial state set to the value of the final hidden state of the encoder (note that since we run the Encoder backwards when not using attention that the "final" hidden state is actually the first hidden state in chronological time). If we are using attention, however, then we calculate the auxiliary input `h_att` using our `attention_model` function and we splice that onto the input `x`. This augmented `x` is then used as input for the Decoder's `Recurrence`.
Finally, we stabilize the output of the Decoder, put it through the final `Dense` layer `proj_out`, and label the output using the `Label` layer which allows for simple access to that layer later on.
```
# create the s2s model
def create_model(): # :: (history*, input*) -> logP(w)*
    """Build the abstract sequence-to-sequence model.

    Returns the CNTK Function ``decode(history, input)`` mapping the
    decoder history and the input sequence to unnormalized word
    log-probabilities.  The encoder is captured in its closure.
    """
    # Embedding: (input*) --> embedded_input*
    # `identity` is substituted when no embedding is wanted so the
    # Sequential blocks below work either way.
    embed = C.layers.Embedding(embedding_dim, name='embed') if use_embedding else identity

    # Encoder: (input*) --> (h0, c0)
    # Create multiple layers of LSTMs by passing the output of the i-th layer
    # to the (i+1)th layer as its input
    # Note: We go_backwards for the plain model, but forward for the attention model.
    with C.layers.default_options(enable_self_stabilization=True, go_backwards=not use_attention):
        # Without attention only the final state is passed on (Fold); with
        # attention the decoder looks at every step, so keep the whole sequence.
        LastRecurrence = C.layers.Fold if not use_attention else C.layers.Recurrence
        encode = C.layers.Sequential([
            embed,
            C.layers.Stabilizer(),
            C.layers.For(range(num_layers-1), lambda:
                C.layers.Recurrence(C.layers.LSTM(hidden_dim))),
            LastRecurrence(C.layers.LSTM(hidden_dim), return_full_state=True),
            (C.layers.Label('encoded_h'), C.layers.Label('encoded_c')),
        ])

    # Decoder: (history*, input*) --> unnormalized_word_logp*
    # where history is one of these, delayed by 1 step and <s> prepended:
    #  - training: labels
    #  - testing: its own output hardmax(z) (greedy decoder)
    with C.layers.default_options(enable_self_stabilization=True):
        # sub-layers
        stab_in = C.layers.Stabilizer()
        rec_blocks = [C.layers.LSTM(hidden_dim) for i in range(num_layers)]
        stab_out = C.layers.Stabilizer()
        proj_out = C.layers.Dense(label_vocab_dim, name='out_proj')
        # attention model
        if use_attention: # maps a decoder hidden state and all the encoder states into an augmented state
            attention_model = C.layers.AttentionModel(attention_dim,
                                                      attention_span,
                                                      attention_axis,
                                                      name='attention_model') # :: (h_enc*, h_dec) -> (h_dec augmented)
        # layer function
        @C.Function
        def decode(history, input):
            encoded_input = encode(input)
            r = history
            r = embed(r)
            r = stab_in(r)
            for i in range(num_layers):
                rec_block = rec_blocks[i] # LSTM(hidden_dim) # :: (dh, dc, x) -> (h, c)
                if use_attention:
                    if i == 0:
                        # First decoder layer: augment the LSTM input with the
                        # attention-weighted encoder hidden states.
                        @C.Function
                        def lstm_with_attention(dh, dc, x):
                            h_att = attention_model(encoded_input.outputs[0], dh)
                            x = C.splice(x, h_att)
                            return rec_block(dh, dc, x)
                        r = C.layers.Recurrence(lstm_with_attention)(r)
                    else:
                        r = C.layers.Recurrence(rec_block)(r)
                else:
                    # unlike Recurrence(), the RecurrenceFrom() layer takes the initial hidden state as a data input
                    r = C.layers.RecurrenceFrom(rec_block)(*(encoded_input.outputs + (r,))) # :: h0, c0, r -> h
            r = stab_out(r)
            r = proj_out(r)
            r = C.layers.Label('out_proj_out')(r)
            return r

    return decode
```
The network that we defined above can be thought of as an "abstract" model that must first be wrapped to be used. In this case, we will use it first to create a "training" version of the model (where the history for the Decoder will be the ground-truth labels), and then we will use it to create a greedy "decoding" version of the model where the history for the Decoder will be the `hardmax` output of the network. Let's set up these model wrappers next.
## Training
Before starting training, we will define the training wrapper, the greedy decoding wrapper, and the criterion function used for training the model. Let's start with the training wrapper.
```
def create_model_train(s2smodel):
    """Wrap *s2smodel* for training: the decoder history is taken from the
    ground-truth labels (teacher forcing).

    Note: the labels must NOT contain the initial <s> token.
    """
    @C.Function
    def model_train(input, labels):  # (input*, labels*) --> (word_logp*)
        # The decoder input always begins with the sequence-start token;
        # thereafter it sees the label from the previous time step, which
        # Delay() provides with <s> as the initial state.
        return s2smodel(C.layers.Delay(initial_state=sentence_start)(labels), input)
    return model_train
```
Above, we create the CNTK Function `model_train` again using the `@Function` decorator. This function takes the input sequence `input` and the output sequence `labels` as arguments. The `past_labels` are setup as the `history` for the model we created earlier by using the `Delay` layer. This will return the previous time-step value for the input `labels` with an `initial_state` of `sentence_start`. Therefore, if we give the labels `['a', 'b', 'c']`, then `past_labels` will contain `['<s>', 'a', 'b', 'c']` and then return our abstract base model called with the history `past_labels` and the input `input`.
Let's go ahead and create the greedy decoding model wrapper now as well:
```
def create_model_greedy(s2smodel):
    """Wrap *s2smodel* for greedy decoding: the history fed back into the
    decoder is its own previous hardmax output rather than labels."""
    # model used in (greedy) decoding (history is decoder's own output)
    @C.Function
    @C.layers.Signature(InputSequence[C.layers.Tensor[input_vocab_dim]])
    def model_greedy(input): # (input*) --> (word_sequence*)
        # Decoding is an unfold() operation starting from sentence_start.
        # We must transform s2smodel (history*, input* -> word_logp*) into a generator (history* -> output*)
        # which holds 'input' in its closure.
        unfold = C.layers.UnfoldFrom(lambda history: s2smodel(history, input) >> C.hardmax,
                                     # stop once sentence_end_index was max-scoring output
                                     until_predicate=lambda w: w[...,sentence_end_index],
                                     # caps the output at length_increase * len(input)
                                     length_increase=length_increase)
        return unfold(initial_state=sentence_start, dynamic_axes_like=input)
    return model_greedy
```
Above we create a new CNTK Function `model_greedy` which this time only takes a single argument. This is of course because when using the model at test time we don't have any labels -- it is the model's job to create them for us! In this case, we use the `UnfoldFrom` layer which runs the base model with the current `history` and funnels it into the `hardmax`. The `hardmax`'s output then becomes part of the `history` and we keep unfolding the `Recurrence` until the `sentence_end_index` has been reached. The maximum length of the output sequence (the maximum unfolding of the Decoder) is determined by a multiplier passed to `length_increase`. In this case we set `length_increase` to `1.5` above so the maximum length of each output sequence is 1.5x its input.
The last thing we will do before setting up the training loop is define the function that will create the criterion function for our model.
```
def create_criterion_function(model):
    """Return a CNTK criterion Function computing (cross-entropy loss,
    classification error) for *model* on an (input, labels) pair.

    The leading <s> token is dropped from the labels before scoring.
    """
    @C.Function
    @C.layers.Signature(input=InputSequence[C.layers.Tensor[input_vocab_dim]],
                        labels=LabelSequence[C.layers.Tensor[label_vocab_dim]])
    def criterion(input, labels):
        # <s> A B C </s>  -->  A B C </s>
        trimmed_labels = C.sequence.slice(labels, 1, 0)
        logits = model(input, trimmed_labels)
        loss = C.cross_entropy_with_softmax(logits, trimmed_labels)
        errors = C.classification_error(logits, trimmed_labels)
        return (loss, errors)
    return criterion
```
Above, we create the criterion function which drops the sequence-start symbol from our labels for us, runs the model with the given `input` and `labels`, and uses the output to compare to our ground truth. We use the loss function `cross_entropy_with_softmax` and get the `classification_error` which gives us the percent-error per-word of our generation accuracy. The CNTK Function `criterion` returns these values as a tuple and the Python function `create_criterion_function(model)` returns that CNTK Function.
Now let's move on to creating the training loop...
```
def train(train_reader, valid_reader, vocab, i2w, s2smodel, max_epochs, epoch_size):
# create the training wrapper for the s2smodel, as well as the criterion function
model_train = create_model_train(s2smodel)
criterion = create_criterion_function(model_train)
# also wire in a greedy decoder so that we can properly log progress on a validation example
# This is not used for the actual training process.
model_greedy = create_model_greedy(s2smodel)
# Instantiate the trainer object to drive the model training
minibatch_size = 72
lr = 0.001 if use_attention else 0.005
learner = C.fsadagrad(model_train.parameters,
lr = C.learning_rate_schedule([lr]*2+[lr/2]*3+[lr/4], C.UnitType.sample, epoch_size),
momentum = C.momentum_as_time_constant_schedule(1100),
gradient_clipping_threshold_per_sample=2.3,
gradient_clipping_with_truncation=True)
trainer = C.Trainer(None, criterion, learner)
# Get minibatches of sequences to train with and perform model training
total_samples = 0
mbs = 0
eval_freq = 100
# print out some useful training information
C.logging.log_number_of_parameters(model_train) ; print()
progress_printer = C.logging.ProgressPrinter(freq=30, tag='Training')
# a hack to allow us to print sparse vectors
sparse_to_dense = create_sparse_to_dense(input_vocab_dim)
for epoch in range(max_epochs):
while total_samples < (epoch+1) * epoch_size:
# get next minibatch of training data
mb_train = train_reader.next_minibatch(minibatch_size)
# do the training
trainer.train_minibatch({criterion.arguments[0]: mb_train[train_reader.streams.features],
criterion.arguments[1]: mb_train[train_reader.streams.labels]})
progress_printer.update_with_trainer(trainer, with_metric=True) # log progress
# every N MBs evaluate on a test sequence to visually show how we're doing
if mbs % eval_freq == 0:
mb_valid = valid_reader.next_minibatch(1)
# run an eval on the decoder output model (i.e. don't use the groundtruth)
e = model_greedy(mb_valid[valid_reader.streams.features])
print(format_sequences(sparse_to_dense(mb_valid[valid_reader.streams.features]), i2w))
print("->")
print(format_sequences(e, i2w))
# visualizing attention window
if use_attention:
debug_attention(model_greedy, mb_valid[valid_reader.streams.features])
total_samples += mb_train[train_reader.streams.labels].num_samples
mbs += 1
# log a summary of the stats for the epoch
progress_printer.epoch_summary(with_metric=True)
# done: save the final model
model_path = "model_%d.cmf" % epoch
print("Saving final model to '%s'" % model_path)
s2smodel.save(model_path)
print("%d epochs complete." % max_epochs)
```
In the above function, we created one version of the model for training (plus its associated criterion function) and one version of the model for evaluation. Normally this latter version would not be required but here we have done it so that we can periodically sample from the non-training model to visually understand how our model is converging by seeing the kinds of sequences that it generates as the training progresses.
We then setup some standard variables required for the training loop. We set the `minibatch_size` (which refers to the total number of elements -- NOT sequences -- in a minibatch), the initial learning rate `lr`, we initialize a `learner` using the `adam_sgd` algorithm and a `learning_rate_schedule` that slowly reduces our learning rate. We make use of gradient clipping to help control exploding gradients, and we finally create our `Trainer` object `trainer`.
We make use of CNTK's `ProgressPrinter` class which takes care of calculating average metrics per minibatch/epoch and we set it to update every 30 minibatches. And finally, before starting the training loop, we initialize a function called `sparse_to_dense` which we use to properly print out the input sequence data that we use for validation because it is sparse. That function is defined just below:
```
# dummy for printing the input sequence below. Currently needed because input is sparse.
def create_sparse_to_dense(input_vocab_dim):
    """Return a CNTK Function that densifies a sparse one-hot sequence by
    multiplying it with an identity matrix (values are unchanged)."""
    identity_matrix = C.Constant(np.eye(input_vocab_dim))
    @C.Function
    @C.layers.Signature(InputSequence[C.layers.SparseTensor[input_vocab_dim]])
    def no_op(input):
        return C.times(input, identity_matrix)
    return no_op
```
Inside the training loop, we proceed much like many other CNTK networks. We request the next bunch of minibatch data, we perform our training, and we print our progress to the screen using the `progress_printer`. Where we diverge from the norm, however, is where we run an evaluation using our `model_greedy` version of the network and run a single sequence, "ABADI", through it to see what the network is currently predicting.
Another difference in the training loop is the optional attention window visualization. Calling the function `debug_attention` shows the weight that the Decoder put on each of the Encoder's hidden states for each of the output tokens that it generated. This function, along with the `format_sequences` function required to print the input/output sequences to the screen, are given below.
```
# Given a vocab and tensor, print the output
def format_sequences(sequences, i2w):
    """Map each sequence of per-step score vectors to a space-joined string
    of the argmax tokens, looked up through *i2w*."""
    formatted = []
    for seq in sequences:
        tokens = (i2w[np.argmax(step)] for step in seq)
        formatted.append(" ".join(tokens))
    return formatted
# to help debug the attention window
def debug_attention(model, input):
    """Print the attention weights that *model* assigns to the encoder
    steps while decoding *input* (one row per generated token)."""
    # Evaluate the model and its attention weights in a single pass.
    q = C.combine([model, model.attention_model.attention_weights])
    #words, p = q(input) # Python 3
    words_p = q(input)
    words = words_p[0]
    p = words_p[1]
    seq_len = words[0].shape[attention_axis-1]
    span = 7 #attention_span #7 # test sentence is 7 tokens long
    p_sq = np.squeeze(p[0][:seq_len,:span,0,:]) # (batch, len, attention_span, 1, vector_dim)
    # Temporarily raise print precision, then restore the previous options.
    opts = np.get_printoptions()
    np.set_printoptions(precision=5)
    print(p_sq)
    np.set_printoptions(**opts)
```
Let's try training our network for a small part of an epoch. In particular, we'll run through 25,000 tokens (about 3% of one epoch):
```
# Build the model and run a short training: 25,000 samples (~3% of one epoch).
model = create_model()
train(train_reader, valid_reader, vocab, i2w, model, max_epochs=1, epoch_size=25000)
```
As we can see above, while the loss has come down quite a ways, the output sequence is still quite a ways off from what we expect. Uncomment the code below to run for a full epoch (notice that we switch the `epoch_size` parameter to the actual size of the training data) and by the end of the first epoch you will already see a very good grapheme-to-phoneme translation model running!
```
# Uncomment the line below to train the model for a full epoch
#train(train_reader, valid_reader, vocab, i2w, model, max_epochs=1, epoch_size=908241)
```
## Testing the network
Now that we've trained a sequence-to-sequence network for grapheme-to-phoneme translation, there are two important things we should do with it. First, we should test its accuracy on a held-out test set. Then, we should try it out in an interactive environment so that we can put in our own input sequences and see what the model predicts. Let's start by determining the test string error rate.
At the end of training, we saved the model using the line `s2smodel.save(model_path)`. Therefore, to test it, we will need to first `load` that model and then run some test data through it. Let's `load` the model, then create a reader configured to access our testing data. Note that we pass `False` to the `create_reader` function this time to denote that we are in testing mode so we should only pass over the data a single time.
```
# load the model for epoch 0
model_path = "model_0.cmf"
model = C.Function.load(model_path)

# create a reader pointing at our testing data (False => single pass, no shuffling)
test_reader = create_reader(dataPath['testing'], False)
```
Now we need to define our testing function. We pass the `reader`, the learned `s2smodel`, and the vocabulary map `i2w` so that we can directly compare the model's predictions to the test set labels. We loop over the test set, evaluate the model on minibatches of size 512 for efficiency, and keep track of the error rate. Note that below we test *per-sequence*. This means that every single token in a generated sequence must match the tokens in the label for that sequence to be considered as correct.
```
# This decodes the test set and counts the string error rate.
def evaluate_decoding(reader, s2smodel, i2w):
    """Greedy-decode every minibatch from *reader* and return the
    per-sequence (string) error rate: a sequence counts as wrong unless
    every generated token matches its label."""
    model_decoding = create_model_greedy(s2smodel)  # wrap the greedy decoder around the model
    progress_printer = C.logging.ProgressPrinter(tag='Evaluation')
    sparse_to_dense = create_sparse_to_dense(input_vocab_dim)

    minibatch_size = 512
    num_total = 0
    num_wrong = 0
    while True:
        mb = reader.next_minibatch(minibatch_size)
        if not mb:  # end of the test set reached
            break
        decoded = model_decoding(mb[reader.streams.features])
        labels = format_sequences(sparse_to_dense(mb[reader.streams.labels]), i2w)
        # Labels carry the leading <s>, so prepend it to the outputs before comparing.
        outputs = ["<s> " + out for out in format_sequences(decoded, i2w)]
        num_total += len(outputs)
        num_wrong += sum(1 for out, lab in zip(outputs, labels) if lab != out)

    rate = num_wrong / num_total
    print("string error rate of {:.1f}% in {} samples".format(100 * rate, num_total))
    return rate
```
Now we will evaluate the decoding using the above function. If you use the version of the model we trained above with just a small 50000 sample of the training data, you will get an error rate of 100% because we cannot possibly get every single token correct with such a small amount of training. However, if you uncommented the training line above that trains the network for a full epoch, you should have ended up with a much-improved model that showed approximately the following training statistics:
```
Finished Epoch[1 of 300]: [Training] loss = 0.878420 * 799303, metric = 26.23% * 799303 1755.985s (455.2 samples/s);
```
Now let's evaluate the model's test set performance below.
```
# print the string error rate
evaluate_decoding(test_reader, model, i2w)
```
If you did not run the training for the full first epoch, the output above will be a `1.0` meaning 100% string error rate. If, however, you uncommented the line to perform training for a full epoch, you should get an output of `0.569`. A string error rate of `56.9` is actually not bad for a single pass over the data. Let's now modify the above `evaluate_decoding` function to output the per-phoneme error rate. This means that we are calculating the error at a higher precision and also makes things easier in some sense because with the string error rate we could have every phoneme correct but one in each example and still end up with a 100% error rate. Here is the modified version of that function:
```
# This decodes the test set and counts the per-phoneme (token) error rate.
def evaluate_decoding(reader, s2smodel, i2w):
    """Greedy-decode every minibatch from *reader* and return the
    per-phoneme error rate: the fraction of label tokens the model got
    wrong (a label token with no matching prediction counts as wrong)."""
    model_decoding = create_model_greedy(s2smodel) # wrap the greedy decoder around the model
    progress_printer = C.logging.ProgressPrinter(tag='Evaluation')
    sparse_to_dense = create_sparse_to_dense(input_vocab_dim)
    minibatch_size = 512
    num_total = 0
    num_wrong = 0
    while True:
        mb = reader.next_minibatch(minibatch_size)
        if not mb: # finish when end of test set reached
            break
        e = model_decoding(mb[reader.streams.features])
        outputs = format_sequences(e, i2w)
        labels = format_sequences(sparse_to_dense(mb[reader.streams.labels]), i2w)
        # prepend sentence start for comparison
        outputs = ["<s> " + output for output in outputs]
        for s in range(len(labels)):
            # BUG FIX: compare whitespace-separated phoneme tokens, not the
            # individual characters of the joined strings, so the reported
            # rate really is per-phoneme rather than per-character.
            label_tokens = labels[s].split()
            output_tokens = outputs[s].split()
            for w in range(len(label_tokens)):
                num_total += 1
                if w < len(output_tokens): # in case the prediction is shorter than the label
                    if output_tokens[w] != label_tokens[w]:
                        num_wrong += 1
                else:
                    # a label token with no prediction at all is an error
                    # (previously such positions inflated the total only)
                    num_wrong += 1
    rate = num_wrong / num_total
    print("{:.1f}".format(100 * rate))
    return rate
# print the phoneme error rate
# (re-create the single-pass reader since the previous one was exhausted)
test_reader = create_reader(dataPath['testing'], False)
evaluate_decoding(test_reader, model, i2w)
```
If you're using the model that was trained for one full epoch, then you should get a phoneme error rate of around 10%. Not bad! This means that for each of the 383,294 phonemes in the test set, our model predicted nearly 90% of them correctly (if you used the quickly-trained version of the model then you will get an error rate of around 45%). Now, let's work with an interactive session where we can input our own input sequences and see how the model predicts their pronunciation (i.e. phonemes). Additionally, we will visualize the Decoder's attention for these samples to see which graphemes in the input it deemed to be important for each phoneme that it produces. Note that in the examples below the results will only be good if you use a model that has been trained for at least one epoch.
## Interactive session
Here we will write an interactive function to make it easy to interact with the trained model and try out your own input sequences that do not appear in the test set. Please note that the results will be very poor if you just use the model that was trained for a very short amount of time. The model we used just above that was trained for one epoch does a good job, and if you have the time and patience to train the model for a full 30 epochs, it will perform very nicely.
We will first import some graphics libraries that make the attention visualization possible and then we will define the `translate` function that takes a numpy-based representation of the input and runs our model.
```
# imports required for showing the attention weight heatmap
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
def translate(tokens, model_decoding, vocab, i2w, show_attention=False):
    """Greedy-decode a single token sequence through *model_decoding*.

    *tokens* is a list of input symbols (graphemes); they are framed with
    the <s>/</s> tags the model was trained on.  Returns the list of
    predicted output tokens, or [] when *tokens* contains a symbol that
    is not in *vocab*.  When *show_attention* is set (and attention is in
    use), a heatmap of the attention weights is also displayed.
    """
    vdict = {v:i for i,v in enumerate(vocab)}
    try:
        w = [vdict["<s>"]] + [vdict[c] for c in tokens] + [vdict["</s>"]]
    except KeyError:
        # BUG FIX: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit; only unknown tokens raise KeyError.
        print('Input contains an unexpected token.')
        return []

    # convert to one_hot
    query = C.Value.one_hot([w], len(vdict))
    pred = model_decoding(query)
    pred = pred[0] # first sequence (we only have one) -> [len, vocab size]
    if use_attention:
        pred = pred[:,0,0,:] # attention has extra dimensions

    # print out translation and stop at the sequence-end tag
    prediction = np.argmax(pred, axis=-1)
    translation = [i2w[i] for i in prediction]

    # show attention window (requires matplotlib, seaborn, and pandas)
    if use_attention and show_attention:
        q = C.combine([model_decoding.attention_model.attention_weights])
        att_value = q(query)

        # get the attention data up to the length of the output (subset of the full window)
        att_value = att_value[0][0:len(prediction),0:len(w),0,0] # -> (len, span)

        # set up the actual words/letters for the heatmap axis labels
        columns = [i2w[ww] for ww in prediction]
        index = [i2w[ww] for ww in w]

        dframe = pd.DataFrame(data=np.fliplr(att_value.T), columns=columns, index=index)
        sns.heatmap(dframe)
        plt.show()

    return translation
```
The `translate` function above takes a list of letters input by the user as `tokens`, the greedy decoding version of our model `model_decoding`, the vocabulary `vocab`, a map of index to vocab `i2w`, and the `show_attention` option which determines if we will visualize the attention vectors or not.
We convert our input into a `one_hot` representation, run it through the model with `model_decoding(query)` and, since each prediction is actually a probability distribution over the entire vocabulary, we take the `argmax` to get the most probable token for each step.
To visualize the attention window, we use `combine` to turn the `attention_weights` into a CNTK Function that takes the inputs that we expect. This way, when we run the function `q`, the output will be the values of the `attention_weights`. We do some data manipulation to get this data into the format that `sns` expects, and we show the visualization.
Finally, we need to write the user-interaction loop which allows a user to enter multiple inputs.
```
def interactive_session(s2smodel, vocab, i2w, show_attention=False):
    """Interactively translate user-entered words to phoneme sequences
    using a greedy decoder around *s2smodel*; type "quit" to exit."""
    model_decoding = create_model_greedy(s2smodel) # wrap the greedy decoder around the model
    import sys
    print('Enter one or more words to see their phonetic transcription.')
    while True:
        if isTest(): # Testing a prefilled text for routine testing
            line = "psychology"
        else:
            line = input("> ")
        if line.lower() == "quit":
            break
        # tokenize. Our task is letter to sound.
        out_line = []
        for word in line.split():
            in_tokens = [c.upper() for c in word]
            # BUG FIX: honor the caller's show_attention flag instead of
            # hard-coding show_attention=True (the parameter was ignored).
            out_tokens = translate(in_tokens, model_decoding, vocab, i2w, show_attention=show_attention)
            out_line.extend(out_tokens)
        # drop </s> and the leading marker character of each phoneme token
        out_line = [" " if tok == '</s>' else tok[1:] for tok in out_line]
        print("=", " ".join(out_line))
        sys.stdout.flush()
        if isTest(): # If test environment we will test the translation only once
            break
The above function simply creates a greedy decoder around our model and then continually asks the user for an input which we pass to our `translate` function. Visualizations of the attention will continue being appended to the notebook until you exit the loop by typing `quit`. Please uncomment the following line to try out the interaction session.
```
interactive_session(model, vocab, i2w, show_attention=True)
```
Notice how the attention weights show how important different parts of the input are for generating different tokens in the output. For tasks like machine translation, where the order of one-to-one words often changes due to grammatical differences between languages, this becomes very interesting as we see the attention window move further away from the diagonal that is mostly displayed in grapheme-to-phoneme translations.
## What's next
With the above model, you have the basics for training a powerful sequence-to-sequence model with attention in a number of distinct domains. The only major change required is preparing a dataset with pairs of input and output sequences; in general, the rest of the building blocks will remain the same. Good luck, and have fun!
| github_jupyter |
<a href="https://colab.research.google.com/github/GoogleCloudPlatform/tensorflow-without-a-phd/blob/master/tensorflow-mnist-tutorial/keras_02_mnist_dense.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
### Parameters
```
BATCH_SIZE = 128
EPOCHS = 10
training_images_file = 'gs://mnist-public/train-images-idx3-ubyte'
training_labels_file = 'gs://mnist-public/train-labels-idx1-ubyte'
validation_images_file = 'gs://mnist-public/t10k-images-idx3-ubyte'
validation_labels_file = 'gs://mnist-public/t10k-labels-idx1-ubyte'
```
### Imports
```
import os, re, math, json, shutil, pprint
import PIL.Image, PIL.ImageFont, PIL.ImageDraw
import IPython.display as display
import numpy as np
import tensorflow as tf
from matplotlib import pyplot as plt
tf.enable_eager_execution()
print("Tensorflow version " + tf.__version__)
#@title visualization utilities [RUN ME]
"""
This cell contains helper functions used for visualization
and downloads only. You can skip reading it. There is very
little useful Keras/Tensorflow code here.
"""
# Matplotlib config
plt.ioff()
plt.rc('image', cmap='gray_r')
plt.rc('grid', linewidth=1)
plt.rc('xtick', top=False, bottom=False, labelsize='large')
plt.rc('ytick', left=False, right=False, labelsize='large')
plt.rc('axes', facecolor='F8F8F8', titlesize="large", edgecolor='white')
plt.rc('text', color='a8151a')
plt.rc('figure', facecolor='F0F0F0', figsize=(16,9))
# Matplotlib fonts
MATPLOTLIB_FONT_DIR = os.path.join(os.path.dirname(plt.__file__), "mpl-data/fonts/ttf")
# pull a batch from the datasets. This code is not very nice, it gets much better in eager mode (TODO)
def dataset_to_numpy_util(training_dataset, validation_dataset, N):
    """Pull one batch from each dataset and return them as numpy arrays.

    Returns (training_digits, training_labels, validation_digits,
    validation_labels) with labels converted from one-hot to class indices.
    Works both in eager mode and in TF1 graph mode (via a throwaway Session).
    """
    # get one batch from each: 10000 validation digits, N training digits
    batch_train_ds = training_dataset.apply(tf.data.experimental.unbatch()).batch(N)
    # eager execution: loop through datasets normally
    if tf.executing_eagerly():
        # take only the first batch from each dataset, then stop
        for validation_digits, validation_labels in validation_dataset:
            validation_digits = validation_digits.numpy()
            validation_labels = validation_labels.numpy()
            break
        for training_digits, training_labels in batch_train_ds:
            training_digits = training_digits.numpy()
            training_labels = training_labels.numpy()
            break
    else:
        # graph mode (TF1): build one-shot iterators and evaluate them once
        v_images, v_labels = validation_dataset.make_one_shot_iterator().get_next()
        t_images, t_labels = batch_train_ds.make_one_shot_iterator().get_next()
        # Run once, get one batch. Session.run returns numpy results
        with tf.Session() as ses:
            (validation_digits, validation_labels,
             training_digits, training_labels) = ses.run([v_images, v_labels, t_images, t_labels])
    # these were one-hot encoded in the dataset; convert to class indices
    validation_labels = np.argmax(validation_labels, axis=1)
    training_labels = np.argmax(training_labels, axis=1)
    return (training_digits, training_labels,
            validation_digits, validation_labels)
# create digits from local fonts for testing
def create_digits_from_local_fonts(n):
    """Render the digits 0-9 with two local matplotlib fonts as test images.

    Returns (font_digits, font_labels): a float32 array of shape [n, 28*28]
    with values in [0, 1], and the matching digit labels (i % 10).
    """
    font_labels = []
    img = PIL.Image.new('LA', (28*n, 28), color=(0, 255))  # format 'LA': black in channel 0, alpha in channel 1
    font1 = PIL.ImageFont.truetype(os.path.join(MATPLOTLIB_FONT_DIR, 'DejaVuSansMono-Oblique.ttf'), 25)
    font2 = PIL.ImageFont.truetype(os.path.join(MATPLOTLIB_FONT_DIR, 'STIXGeneral.ttf'), 25)
    d = PIL.ImageDraw.Draw(img)
    for i in range(n):
        font_labels.append(i % 10)
        # the second font is drawn slightly higher (y=-4) to compensate for its metrics
        d.text((7+i*28, 0 if i < 10 else -4), str(i % 10), fill=(255, 255), font=font1 if i < 10 else font2)
    font_digits = np.array(img.getdata(), np.float32)[:, 0]/255.0  # black in channel 0, alpha in channel 1 (discarded)
    # slice the wide 28 x 28n strip into n separate 28x28 images, flattened to 28*28
    font_digits = np.reshape(np.stack(np.split(np.reshape(font_digits, [28, 28*n]), n, axis=1), axis=0), [n, 28*28])
    return font_digits, font_labels
# utility to display a row of digits with their predictions
def display_digits(digits, predictions, labels, title, n):
    """Render n digit images side by side.

    Tick labels under each digit show `predictions`; any tick whose
    prediction disagrees with `labels` is colored red.
    """
    fig = plt.figure(figsize=(13, 3))
    # lay the n 28x28 images out horizontally as one 28 x 28n strip
    strip = np.reshape(np.swapaxes(np.reshape(digits, [n, 28, 28]), 0, 1), [28, 28*n])
    plt.yticks([])
    tick_positions = [28*col + 14 for col in range(n)]
    plt.xticks(tick_positions, predictions)
    plt.grid(b=None)
    for idx, tick in enumerate(plt.gca().xaxis.get_ticklabels()):
        if predictions[idx] != labels[idx]:
            tick.set_color('red')  # bad predictions in red
    plt.imshow(strip)
    plt.grid(None)
    plt.title(title)
    display.display(fig)
# utility to display multiple rows of digits, sorted by unrecognized/recognized status
def display_top_unrecognized(digits, predictions, labels, n, lines):
    """Show `lines` rows of n digits each, misclassified digits sorted first."""
    # argsort on the boolean match vector puts False (wrong predictions) first
    order = np.argsort(predictions == labels)
    caption = "{} sample validation digits out of {} with bad predictions in red and sorted first".format(n*lines, len(digits))
    for row in range(lines):
        lo, hi = row*n, (row + 1)*n
        display_digits(digits[order][lo:hi], predictions[order][lo:hi], labels[order][lo:hi],
                       caption if row == 0 else "", n)
def plot_learning_rate(lr_func, epochs):
    """Step-plot the per-epoch learning-rate schedule `lr_func` over [0, epochs]."""
    xx = np.arange(epochs+1, dtype=float)  # FIX: np.float alias was removed in NumPy 1.24
    y = [lr_func(x) for x in xx]           # BUG FIX: was `lr_decay` (undefined) — use the argument
    fig, ax = plt.subplots(figsize=(9, 6))
    ax.set_xlabel('epochs')
    # y[-2] is the rate of the last real epoch (y[-1] is the extra endpoint)
    ax.set_title('Learning rate\ndecays from {:0.3g} to {:0.3g}'.format(y[0], y[-2]))
    ax.minorticks_on()
    ax.grid(True, which='major', axis='both', linestyle='-', linewidth=1)
    ax.grid(True, which='minor', axis='both', linestyle=':', linewidth=0.5)
    ax.step(xx, y, linewidth=3, where='post')
    display.display(fig)
class PlotTraining(tf.keras.callbacks.Callback):
    """Keras callback that live-plots training curves in a notebook.

    Left axis: losses. Right axis: accuracies. Per-batch training metrics are
    sampled every `sample_rate` batches; validation metrics are added once per
    epoch with thicker lines. `zoom` > 1 narrows the y windows around loss ~ 0
    and accuracy ~ 1.
    """
    def __init__(self, sample_rate=1, zoom=1):
        self.sample_rate = sample_rate
        self.step = 0  # global batch counter, x-coordinate of the curves
        self.zoom = zoom
        # relies on the module-level BATch_SIZE constant and the 60,000-image
        # MNIST training set to convert steps to epochs on the x axis
        self.steps_per_epoch = 60000//BATCH_SIZE
    def on_train_begin(self, logs={}):
        self.batch_history = {}   # metric name -> sampled per-batch values
        self.batch_step = []      # steps at which batch metrics were sampled
        self.epoch_history = {}   # metric name -> per-epoch validation values
        self.epoch_step = []      # steps at each epoch boundary
        self.fig, self.axes = plt.subplots(1, 2, figsize=(16, 7))
        plt.ioff()  # suppress implicit display; we display the figure explicitly
    def on_batch_end(self, batch, logs={}):
        if (batch % self.sample_rate) == 0:
            self.batch_step.append(self.step)
            for k,v in logs.items():
                # do not log "batch" and "size" metrics that do not change
                # do not log training accuracy "acc"
                if k=='batch' or k=='size':# or k=='acc':
                    continue
                self.batch_history.setdefault(k, []).append(v)
        self.step += 1
    def on_epoch_end(self, epoch, logs={}):
        plt.close(self.fig)
        # redraw both axes from scratch every epoch
        self.axes[0].cla()
        self.axes[1].cla()
        self.axes[0].set_ylim(0, 1.2/self.zoom)
        self.axes[1].set_ylim(1-1/self.zoom/2, 1+0.1/self.zoom/2)
        self.epoch_step.append(self.step)
        for k,v in logs.items():
            # only log validation metrics
            if not k.startswith('val_'):
                continue
            self.epoch_history.setdefault(k, []).append(v)
        display.clear_output(wait=True)
        # losses on axes[0], everything else (accuracies) on axes[1];
        # x axis is in epochs (steps / steps_per_epoch)
        for k,v in self.batch_history.items():
            self.axes[0 if k.endswith('loss') else 1].plot(np.array(self.batch_step) / self.steps_per_epoch, v, label=k)
        for k,v in self.epoch_history.items():
            self.axes[0 if k.endswith('loss') else 1].plot(np.array(self.epoch_step) / self.steps_per_epoch, v, label=k, linewidth=3)
        self.axes[0].legend()
        self.axes[1].legend()
        self.axes[0].set_xlabel('epochs')
        self.axes[1].set_xlabel('epochs')
        self.axes[0].minorticks_on()
        self.axes[0].grid(True, which='major', axis='both', linestyle='-', linewidth=1)
        self.axes[0].grid(True, which='minor', axis='both', linestyle=':', linewidth=0.5)
        self.axes[1].minorticks_on()
        self.axes[1].grid(True, which='major', axis='both', linestyle='-', linewidth=1)
        self.axes[1].grid(True, which='minor', axis='both', linestyle=':', linewidth=0.5)
        display.display(self.fig)
```
### tf.data.Dataset: parse files and prepare training and validation datasets
Please read the [best practices for building](https://www.tensorflow.org/guide/performance/datasets) input pipelines with tf.data.Dataset
```
AUTO = tf.data.experimental.AUTOTUNE
def read_label(tf_bytestring):
    """Decode one raw MNIST label byte into a one-hot vector of length 10."""
    label = tf.decode_raw(tf_bytestring, tf.uint8)  # TF1-era API (file uses tf.enable_eager_execution)
    label = tf.reshape(label, [])                   # single byte -> scalar
    label = tf.one_hot(label, 10)
    return label
def read_image(tf_bytestring):
    """Decode one raw 28x28 MNIST image record into a flat float vector in [0, 1)."""
    image = tf.decode_raw(tf_bytestring, tf.uint8)
    image = tf.cast(image, tf.float32)/256.0  # scale byte values to [0, 1)
    image = tf.reshape(image, [28*28])        # flattened pixels for the dense model
    return image
def load_dataset(image_file, label_file):
    """Zip IDX-format image and label files into a dataset of (image, label) pairs."""
    # 16-byte IDX header precedes the fixed-length 28*28-byte image records
    imagedataset = tf.data.FixedLengthRecordDataset(image_file, 28*28, header_bytes=16)
    imagedataset = imagedataset.map(read_image, num_parallel_calls=16)
    # 8-byte IDX header precedes the 1-byte label records
    labelsdataset = tf.data.FixedLengthRecordDataset(label_file, 1, header_bytes=8)
    labelsdataset = labelsdataset.map(read_label, num_parallel_calls=16)
    dataset = tf.data.Dataset.zip((imagedataset, labelsdataset))
    return dataset
def get_training_dataset(image_file, label_file, batch_size):
    """Training pipeline: cached, shuffled, repeated, batched, prefetched."""
    dataset = load_dataset(image_file, label_file)
    dataset = dataset.cache()  # this small dataset can be entirely cached in RAM, for TPU this is important to get good performance from such a small dataset
    dataset = dataset.shuffle(5000, reshuffle_each_iteration=True)
    dataset = dataset.repeat()  # Mandatory for Keras for now
    dataset = dataset.batch(batch_size, drop_remainder=True)  # drop_remainder is important on TPU, batch size must be fixed
    dataset = dataset.prefetch(AUTO)  # fetch next batches while training on the current one (-1: autotune prefetch buffer size)
    return dataset
def get_validation_dataset(image_file, label_file):
    """Validation pipeline: cached, one single 10000-item batch, repeated."""
    dataset = load_dataset(image_file, label_file)
    dataset = dataset.cache()  # this small dataset can be entirely cached in RAM, for TPU this is important to get good performance from such a small dataset
    dataset = dataset.batch(10000, drop_remainder=True)  # 10000 items in eval dataset, all in one batch
    dataset = dataset.repeat()  # Mandatory for Keras for now
    return dataset
# instantiate the datasets
training_dataset = get_training_dataset(training_images_file, training_labels_file, BATCH_SIZE)
validation_dataset = get_validation_dataset(validation_images_file, validation_labels_file)
# For TPU, we will need a function that returns the dataset
training_input_fn = lambda: get_training_dataset(training_images_file, training_labels_file, BATCH_SIZE)
validation_input_fn = lambda: get_validation_dataset(validation_images_file, validation_labels_file)
```
### Let's have a look at the data
```
N = 24
(training_digits, training_labels,
validation_digits, validation_labels) = dataset_to_numpy_util(training_dataset, validation_dataset, N)
display_digits(training_digits, training_labels, training_labels, "training digits and their labels", N)
display_digits(validation_digits[:N], validation_labels[:N], validation_labels[:N], "validation digits and their labels", N)
font_digits, font_labels = create_digits_from_local_fonts(N)
```
### Keras model
If you are not sure what cross-entropy, dropout, softmax or batch-normalization mean, head here for a crash-course: [Tensorflow and deep learning without a PhD](https://github.com/GoogleCloudPlatform/tensorflow-without-a-phd/#featured-code-sample)
```
model = tf.keras.Sequential(
[
tf.keras.layers.Input(shape=(28*28,)),
tf.keras.layers.Dense(200, activation='relu'),
tf.keras.layers.Dense(60, activation='relu'),
tf.keras.layers.Dense(10, activation='softmax')
])
model.compile(optimizer='adam',
loss='categorical_crossentropy',
metrics=['accuracy'])
# print model layers
model.summary()
# utility callback that displays training curves
plot_training = PlotTraining(sample_rate=10, zoom=1)
```
### Train and validate the model
```
steps_per_epoch = 60000//BATCH_SIZE # 60,000 items in this dataset
print("Steps per epoch: ", steps_per_epoch)
history = model.fit(training_dataset, steps_per_epoch=steps_per_epoch, epochs=EPOCHS,
validation_data=validation_dataset, validation_steps=1, callbacks=[plot_training])
```
### Visualize predictions
```
# recognize digits from local fonts
probabilities = model.predict(font_digits, steps=1)
predicted_labels = np.argmax(probabilities, axis=1)
display_digits(font_digits, predicted_labels, font_labels, "predictions from local fonts (bad predictions in red)", N)
# recognize validation digits
probabilities = model.predict(validation_digits, steps=1)
predicted_labels = np.argmax(probabilities, axis=1)
display_top_unrecognized(validation_digits, predicted_labels, validation_labels, N, 7)
```
## License
---
author: Martin Gorner<br>
twitter: @martin_gorner
---
Copyright 2019 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
---
This is not an official Google product but sample code provided for an educational purpose
| github_jupyter |
# CX 4230, Spring 2016: [22] Input modeling
This notebook includes sample code to accompany the slides from the Monday, February 29 class. It does not contain any exercises.
```
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
%matplotlib inline
X = np.array ([105.84, 28.92, 98.64, 55.64,
128.04, 45.60, 67.80, 105.12,
48.48, 51.84, 173.40, 51.96,
54.12, 68.64, 93.12, 68.88,
84.12, 68.64, 41.52, 127.92,
42.12, 17.88, 33.00])
print (len (X), "observations:")
print (X)
```
For the next code cell, refer to the documentation for Scipy's [`linregress()`](http://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.linregress.html).
```
from scipy.stats import linregress
T = np.arange (len (X))
slope, intercept, rvalue, pvalue, stderr = linregress (T, X)
print ("Slope:", slope)
print ("Intercept:", intercept)
print ("p-value:", pvalue)
```
For the next code cell, refer to the documentation for Numpy's [`pad()`](http://docs.scipy.org/doc/numpy/reference/generated/numpy.pad.html#numpy.pad) function.
```
# Running means (fixed w)
w = 2  # window size
n, r = len(X), len(X) % w
n_w = (n - r) // w  # BUG FIX: integer division — np.reshape rejects a float dimension
X_w = np.reshape(X if r == 0 else X[:-r], (n_w, w))
np.pad(X_w, ((0, 0), (0, 1)), 'mean')  # append a per-window (row) mean column
def calc_windowed_mean(X, w):
    """Group X into consecutive windows of length w and compute window means.

    Trailing observations that do not fill a complete window are dropped.

    Parameters
    ----------
    X : 1-D array of observations
    w : window size

    Returns
    -------
    X_w : array of shape (n_windows, w+1); the last column holds each
          window's mean (np.pad with mode='mean' appends the row mean)
    T_w : midpoint "time" of each window (for plotting)
    """
    n, r = len(X), len(X) % w
    n_w = (n - r) // w  # BUG FIX: integer division — np.reshape rejects a float dimension
    if r == 0:
        X_w = np.reshape(X, (n_w, w))
    else:
        X_w = np.reshape(X[:-r], (n_w, w))
    # Add column of mean values
    X_w = np.pad(X_w, ((0, 0), (0, 1)), 'mean')
    T_w = np.arange(0, n - r, w) + w/2
    return X_w, T_w
# Demo
calc_windowed_mean (X, 2)
fig = plt.figure (figsize=(18, 6))
ax = fig.add_subplot (111)
for w in range (1, len (X)+1, 5):
X_w, T_w = calc_windowed_mean (X, w)
xp, yp = T_w, X_w[:, -1:]
ax.plot (xp, yp, 'o:', label=str (w))
ax.legend ()
def sample_mean(X):
    """Sample mean of the observations in X."""
    mu = np.mean(X)
    return mu
sample_mean (X)
def sample_autocovar(X, h):
    """Biased sample autocovariance of X at lag h (normalized by n, not n-|h|)."""
    n = len(X)
    lag = abs(h)
    mu = sample_mean(X)
    head = X[:n - lag]   # X_t
    tail = X[lag:n]      # X_{t+|h|}
    return np.sum((tail - mu) * (head - mu)) / n
# Demo
sample_autocovar (X, 3)
def sample_autocorr(X, h=None):
    """Sample autocorrelation of X.

    With a lag h, returns the scalar autocorrelation at that lag.
    With h=None, returns (C, H): autocorrelations at every lag in
    H = -(n-1), ..., n-1.
    """
    n = len(X)
    if h is None:
        # evaluate recursively at every lag
        H = np.arange(-(n-1), n)
        C = np.zeros(2*n - 1)
        for lag in H:
            C[(n-1) + lag] = sample_autocorr(X, lag)
        return C, H
    assert abs(h) < n
    return sample_autocovar(X, h) / sample_autocovar(X, 0)
# BUG FIX: removed a stray `assert False` left over from debugging — it aborted
# this cell with an AssertionError before the demo below could run.
# Demo
sample_autocorr (X)
def viz_autocorr(X):
    """Stem-plot the sample autocorrelation of X at every lag.

    Returns (fig, ax, C, H) so callers can inspect the values or adjust the plot.
    """
    C, H = sample_autocorr(X)
    fig = plt.figure(figsize=(18, 6))
    ax = fig.add_subplot(111)
    ax.stem(H, C, '-.')
    plt.title('Lag autocorrelations')
    ax.set_xlabel('Lag')
    return fig, ax, C, H
# Demo
_, _, _, _ = viz_autocorr (X)
```
The following code cell shows an alternative way to implement the sample autocorrelation measure using Scipy's built-in [`correlate()`](http://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.correlate.html) function.
```
from scipy.signal import correlate
def sample_autocorr2(X, h=None):
    """Sample autocorrelation of X computed via scipy.signal.correlate.

    With a lag h, returns the scalar autocorrelation at that lag; with
    h=None, returns (C, H) for all lags H = -(n-1), ..., n-1.
    """
    n = len(X)
    mu_X = np.mean(X)
    # full correlation of the demeaned signal with itself: length 2n-1
    Y = correlate((X - mu_X)/n, (X - mu_X)/n)
    # normalize by the lag-0 term, which sits at the center index n-1
    C = Y / Y[int(len(Y)/2)]
    H = np.arange(-(n-1), n)
    if h is not None:
        assert -n < h < n
        # BUG FIX: lag h lives at center index (n-1) plus the signed lag.
        # The original C[-(n-1)+h] was off by one (h=0 returned the lag-1 value).
        return C[(n-1) + h]
    else:
        return C, H
def viz_autocorr2(X):
    """Stem-plot the autocorrelations produced by sample_autocorr2.

    Returns (fig, ax, C, H) so callers can inspect the values or adjust the plot.
    """
    C, H = sample_autocorr2(X)
    fig = plt.figure(figsize=(18, 6))
    ax = fig.add_subplot(111)
    ax.stem(H, C, '-.')
    plt.title('Lag autocorrelations (Method 2)')
    ax.set_xlabel('Lag')
    return fig, ax, C, H
# Demo
_, _, _, _ = viz_autocorr2 (X)
```
| github_jupyter |
# Ex2 - Getting and Knowing your Data
Check out [Chipotle Exercises Video Tutorial](https://www.youtube.com/watch?v=lpuYZ5EUyS8&list=PLgJhDSE2ZLxaY_DigHeiIDC1cD09rXgJv&index=2) to watch a data scientist go through the exercises
This time we are going to pull data directly from the internet.
Special thanks to: https://github.com/justmarkham for sharing the dataset and materials.
### Step 1. Import the necessary libraries
```
import pandas as pd
import numpy as np
```
### Step 2. Import the dataset from this [address](https://raw.githubusercontent.com/justmarkham/DAT8/master/data/chipotle.tsv).
### Step 3. Assign it to a variable called chipo.
```
url = 'https://raw.githubusercontent.com/justmarkham/DAT8/master/data/chipotle.tsv'
chipo = pd.read_csv(url, sep = '\t')
```
### Step 4. See the first 10 entries
```
chipo.head(10)
```
### Step 5. What is the number of observations in the dataset?
```
# Solution 1
chipo.shape[0] # entries <= 4622 observations
# Solution 2
chipo.info() # entries <= 4622 observations
```
### Step 6. What is the number of columns in the dataset?
```
chipo.shape[1]
```
### Step 7. Print the name of all the columns.
```
chipo.columns
```
### Step 8. How is the dataset indexed?
```
chipo.index
```
### Step 9. Which was the most-ordered item?
```
c = chipo.groupby('item_name')
c = c.sum()
c = c.sort_values(['quantity'], ascending=False)
c.head(1)
```
### Step 10. For the most-ordered item, how many items were ordered?
```
c = chipo.groupby('item_name')
c = c.sum()
c = c.sort_values(['quantity'], ascending=False)
c.head(1)
```
### Step 11. What was the most ordered item in the choice_description column?
```
c = chipo.groupby('choice_description').sum()
c = c.sort_values(['quantity'], ascending=False)
c.head(1)
# Diet Coke 159
```
### Step 12. How many items were ordered in total?
```
total_items_orders = chipo.quantity.sum()
total_items_orders
```
### Step 13. Turn the item price into a float
#### Step 13.a. Check the item price type
```
chipo.item_price.dtype
```
#### Step 13.b. Create a lambda function and change the type of item price
```
dollarizer = lambda x: float(x[1:-1])
chipo.item_price = chipo.item_price.apply(dollarizer)
```
#### Step 13.c. Check the item price type
```
chipo.item_price.dtype
```
### Step 14. How much was the revenue for the period in the dataset?
```
revenue = (chipo['quantity']* chipo['item_price']).sum()
print('Revenue was: $' + str(np.round(revenue,2)))
```
### Step 15. How many orders were made in the period?
```
orders = chipo.order_id.value_counts().count()
orders
```
### Step 16. What is the average revenue amount per order?
```
# Solution 1
chipo['revenue'] = chipo['quantity'] * chipo['item_price']
order_grouped = chipo.groupby(by=['order_id']).sum()
order_grouped.mean()['revenue']
# Solution 2
chipo.groupby(by=['order_id']).sum().mean()['revenue']
```
### Step 17. How many different items are sold?
```
chipo.item_name.value_counts().count()
```
| github_jupyter |
```
import tensorflow as tf
import numpy as np
import random
import matplotlib.pyplot as plt
from zipfile import ZipFile
def unzip(nm):
    """Extract every member of the zip archive `nm` into the current directory."""
    with ZipFile(nm, "r") as archive:  # `archive` avoids shadowing the builtin `zip`
        archive.extractall()
unzip("archive.zip")
random.seed(123)
np.random.seed(123)
tf.random.set_seed(123)
train_ds = tf.keras.preprocessing.image_dataset_from_directory(
"TB_Chest_Radiography_Database",
validation_split = 0.2,
subset = "training",
seed = 123,
shuffle = True,
image_size = (224,224),
batch_size = 32,
)
val_ds = tf.keras.preprocessing.image_dataset_from_directory(
"TB_Chest_Radiography_Database",
validation_split = 0.2,
subset = "validation",
seed = 123,
shuffle = True,
image_size = (224,224),
batch_size = 32,
)
from tensorflow.data.experimental import cardinality
val_batches = cardinality(val_ds)
test_ds = val_ds.take(val_batches//5)
val_ds = val_ds.skip(val_batches//5)
class_names = train_ds.class_names
plt.figure(figsize=(12,12))
for images,labels in train_ds.take(1):
for i in range(4):
ax = plt.subplot(2,2,i+1)
plt.imshow(images[i].numpy().astype("uint8"))
plt.title(class_names[labels[i]])
plt.axis("off")
plt.show()
AUTOTUNE = tf.data.AUTOTUNE
train_ds = train_ds.prefetch(buffer_size=AUTOTUNE)
val_ds = val_ds.prefetch(buffer_size=AUTOTUNE)
test_ds = test_ds.prefetch(buffer_size=AUTOTUNE)
```
### Data Augmentation
```
from tensorflow.keras.layers import RandomZoom, RandomRotation
data_augmentation = tf.keras.Sequential([
RandomZoom(0.2),
RandomRotation(0.1),
])
```
### Model: CNN from Scratch
```
from tensorflow.keras import Input, Model
from tensorflow.keras.layers.experimental.preprocessing import Rescaling
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout
from tensorflow.keras.models import load_model
from tensorflow.keras.optimizers import RMSprop
from tensorflow.keras.losses import SparseCategoricalCrossentropy
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.models import load_model
class Tuber():
    """Builder for a small from-scratch CNN over 224x224x3 chest X-ray images,
    producing a 2-unit output (the dataset has two classes)."""
    def model(self, input):
        """Assemble and return the Keras Model for the given Input tensor."""
        # augment first (random zoom/rotation on raw pixels), then rescale to [0, 1]
        self.x = data_augmentation(input)
        self.x = Rescaling(1./255)(self.x)
        # strided convolutions downsample aggressively to keep the Flatten small
        self.x = Conv2D(64,3,activation="relu",padding="same",strides=(2,2))(self.x)
        self.x = MaxPooling2D()(self.x)
        self.x = Conv2D(128,3,activation="relu",padding="same",strides=(2,2))(self.x)
        self.x = Conv2D(128,3,activation="relu",padding="same",strides=(2,2))(self.x)
        self.x = Conv2D(256,3,activation="relu",padding="same",strides=(2,2))(self.x)
        self.x = MaxPooling2D()(self.x)
        self.x = Flatten()(self.x)
        self.x = Dense(128,activation="relu")(self.x)
        self.x = Dropout(0.2,seed=123)(self.x)
        self.x = Dense(64,activation="relu")(self.x)
        self.x = Dropout(0.2,seed=123)(self.x)
        # NOTE(review): 2 sigmoid units combined with SparseCategoricalCrossentropy
        # (see the compile call below this class) is unusual — the outputs are not a
        # normalized distribution; softmax is the conventional pairing. Confirm
        # before changing, since it alters training behavior.
        self.outputs = Dense(2,activation="sigmoid")(self.x)
        self.model = Model(input,self.outputs,name="Tuber")
        return self.model
tuber = Tuber()
model = tuber.model(Input(shape=(224,224,3)))
model.summary()
model.compile(RMSprop(),SparseCategoricalCrossentropy(),metrics=["accuracy"])
```
### Train and evaluate the model
```
if __name__=="__main__":
initial_epochs = 50
loss0,accuracy0 = model.evaluate(val_ds)
checkpoint = ModelCheckpoint("tuberculosis.hdf5",save_weights_only=False,monitor="val_accuracy",save_best_only=True)
model.fit(train_ds,epochs=initial_epochs,validation_data=val_ds,callbacks=[checkpoint])
best = load_model("tuberculosis.hdf5")
val_loss,val_accuracy = best.evaluate(val_ds)
test_loss,test_accuracy = best.evaluate(test_ds)
print("\nVal accuracy: {:.2f} %".format(100*val_accuracy))
print("Val loss: {:.2f} %".format(100*val_loss))
print("\nTest accuracy: {:.2f} %".format(100*test_accuracy))
print("Test loss: {:.2f} %".format(100*test_loss))
```
| github_jupyter |
```
import numpy as np
import pandas as pd
from plotnine import *
from mizani.transforms import trans
```
### Guitar Neck ###
*Using a transformed x-axis to visualise guitar chords*
The x-axis is transformed to resemble the narrowing width of frets on a 25.5 inch Strat. To do that
we create custom transformation.
The key parts of *any* transform object are the `transform` and `inverse` functions.
```
class frets_trans(trans):
    """
    Frets Transformation

    Maps a fret number to its physical distance (in inches) from the nut on a
    25.5-inch-scale guitar, so the plotted x-axis narrows like real frets.
    """
    number_of_frets = 23  # Including fret 0
    domain = (0, number_of_frets-1)

    @staticmethod
    def transform(x):
        # 12-tone equal temperament fret spacing: distance from the nut for fret x
        x = np.asarray(x)
        return 25.5 - (25.5 / (2 ** (x/12)))

    @staticmethod
    def inverse(x):
        # invert the spacing formula: recover the fret number from a distance
        x = np.asarray(x)
        return 12 * np.log2(25.5/(25.5-x))

    @classmethod
    def breaks_(cls, limits):
        # Fixed major breaks: only the two domain endpoints
        return cls.domain

    @classmethod
    def minor_breaks(cls, major, limits):
        # The major breaks as passed to this method are in transformed space.
        # The minor breaks are calculated in data space to reveal the
        # non-linearity of the scale.
        _major = cls.inverse(major)
        minor = cls.transform(np.linspace(*_major, cls.number_of_frets))
        return minor
```
The above transform is different from most in that its breaks and minor breaks do not change. This is common for very specialized scales. It can also be a key requirement when creating graphics for demonstration purposes.
Some chord Data
```
# Notes: the 0 fret is an open strum, all other frets are played half-way between fret bars.
# The strings are 1:low E, 2: A, 3: D, 4: G, 5: B, 6: E
c_chord = pd.DataFrame({
'Fret': [0, 2.5, 1.5, 0, 0.5, 0],
'String': [1, 2, 3, 4, 5, 6]
})
# Sequence based on the number of notes in the chord
c_chord['Sequence'] = list(range(1, 1+len(c_chord['Fret'])))
# Standard markings for a Stratocaster
markings = pd.DataFrame({
'Fret': [2.5, 4.5, 6.5, 8.5, 11.5, 11.5, 14.5, 16.5, 18.5, 20.5],
'String': [3.5, 3.5, 3.5, 3.5, 2, 5, 3.5, 3.5, 3.5, 3.5]
})
```
Visualizing the chord
```
# Look and feel of the graphic
neck_color = '#FFDDCC'
fret_color = '#998888'
string_color = '#AA9944'
neck_theme = theme(
figure_size=(10, 2),
panel_background=element_rect(fill=neck_color),
panel_grid_major_y=element_line(color=string_color, size=2.2),
panel_grid_major_x=element_line(color=fret_color, size=2.2),
panel_grid_minor_x=element_line(color=fret_color, size=1)
)
# The plot
(ggplot(c_chord, aes('Fret', 'String'))
+ geom_path(aes(color='Sequence'), size=3)
+ geom_point(aes(color='Sequence'), fill='#FFFFFF', size=3)
+ geom_point(data=markings, fill='#000000', size=4)
+ scale_x_continuous(trans=frets_trans)
+ scale_y_continuous(breaks=range(0, 7), minor_breaks=[])
+ guides(color=False)
+ neck_theme
)
```
**Credit**: This example was motivated by [Jonathan Vitale](https://github.com/jonvitale) who wanted to create graphics for a guitar scale trainer.
| github_jupyter |
<a href="https://colab.research.google.com/github/pachterlab/GFCP_2021/blob/main/notebooks/vcy_scvelo_comparison.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
#Figure 1: The user-facing workflows of `velocyto` and `scVelo`
In this notebook, we reanalyze the La Manno et al. forebrain dataset using the default settings in `velocyto` and `scVelo`. The resulting PCA velocity embeddings lead to different conclusions about the relationships between the cells.
# Dependencies
```
%%capture
pip install scvelo==0.2.3 --quiet
%%capture
pip install --upgrade git+https://github.com/meichenfang/velocyto.py.git
%%capture
pip install scanpy
%%capture
pip install umap-learn
!mkdir -p figure
!mkdir -p data
```
Obtain the data.
```
from google.colab import drive
drive.mount('/content/drive')
!cp -r /content/drive/MyDrive/rnavelocity/hgForebrainGlut.loom /content/data
# from urllib.request import urlretrieve
# urlretrieve("http://pklab.med.harvard.edu/velocyto/hgForebrainGlut/hgForebrainGlut.loom", "data/hgForebrainGlut.loom")
import matplotlib.pyplot as plt
import numpy as np
from sklearn.neighbors import NearestNeighbors
from sklearn.decomposition import PCA
from sklearn.linear_model import LinearRegression
from sklearn.manifold import TSNE
import umap
import velocyto as vcy
import anndata as ad
import warnings
import scvelo as scv
from vis import *
```
## velocyto
```
vlm = vcy.VelocytoLoom(loom_filepath="data/hgForebrainGlut.loom")
vlm.ca
preprocess(vlm)
```
#### phasePlots
Display a sample phase plot for ELAVL4, with imputation over each cell's nearest neighbors (the call below uses $k=50$; for comparison, `scVelo`'s default is $k=30$).
```
gene_idx_spec=int(np.where(vlm.ra['Gene']=="ELAVL4")[0][0])
getImputed(vlm, knn_k=50)
def plotPhase2(ax, vlm, gene_idx):
    '''
    Plot the phase portrait (spliced vs. unspliced counts) for one gene,
    with the fitted steady-state line overlaid.

    Parameters
    ----------
    ax : matplotlib Axes to draw on
    vlm : VelocytoLoom-like object; must carry imputed unspliced/spliced
          matrices `Ux`/`Sx`, per-cell `colors`, and fitted per-gene
          slope `gammas` and offset `q`
    gene_idx : row index of the gene to plot

    Returns
    -------
    None (draws on `ax` in place)
    '''
    y = vlm.Ux[gene_idx,:]
    x = vlm.Sx[gene_idx,:]
    k = vlm.gammas[gene_idx]   # fitted slope (degradation rate estimate)
    b = vlm.q[gene_idx]        # fitted intercept
    ax.scatter(x, y, c=vlm.colors)
    ax.set_xlabel('spliced')
    ax.set_ylabel('unspliced')
    # dashed steady-state line across the data range
    x_ = np.array([np.amin(x), np.amax(x)])
    ax.plot(x_, x_*k+b, color='black', linewidth=4, linestyle='dashed')
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    return
fig, ax = plt.subplots(figsize=(5,5))
plotPhase2(ax, vlm, gene_idx_spec)
plt.setp(plt.gcf().get_axes(), xticks=[], yticks=[])
plt.savefig('phase.pdf')
```
### Velocity Grid Embedding
This workflow reproduces the [notebook](https://github.com/velocyto-team/velocyto-notebooks/blob/master/python/hgForebrainGlutamatergic.ipynb) used to generate Fig. 4 of "RNA Velocity of Single Cells."
```
# --- velocyto analysis of hgForebrainGlut (reproduces Fig. 4 of the paper) ---
# Load the loom file and derive per-cell cluster labels.
vlm = vcy.VelocytoLoom("data/hgForebrainGlut.loom")
labels = vlm.ca["Clusters"]
# One "annotation" per raw cluster id, keyed by its string form.
manual_annotation = {str(i):[i] for i in labels}
# Invert: raw cluster id -> annotation name.
annotation_dict = {v:k for k, values in manual_annotation.items() for v in values }
clusters = np.array([annotation_dict[i] for i in labels])
# 20 distinct colors assembled from two interleaved matplotlib colormaps.
colors20 = np.vstack((plt.cm.tab20b(np.linspace(0., 1, 20))[::2], plt.cm.tab20c(np.linspace(0, 1, 20))[1::2]))
vlm.set_clusters(clusters, cluster_colors_dict={k:colors20[v[0] % 20,:] for k,v in manual_annotation.items()})
# just to find the initial cell size
vlm.normalize("S", size=True, log=False)
vlm.normalize("U", size=True, log=False)
# Gene filtering round 1: minimum spliced detection levels.
vlm.score_detection_levels(min_expr_counts=30, min_cells_express=20,
                           min_expr_counts_U=0, min_cells_express_U=0)
vlm.filter_genes(by_detection_levels=True)
# Keep the ~2000 most variable genes by a CV-vs-mean SVR fit.
vlm.score_cv_vs_mean(2000, plot=True, max_expr_avg=50, winsorize=True, winsor_perc=(1,99.8), svr_gamma=0.01, min_expr_cells=50)
vlm.filter_genes(by_cv_vs_mean=True)
# Gene filtering round 2: minimum unspliced detection and cluster expression.
vlm.score_detection_levels(min_expr_counts=0, min_cells_express=0,
                           min_expr_counts_U=25, min_cells_express_U=20)
vlm.score_cluster_expression(min_avg_U=0.007, min_avg_S=0.06)
vlm.filter_genes(by_detection_levels=True, by_cluster_expression=True)
# Normalize spliced/unspliced totals against each other.
vlm.normalize_by_total(min_perc_U=0.5)
vlm.adjust_totS_totU(normalize_total=True, fit_with_low_U=False, svr_C=1, svr_gamma=1e-04)
vlm.perform_PCA()
#plt.plot(np.cumsum(vlm.pca.explained_variance_ratio_)[:100])
# Choose the number of PCs at the first drop of explained-variance gain below 0.0055.
n_comps = np.where(np.diff(np.diff(np.cumsum(vlm.pca.explained_variance_ratio_))>0.0055))[0][0]
# Flip PC2 for display orientation only.
vlm.pcs[:,1] *= -1
!pip install igraph --quiet
from sklearn.neighbors import NearestNeighbors
import igraph
# Louvain community detection on a kNN graph built in the first 4 PCs.
nn = NearestNeighbors(n_neighbors=50)
nn.fit(vlm.pcs[:,:4])
knn_pca = nn.kneighbors_graph(mode='distance')
knn_pca = knn_pca.tocoo()
G = igraph.Graph(list(zip(knn_pca.row, knn_pca.col)), directed=False, edge_attrs={'weight': knn_pca.data})
VxCl = G.community_multilevel(return_levels=False, weights="weight")
labels = np.array(VxCl.membership)
from numpy_groupies import aggregate, aggregate_np
# kNN imputation in PCA space; b_sight/b_maxl are clamped to the cell count.
k = 550
vlm.knn_imputation(n_pca_dims=n_comps,k=k, balanced=True,
                   b_sight=np.minimum(k*8, vlm.S.shape[1]-1),
                   b_maxl=np.minimum(k*4, vlm.S.shape[1]-1))
vlm.normalize_median()
# Fit per-gene degradation rates (gammas) and compute velocity/extrapolation.
vlm.fit_gammas(maxmin_perc=[2,95], limit_gamma=True)
vlm.normalize(which="imputed", size=False, log=True)
vlm.Pcs = np.array(vlm.pcs[:,:2], order="C")
vlm.predict_U()
vlm.calculate_velocity()
vlm.calculate_shift()
vlm.extrapolate_cell_at_t(delta_t=1)
# Project velocities onto the 2-D PC embedding and smooth onto a grid.
vlm.estimate_transition_prob(hidim="Sx_sz", embed="Pcs", transform="log", psc=1,
                             n_neighbors=150, knn_random=True, sampled_fraction=1)
vlm.calculate_embedding_shift(sigma_corr = 0.05, expression_scaling=False)
vlm.calculate_grid_arrows(smooth=0.9, steps=(25, 25), n_neighbors=200)
# labels = vlm.ca["Clusters"]
# cluster_colors_dict={l:colors20[l % 20,:] for l in labels}
# vlm.colors=[cluster_colors_dict[label] for label in labels]
plt.figure(None,(9,9))
vlm.plot_grid_arrows(scatter_kwargs_dict={"alpha":0.7, "lw":0.7, "edgecolor":"0.4", "s":70, "rasterized":True},
                     min_mass=2.9, angles='xy', scale_units='xy',
                     headaxislength=2.75, headlength=5, headwidth=4.8, quiver_scale=0.35, scale_type="absolute")
# plt.plot(pc_obj.projections[pc_obj.ixsort,0], pc_obj.projections[pc_obj.ixsort,1], c="w", lw=6, zorder=1000000)
# plt.plot(pc_obj.projections[pc_obj.ixsort,0], pc_obj.projections[pc_obj.ixsort,1], c="k", lw=3, zorder=2000000)
plt.gca().invert_xaxis()
plt.axis("off")
plt.axis("equal");
```
## scvelo
```
# update to the latest version, if not done yet.
import scvelo as scv
scv.logging.print_version()
scv.settings.verbosity = 3 # show errors(0), warnings(1), info(2), hints(3)
scv.settings.presenter_view = True # set max width size for presenter view
scv.set_figure_params('scvelo') # for beautified visualization
adata = scv.read('data/hgForebrainGlut.loom', cache=True)
scv.pp.filter_and_normalize(adata, min_shared_counts=20, n_top_genes=2000)
scv.pp.moments(adata, n_pcs=30, n_neighbors=30)
scv.tl.velocity(adata)
adata
scv.tl.velocity_graph(adata)
#Get colors
labels = vlm.ca["Clusters"]
cluster_colors_dict={l:colors20[l % 20,:] for l in labels}
#colors=[cluster_colors_dict[label] for str(label) in labels]
cluster_colors_dict
scv.pl.velocity_embedding_stream(adata, basis='pca',color='Clusters',palette=cluster_colors_dict)
```
| github_jupyter |
```
%matplotlib inline
# Use PyVista's white "document" theme for all plots in this notebook.
from pyvista import set_plot_theme
set_plot_theme('document')
```
Compare Field Across Mesh Regions
=================================
Here is some velocity data from a glacier modelling simulation that is
compared across nodes in the simulation. We have simplified the mesh to
have the simulation node value already on the mesh.
This was originally posted to
[pyvista/pyvista-support\#83](https://github.com/pyvista/pyvista-support/issues/83).
The modeling results are courtesy of [Urruty
Benoit](https://github.com/BenoitURRUTY) and are from the
[Elmer/Ice](http://elmerice.elmerfem.org) simulation software.
```
# sphinx_gallery_thumbnail_number = 2
import pyvista as pv
from pyvista import examples
import numpy as np

# Load the sample data
mesh = examples.download_antarctica_velocity()
# Scalar speed from the 3-component "ssavelocity" vector field.
mesh["magnitude"] = np.linalg.norm(mesh["ssavelocity"], axis=1)
mesh
```
Here is a helper to extract regions of the mesh based on the simulation
node.
```
def extract_node(node):
    """Return the subset of the global ``mesh`` whose "node_value" equals *node*."""
    mask = mesh["node_value"] == node
    return mesh.extract_points(mask)
# Label each simulation node at the center of its region.
p = pv.Plotter()
p.add_mesh(mesh, scalars="node_value")
for node in np.unique(mesh["node_value"]):
    loc = extract_node(node).center
    p.add_point_labels(loc, [f"Node {node}"])
p.show(cpos="xy")
# Shared plotting kwargs: log-scaled velocity magnitude colormap.
vel_dargs = dict(scalars="magnitude", clim=[1e-3, 1e4], cmap='Blues', log_scale=True)
mesh.plot(cpos="xy", **vel_dargs)
# Compare two specific regions side by side.
a = extract_node(12)
b = extract_node(20)
pl = pv.Plotter()
pl.add_mesh(a, **vel_dargs)
pl.add_mesh(b, **vel_dargs)
pl.show(cpos='xy')
```
plot vectors without mesh
```
# Plot velocity glyphs (arrows oriented by "ssavelocity") for the two regions.
pl = pv.Plotter()
pl.add_mesh(a.glyph(orient="ssavelocity", factor=20), **vel_dargs)
pl.add_mesh(b.glyph(orient="ssavelocity", factor=20), **vel_dargs)
# Fixed top-down camera over the regions of interest.
pl.camera_position = [(-1114684.6969340036, 293863.65389149904, 752186.603224546),
                      (-1114684.6969340036, 293863.65389149904, 0.0),
                      (0.0, 1.0, 0.0)]
pl.show()
```
Compare directions. Normalize them so we can get a reasonable direction
comparison.
```
# Normalize each region's velocity vectors to unit length so only direction
# (not speed) is compared.
flow_a = a.point_arrays['ssavelocity'].copy()
flow_a /= np.linalg.norm(flow_a, axis=1).reshape(-1, 1)
flow_b = b.point_arrays['ssavelocity'].copy()
flow_b /= np.linalg.norm(flow_b, axis=1).reshape(-1, 1)

# plot normalized vectors
pl = pv.Plotter()
pl.add_arrows(a.points, flow_a, mag=10000, color='b', label='flow_a')
pl.add_arrows(b.points, flow_b, mag=10000, color='r', label='flow_b')
pl.add_legend()
pl.camera_position = [(-1044239.3240694795, 354805.0268606294, 484178.24825854995),
                      (-1044239.3240694795, 354805.0268606294, 0.0),
                      (0.0, 1.0, 0.0)]
pl.show()
```
Compute the component of flow\_a that agrees with the mean flow path of flow\_b
```
# Dot product of each unit vector in A with the mean direction of B:
# +1 = same direction, -1 = opposite.
agree = flow_a.dot(flow_b.mean(0))
pl = pv.Plotter()
pl.add_mesh(a, scalars=agree, cmap='bwr',
            scalar_bar_args={'title': 'Flow agreement with block b'})
pl.add_mesh(b, color='w')
pl.show(cpos='xy')

# And the reverse comparison: B against A's mean direction.
agree = flow_b.dot(flow_a.mean(0))
pl = pv.Plotter()
pl.add_mesh(a, color='w')
pl.add_mesh(b, scalars=agree, cmap='bwr',
            scalar_bar_args={'title': 'Flow agreement with block a'})
pl.show(cpos='xy')
```
| github_jupyter |
```
# Import Python packages
import pickle
# Import Third party packages
import numpy as np
import matplotlib.pyplot as plt

# Ground-truth PDE terms for each of the four systems in Fig. 4a.
S1_terms = ['u', 'du/dx', 'f']
S2_terms = ['u', 'du/dx', 'f', 'u^{2}']
S3_terms = ['du/dx', 'f']
S4_terms = ['f']
true_terms = [S1_terms, S2_terms, S3_terms, S4_terms]
# File-name tags for each system; "NLSL" is the tag used for system S2 in the
# data files (presumably the nonlinear system) — TODO confirm against the data.
tags = ["S1", "NLSL", "S3", "S4"]
def compute_spurious_terms(results, true_terms):
    """Annotate each result dict (in place) with a 'spurious' count.

    The count is the number of learned terms that are not in *true_terms*
    plus the number of true terms missing from the learned coefficients.
    Returns the same list for convenience.
    """
    for entry in results:
        learned = entry['coeffs']
        # Terms identified by the model that are not in the true equation.
        wrong = sum(1 for term in learned if term not in true_terms)
        # True terms the model failed to recover.
        missing = sum(1 for term in true_terms if term not in learned)
        entry['spurious'] = wrong + missing
    return results
# Create empty list for plotting
plot_lists = []
# Prepare the results list
for tag, true_term in zip(tags, true_terms):
    # define file stem, load data, compute spurious terms
    file_stem = "./data/Fig4a-{}-".format(tag)
    results = pickle.load(open(file_stem +"results.pickle", "rb"))
    results = compute_spurious_terms(results, true_term)
    print(tag, results[0]['coeffs'].keys())
    # Prepare lists for plotting: noise magnitude (x), loss relative to the
    # minimum loss (y1), and spurious-term count (y2).
    plot_nm = [result['noise_mag'] for result in results]
    plot_losses = [result['loss'] for result in results]
    min_loss = min(plot_losses)
    plot_losses = [loss-min_loss for loss in plot_losses]
    plot_spurious = [result['spurious'] for result in results]
    # save to plot_lists:
    plot_lists.append([plot_nm, plot_losses, plot_spurious])
import matplotlib as mpl
# Legend styling for the separately-saved legend figure below.
mpl.rcParams["legend.markerscale"] = 1.5
mpl.rcParams["legend.labelspacing"] = 1.2
mpl.rcParams["legend.handlelength"] = 3.5
mpl.rcParams["legend.handletextpad"] = 20
pltstyle=dict(linestyle=None,marker='o')
figsize = (6,4)
# Create figure: one row per system, shared noise-magnitude x-axis.
fig, axs = plt.subplots(4,1, sharex=True, figsize=figsize)
axs = axs.flatten()
for ax1, plot_list in zip(axs,plot_lists):
    # Get the results:
    plot_nm, plot_losses, plot_spurious = plot_list
    # set axes
    #ax1.autoscale(False, axis='y')
    # Twin y-axes: loss error (left, black) vs spurious terms (right, red).
    ax2 = ax1.twinx()
    ax1.plot(plot_nm, plot_losses, color='black', label = "PDE Find Loss Error", **pltstyle)
    ax2.plot(plot_nm, plot_spurious, color='red', label="# Spurious Terms", **pltstyle)
    ax2.spines['right'].set_color('red')
    # Place the legend
    lines = ax1.get_lines()+ax2.get_lines()
    labels = [line.get_label() for line in lines]
    # NOTE(review): the next line overwrites the labels with empty strings, so
    # the legend rendered at the end is blank — presumably intentional for the
    # publication figure (legend saved separately), but worth confirming.
    labels = ['' for line in lines]
    # adjust axis scales
    #ax1.set_ylim([0,50])
    #ax2.set_ylim([0,10])
    # Turn off all the tick labels
    #ax1.tick_params(labelbottom=False, labelleft=False)
    #ax2.tick_params(labelright=False)
    #ax2.tick_params(axis='y', colors='red')
## Save figure
#plt.savefig('./Figs/4a-NLSL-noise-vs-error.svg', dpi=600, transparent=True)
plt.show()
# Create separate axes holding only the (blank-labeled) legend.
legend_figsize = (figsize[0]*2, figsize[1]/5)
plt.figure(figsize=legend_figsize)
ax = plt.gca()
for spine in ax.spines:
    ax.spines[spine].set_visible(False)
ax.tick_params(labelleft=False, labelbottom=False, left=False, bottom=False)
plt.legend(lines, labels, ncol=2, loc='center', frameon=False)
#plt.savefig('./Figs/4-legend.svg', dpi=600, transparent=True)
| github_jupyter |
```
# This notebook is used to decide on a tolerable level of corruptableness.
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from scipy.stats import entropy as KL_divergence
from slda.topic_models import BLSLDA
from modules.helpers import plot_images
# Generate topics
# We assume a vocabulary of 'rows'^2 terms, and create 'rows'*2 "topics",
# where each topic assigns exactly 'rows' consecutive terms equal probability.
# Generate topics: a vocabulary of rows^2 terms and rows*2 topics, where each
# topic spreads equal probability over one row (first loop) or one column
# (second loop) of the rows x rows grid.
rows = 3
V = rows * rows      # vocabulary size
K = rows * 2         # number of topics
N = K * K            # words per document
D = 10000            # number of documents
seed = 42
topics = []
# Row topics: uniform over `rows` consecutive terms, shifted by whole rows.
topic_base = np.concatenate((np.ones((1, rows)) * (1/rows),
                             np.zeros((rows-1, rows))), axis=0).ravel()
for i in range(rows):
    topics.append(np.roll(topic_base, i * rows))
# Column topics: uniform over every rows-th term, shifted by one.
topic_base = np.concatenate((np.ones((rows, 1)) * (1/rows),
                             np.zeros((rows, rows-1))), axis=1).ravel()
for i in range(rows):
    topics.append(np.roll(topic_base, i))
topics = np.array(topics)
# Generate documents from topics
# We generate D documents from these V topics by sampling D topic
# distributions, one for each document, from a Dirichlet distribution with
# parameter α=(1,…,1)
# Generate documents: sample one topic distribution per document from a
# symmetric Dirichlet(1,...,1), then topic and word assignments per token.
alpha = np.ones(K)
np.random.seed(seed)
thetas = np.random.dirichlet(alpha, size=D)
topic_assignments = np.array([np.random.choice(range(K), size=N, p=theta)
                              for theta in thetas])
word_assignments = np.array([[np.random.choice(range(V), size=1,
                                               p=topics[topic_assignments[d, n]])[0]
                              for n in range(N)] for d in range(D)])
# Bag-of-words counts: one histogram over the vocabulary per document.
doc_term_matrix = np.array([np.histogram(word_assignments[d], bins=V,
                                         range=(0, V - 1))[0] for d in range(D)])
#Generate responses
# choose parameter values
# Generate binary responses from a linear model on the topic proportions,
# then fit the binary logistic supervised LDA model.
# choose parameter values
mu = 0.
nu2 = 1.
np.random.seed(seed)
eta = np.random.normal(loc=mu, scale=nu2, size=K)
# plot histogram of pre-responses
zeta = np.array([np.dot(eta, thetas[i]) for i in range(D)])
# Binarize: positive pre-response -> label 1.
y = (zeta >= 0).astype(int)
# choose parameter values for the sampler (underscore = model hyperparameters)
_K = K
_alpha = alpha
_beta = np.repeat(0.01, V)
_mu = mu
_nu2 = nu2
_b = 7.25
n_iter = 1500
blslda = BLSLDA(_K, _alpha, _beta, _mu, _nu2, _b, n_iter, seed=seed, n_report_iter=100)
blslda.fit(doc_term_matrix, y)
# Recovered topic-word distributions.
results = blslda.phi
# For each recovered topic, report its KL divergence to the closest true topic.
# NOTE(review): minimized_KL starts at 1, so the printed value is capped at 1
# even if no true topic is within KL < 1 — presumably an intentional ceiling,
# but verify.
for res in results:
    minimized_KL = 1
    for topic in topics:
        KL = KL_divergence(topic, res)
        if KL < minimized_KL:
            minimized_KL = KL
    print(minimized_KL)
plot_images(plt, results, (rows, rows), (2, rows))
# Sampler convergence diagnostics: full trace and recent first differences.
plt.figure()
plt.plot(blslda.loglikelihoods)
plt.figure()
plt.plot(np.diff(blslda.loglikelihoods)[-100:])
```
| github_jupyter |
# Chapter 1, figures 3 and 4
This notebook will show you how to produce figures 1.3 and 1.4 after the predictive modeling is completed.
The predictive modeling itself, unfortunately, doesn't fit in a notebook. The number-crunching can take several hours, and although logistic regression itself is not complicated, the practical details -- dates, authors, multiprocessing to speed things up, etc -- turn it into a couple thousand lines of code. (If you want to dig into that, see ```chapter1/code/biomodel.py```, and the scripts in ```/logistic``` at the top level of the repo.)
Without covering those tangled details, this notebook can still explore the results of modeling in enough depth to give you a sense of some important choices made along the way.
### Define modeling parameters
I start by finding an optimal number of features for the model, and also a value for C (the regularization constant). To do this I run a "grid search" that tests different values of both parameters. (I use the "gridsearch" option in biomodel, aka: ```python3 biomodel.py gridsearch```.) The result looks like this:

where darker red squares indicate higher accuracies. I haven't labeled the axes correctly, but the vertical axis here is number of features (from 800 to 2500), and the horizontal axis is the C parameter (from .0012 to 10, logarithmically).
It's important to use the same sample size for this test that you plan to use in the final model: in this case a rather small group of 150 volumes (75 positive and 75 negative), because I want to be able to run models in periods as small as 20 years. With such a small sample, it's important to run the gridsearch several times, since the selection of a particular 150 volumes introduces considerable random variability into the process.
One could tune the C parameter for each sample, and I try that in a different chapter, but my experience is that it introduces complexity without actually changing results--plus I get anxious about overfitting through parameter selection. Probably better just to confirm results with multiple samples and multiple C settings. A robust result should hold up.
I've tested the differentiation of genres with multiple parameter settings, and it does hold up. But for figure 1.3, I settled on 1100 features (words) and C = 0.015 as settings that fairly consistently produce good results for the biography / fiction boundary. With those settings in place, it's possible to move on to the next step.
### Assess accuracy across time: Figure 1.3
I do this by running ```python3 biomodel.py usenewdata``` (the contrast between 'new' and 'old' metadata will become relevant later in this notebook). That produces a file of results visualized below.
```
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
%matplotlib inline
import random
# Per-period model accuracies produced by `biomodel.py usenewdata`.
accuracy_df = pd.read_csv('../modeloutput/finalbiopredicts.csv')
accuracy_df.head()
# I "jitter" results horizontally because we often have multiple results with the same x and y coordinates.
def jitteraframe(df, yname):
    """Horizontally jitter duplicate points in *df*, in place.

    When a (center, y) pair has already been seen, the row's 'center' value
    is shifted by a random integer in [-6, 6] so overlapping markers become
    visible when plotted.
    """
    seen = dict()  # x position -> set of y values already placed there
    for idx in df.index:
        x = df.loc[idx, 'center']
        y = df.loc[idx, yname]
        if x not in seen:
            seen[x] = set()
        elif y in seen[x]:
            # Exact duplicate: dodge horizontally by a random offset.
            offset = random.choice([-6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6])
            x = x + offset
            df.loc[idx, 'center'] = x
        if x not in seen:
            seen[x] = set()
        seen[x].add(y)
# Jitter overlapping points, then scatter accuracy against period center.
jitteraframe(accuracy_df, 'accuracy')
fig, ax = plt.subplots(figsize = (9, 9))
ax.margins(0.1)
ax.plot(accuracy_df.center, accuracy_df.accuracy, marker = 'o', linestyle = '', alpha = 0.5)
ax.annotate('accuracy', xy = (1700,1), fontsize = 16)
plt.show()
```
#### assessment
There's a lot of random variation with this small sample size, but it's still perfectly clear that accuracy rises across this timeline. It may not be a linear relationship: it looks like the boundary between fiction and biography may be sharpest around 1910, and rather than a smooth line, it might be two regimes divided around 1850. But it's still quite clear that accuracy rises: if we modeled it simply as a linear correlation, it would be strong and significant.
```
# Linear correlation of accuracy with period start year (coefficient, p-value).
from scipy.stats import pearsonr
pearsonr(accuracy_df.floor, accuracy_df.accuracy)
```
The first number is the correlation coefficient; the second a p value.
### Plotting individual volume probabilities: Figure 1.4
In a sense plotting individual volumes is extremely simple. My modeling process writes files that record the metadata for each volume along with a column **logistic** that reports the predicted probability of being in the positive class (in this case, fiction). We can just plot the probabilities on the y axis, and dates used for modeling on the x axis. Have done that below.
```
# Load the per-volume predictions from the six 50-year models and concatenate.
root = '../modeloutput/'
frames = []
for floor in range(1700, 2000, 50):
    sourcefile = root + 'theninehundred' + str(floor) + '.csv'
    thisframe = pd.read_csv(sourcefile)
    frames.append(thisframe)
df = pd.concat(frames)
df.head()
# Scatter predicted probability of fiction by date, colored by true class.
groups = df.groupby('realclass')
groupnames = {0: 'biography', 1: 'fiction'}
groupcolors = {0: 'k', 1: 'r'}
fig, ax = plt.subplots(figsize = (9, 9))
ax.margins(0.1)
for code, group in groups:
    ax.plot(group.dateused, group.logistic, marker='o', linestyle='', ms=6, alpha = 0.66, color = groupcolors[code], label=groupnames[code])
ax.legend(numpoints = 1, loc = 'upper left')
plt.show()
```
#### caveats
The pattern you see above is real, and makes a nice visual emblem of generic differentiation. However, there are some choices involved worth reflection. The probabilities plotted above were produced by six models, trained on 50-year segments of the timeline, using 1100 features **and a C setting of 0.00008**. That C setting works fine, but it's much lower than the one I chose as optimal for assessing accuracy. What happens if we use instead C = 0.015, and in fact simply reuse the evidence from figure 1.3 unchanged?
The accuracies recorded in ```finalbiopredicts.csv``` come from a series of models named ```cleanpredictbio``` (plus some more info). I haven't saved all of them, but we have the last model in each sequence of 15. We can plot those probabilities.
```
# Same plot built from the higher-C models used for figure 1.3 instead.
root = '../modeloutput/'
frames = []
for floor in range(1700, 2000, 20):
    if floor == 1720:
        continue
        # the first model covers 40 years
    sourcefile = root + 'cleanpredictbio' + str(floor) + '2017-10-15.csv'
    thisframe = pd.read_csv(sourcefile)
    frames.append(thisframe)
df = pd.concat(frames)
# 20-year moving averages of predicted probability for each class.
bio = []
fic = []
for i in range (1710, 1990):
    segment = df[(df.dateused > (i - 10)) & (df.dateused < (i + 10))]
    bio.append(np.mean(segment[segment.realclass == 0].logistic))
    fic.append(np.mean(segment[segment.realclass == 1].logistic))
# Scatter the volumes plus the two trend lines.
groups = df.groupby('realclass')
groupnames = {0: 'biography', 1: 'fiction'}
groupcolors = {0: 'k', 1: 'r'}
fig, ax = plt.subplots(figsize = (9, 9))
ax.margins(0.1)
for code, group in groups:
    ax.plot(group.dateused, group.logistic, marker='o', linestyle='', ms=6, alpha = 0.5, color = groupcolors[code], label=groupnames[code])
ax.plot(list(range(1710,1990)), bio, c = 'k')
ax.plot(list(range(1710,1990)), fic, c = 'r')
ax.legend(numpoints = 1, loc = 'upper left')
plt.show()
```
Whoa, that's a different picture!
If you look closely, there's still a pattern of differentiation: probabilities are more dispersed in the early going, and probs of fiction and biography overlap more. Later on, a space opens up between the genres. I've plotted the mean trend lines to confirm the divergence.
But the picture *looks* very different. This model uses less aggressive regularization (the bigger C constant makes it more confident), so most probabilities hit the walls around 1.0 or 0.0.
This makes it less obvious, visually, that differentiation is a phenomenon affecting the whole genre. We actually *do* see a significant change in medians here, as well as means. But it would be hard to see with your eyeballs, because the trend lines are squashed toward the edges.
So I've chosen to use more aggressive regularization (and a smaller number of examples) for the illustration in the book. That's a debatable choice, and a consequential one: as I acknowledge above, it changes the way we understand the word *differentiation.* I think there are valid reasons for the choice. Neither of the illustrations above is "truer" than the other; they are alternate, valid perspectives on the same evidence. But if you want to use this kind of visualization, it's important to recognize that tuning the regularization constant will very predictably give you this kind of choice. It can't make a pattern of differentiation appear out of thin air, but it absolutely does change the distribution of probabilities across the y axis. It's a visual-rhetorical choice that needs acknowledging.
| github_jupyter |
# Ch 9 Multi-Agent Reinforcement Learning
##### Listing 9.3
```
import numpy as np
import torch
from matplotlib import pyplot as plt
def init_grid(size=(10,)):
    """Random binary spin grid.

    Draws standard-normal noise of the given shape and thresholds it at
    zero, returning a uint8 tensor of 0s and 1s.
    """
    noise = torch.randn(*size)
    return (noise > 0).byte()
def get_reward(s, a):
    """Reward for action *a* given neighbor states *s*.

    Starts at -1, adds 0.9 for every neighbor whose state matches the
    action, and doubles the total: all-agree is positive, all-disagree is -2.
    """
    reward = -1
    for neighbor in s:
        reward = reward + 0.9 if neighbor == a else reward
    return reward * 2.
```
##### Listing 9.4
```
def gen_params(N, size):
    """Create *N* trainable parameter vectors of length *size*.

    Each vector is small random noise (randn scaled by 0.1) with
    requires_grad enabled so it can be optimized directly.
    """
    params = []
    for _ in range(N):
        vec = (torch.randn(size) / 10.).requires_grad_()
        params.append(vec)
    return params
```
##### Listing 9.5
```
def qfunc(s, theta, layers=[(4,20),(20,2)], afn=torch.tanh):
    """Two-layer Q-network whose weights live in the flat vector *theta*.

    The first np.prod(layers[0]) entries of theta form the input weight
    matrix, the next np.prod(layers[1]) form the output matrix. A constant
    bias of 1 is added to the hidden pre-activation (ELU); the output layer
    uses *afn* and is returned flattened.
    """
    shape1, shape2 = layers[0], layers[1]
    n1 = np.prod(shape1)
    n2 = np.prod(shape2)
    w1 = theta[0:n1].reshape(shape1)
    w2 = theta[n1:n1 + n2].reshape(shape2)
    bias = torch.ones((1, w1.shape[1]))
    hidden = torch.nn.functional.elu(s @ w1 + bias)
    return afn(hidden @ w2).flatten()
```
##### Listing 9.6
```
def get_substate(b): #A
    """One-hot encode a binary value: nonpositive -> [1,0], positive -> [0,1]."""
    s = torch.zeros(2)
    s[1 if b > 0 else 0] = 1
    return s

def joint_state(s): #C
    """Joint one-hot state of two neighbors.

    Outer product of the two substate vectors, flattened to length 4, so
    exactly one entry is 1 — one for each of the four (left, right) combos.
    """
    left = get_substate(s[0])
    right = get_substate(s[1])
    return (left.reshape(2, 1) @ right.reshape(1, 2)).flatten()
```
##### Listing 9.7
```
# 1-D Ising setup: 20 spins, one independent Q-network parameter vector per
# spin (input 4 = joint neighbor state, hidden 20, output 2 actions).
plt.figure(figsize=(8,5))
size = (20,) #A
hid_layer = 20 #B
params = gen_params(size[0],4*hid_layer+hid_layer*2) #C
grid = init_grid(size=size)
grid_ = grid.clone()  # working copy updated during each sweep
print(grid)
plt.imshow(np.expand_dims(grid,0))
```
##### Listing 9.8
```
# Train the 1-D Ising agents: each spin observes its two (wrap-around)
# neighbors, picks the argmax action, and regresses its chosen Q-value
# toward the immediate reward.
epochs = 200
lr = 0.001
losses = [[] for i in range(size[0])]  # one loss history per spin
for i in range(epochs):
    for j in range(size[0]):
        # Left/right neighbor indices with periodic (ring) boundaries.
        l = j - 1 if j - 1 >= 0 else size[0]-1
        r = j + 1 if j + 1 < size[0] else 0
        state_ = grid[[l,r]]
        state = joint_state(state_)
        qvals = qfunc(state.float().detach(),params[j],layers=[(4,hid_layer),(hid_layer,2)])
        qmax = torch.argmax(qvals,dim=0).detach().item()
        action = int(qmax)
        grid_[j] = action  # stage the new spin value; applied after the sweep
        reward = get_reward(state_.detach(),action)
        # Target equals current Q except the taken action gets the reward.
        with torch.no_grad():
            target = qvals.clone()
            target[action] = reward
        loss = torch.sum(torch.pow(qvals - target,2))
        losses[j].append(loss.detach().numpy())
        loss.backward()
        # Manual SGD step; rebind and re-enable grad for the next iteration.
        with torch.no_grad():
            params[j] = params[j] - lr * params[j].grad
            params[j].requires_grad = True
    # Commit all staged spin flips synchronously at the end of the sweep.
    with torch.no_grad():
        grid.data = grid_.data
```
##### Visualization of 1D Ising Model
```
# Per-spin loss scatter (top) and the final spin configuration (bottom).
fig,ax = plt.subplots(2,1)
for i in range(size[0]):
    ax[0].scatter(np.arange(len(losses[i])),losses[i])
print(grid,grid.sum())
ax[1].imshow(np.expand_dims(grid,0))
```
##### Listing 9.9
```
from collections import deque #A
from random import shuffle #B
def softmax_policy(qvals, temp=0.9): #C
    """Sample one action index from a Boltzmann distribution over *qvals*.

    Lower *temp* sharpens the distribution toward the greedy action.
    Returns a 1-element tensor holding the sampled index.
    """
    scaled = torch.exp(qvals / temp)
    probs = scaled / torch.sum(scaled)
    return torch.multinomial(probs, 1)
```
##### Listing 9.10
```
def get_coords(grid, j): #A
    """Map a flat cell index *j* to (x, y) coordinates on *grid*.

    x is the row (j // width) and y the remainder, where width is
    grid.shape[0].
    """
    width = grid.shape[0]
    x, y = divmod(j, width)
    return int(x), int(y)
def get_reward_2d(action, action_mean): #D
    """Mean-field reward: how well a one-hot *action* matches *action_mean*.

    Computes the overlap of the action with the neighborhood mean action
    (shifted by half the action itself), then squashes through tanh(5*r)
    to keep the reward in (-1, 1).
    """
    overlap = (action * (action_mean - action / 2)).sum() / action.sum()
    return torch.tanh(5 * overlap)
# Sanity check: an action aligned with the majority mean field earns a
# higher reward than one opposed to it.
x1 = get_reward_2d(torch.Tensor([1,0]),torch.Tensor([0.25, 0.75]))
x2 = get_reward_2d(torch.Tensor([0,1]),torch.Tensor([0.25, 0.75]))
print(x1,x2)
```
##### Listing 9.11
```
def mean_action(grid, j):
    """Average one-hot action of the 8 neighbors of flat cell *j*.

    Neighbor lookups wrap around the grid edges (toroidal topology).
    Returns a length-2 tensor that sums to 1.
    """
    x, y = get_coords(grid, j)
    rows, cols = grid.shape[0], grid.shape[1]
    total = torch.zeros(2)
    for dx in (-1, 0, 1):
        for dy in (-1, 0, 1):
            if dx == 0 and dy == 0:
                continue  # skip the cell itself
            nx, ny = x + dx, y + dy
            # Wrap each coordinate around the edges.
            if nx < 0:
                nx = rows - 1
            if ny < 0:
                ny = cols - 1
            if nx >= rows:
                nx = 0
            if ny >= cols:
                ny = 0
            total += get_substate(grid[nx, ny])
    return total / total.sum()
# 2-D Ising setup: 10x10 grid, a single shared Q-network (input 2 = mean
# field of the neighborhood, hidden 10, output 2 actions).
size = (10,10)
J = np.prod(size)  # number of agents/cells
hid_layer = 10
layers = [(2,hid_layer),(hid_layer,2)]
params = gen_params(1,2*hid_layer+hid_layer*2)
grid = init_grid(size=size)
grid_ = grid.clone()   # state used for mean-field computation
grid__ = grid.clone()  # staging buffer for new actions
plt.imshow(grid)
print(grid.sum())
```
##### Listing 9.12
```
# Train the 2-D mean-field Ising agents with a small experience replay.
epochs = 75
lr = 0.0001
num_iter = 3  # mean-field/action alternations per epoch
losses = [ [] for i in range(size[0])]
replay_size = 50
replay = deque(maxlen=replay_size)
batch_size = 10
gamma = 0.9
# NOTE(review): `losses` is re-initialized here (per-agent, J lists); the
# line three above is dead.
losses = [[] for i in range(J)]
for i in range(epochs):
    act_means = torch.zeros((J,2))
    q_next = torch.zeros(J)  # max Q-value per agent, used as bootstrap value
    for m in range(num_iter):
        for j in range(J):
            # Neighborhood mean action is the agent's entire observed state.
            action_mean = mean_action(grid_,j).detach()
            act_means[j] = action_mean.clone()
            qvals = qfunc(action_mean.detach(),params[0],layers=layers)
            action = softmax_policy(qvals.detach(),temp=0.5)
            grid__[get_coords(grid_,j)] = action
            q_next[j] = torch.max(qvals).detach()
        grid_.data = grid__.data
    grid.data = grid_.data
    actions = torch.stack([get_substate(a.item()) for a in grid.flatten()])
    rewards = torch.stack([get_reward_2d(actions[j],act_means[j]) for j in range(J)])
    # One experience per epoch holds all agents' transitions.
    exp = (actions,rewards,act_means,q_next)
    replay.append(exp)
    shuffle(replay)
    if len(replay) > batch_size:
        ids = np.random.randint(low=0,high=len(replay),size=batch_size)
        exps = [replay[idx] for idx in ids]
        for j in range(J):
            jacts = torch.stack([ex[0][j] for ex in exps]).detach()
            jrewards = torch.stack([ex[1][j] for ex in exps]).detach()
            jmeans = torch.stack([ex[2][j] for ex in exps]).detach()
            vs = torch.stack([ex[3][j] for ex in exps]).detach()
            qvals = torch.stack([ qfunc(jmeans[h].detach(),params[0],layers=layers) \
                                  for h in range(batch_size)])
            target = qvals.clone().detach()
            # NOTE(review): this indexes target columns for ALL rows at once
            # rather than per-sample rows (target[range, argmax]); preserved
            # as written from the book listing — verify intended semantics.
            target[:,torch.argmax(jacts,dim=1)] = jrewards + gamma * vs
            loss = torch.sum(torch.pow(qvals - target.detach(),2))
            losses[j].append(loss.item())
            loss.backward()
            with torch.no_grad():
                params[0] = params[0] - lr * params[0].grad
                params[0].requires_grad = True
# Mean loss curve over agents (top) and the final grid (bottom).
fig,ax = plt.subplots(2,1)
fig.set_size_inches(10,10)
ax[0].plot(np.array(losses).mean(axis=0))
ax[1].imshow(grid)
```
##### Listing 9.13
```
import magent
import math
from scipy.spatial.distance import cityblock  # Manhattan distance between agents

# Two-team MAgent "battle" environment on a 30x30 grid.
map_size = 30
env = magent.GridWorld("battle", map_size=map_size)
env.set_render_dir("MAgent/build/render")  # where env.render() writes frames
team1, team2 = env.get_handles()  # handles for the two opposing teams
```
##### Listing 9.14
```
# Network and episode hyperparameters for the battle experiment.
hid_layer = 25
in_size = 359    # flattened observation plus the 21-dim mean field
act_space = 21
layers = [(in_size,hid_layer),(hid_layer,act_space)]
params = gen_params(2,in_size*hid_layer+hid_layer*act_space)  # one vector per team
map_size = 30
width = height = map_size
n1 = n2 = 16  # agents per team
gap = 1       # horizontal gap between the two starting squares
epochs = 100
replay_size = 70
batch_size = 25
# Place team 1 in a square block left of center...
side1 = int(math.sqrt(n1)) * 2
pos1 = []
for x in range(width//2 - gap - side1, width//2 - gap - side1 + side1, 2):
    for y in range((height - side1)//2, (height - side1)//2 + side1, 2):
        pos1.append([x, y, 0])
# ...and team 2 in a mirrored block right of center.
side2 = int(math.sqrt(n2)) * 2
pos2 = []
for x in range(width//2 + gap, width//2 + gap + side2, 2):
    for y in range((height - side2)//2, (height - side2)//2 + side2, 2):
        pos2.append([x, y, 0])
env.reset()
env.add_agents(team1, method="custom", pos=pos1)
env.add_agents(team2, method="custom", pos=pos2)
plt.imshow(env.get_global_minimap(30,30)[:,:,:].sum(axis=2))
```
##### Listing 9.15
```
def get_neighbors(j, pos_list, r=6): #A
    """Indices of agents within Manhattan distance *r* of agent *j*.

    Agent *j* itself is excluded; the comparison is strict (< r).
    """
    anchor = pos_list[j]
    return [i for i, pos in enumerate(pos_list)
            if i != j and cityblock(pos, anchor) < r]
# Example: neighbors of team-1 agent number 5 at the current positions.
get_neighbors(5,env.get_pos(team1))
```
##### Listing 9.16
```
def get_onehot(a, l=21): #A
    """Return a length-*l* one-hot vector with index *a* set to 1.

    Bug fix: the original allocated a fixed ``torch.zeros(21)`` and ignored
    the *l* parameter entirely; it now honors *l*. Default behavior (l=21)
    is unchanged.
    """
    x = torch.zeros(l)
    x[a] = 1
    return x
def get_scalar(v): #B
    """Inverse of one-hot encoding: the index of the maximum entry."""
    return v.argmax()
def get_mean_field(j, pos_list, act_list, r=7, l=21): #C
    """Mean one-hot action over the agents within distance *r* of agent *j*.

    Returns the zero vector when agent *j* has no neighbors. NOTE: get_onehot
    is called with its default length, so the accumulation assumes l == 21,
    matching the original listing.
    """
    mean_field = torch.zeros(l)
    for idx in get_neighbors(j, pos_list, r=r):
        mean_field += get_onehot(act_list[idx])
    mass = mean_field.sum()
    return mean_field / mass if mass > 0 else mean_field
```
##### Listing 9.17
```
def infer_acts(obs, param, layers, pos_list, acts, act_space=21, num_iter=5, temp=0.5):
    """Mean-field action inference for all agents of one team.

    Alternates *num_iter* times between (1) recomputing every agent's
    neighborhood mean field from the current sampled actions and
    (2) resampling every agent's action from its Q-values via the softmax
    policy. Returns (sampled actions, mean fields, Q-values).
    """
    n_agents = acts.shape[0]
    mean_fields = torch.zeros(n_agents, act_space)
    sampled = acts.clone()  # work on a copy; caller's tensor is untouched
    qvals = torch.zeros(n_agents, act_space)
    for _ in range(num_iter):
        for j in range(n_agents):
            mean_fields[j] = get_mean_field(j, pos_list, sampled)
        for j in range(n_agents):
            # State = flattened observation concatenated with the mean field.
            state = torch.cat((obs[j].flatten(), mean_fields[j]))
            q = qfunc(state.detach(), param, layers=layers)
            qvals[j, :] = q[:]
            sampled[j] = softmax_policy(q.detach(), temp=temp)
    return sampled, mean_fields, qvals
def init_mean_field(N, act_space=21):
    """Random initial mean fields: N probability vectors of length act_space.

    Each row is nonnegative and normalized to sum to 1.
    """
    fields = torch.abs(torch.rand(N, act_space))
    for row in range(fields.shape[0]):
        fields[row] = fields[row] / fields[row].sum()
    return fields
```
##### Listing 9.18
```
def train(batch_size, replay, param, layers, J=64, gamma=0.5, lr=0.001):
    """One minibatch Q-learning step over experiences sampled from *replay*.

    Each replay entry is (obs, action, reward, mean_field, next_qvals) for a
    single agent, as stored by add_to_replay. Returns the mean loss.

    Fixes vs. the book listing:
    * The TD target is written per sample (``target[row, action]``) instead of
      ``target[:, jacts]``, which broadcast every sample's target into whole
      columns for all rows.
    * The SGD step updates *param* in place (``param -= ...``); the original
      rebound a local name, so the caller's parameter vector was never
      actually updated. The gradient is zeroed afterwards to avoid
      accumulation across calls.
    """
    ids = np.random.randint(low=0, high=len(replay), size=batch_size)
    exps = [replay[idx] for idx in ids]
    losses = []
    # Unpack the experience tuples into batched tensors.
    jobs = torch.stack([ex[0] for ex in exps]).detach()
    jacts = torch.stack([ex[1] for ex in exps]).detach()
    jrewards = torch.stack([ex[2] for ex in exps]).detach()
    jmeans = torch.stack([ex[3] for ex in exps]).detach()
    vs = torch.stack([ex[4] for ex in exps]).detach()
    qs = []
    for h in range(batch_size):
        # State = flattened observation concatenated with the mean field.
        state = torch.cat((jobs[h].flatten(), jmeans[h]))
        qs.append(qfunc(state.detach(), param, layers=layers))
    qvals = torch.stack(qs)
    target = qvals.clone().detach()
    # Each sample's own taken action gets the TD target. Assumes jacts holds
    # scalar action indices, as stored by add_to_replay — TODO confirm.
    rows = torch.arange(batch_size)
    target[rows, jacts.long()] = jrewards + gamma * torch.max(vs, dim=1)[0]
    loss = torch.sum(torch.pow(qvals - target.detach(), 2))
    losses.append(loss.detach().item())
    loss.backward()
    # SGD: update in place so the caller's parameter tensor actually changes.
    with torch.no_grad():
        param -= lr * param.grad
        param.grad.zero_()
    return np.array(losses).mean()
```
##### Listing 9.19
```
# Per-team bookkeeping: agent counts, random initial actions, replay buffers,
# bootstrap Q-values, initial mean fields, and reward/loss holders.
N1 = env.get_num(team1)
N2 = env.get_num(team2)
step_ct = 0
acts_1 = torch.randint(low=0,high=act_space,size=(N1,))
acts_2 = torch.randint(low=0,high=act_space,size=(N2,))
replay1 = deque(maxlen=replay_size)
replay2 = deque(maxlen=replay_size)
qnext1 = torch.zeros(N1)
qnext2 = torch.zeros(N2)
act_means1 = init_mean_field(N1,act_space)
act_means2 = init_mean_field(N2,act_space)
rewards1 = torch.zeros(N1)
rewards2 = torch.zeros(N2)
losses1 = []
losses2 = []
```
##### Listing 9.20
```
def team_step(team, param, acts, layers):
    """One inference pass for a whole team using the global ``env``.

    Reads observations and agent ids/positions from the environment, keeps
    only observation channels 1 and 4, and runs mean-field action inference.
    Returns (actions, mean fields, Q-values, reduced observations, agent ids).
    """
    obs = env.get_observation(team)
    ids = env.get_agent_id(team)
    # Keep only two observation channels (indices 1 and 4) to shrink the state.
    obs_small = torch.from_numpy(obs[0][:, :, :, [1, 4]])
    agent_pos = env.get_pos(team)
    acts, act_means, qvals = infer_acts(obs_small, param, layers, agent_pos, acts)
    return acts, act_means, qvals, obs_small, ids
def add_to_replay(replay, obs_small, acts, rewards, act_means, qnext):
    """Append one experience tuple per agent to the replay buffer.

    Each tuple is (observation, action, reward, mean field, next Q-value)
    for a single agent. Returns the same deque for convenience.
    """
    n_agents = rewards.shape[0]
    for agent in range(n_agents):
        replay.append((obs_small[agent], acts[agent], rewards[agent],
                       act_means[agent], qnext[agent]))
    return replay
```
##### Listing 9.21
```
# Main battle training loop: both teams act, the environment steps, rewards
# are collected, experiences stored, dead agents pruned, and each team's
# network trained from its own replay buffer.
for i in range(epochs):
    done = False
    while not done:  # one episode
        acts_1, act_means1, qvals1, obs_small_1, ids_1 = \
            team_step(team1,params[0],acts_1,layers)
        env.set_action(team1, acts_1.detach().numpy().astype(np.int32))
        # NOTE(review): team 2 acts with params[0] here (and below), yet
        # train() at the bottom updates params[1] for team 2 — verify which
        # parameter vector team 2 is supposed to use.
        acts_2, act_means2, qvals2, obs_small_2, ids_2 = \
            team_step(team2,params[0],acts_2,layers)
        env.set_action(team2, acts_2.detach().numpy().astype(np.int32))
        done = env.step()  # advance the environment one tick
        # Re-run inference to obtain bootstrap Q-values for the next state.
        _, _, qnext1, _, ids_1 = team_step(team1,params[0],acts_1,layers)
        _, _, qnext2, _, ids_2 = team_step(team2,params[0],acts_2,layers)
        env.render()  # write a frame to the render directory
        rewards1 = torch.from_numpy(env.get_reward(team1)).float()
        rewards2 = torch.from_numpy(env.get_reward(team2)).float()
        # Store per-agent experiences and shuffle the buffers.
        replay1 = add_to_replay(replay1, obs_small_1, acts_1,rewards1,act_means1,qnext1)
        replay2 = add_to_replay(replay2, obs_small_2, acts_2,rewards2,act_means2,qnext2)
        shuffle(replay1)
        shuffle(replay2)
        # Remember each agent's positional index paired with its env id so
        # surviving agents' actions can be kept after clear_dead().
        ids_1_ = list(zip(np.arange(ids_1.shape[0]),ids_1))
        ids_2_ = list(zip(np.arange(ids_2.shape[0]),ids_2))
        env.clear_dead()  # remove killed agents from the environment
        ids_1 = env.get_agent_id(team1)
        ids_2 = env.get_agent_id(team2)
        ids_1_ = [i for (i,j) in ids_1_ if j in ids_1]
        ids_2_ = [i for (i,j) in ids_2_ if j in ids_2]
        acts_1 = acts_1[ids_1_]  # drop actions of dead agents
        acts_2 = acts_2[ids_2_]
        step_ct += 1
        if step_ct > 250:
            break
        # Train each team once its buffer is full enough.
        if len(replay1) > batch_size and len(replay2) > batch_size:
            loss1 = train(batch_size,replay1,params[0],layers=layers,J=N1)
            loss2 = train(batch_size,replay2,params[1],layers=layers,J=N1)
            losses1.append(loss1)
            losses2.append(loss2)
```
| github_jupyter |
# Store tracts and rental listings in PostGIS
...for a fast spatial-join of listings to tracts.
First, create the database from command prompt if it doesn't already exist:
```
createdb -U postgres craigslist_tracts
psql -U postgres -d craigslist_tracts -c "CREATE EXTENSION postgis;"
```
More info in the psycopg2 docs: http://initd.org/psycopg/docs/usage.html
```
import geopandas as gpd
import pandas as pd
import psycopg2
from shapely.geometry import Point
from keys import pg_user, pg_pass, pg_host, pg_port, pg_db
# All geometries are kept in WGS84 (EPSG:4326).
crs = {'init' : 'epsg:4326'}

%%time
# load tracts and project to 4326
tracts = gpd.read_file('data/us_census_tracts_2014')
tracts = tracts.to_crs(crs)
print(len(tracts))

%%time
# load listings and set initial crs to 4326 (point geometry from lng/lat)
listings = pd.read_csv('data/craigslist_listings_cleaned.csv')
geometry = listings.apply(lambda row: Point((row['lng'], row['lat'])), axis=1)
listings = gpd.GeoDataFrame(listings, geometry=geometry, crs=crs)
print(len(listings))

# Both layers must share a CRS for the spatial join to be valid.
assert tracts.crs == listings.crs

# srid is the numeric spatial reference ID PostGIS uses
srid = tracts.crs['init'].strip('epsg:')

#listings = listings.sample(1000)
#tracts = tracts[['GEOID', 'ALAND', 'geometry']].sample(1000)
```
## Upload tracts and listings to PostGIS
```
connection = psycopg2.connect(database=pg_db,
user=pg_user,
password=pg_pass,
host=pg_host,
port=pg_port)
cursor = connection.cursor()
# list all tables
cursor.execute("select relname from pg_class where relkind='r' and relname !~ '^(pg_|sql_)'")
cursor.fetchall()
```
#### add tracts table
```
# drop tracts table if it already exists, then create tracts table
cursor.execute("DROP TABLE IF EXISTS tracts")
cursor.execute("CREATE TABLE tracts (id SERIAL PRIMARY KEY, geoid VARCHAR NOT NULL, aland BIGINT NOT NULL)")
cursor.execute("SELECT AddGeometryColumn ('tracts', 'geom', %s, 'MULTIPOLYGON', 2)", [srid])
cursor.execute("CREATE INDEX tract_index ON tracts USING GIST(geom)")
connection.commit()
%%time
cursor.execute("DELETE FROM tracts")
# insert each tract into the tracts table one at a time
for label, row in tracts.iterrows():
geoid = row['GEOID']
aland = row['ALAND']
geometry_wkt = row['geometry'].wkt
query = """INSERT INTO tracts (geoid, aland, geom)
VALUES (%s, %s, ST_Multi(ST_GeomFromText(%s, %s)))"""
data = (geoid, aland, geometry_wkt, srid)
cursor.execute(query, data)
connection.commit()
```
#### add listings table
```
# drop listings table if it already exists, then create listings table
# drop listings table if it already exists, then create listings table
cursor.execute("DROP TABLE IF EXISTS listings")
cursor.execute("""CREATE TABLE listings (id SERIAL PRIMARY KEY,
date VARCHAR NOT NULL,
region VARCHAR NOT NULL,
bedrooms INTEGER,
rent REAL,
sqft REAL)""")
# register the POINT geometry column with the same SRID as the tracts layer
cursor.execute("SELECT AddGeometryColumn ('listings', 'geom', %s, 'POINT', 2)", [srid])
# GIST spatial index for the listings-to-tracts spatial join
cursor.execute("CREATE INDEX listing_index ON listings USING GIST(geom)")
connection.commit()
%%time
# clear any rows left from a previous run before reinserting
cursor.execute("DELETE FROM listings")
# insert each listing into the listings table one at a time
for label, row in listings.iterrows():
    date = row['date']
    region = row['region']
    bedrooms = row['bedrooms']
    rent = row['rent']
    sqft = row['sqft']
    geometry_wkt = row['geometry'].wkt
    # bedrooms can be null, but must be None for psycopg2 to insert it as a null value, not a 'NaN' string
    if pd.isnull(bedrooms):
        bedrooms = None
    # parameterized insert; psycopg2 handles quoting/escaping of the values
    query = """
INSERT INTO listings (date, region, bedrooms, rent, sqft, geom)
VALUES (%s, %s, %s, %s, %s, ST_GeomFromText(%s, %s))
"""
    data = (date, region, bedrooms, rent, sqft, geometry_wkt, srid)
    cursor.execute(query, data)
connection.commit()
```
#### optimize the database
```
%%time
# vacuum and analyze the database to optimize it after building indices and inserting rows
# VACUUM cannot run inside a transaction block, so temporarily switch the
# connection to autocommit (isolation level 0), then restore the old level
original_isolation_level = connection.isolation_level
connection.set_isolation_level(0)
cursor.execute("VACUUM ANALYZE")
connection.commit()
connection.set_isolation_level(original_isolation_level)
```
#### verify SRIDs, row counts, and data
```
# look up the SRIDs
cursor.execute("""SELECT
Find_SRID('public', 'tracts', 'geom') as tracts_srid,
Find_SRID('public', 'listings', 'geom') as listings_srid""")
cursor.fetchall()
cursor.execute("SELECT count(*) AS exact_count FROM tracts")
rows = cursor.fetchall()
rows[0][0]
cursor.execute("SELECT geoid, aland, ST_AsText(geom) FROM tracts LIMIT 3")
rows = cursor.fetchall()
gpd.GeoDataFrame(rows, columns=['GEOID', 'ALAND', 'geometry'])
cursor.execute("SELECT count(*) AS exact_count FROM listings")
rows = cursor.fetchall()
rows[0][0]
cursor.execute("""SELECT date, region, bedrooms, rent, sqft, ST_AsText(geom)
FROM listings LIMIT 3""")
rows = cursor.fetchall()
gpd.GeoDataFrame(rows, columns=['date', 'region', 'bedrooms', 'rent', 'sqft', 'geometry'])
```
## all done
```
cursor.close()
connection.close()
```
| github_jupyter |
<a href="https://colab.research.google.com/github/jonfisik/Projects/blob/master/VetoresPython.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
import numpy as np
import matplotlib.pyplot as plt
u = [1,2]
v = [2,1]
# somou listas
u + v
u = np.array(u)
v = np.array(v)
# soma de vetores
u + v
#----------------------------------------------
w1 = np.array([2,3])
w2 = np.array([4,-1])
# Produto escalar ou interno função --> .dot()
w1.dot(w2)
w2.dot(w1)
# Módulo função --> .norm(vetor)
modulo_w1 = np.linalg.norm(w1) # foi atribuido o valor da norma a uma variável
modulo_w2 = np.linalg.norm(w2) # Idem
modulo_w1
modulo_w2
np.linalg.norm(w1)
#----------------------------------------------------- 15/10/2020
v = np.array([1,2,3,4])
v
type(v)
# Descrição de uma função
?np.array
lista = [3,5,66,20]
type(lista)
# Transformar uma lista em um vetor
v1 = np.array(lista)
v2 = np.array([1,2,3,4])
v3 = np.array((4,3,2,1))
v1
v2
v3
# Representação de vetores
e1 = np.array([1,0,0])
e2 = np.array([0,1,0])
e3 = np.array([0,0,1])
#------------------------------------------
def plotVectors(vecs, cols, alpha=1):
    """Plot 2-D vectors as arrows anchored at the origin.

    Parameters
    ----------
    vecs : sequence of 2-element sequences
        The (x, y) components of each vector to draw.
    cols : sequence of color specs
        One matplotlib color per vector in ``vecs``.
    alpha : float, optional
        Opacity of the arrows, in [0, 1]. The previous default of 2 lies
        outside matplotlib's valid alpha range and raises ValueError in
        current versions; 1 (fully opaque) is the closest valid value.
    """
    plt.figure()
    # grey axis lines through the origin, drawn behind the arrows (zorder=0)
    plt.axvline(x=0, color='#A9A9A9', zorder=0)
    plt.axhline(y=0, color='#A9A9A9', zorder=0)
    for i in range(len(vecs)):
        # x = [tail_x, tail_y, head_dx, head_dy]
        x = np.concatenate([[0, 0], vecs[i]])
        plt.quiver([x[0]],
                   [x[1]],
                   [x[2]],
                   [x[3]],
                   angles='xy', scale_units='xy', scale=1, color=cols[i],
                   alpha=alpha)
laranja = '#FF9A13'
azul = '#1190FF'
resultante = '#11FFFF'
plotVectors([[2,3], [4,-1], [6,2]], [laranja, azul, resultante])
plt.xlim(-1,7)
plt.ylim(-2,7)
#Cores
cor1 = '#FF0000'
cor2 = '#FF0000'
corRes = '#11FFFF'
# Vetores
a = np.array([2,3])
b = np.array([3,1])
# Soma
r = a + b
# Função
plotVectors([a, b, r], [cor1, cor2, corRes])
# Plano cartesiano
plt.xlim(-1,6)
plt.ylim(-5,10)
plotVectors([e1,e2],[cor1,cor2])
plt.xlim(-1,1.5)
plt.ylim(-1,1.5)
# Ângulo entre vetores
def ang_2vetores(v, u):
    """Return the angle between vectors ``v`` and ``u`` in degrees.

    Parameters
    ----------
    v, u : numpy.ndarray
        Non-zero vectors of equal length.

    Returns
    -------
    float
        Angle in degrees, in [0, 180].
    """
    cos_theta = v.dot(u) / (np.linalg.norm(v) * np.linalg.norm(u))
    # floating-point rounding can push the cosine slightly outside [-1, 1]
    # (e.g. for parallel vectors), which would make arccos return NaN
    cos_theta = np.clip(cos_theta, -1.0, 1.0)
    return np.degrees(np.arccos(cos_theta))
u = np.array([0,1])
v = np.array([1,0])
red = 'red'
blue = 'blue'
plotVectors([u,v], [red, blue])
plt.xlim(-1,1.5)
plt.ylim(-1,1.5)
ang_2vetores(u,v)
A = np.array([1,1])
B = np.array([1,0])
red = 'red'
blue = 'blue'
plotVectors([A, B], [red, blue])
plt.xlim(-1,1.5)
plt.ylim(-1,1.5)
ang_2vetores(A,B)
# Indexação de vetores
# vetor
x = [1,2,3,4,5]
vx = np.array(x)
# Tamanho do vetor
len(vx)
# posição inicial em python começa em "0"
posicao_2 = vx[2]
posicao_2
posicao_0 = vx[0]
posicao_0
```
| github_jupyter |
[](http://rpi.analyticsdojo.com)
<center><h1>Introduction to MatplotLab - Python</h1></center>
<center><h3><a href = 'http://rpi.analyticsdojo.com'>rpi.analyticsdojo.com</a></h3></center>
This has been adopted from the [IPython notebook](http://ipython.org/notebook.html) available at [http://github.com/jrjohansson/scientific-python-lectures](http://github.com/jrjohansson/scientific-python-lectures).
### Introduction
Matplotlib is a 2D and 3D graphics library for generating scientific figures.
- Easy to get started
- Support for $\LaTeX$ formatted labels and texts
- Great control of every element in a figure, including figure size and DPI.
- High-quality output in many formats, including PNG, PDF, SVG, EPS, and PGF.
- GUI for interactively exploring figures *and* support for headless generation of figure files (useful for batch jobs).
- More information at the Matplotlib web page: http://matplotlib.org/
### Using `matplotlib` with Jupyter
- `%matplotlib inline` ensures that visuals will be provided in a notebook rather than opening a new image.
- Import matplotlab. It is a common package and has already been installed.
- Import the matplotlib.pyplot module under the name plt.
```
# This is not strictly for a Python rather.
%matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
```
### `matplotlib` Library
- Objects that one can apply functions and actions on, and no object or program states should be global.
- Created figure instance in the `fig` variable.
- And from it we create a new axis instance `axes` using the `add_axes` method in the `Figure` class instance `fig`.
```
#This generates the x and y variables we will be plotting.
import numpy as np
x = np.linspace(0, 5, 10)
y = x ** 2
#This initiates the Figure.
fig = plt.figure()
#This creates the size of the figure [left, bottom, width, height (range 0 to 1)]
axes = fig.add_axes([0, 0, .75, .75])
#This creates a x, y, and the color of the graph
axes.plot(x, y, 'blue')
axes.set_xlabel('X Axis')
axes.set_ylabel('Y Azis')
axes.set_title('Title');
fig = plt.figure()
#This creates the size of the figure [left, bottom, width, height (range 0 to 1)]
axes1 = fig.add_axes([0, 0, 1, 1]) # main axes
axes1.plot(x, y, 'red')
axes1.set_xlabel('x')
axes1.set_ylabel('y')
axes1.set_title('title')
#This creates the size of the figure [left, bottom, width, height (range 0 to 1)]
axes2 = fig.add_axes([0.1, 0.3, 0.3, 0.3]) # inset axes
axes2.plot(y, x, 'green')
axes2.set_xlabel('y')
axes2.set_ylabel('x')
axes2.set_title('insert title');
```
### Subplots
- If we don't care about being explicit about where our plot axes are placed in the figure canvas, then we can use one of the many axis layout managers in matplotlib.
- Subplots can be used to list multiple related plots.
```
fig, axes = plt.subplots(nrows=1, ncols=2)
for ax in axes:
ax.plot(x, y, 'r')
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_title('title')
```
### Figure size, Aspect Ratio and DPI
- Matplotlib allows the aspect ratio, DPI and figure size to be specified when the `Figure` object is created using the `figsize` and `dpi` keyword arguments.
- `figsize` is a tuple of the width and height of the figure in inches, and `dpi` is the dots-per-inch (pixel per inch).
- To create an 800x400 pixel, 100 dots-per-inch figure, we can do:
```
fig = plt.figure(figsize=(8,4), dpi=100)
```
The same arguments can also be passed to layout managers, such as the `subplots` function:
```
fig, axes = plt.subplots(figsize=(12,3))
axes.plot(x, y, 'r')
axes.set_xlabel('x')
axes.set_ylabel('y')
axes.set_title('title');
```
### Saving Figures
- To save a figure to a file we can use the `savefig` method in the `Figure` class.
- Here we can also optionally specify the DPI and choose between different output formats.
```
fig.savefig("filename.png")
fig.savefig("filename.png", dpi=200)
```
### Legends
- Legends for curves in a figure can be added with the `legend`.
- The legend function takes an optional keyword argument loc that can be used to specify where in the figure the legend is to be drawn
- Use the label="label text" keyword argument when plots or other objects are added to the figure.
- See http://matplotlib.org/users/legend_guide.html#legend-location for more details.
- The following adjust the location:
```
ax.legend(loc=0) # let matplotlib decide the optimal location
ax.legend(loc=1) # upper right corner
ax.legend(loc=2) # upper left corner
ax.legend(loc=3) # lower left corner
ax.legend(loc=4) # lower right corner```
```
fig, ax = plt.subplots()
ax.plot(x, x**2, label="y = x**2")
ax.plot(x, x**3, label="y = x**3")
ax.legend(loc=2); # upper left corner
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_title('title');
```
### Formatting Text: LaTeX, Fontsize, Font Family
- Matplotlib has great support for LaTeX. All we need to do is to use dollar signs encapsulate LaTeX in any text (legend, title, label, etc.). For example, "$y=x^3$".
- We can also change the global font size and font family, which applies to all text elements in a figure (tick labels, axis labels and titles, legends, etc.).
```
# Update the matplotlib configuration parameters:
matplotlib.rcParams.update({'font.size': 18, 'font.family': 'STIXGeneral', 'mathtext.fontset': 'stix'})
#matplotlib.rcParams.update({'font.size': 18, 'font.family': 'serif'})
fig, ax = plt.subplots()
ax.plot(x, x**2, label=r"$y = \alpha^2$")
ax.plot(x, x**3, label=r"$y = \alpha^3$")
ax.legend(loc=2) # upper left corner
ax.set_xlabel(r'$\alpha$')
ax.set_ylabel(r'$y$')
ax.set_title('title');
```
### Formatting Colors, Linewidths, and Linetypes
- We can use the MATLAB-like syntax where 'b' means blue...
-'b.-' means a blue line with dots.
-'g--' means a green line with dashed lines.
```
# MATLAB style line color and style
ax.plot(x, x**2, 'b.-') # blue line with dots
ax.plot(x, x**3, 'g--') # green dashed line
```
We can also define colors by their names or RGB hex codes and optionally provide an alpha value using the `color` and `alpha` keyword arguments:
```
fig, ax = plt.subplots()
ax.plot(x, x, 'r.-', label=r"$y = \alpha$", alpha=0.5) #
ax.plot(x, x**2, label=r"$y = \alpha^2$", color="#1155dd", alpha=0.5)
ax.plot(x, x**3, 'y--', label=r"$y = \alpha^3$", ) # green dashed line
ax.legend(loc=2) # upper left corner
ax.set_xlabel(r'$\alpha$')
ax.set_ylabel(r'$y$')
ax.set_title('title');
```
### Linewidth
- To change the line width, we can use the `linewidth` or `lw` keyword argument. The line style can be selected using the `linestyle` or `ls` keyword arguments:
```
fig, ax = plt.subplots(figsize=(12,6))
ax.plot(x, x+1, color="blue", linewidth=0.25)
ax.plot(x, x+2, color="blue", linewidth=0.50)
ax.plot(x, x+3, color="blue", linewidth=1.00)
ax.plot(x, x+4, color="blue", linewidth=2.00)
# possible linestype options ‘-‘, ‘--’, ‘-.’, ‘:’, ‘steps’
ax.plot(x, x+5, color="red", lw=2, linestyle='-')
ax.plot(x, x+6, color="red", lw=2, ls='-.')
ax.plot(x, x+7, color="red", lw=2, ls=':')
# custom dash
line, = ax.plot(x, x+8, color="black", lw=1.50)
line.set_dashes([5, 10, 15, 10]) # format: line length, space length, ...
# possible marker symbols: marker = '+', 'o', '*', 's', ',', '.', '1', '2', '3', '4', ...
ax.plot(x, x+ 9, color="green", lw=2, ls='--', marker='+')
ax.plot(x, x+10, color="green", lw=2, ls='--', marker='o')
ax.plot(x, x+11, color="green", lw=2, ls='--', marker='s')
ax.plot(x, x+12, color="green", lw=2, ls='--', marker='1')
# marker size and color
ax.plot(x, x+13, color="purple", lw=1, ls='-', marker='o', markersize=2)
ax.plot(x, x+14, color="purple", lw=1, ls='-', marker='o', markersize=4)
ax.plot(x, x+15, color="purple", lw=1, ls='-', marker='o', markersize=8, markerfacecolor="red")
ax.plot(x, x+16, color="purple", lw=1, ls='-', marker='s', markersize=8,
markerfacecolor="yellow", markeredgewidth=2, markeredgecolor="blue");
```
### Control Over Axis Appearance
The appearance of the axes is an important aspect of a figure that we often need to modify to make publication-quality graphics. We need to be able to control where the ticks and labels are placed, modify the font size and possibly the labels used on the axes. In this section we will look at controlling those properties in a matplotlib figure.
#### Plot range
### Control Over Axis Appearance
- Configure ranges of the axes with `set_ylim` and `set_xlim` methods or `axis('tight')` for automatrically getting "tightly fitted" axes ranges.
```
fig, axes = plt.subplots(1, 3, figsize=(12, 4))
axes[0].plot(x, x**2, x, x**3)
axes[0].set_title("default axes ranges")
axes[1].plot(x, x**2, x, x**3)
axes[1].axis('tight')
axes[1].set_title("tight axes")
axes[2].plot(x, x**2, x, x**3)
axes[2].set_ylim([0, 60])
axes[2].set_xlim([2, 5])
axes[2].set_title("custom axes range");
```
#### Logarithmic scale
- It is also possible to set a logarithmic scale for one or both axes.
- Each of the axes' scales are set seperately using `set_xscale` and `set_yscale` methods which accept one parameter (with the value "log" in this case):
```
fig, axes = plt.subplots(1, 2, figsize=(10,4))
axes[0].plot(x, x**2, x, np.exp(x))
axes[0].set_title("Normal scale")
axes[1].plot(x, x**2, x, np.exp(x))
axes[1].set_yscale("log")
axes[1].set_title("Logarithmic scale (y)");
```
### Other 2D plot styles
- `scatter`
- `step`
- `bar`
- For more see http://matplotlib.org/gallery.html.
```
n = np.array([0,1,2,3,4,5])
n = np.array([0,1,2,3,4,5])
fig, axes = plt.subplots(1, 3, figsize=(12,3))
axes[0].scatter(n, n + 0.25*np.random.randn(len(n)))
axes[0].set_title("scatter")
axes[1].step(n, n**2, lw=2)
axes[1].set_title("step")
axes[2].bar(n, n**2, align="center", width=0.5, alpha=0.5)
axes[2].set_title("bar")
# A histogram
n = np.random.randn(100000)
fig, axes = plt.subplots(1, 2, figsize=(12,4))
axes[0].hist(n)
axes[0].set_title("Default histogram")
axes[0].set_xlim((min(n), max(n)))
axes[1].hist(n, cumulative=True, bins=50)
axes[1].set_title("Cumulative detailed histogram")
axes[1].set_xlim((min(n), max(n)));
import pandas as pd
df = pd.read_csv('../input/iris.csv')
df.head()
# This plots using the .plot extension from Pandas dataframes.
df.plot(kind="scatter", x="sepal_length", y="sepal_width")
```
Copyright [AnalyticsDojo](http://rpi.analyticsdojo.com) 2016.
This work is licensed under the [Creative Commons Attribution 4.0 International](https://creativecommons.org/licenses/by/4.0/) license agreement
This has been adopted from the [IPython notebook](http://ipython.org/notebook.html) available at [http://github.com/jrjohansson/scientific-python-lectures](http://github.com/jrjohansson/scientific-python-lectures) by J.R. Johansson.
| github_jupyter |
# Automated ML
```
from azureml.core import Workspace, Experiment
from azureml.data.dataset_factory import TabularDatasetFactory
from train import clean_data
import pandas as pd
from sklearn.model_selection import train_test_split
import os
from azureml.core.compute import ComputeTarget, AmlCompute
from azureml.core.compute_target import ComputeTargetException
from azureml.train.automl import AutoMLConfig
from azureml.widgets import RunDetails
```
We start by setting up our experiment in our workspace.
```
ws = Workspace.from_config()
# choose a name for experiment
experiment_name = 'pcos_automl'
experiment=Experiment(ws, experiment_name)
print('Workspace name: ' + ws.name,
'Azure region: ' + ws.location,
'Subscription id: ' + ws.subscription_id,
'Resource group: ' + ws.resource_group, sep = '\n')
run = experiment.start_logging()
```
## Dataset
The dataset for PCOS is a real-time data set that taken from a survey conducted among 119 women between the
ages of 18 and 22. The dataset is primarily based on their lifestyle and food intake habits. The symptoms i.e.
attributes are classified based on classification algorithms to predict whether the patient may have PCOS or not. The
database consists of 119 samples with 18 attributes belonging to two different classes (maybe or maybe not).
There are 14 binary attributes and 4 categorical attributes. <br/>
PCOS-Survey/PCOSData. (2017). GitHub. Retrieved 30
November 2017, from https://github.com/PCOSSurvey/PCOSData
```
ds = TabularDatasetFactory.from_delimited_files(path="https://raw.githubusercontent.com/priyanshisharma/AI-Champ/master/pcos_data.csv")
```
Here, we clean and observe our data.
```
x, y = clean_data(ds)
df = pd.concat([x, pd.DataFrame(y)], axis = 1)
df.head()
```
Now we split our data into training and validation sets to feed to AutoML.
```
len(df)
outname2='training_dataset3.csv'
outdir2='training3/'
if not os.path.exists(outdir2):
    os.mkdir(outdir2)
# NOTE(review): test_size=0.8 puts 80% of the rows in df_test, and df_test is
# then written to BOTH the training and the validation CSVs below; df_train is
# never saved. Validating on the same rows used for training inflates the
# reported metrics -- confirm whether df_train was meant to be one of the files.
df_train, df_test = train_test_split(df, test_size=0.8)
fullpath2=os.path.join(outdir2,outname2)
df_test.to_csv(fullpath2)
outname='validation_dataset3.csv'
outdir='validation3/'
if not os.path.exists(outdir):
    os.mkdir(outdir)
fullpath=os.path.join(outdir,outname)
df_test.to_csv(fullpath)
len(df_train)
```
Now we store our dataset in our default datastore in order to access it.
```
datastore = ws.get_default_datastore()
datastore.upload(src_dir = "training3/", target_path = "data/")
datastore.upload(src_dir = "validation3/", target_path = "data/")
training_data = TabularDatasetFactory.from_delimited_files(path = [(datastore, ("data/training_dataset3.csv"))])
validation_data = TabularDatasetFactory.from_delimited_files(path = [(datastore, ("data/validation_dataset3.csv"))])
len(training_data.to_pandas_dataframe())
```
## AutoML Configuration
We start by setting up our compute cluster, where we will run our automl run.
```
cpu_cluster_name = "cpucluster-aml"
try:
cpu_cluster = ComputeTarget(workspace=ws, name=cpu_cluster_name)
print('Found existing cluster, use it.')
except ComputeTargetException:
compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_D2_V2',
max_nodes=4)
cpu_cluster = ComputeTarget.create(ws, cpu_cluster_name, compute_config)
cpu_cluster.wait_for_completion(show_output=True)
```
We've used the following configuration for our run:
|Setting |Reasons|
|-|-|
|**experiment_timeout_minutes**| Maximum amount of time in minutes that all iterations combined can take before the experiment terminates. I've taken this to be 30 mins due to the presence of 730 rows. |
|**max_concurrent_iterations**|Iterations that run simultaneously; should be at most the number of nodes in the compute cluster (4)|
|**n_cross_validations**|Using 3 cross-validations to avoid overfitting|
|**primary_metric**|Since the data isn't quite balanced, Weighted Average Precision Score |
|**task**|Classification |
|**compute_target**|This is the compute cluster we will be using |
|**training_data**|This is the training dataset stored in the default datastore |
|**label_column_name**|This is the target variable|
```
training_data
# TODO: Put your automl settings here
# NOTE(review): this dict is defined but never passed to AutoMLConfig below --
# its values are duplicated inline instead, so "max_concurrent_iterations" is
# never actually applied. Confirm whether **automl_settings should be supplied.
automl_settings = {
    "experiment_timeout_minutes" :30,
    "max_concurrent_iterations": 4,
    "n_cross_validations": 3,
    "primary_metric": 'average_precision_score_weighted',
}
# TODO: Put your automl config here
automl_config = AutoMLConfig(
    experiment_timeout_minutes=30,
    n_cross_validations=3,
    task="classification",
    primary_metric="average_precision_score_weighted",
    compute_target=cpu_cluster,
    training_data=training_data,
    label_column_name="PCOS",
    max_cores_per_iteration=-1,
    enable_onnx_compatible_models=True  # needed to retrieve the ONNX model later
)
```
Submitting the run
```
remote_run = experiment.submit(config = automl_config, show_output = True)
```
## Run Details
The `Rundetails` widget, as the name suggests gives us greater insight about how the Run is proceeding, enabling us to monitor and understand the situation, thereby dealing with it accordingly.
```
RunDetails(remote_run).show()
```
## Best Model
The best performing model is the `VotingEnsemble` with a score of 0.9006. It maybe observed to be derived from the following:
|**Field**|Value|
|-|-|
|**Ensembled Iterations**|0, 14, 15, 6, 26|
|**Ensembled Algorithms**|'LightGBM', 'RandomForest', 'XGBoostClassifier', 'ExtremeRandomTrees', 'XGBoostClassifier'|
|**Ensemble Weights**|0.3333333333333333, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666|
|**Best Individual Pipeline Score**|"0.9005922928114609"|
```
best_run, fitted_model = remote_run.get_output()
print(best_run)
print(fitted_model)
model_ml = best_run.register_model(model_name='PCOS_auto_ml', model_path='./')
```
## Retrieve and Save ONNX Model
```
from azureml.automl.runtime.onnx_convert import OnnxConverter
b_run , onnx_mdl = remote_run.get_output(return_onnx_model=True)
onnx_fl_path = "./best_model.onnx"
OnnxConverter.save_onnx_model(onnx_mdl, onnx_fl_path)
```
### Predict with the ONNX model, using onnxruntime package
```
import sys
import json
from azureml.automl.core.onnx_convert import OnnxConvertConstants
from azureml.train.automl import constants
if sys.version_info < OnnxConvertConstants.OnnxIncompatiblePythonVersion:
python_version_compatible = True
else:
python_version_compatible = False
import onnxruntime
from azureml.automl.runtime.onnx_convert import OnnxInferenceHelper
def get_onnx_res(run):
    """Download the ONNX resource JSON produced by the AutoML run and parse it."""
    resource_file = 'onnx_resource.json'
    # pull the resource file out of the run's stored outputs
    run.download_file(name=constants.MODEL_RESOURCE_PATH_ONNX,
                      output_file_path=resource_file)
    with open(resource_file) as fh:
        return json.load(fh)
if python_version_compatible:
mdl_bytes = onnx_mdl.SerializeToString()
onnx_res = get_onnx_res(b_run)
df_test['Column1_1'] = 0.0
onnxrt_helper = OnnxInferenceHelper(mdl_bytes, onnx_res)
pred_onnx, pred_prob_onnx = onnxrt_helper.predict(df_test)
print(pred_onnx)
print(pred_prob_onnx)
else:
print('Please use Python version 3.6 or 3.7 to run the inference helper.')
```
## Model Deployment
Being the better performing model, I shall hereby deploy the `VotingEnsemble` model.
```
from azureml.core.model import Model
from azureml.core import Environment
from azureml.core.model import InferenceConfig
from azureml.core.webservice import AciWebservice
os.makedirs('./amlmodel', exist_ok=True)
best_run.download_file('/outputs/model.pkl',os.path.join('./amlmodel','automl_best_model_cc.pkl'))
for f in best_run.get_file_names():
if f.startswith('outputs'):
output_file_path = os.path.join('./amlmodel', f.split('/')[-1])
print(f'Downloading from {f} to {output_file_path} ...')
best_run.download_file(name=f, output_file_path=output_file_path)
```
TODO: In the cell below, send a request to the web service you deployed to test it.
```
model=best_run.register_model(
model_name = 'automl-bestmodel-cc',
model_path = './outputs/model.pkl',
model_framework=Model.Framework.SCIKITLEARN,
description='Cervical Cancer Prediction'
)
# Download the conda environment file and define the environement
best_run.download_file('outputs/conda_env_v_1_0_0.yml', 'conda_env.yml')
myenv = Environment.from_conda_specification(name = 'myenv',
file_path = 'conda_env.yml')
# download the scoring file produced by AutoML
best_run.download_file('outputs/scoring_file_v_1_0_0.py', 'score_auto_cc.py')
# set inference config
inference_config = InferenceConfig(entry_script= 'score_auto_cc.py',
environment=myenv)
# set Aci Webservice config
aci_config = AciWebservice.deploy_configuration(cpu_cores=1, memory_gb=1, auth_enabled=True)
service = Model.deploy(workspace=ws,
name='automl-bestmodel-cc',
models=[model],
inference_config=inference_config,
deployment_config=aci_config,
overwrite=True)
service
# wait for deployment to finish and display the scoring uri and swagger uri
service.wait_for_deployment(show_output=True)
print('Service state:')
print(service.state)
print('Scoring URI:')
print(service.scoring_uri)
print('Swagger URI:')
print(service.swagger_uri)
import json
# select 3 samples from the dataframe
x_df=df.sample(3)
y_df = x_df.pop('PCOS')
x_df['Column1'] = 0.0
# convert the records to a json data file
recored=x_df.to_dict(orient='records')
scoring_json = json.dumps({'data': recored})
print(scoring_json)
```
Consuming the endpoint using `endpoint.py`
```
!python3 endpoint.py
output = service.run(scoring_json)
output
y_df
```
Enabling logging using `logs.py`.
```
!python3 logs.py
```
| github_jupyter |
## Training a recommendation model for Google Analytics data using BigQuery ML
This notebook accompanies the article
[Training a recommendation model for Google Analytics data using BigQuery ML](https://towardsdatascience.com/training-a-recommendation-model-for-google-analytics-data-using-bigquery-ml-2327f9a2e8e9)
## Use time spent on page as ranking
```
%%bigquery df
WITH CTE_visitor_content_time AS (
SELECT
fullVisitorID AS visitorId,
visitNumber,
(SELECT MAX(IF(index=10, value, NULL)) FROM UNNEST(hits.customDimensions)) AS latestContentId,
hits.time AS hit_time
FROM
`cloud-training-demos.GA360_test.ga_sessions_sample`,
UNNEST(hits) AS hits
WHERE
# only include hits on pages
hits.type = "PAGE"
GROUP BY
fullVisitorId,
visitNumber,
latestContentId,
hits.time ),
CTE_visitor_page_content AS (
SELECT *,
# Schema: https://support.google.com/analytics/answer/3437719?hl=en
# For a completely unique visit-session ID, we combine combination of fullVisitorId and visitNumber:
(LEAD(hit_time, 1) OVER (PARTITION BY CONCAT(visitorId, visitNumber, latestContentId) ORDER BY hit_time ASC) - hit_time) AS session_duration
FROM CTE_visitor_content_time
)
-- Aggregate web stats
SELECT
visitorId,
latestContentId as contentId,
SUM(session_duration) AS session_duration
FROM
CTE_visitor_page_content
WHERE
latestContentId IS NOT NULL
GROUP BY
visitorId,
latestContentId
HAVING
session_duration > 0
df.head()
df.describe()
df[["session_duration"]].plot(kind="hist", logy=True, bins=100, figsize=[8,5]);
```
## Scaling and clipping
Scale the duration by median and clip it to lie between [0,1]
```
%%bigquery
CREATE TEMPORARY FUNCTION CLIP_LESS(x FLOAT64, a FLOAT64) AS (
IF (x < a, a, x)
);
CREATE TEMPORARY FUNCTION CLIP_GT(x FLOAT64, b FLOAT64) AS (
IF (x > b, b, x)
);
CREATE TEMPORARY FUNCTION CLIP(x FLOAT64, a FLOAT64, b FLOAT64) AS (
CLIP_GT(CLIP_LESS(x, a), b)
);
CREATE OR REPLACE TABLE advdata.ga360_recommendations_data
AS
WITH CTE_visitor_page_content AS (
SELECT
# Schema: https://support.google.com/analytics/answer/3437719?hl=en
# For a completely unique visit-session ID, we combine combination of fullVisitorId and visitNumber:
CONCAT(fullVisitorID,'-',CAST(visitNumber AS STRING)) AS visitorId,
(SELECT MAX(IF(index=10, value, NULL)) FROM UNNEST(hits.customDimensions)) AS latestContentId,
(LEAD(hits.time, 1) OVER (PARTITION BY fullVisitorId ORDER BY hits.time ASC) - hits.time) AS session_duration
FROM
`cloud-training-demos.GA360_test.ga_sessions_sample`,
UNNEST(hits) AS hits
WHERE
# only include hits on pages
hits.type = "PAGE"
GROUP BY
fullVisitorId,
visitNumber,
latestContentId,
hits.time ),
aggregate_web_stats AS (
-- Aggregate web stats
SELECT
visitorId,
latestContentId as contentId,
SUM(session_duration) AS session_duration
FROM
CTE_visitor_page_content
WHERE
latestContentId IS NOT NULL
GROUP BY
visitorId,
latestContentId
HAVING
session_duration > 0
),
normalized_session_duration AS (
SELECT APPROX_QUANTILES(session_duration,100)[OFFSET(50)] AS median_duration
FROM aggregate_web_stats
)
SELECT
* EXCEPT(session_duration, median_duration),
CLIP(0.3 * session_duration / median_duration, 0, 1.0) AS normalized_session_duration
FROM
aggregate_web_stats, normalized_session_duration
%%bigquery df_scaled
SELECT * FROM advdata.ga360_recommendations_data
df_scaled[["normalized_session_duration"]].plot(kind="hist", logy=True, bins=100, figsize=[8,5]);
df_scaled.head()
%%bash
cd ../flex_slots
./run_query_on_flex_slots.sh
%%bigquery
SELECT
visitorId,
ARRAY_AGG(STRUCT(contentId, predicted_normalized_session_duration)
ORDER BY predicted_normalized_session_duration DESC
LIMIT 3)
FROM ML.RECOMMEND(MODEL advdata.ga360_recommendations_model)
WHERE predicted_normalized_session_duration < 1
GROUP BY visitorId
LIMIT 5
```
Copyright 2020 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License
| github_jupyter |
<img src="http://hilpisch.com/tpq_logo.png" alt="The Python Quants" width="35%" align="right" border="0"><br>
# Python for Finance
**Analyze Big Financial Data**
O'Reilly (2014)
Yves Hilpisch
<img style="border:0px solid grey;" src="http://hilpisch.com/python_for_finance.png" alt="Python for Finance" width="30%" align="left" border="0">
**Buy the book ** |
<a href='http://shop.oreilly.com/product/0636920032441.do' target='_blank'>O'Reilly</a> |
<a href='http://www.amazon.com/Yves-Hilpisch/e/B00JCYHHJM' target='_blank'>Amazon</a>
**All book codes & IPYNBs** |
<a href="http://oreilly.quant-platform.com">http://oreilly.quant-platform.com</a>
**The Python Quants GmbH** | <a href='http://tpq.io' target='_blank'>http://tpq.io</a>
**Contact us** | <a href='mailto:pff@tpq.io'>pff@tpq.io</a>
# Volatility Options
```
from pylab import plt
plt.style.use('ggplot')
import matplotlib as mpl
mpl.rcParams['font.family'] = 'serif'
import warnings; warnings.simplefilter('ignore')
```
## The VSTOXX Data
```
import sys
sys.path.append('../python3/')
sys.path.append('../python3/dxa')
import numpy as np
import pandas as pd
```
### VSTOXX Index Data
```
# Pull the VSTOXX daily history from STOXX and keep Q1 2014 only.
url = 'http://www.stoxx.com/download/historical_values/h_vstoxx.txt'
vstoxx_index = pd.read_csv(url, index_col=0, header=2,
parse_dates=True, dayfirst=True,
sep=',')
vstoxx_index.info()
# restrict the data set to the first quarter of 2014
vstoxx_index = vstoxx_index[('2013/12/31' < vstoxx_index.index)
& (vstoxx_index.index < '2014/4/1')]
np.round(vstoxx_index.tail(), 2)
```
### VSTOXX Futures Data
```
# Load the March 2014 VSTOXX futures quotes and drop columns not needed
# for the calibration.
vstoxx_futures = pd.read_excel('./source/vstoxx_march_2014.xlsx',
'vstoxx_futures')
vstoxx_futures.info()
del vstoxx_futures['A_SETTLEMENT_PRICE_SCALED']
del vstoxx_futures['A_CALL_PUT_FLAG']
del vstoxx_futures['A_EXERCISE_PRICE']
del vstoxx_futures['A_PRODUCT_ID']
# shorter, uniform column names
columns = ['DATE', 'EXP_YEAR', 'EXP_MONTH', 'PRICE']
vstoxx_futures.columns = columns
import datetime as dt
import calendar
def third_friday(date):
    """Return the third Friday of *date*'s month as a ``datetime``.

    Exchange-traded VSTOXX futures and options expire on the
    third Friday of their expiry month.
    """
    # Weekday of the 1st tells us where the first Friday falls
    # (calendar.FRIDAY == 4); the third Friday is 14 days later.
    first_weekday = calendar.weekday(date.year, date.month, 1)
    offset_to_friday = (calendar.FRIDAY - first_weekday) % 7
    return dt.datetime(date.year, date.month, 1 + offset_to_friday + 14)
# distinct expiry months present in the futures data
set(vstoxx_futures['EXP_MONTH'])
# map each expiry month (all in 2014) to its third-Friday maturity date
third_fridays = {}
for month in set(vstoxx_futures['EXP_MONTH']):
third_fridays[month] = third_friday(dt.datetime(2014, month, 1))
third_fridays
tf = lambda x: third_fridays[x]
vstoxx_futures['MATURITY'] = vstoxx_futures['EXP_MONTH'].apply(tf)
vstoxx_futures.tail()
```
### VSTOXX Options Data
```
# Load the VSTOXX options quotes, normalize columns and strikes, and
# optionally persist all three DataFrames to a compressed HDF5 store.
vstoxx_options = pd.read_excel('./source/vstoxx_march_2014.xlsx',
'vstoxx_options')
vstoxx_options.info()
del vstoxx_options['A_SETTLEMENT_PRICE_SCALED']
del vstoxx_options['A_PRODUCT_ID']
columns = ['DATE', 'EXP_YEAR', 'EXP_MONTH', 'TYPE', 'STRIKE', 'PRICE']
vstoxx_options.columns = columns
vstoxx_options['MATURITY'] = vstoxx_options['EXP_MONTH'].apply(tf)
vstoxx_options.head()
# quoted strikes are scaled by 100 in the raw data
vstoxx_options['STRIKE'] = vstoxx_options['STRIKE'] / 100.
# set save = True to cache the cleaned data locally
save = False
if save is True:
import warnings
warnings.simplefilter('ignore')
h5 = pd.HDFStore('./source/vstoxx_march_2014.h5',
complevel=9, complib='blosc')
h5['vstoxx_index'] = vstoxx_index
h5['vstoxx_futures'] = vstoxx_futures
h5['vstoxx_options'] = vstoxx_options
h5.close()
```
## Model Calibration
### Relevant Market Data
```
# Market data snapshot used for the calibration.
pricing_date = dt.datetime(2014, 3, 31)
# last trading day in March 2014
maturity = third_fridays[10]
# October maturity
initial_value = vstoxx_index['V2TX'][pricing_date]
# VSTOXX on pricing_date
forward = vstoxx_futures[(vstoxx_futures.DATE == pricing_date)
& (vstoxx_futures.MATURITY == maturity)]['PRICE'].values[0]
# keep only call quotes with strikes within +/-20% of the forward
tol = 0.20
option_selection = \
vstoxx_options[(vstoxx_options.DATE == pricing_date)
& (vstoxx_options.MATURITY == maturity)
& (vstoxx_options.TYPE == 'C')
& (vstoxx_options.STRIKE > (1 - tol) * forward)
& (vstoxx_options.STRIKE < (1 + tol) * forward)]
option_selection
```
### Option Modeling
```
# Build the DX market environment and a square-root diffusion model for
# the VSTOXX, then one Monte Carlo European call valuation object per quote.
from dxa import *
me_vstoxx = market_environment('me_vstoxx', pricing_date)
me_vstoxx.add_constant('initial_value', initial_value)
me_vstoxx.add_constant('final_date', maturity)
me_vstoxx.add_constant('currency', 'EUR')
me_vstoxx.add_constant('frequency', 'B')
me_vstoxx.add_constant('paths', 10000)
csr = constant_short_rate('csr', 0.01)
# somewhat arbitrarily chosen here
me_vstoxx.add_curve('discount_curve', csr)
# parameters to be calibrated later
me_vstoxx.add_constant('kappa', 1.0)
me_vstoxx.add_constant('theta', 1.2 * initial_value)
# annualized sample volatility of the index as the starting guess
vol_est = vstoxx_index['V2TX'].std() \
* np.sqrt(len(vstoxx_index['V2TX']) / 252.)
me_vstoxx.add_constant('volatility', vol_est)
vol_est
vstoxx_model = square_root_diffusion('vstoxx_model', me_vstoxx)
me_vstoxx.add_constant('strike', forward)
me_vstoxx.add_constant('maturity', maturity)
payoff_func = 'np.maximum(maturity_value - strike, 0)'
vstoxx_eur_call = valuation_mcs_european('vstoxx_eur_call',
vstoxx_model, me_vstoxx, payoff_func)
vstoxx_eur_call.present_value()
# one valuation object per selected market quote, keyed by its index label
option_models = {}
for option in option_selection.index:
strike = option_selection['STRIKE'].ix[option]
me_vstoxx.add_constant('strike', strike)
option_models[option] = \
valuation_mcs_european(
'eur_call_%d' % strike,
vstoxx_model,
me_vstoxx,
payoff_func)
def calculate_model_values(p0):
    """Re-value all selected options for the parameter vector *p0*.

    Parameters
    ==========
    p0 : tuple/list
        tuple of kappa, theta, volatility

    Returns
    =======
    model_values : dict
        dictionary with model values
    """
    kappa, theta, volatility = p0
    # push the candidate parameters into the shared risk-factor model
    vstoxx_model.update(kappa=kappa,
                        theta=theta,
                        volatility=volatility)
    # fixed_seed keeps the Monte Carlo paths identical across calls,
    # which gives the calibration a smooth, deterministic objective
    return {option: option_models[option].present_value(fixed_seed=True)
            for option in option_models}
calculate_model_values((0.5, 27.5, vol_est))
```
### Calibration Procedure
```
# global counter of objective-function evaluations, used for progress output
i = 0
def mean_squared_error(p0):
''' Returns the mean-squared error given
the model and market values.
Parameters
===========
p0 : tuple/list
tuple of kappa, theta, volatility
Returns
=======
MSE : float
mean-squared error
'''
global i
model_values = np.array(list(calculate_model_values(p0).values()))
market_values = option_selection['PRICE'].values
option_diffs = model_values - market_values
MSE = np.sum(option_diffs ** 2) / len(option_diffs)
# vectorized MSE calculation
# print a progress line every 20th evaluation (with header on the first)
if i % 20 == 0:
if i == 0:
print('%4s %6s %6s %6s --> %6s' %
('i', 'kappa', 'theta', 'vola', 'MSE'))
print('%4d %6.3f %6.3f %6.3f --> %6.3f' %
(i, p0[0], p0[1], p0[2], MSE))
i += 1
return MSE
# sanity check of the objective at the initial guess
mean_squared_error((0.5, 27.5, vol_est))
import scipy.optimize as spo
%%time
# global search first: brute force over a coarse (kappa, theta, vola) grid
i = 0
opt_global = spo.brute(mean_squared_error,
((0.5, 3.01, 0.5), # range for kappa
(15., 30.1, 5.), # range for theta
(0.5, 5.51, 1)), # range for volatility
finish=None)
i = 0
mean_squared_error(opt_global)
%%time
# then local refinement (Nelder-Mead) starting from the brute-force optimum
i = 0
opt_local = spo.fmin(mean_squared_error, opt_global,
xtol=0.00001, ftol=0.00001,
maxiter=100, maxfun=350)
i = 0
mean_squared_error(opt_local)
calculate_model_values(opt_local)
# silence SettingWithCopy warnings for the column assignments below
pd.options.mode.chained_assignment = None
option_selection['MODEL'] = \
np.array(list(calculate_model_values(opt_local).values()))
option_selection['ERRORS'] = \
option_selection['MODEL'] - option_selection['PRICE']
option_selection[['MODEL', 'PRICE', 'ERRORS']]
round(option_selection['ERRORS'].mean(), 3)
import matplotlib.pyplot as plt
%matplotlib inline
# upper panel: market vs. model values; lower panel: pricing errors per strike
fix, (ax1, ax2) = plt.subplots(2, sharex=True, figsize=(8, 8))
strikes = option_selection['STRIKE'].values
ax1.plot(strikes, option_selection['PRICE'], label='market quotes')
ax1.plot(strikes, option_selection['MODEL'], 'ro', label='model values')
ax1.set_ylabel('option values')
ax1.grid(True)
ax1.legend(loc=0)
wi = 0.25
ax2.bar(strikes - wi / 2., option_selection['ERRORS'],
label='market quotes', width=wi)
ax2.grid(True)
ax2.set_ylabel('differences')
ax2.set_xlabel('strikes')
# tag: vstoxx_calibration
# title: Calibrated model values for VSTOXX call options vs. market quotes
```
## American Options on the VSTOXX
### Modeling Option Positions
```
# Rebuild the market environment with the calibrated parameters and define
# one American put position (quantity 100) per selected strike.
me_vstoxx = market_environment('me_vstoxx', pricing_date)
me_vstoxx.add_constant('initial_value', initial_value)
me_vstoxx.add_constant('final_date', pricing_date)
me_vstoxx.add_constant('currency', 'NONE')
# adding optimal parameters to environment
me_vstoxx.add_constant('kappa', opt_local[0])
me_vstoxx.add_constant('theta', opt_local[1])
me_vstoxx.add_constant('volatility', opt_local[2])
me_vstoxx.add_constant('model', 'srd')
# American put payoff evaluated on the simulated paths
payoff_func = 'np.maximum(strike - instrument_values, 0)'
shared = market_environment('share', pricing_date)
shared.add_constant('maturity', maturity)
shared.add_constant('currency', 'EUR')
option_positions = {}
# dictionary for option positions
option_environments = {}
# dictionary for option environments
for option in option_selection.index:
option_environments[option] = \
market_environment('am_put_%d' % option, pricing_date)
# define new option environment, one for each option
strike = option_selection['STRIKE'].ix[option]
# pick the relevant strike
option_environments[option].add_constant('strike', strike)
# add it to the environment
option_environments[option].add_environment(shared)
# add the shared data
option_positions['am_put_%d' % strike] = \
derivatives_position(
'am_put_%d' % strike,
quantity=100.,
underlying='vstoxx_model',
mar_env=option_environments[option],
otype='American',
payoff_func=payoff_func)
```
### The Options Portfolio
```
# Valuation environment for the whole portfolio; value all American puts
# and aggregate position value, delta and vega.
val_env = market_environment('val_env', pricing_date)
val_env.add_constant('starting_date', pricing_date)
val_env.add_constant('final_date', pricing_date)
# temporary value, is updated during valuation
val_env.add_curve('discount_curve', csr)
val_env.add_constant('frequency', 'B')
val_env.add_constant('paths', 25000)
underlyings = {'vstoxx_model' : me_vstoxx}
portfolio = derivatives_portfolio('portfolio', option_positions,
val_env, underlyings)
%time results = portfolio.get_statistics(fixed_seed=True)
results.sort_values(by='name')
# aggregate portfolio value and Greeks
results[['pos_value','pos_delta','pos_vega']].sum()
```
## Conclusions
## Further Reading
<img src="http://hilpisch.com/tpq_logo.png" alt="The Python Quants" width="35%" align="right" border="0"><br>
<a href="http://www.pythonquants.com" target="_blank">www.pythonquants.com</a> | <a href="http://twitter.com/dyjh" target="_blank">@dyjh</a>
<a href="mailto:analytics@pythonquants.com">analytics@pythonquants.com</a>
**Python Quant Platform** |
<a href="http://oreilly.quant-platform.com">http://oreilly.quant-platform.com</a>
**Derivatives Analytics with Python** |
<a href="http://www.derivatives-analytics-with-python.com" target="_blank">Derivatives Analytics @ Wiley Finance</a>
**Python for Finance** |
<a href="http://shop.oreilly.com/product/0636920032441.do" target="_blank">Python for Finance @ O'Reilly</a>
| github_jupyter |
# Training Deep Neural Networks
> Chapter 11
- permalink: /11_training_deep_neural_networks
_This notebook contains all the sample code and solutions to the exercises in chapter 11._
# Setup
First, let's import a few common modules, ensure MatplotLib plots figures inline and prepare a function to save the figures. We also check that Python 3.5 or later is installed (although Python 2.x may work, it is deprecated so we strongly recommend you use Python 3 instead), as well as Scikit-Learn ≥0.20 and TensorFlow ≥2.0.
```
#collapse-show
# Python ≥3.5 is required
import sys
assert sys.version_info >= (3, 5)
# Scikit-Learn ≥0.20 is required
import sklearn
assert sklearn.__version__ >= "0.20"
try:
# %tensorflow_version only exists in Colab.
%tensorflow_version 2.x
except Exception:
pass
# TensorFlow ≥2.0 is required
import tensorflow as tf
from tensorflow import keras
assert tf.__version__ >= "2.0"
# Common imports
import numpy as np
import os
# to make this notebook's output stable across runs
np.random.seed(42)
# To plot pretty figures
%matplotlib inline
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rc('axes', labelsize=14)
mpl.rc('xtick', labelsize=12)
mpl.rc('ytick', labelsize=12)
# Where to save the figures
PROJECT_ROOT_DIR = "."
CHAPTER_ID = "deep"
IMAGES_PATH = os.path.join(PROJECT_ROOT_DIR, "images", CHAPTER_ID)
os.makedirs(IMAGES_PATH, exist_ok=True)
def save_fig(fig_id, tight_layout=True, fig_extension="png", resolution=300):
    """Save the current matplotlib figure as IMAGES_PATH/<fig_id>.<fig_extension>."""
    target = os.path.join(IMAGES_PATH, fig_id + "." + fig_extension)
    print("Saving figure", fig_id)
    if tight_layout:
        # avoid clipped labels/titles in the saved file
        plt.tight_layout()
    plt.savefig(target, format=fig_extension, dpi=resolution)
```
# Vanishing/Exploding Gradients Problem
```
def logit(z):
    """Logistic sigmoid 1 / (1 + e^-z), applied elementwise."""
    denominator = 1 + np.exp(-z)
    return 1 / denominator
z = np.linspace(-5, 5, 200)
# plot the sigmoid and annotate its saturating tails and linear midsection
plt.plot([-5, 5], [0, 0], 'k-')
plt.plot([-5, 5], [1, 1], 'k--')
plt.plot([0, 0], [-0.2, 1.2], 'k-')
plt.plot([-5, 5], [-3/4, 7/4], 'g--')
plt.plot(z, logit(z), "b-", linewidth=2)
props = dict(facecolor='black', shrink=0.1)
plt.annotate('Saturating', xytext=(3.5, 0.7), xy=(5, 1), arrowprops=props, fontsize=14, ha="center")
plt.annotate('Saturating', xytext=(-3.5, 0.3), xy=(-5, 0), arrowprops=props, fontsize=14, ha="center")
plt.annotate('Linear', xytext=(2, 0.2), xy=(0, 0.5), arrowprops=props, fontsize=14, ha="center")
plt.grid(True)
plt.title("Sigmoid activation function", fontsize=14)
plt.axis([-5, 5, -0.2, 1.2])
save_fig("sigmoid_saturation_plot")
plt.show()
```
## Xavier and He Initialization
```
[name for name in dir(keras.initializers) if not name.startswith("_")]
keras.layers.Dense(10, activation="relu", kernel_initializer="he_normal")
init = keras.initializers.VarianceScaling(scale=2., mode='fan_avg',
distribution='uniform')
keras.layers.Dense(10, activation="relu", kernel_initializer=init)
```
## Nonsaturating Activation Functions
### Leaky ReLU
```
def leaky_relu(z, alpha=0.01):
    """Leaky ReLU: z where z >= 0, alpha * z otherwise (elementwise)."""
    leaked = alpha * z
    return np.maximum(leaked, z)
plt.plot(z, leaky_relu(z, 0.05), "b-", linewidth=2)
plt.plot([-5, 5], [0, 0], 'k-')
plt.plot([0, 0], [-0.5, 4.2], 'k-')
plt.grid(True)
props = dict(facecolor='black', shrink=0.1)
plt.annotate('Leak', xytext=(-3.5, 0.5), xy=(-5, -0.2), arrowprops=props, fontsize=14, ha="center")
plt.title("Leaky ReLU activation function", fontsize=14)
plt.axis([-5, 5, -0.5, 4.2])
save_fig("leaky_relu_plot")
plt.show()
[m for m in dir(keras.activations) if not m.startswith("_")]
[m for m in dir(keras.layers) if "relu" in m.lower()]
```
Let's train a neural network on Fashion MNIST using the Leaky ReLU:
```
(X_train_full, y_train_full), (X_test, y_test) = keras.datasets.fashion_mnist.load_data()
X_train_full = X_train_full / 255.0
X_test = X_test / 255.0
X_valid, X_train = X_train_full[:5000], X_train_full[5000:]
y_valid, y_train = y_train_full[:5000], y_train_full[5000:]
tf.random.set_seed(42)
np.random.seed(42)
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28]),
keras.layers.Dense(300, kernel_initializer="he_normal"),
keras.layers.LeakyReLU(),
keras.layers.Dense(100, kernel_initializer="he_normal"),
keras.layers.LeakyReLU(),
keras.layers.Dense(10, activation="softmax")
])
model.compile(loss="sparse_categorical_crossentropy",
optimizer=keras.optimizers.SGD(lr=1e-3),
metrics=["accuracy"])
history = model.fit(X_train, y_train, epochs=10,
validation_data=(X_valid, y_valid))
```
Now let's try PReLU:
```
tf.random.set_seed(42)
np.random.seed(42)
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28]),
keras.layers.Dense(300, kernel_initializer="he_normal"),
keras.layers.PReLU(),
keras.layers.Dense(100, kernel_initializer="he_normal"),
keras.layers.PReLU(),
keras.layers.Dense(10, activation="softmax")
])
model.compile(loss="sparse_categorical_crossentropy",
optimizer=keras.optimizers.SGD(lr=1e-3),
metrics=["accuracy"])
history = model.fit(X_train, y_train, epochs=10,
validation_data=(X_valid, y_valid))
```
### ELU
```
def elu(z, alpha=1):
    """ELU: z where z >= 0, alpha * (e^z - 1) otherwise (elementwise)."""
    negative_branch = alpha * (np.exp(z) - 1)
    return np.where(z < 0, negative_branch, z)
plt.plot(z, elu(z), "b-", linewidth=2)
plt.plot([-5, 5], [0, 0], 'k-')
plt.plot([-5, 5], [-1, -1], 'k--')
plt.plot([0, 0], [-2.2, 3.2], 'k-')
plt.grid(True)
plt.title(r"ELU activation function ($\alpha=1$)", fontsize=14)
plt.axis([-5, 5, -2.2, 3.2])
save_fig("elu_plot")
plt.show()
```
Implementing ELU in TensorFlow is trivial, just specify the activation function when building each layer:
```
keras.layers.Dense(10, activation="elu")
```
### SELU
This activation function was proposed in this [great paper](https://arxiv.org/pdf/1706.02515.pdf) by Günter Klambauer, Thomas Unterthiner and Andreas Mayr, published in June 2017. During training, a neural network composed exclusively of a stack of dense layers using the SELU activation function and LeCun initialization will self-normalize: the output of each layer will tend to preserve the same mean and variance during training, which solves the vanishing/exploding gradients problem. As a result, this activation function outperforms the other activation functions very significantly for such neural nets, so you should really try it out. Unfortunately, the self-normalizing property of the SELU activation function is easily broken: you cannot use ℓ<sub>1</sub> or ℓ<sub>2</sub> regularization, regular dropout, max-norm, skip connections or other non-sequential topologies (so recurrent neural networks won't self-normalize). However, in practice it works quite well with sequential CNNs. If you break self-normalization, SELU will not necessarily outperform other activation functions.
```
from scipy.special import erfc
# alpha and scale to self normalize with mean 0 and standard deviation 1
# (see equation 14 in the paper):
alpha_0_1 = -np.sqrt(2 / np.pi) / (erfc(1/np.sqrt(2)) * np.exp(1/2) - 1)
scale_0_1 = (1 - erfc(1 / np.sqrt(2)) * np.sqrt(np.e)) * np.sqrt(2 * np.pi) * (2 * erfc(np.sqrt(2))*np.e**2 + np.pi*erfc(1/np.sqrt(2))**2*np.e - 2*(2+np.pi)*erfc(1/np.sqrt(2))*np.sqrt(np.e)+np.pi+2)**(-1/2)
def selu(z, scale=scale_0_1, alpha=alpha_0_1):
# SELU is a scaled ELU; the default scale/alpha give self-normalizing outputs
return scale * elu(z, alpha)
plt.plot(z, selu(z), "b-", linewidth=2)
plt.plot([-5, 5], [0, 0], 'k-')
plt.plot([-5, 5], [-1.758, -1.758], 'k--')
plt.plot([0, 0], [-2.2, 3.2], 'k-')
plt.grid(True)
plt.title("SELU activation function", fontsize=14)
plt.axis([-5, 5, -2.2, 3.2])
save_fig("selu_plot")
plt.show()
```
By default, the SELU hyperparameters (`scale` and `alpha`) are tuned in such a way that the mean output of each neuron remains close to 0, and the standard deviation remains close to 1 (assuming the inputs are standardized with mean 0 and standard deviation 1 too). Using this activation function, even a 1,000 layer deep neural network preserves roughly mean 0 and standard deviation 1 across all layers, avoiding the exploding/vanishing gradients problem:
```
np.random.seed(42)
Z = np.random.normal(size=(500, 100)) # standardized inputs
# push standardized data through 1,000 random SELU layers and check that
# activations keep roughly zero mean / unit std (self-normalization)
for layer in range(1000):
W = np.random.normal(size=(100, 100), scale=np.sqrt(1 / 100)) # LeCun initialization
Z = selu(np.dot(Z, W))
means = np.mean(Z, axis=0).mean()
stds = np.std(Z, axis=0).mean()
if layer % 100 == 0:
print("Layer {}: mean {:.2f}, std deviation {:.2f}".format(layer, means, stds))
```
Using SELU is easy:
```
keras.layers.Dense(10, activation="selu",
kernel_initializer="lecun_normal")
```
Let's create a neural net for Fashion MNIST with 100 hidden layers, using the SELU activation function:
```
np.random.seed(42)
tf.random.set_seed(42)
model = keras.models.Sequential()
model.add(keras.layers.Flatten(input_shape=[28, 28]))
model.add(keras.layers.Dense(300, activation="selu",
kernel_initializer="lecun_normal"))
for layer in range(99):
model.add(keras.layers.Dense(100, activation="selu",
kernel_initializer="lecun_normal"))
model.add(keras.layers.Dense(10, activation="softmax"))
model.compile(loss="sparse_categorical_crossentropy",
optimizer=keras.optimizers.SGD(lr=1e-3),
metrics=["accuracy"])
```
Now let's train it. Do not forget to scale the inputs to mean 0 and standard deviation 1:
```
pixel_means = X_train.mean(axis=0, keepdims=True)
pixel_stds = X_train.std(axis=0, keepdims=True)
X_train_scaled = (X_train - pixel_means) / pixel_stds
X_valid_scaled = (X_valid - pixel_means) / pixel_stds
X_test_scaled = (X_test - pixel_means) / pixel_stds
history = model.fit(X_train_scaled, y_train, epochs=5,
validation_data=(X_valid_scaled, y_valid))
```
Now look at what happens if we try to use the ReLU activation function instead:
```
np.random.seed(42)
tf.random.set_seed(42)
model = keras.models.Sequential()
model.add(keras.layers.Flatten(input_shape=[28, 28]))
model.add(keras.layers.Dense(300, activation="relu", kernel_initializer="he_normal"))
for layer in range(99):
model.add(keras.layers.Dense(100, activation="relu", kernel_initializer="he_normal"))
model.add(keras.layers.Dense(10, activation="softmax"))
model.compile(loss="sparse_categorical_crossentropy",
optimizer=keras.optimizers.SGD(lr=1e-3),
metrics=["accuracy"])
history = model.fit(X_train_scaled, y_train, epochs=5,
validation_data=(X_valid_scaled, y_valid))
```
Not great at all, we suffered from the vanishing/exploding gradients problem.
# Batch Normalization
```
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28]),
keras.layers.BatchNormalization(),
keras.layers.Dense(300, activation="relu"),
keras.layers.BatchNormalization(),
keras.layers.Dense(100, activation="relu"),
keras.layers.BatchNormalization(),
keras.layers.Dense(10, activation="softmax")
])
model.summary()
bn1 = model.layers[1]
[(var.name, var.trainable) for var in bn1.variables]
bn1.updates
model.compile(loss="sparse_categorical_crossentropy",
optimizer=keras.optimizers.SGD(lr=1e-3),
metrics=["accuracy"])
history = model.fit(X_train, y_train, epochs=10,
validation_data=(X_valid, y_valid))
```
Sometimes applying BN before the activation function works better (there's a debate on this topic). Moreover, the layer before a `BatchNormalization` layer does not need to have bias terms, since the `BatchNormalization` layer includes one offset parameter per input; adding bias terms as well would be a waste of parameters, so you can set `use_bias=False` when creating those layers:
```
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28]),
keras.layers.BatchNormalization(),
keras.layers.Dense(300, use_bias=False),
keras.layers.BatchNormalization(),
keras.layers.Activation("relu"),
keras.layers.Dense(100, use_bias=False),
keras.layers.BatchNormalization(),
keras.layers.Activation("relu"),
keras.layers.Dense(10, activation="softmax")
])
model.compile(loss="sparse_categorical_crossentropy",
optimizer=keras.optimizers.SGD(lr=1e-3),
metrics=["accuracy"])
history = model.fit(X_train, y_train, epochs=10,
validation_data=(X_valid, y_valid))
```
## Gradient Clipping
All Keras optimizers accept `clipnorm` or `clipvalue` arguments:
```
optimizer = keras.optimizers.SGD(clipvalue=1.0)
optimizer = keras.optimizers.SGD(clipnorm=1.0)
```
## Reusing Pretrained Layers
### Reusing a Keras model
Let's split the fashion MNIST training set in two:
* `X_train_A`: all images of all items except for sandals and shirts (classes 5 and 6).
* `X_train_B`: a much smaller training set of just the first 200 images of sandals or shirts.
The validation set and the test set are also split this way, but without restricting the number of images.
We will train a model on set A (classification task with 8 classes), and try to reuse it to tackle set B (binary classification). We hope to transfer a little bit of knowledge from task A to task B, since classes in set A (sneakers, ankle boots, coats, t-shirts, etc.) are somewhat similar to classes in set B (sandals and shirts). However, since we are using `Dense` layers, only patterns that occur at the same location can be reused (in contrast, convolutional layers will transfer much better, since learned patterns can be detected anywhere on the image, as we will see in the CNN chapter).
```
def split_dataset(X, y):
    """Split Fashion MNIST into task A (all classes except 5/6,
    relabeled to 0-7) and task B (binary: shirt vs. sandal)."""
    mask_5_or_6 = (y == 5) | (y == 6)  # sandals or shirts
    y_A = y[~mask_5_or_6]
    y_A[y_A > 6] -= 2  # shift class indices 7, 8, 9 down to 5, 6, 7
    # task B label: 1.0 for shirt (class 6), 0.0 for sandal (class 5)
    y_B = (y[mask_5_or_6] == 6).astype(np.float32)
    return (X[~mask_5_or_6], y_A), (X[mask_5_or_6], y_B)
(X_train_A, y_train_A), (X_train_B, y_train_B) = split_dataset(X_train, y_train)
(X_valid_A, y_valid_A), (X_valid_B, y_valid_B) = split_dataset(X_valid, y_valid)
(X_test_A, y_test_A), (X_test_B, y_test_B) = split_dataset(X_test, y_test)
X_train_B = X_train_B[:200]
y_train_B = y_train_B[:200]
X_train_A.shape
X_train_B.shape
y_train_A[:30]
y_train_B[:30]
tf.random.set_seed(42)
np.random.seed(42)
model_A = keras.models.Sequential()
model_A.add(keras.layers.Flatten(input_shape=[28, 28]))
for n_hidden in (300, 100, 50, 50, 50):
model_A.add(keras.layers.Dense(n_hidden, activation="selu"))
model_A.add(keras.layers.Dense(8, activation="softmax"))
model_A.compile(loss="sparse_categorical_crossentropy",
optimizer=keras.optimizers.SGD(lr=1e-3),
metrics=["accuracy"])
history = model_A.fit(X_train_A, y_train_A, epochs=20,
validation_data=(X_valid_A, y_valid_A))
model_A.save("my_model_A.h5")
model_B = keras.models.Sequential()
model_B.add(keras.layers.Flatten(input_shape=[28, 28]))
for n_hidden in (300, 100, 50, 50, 50):
model_B.add(keras.layers.Dense(n_hidden, activation="selu"))
model_B.add(keras.layers.Dense(1, activation="sigmoid"))
model_B.compile(loss="binary_crossentropy",
optimizer=keras.optimizers.SGD(lr=1e-3),
metrics=["accuracy"])
history = model_B.fit(X_train_B, y_train_B, epochs=20,
validation_data=(X_valid_B, y_valid_B))
model.summary()
model_A = keras.models.load_model("my_model_A.h5")
model_B_on_A = keras.models.Sequential(model_A.layers[:-1])
model_B_on_A.add(keras.layers.Dense(1, activation="sigmoid"))
model_A_clone = keras.models.clone_model(model_A)
model_A_clone.set_weights(model_A.get_weights())
for layer in model_B_on_A.layers[:-1]:
layer.trainable = False
model_B_on_A.compile(loss="binary_crossentropy",
optimizer=keras.optimizers.SGD(lr=1e-3),
metrics=["accuracy"])
history = model_B_on_A.fit(X_train_B, y_train_B, epochs=4,
validation_data=(X_valid_B, y_valid_B))
for layer in model_B_on_A.layers[:-1]:
layer.trainable = True
model_B_on_A.compile(loss="binary_crossentropy",
optimizer=keras.optimizers.SGD(lr=1e-3),
metrics=["accuracy"])
history = model_B_on_A.fit(X_train_B, y_train_B, epochs=16,
validation_data=(X_valid_B, y_valid_B))
```
So, what's the final verdict?
```
model_B.evaluate(X_test_B, y_test_B)
model_B_on_A.evaluate(X_test_B, y_test_B)
```
Great! We got quite a bit of transfer: the error rate dropped by a factor of almost 4!
```
(100 - 97.05) / (100 - 99.25)
```
# Faster Optimizers
## Momentum optimization
```
optimizer = keras.optimizers.SGD(lr=0.001, momentum=0.9)
```
## Nesterov Accelerated Gradient
```
optimizer = keras.optimizers.SGD(lr=0.001, momentum=0.9, nesterov=True)
```
## AdaGrad
```
optimizer = keras.optimizers.Adagrad(lr=0.001)
```
## RMSProp
```
optimizer = keras.optimizers.RMSprop(lr=0.001, rho=0.9)
```
## Adam Optimization
```
optimizer = keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999)
```
## Adamax Optimization
```
optimizer = keras.optimizers.Adamax(lr=0.001, beta_1=0.9, beta_2=0.999)
```
## Nadam Optimization
```
optimizer = keras.optimizers.Nadam(lr=0.001, beta_1=0.9, beta_2=0.999)
```
## Learning Rate Scheduling
### Power Scheduling
```lr = lr0 / (1 + steps / s)**c```
* Keras uses `c=1` and `s = 1 / decay`
```
optimizer = keras.optimizers.SGD(lr=0.01, decay=1e-4)
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28]),
keras.layers.Dense(300, activation="selu", kernel_initializer="lecun_normal"),
keras.layers.Dense(100, activation="selu", kernel_initializer="lecun_normal"),
keras.layers.Dense(10, activation="softmax")
])
model.compile(loss="sparse_categorical_crossentropy", optimizer=optimizer, metrics=["accuracy"])
n_epochs = 25
history = model.fit(X_train_scaled, y_train, epochs=n_epochs,
validation_data=(X_valid_scaled, y_valid))
learning_rate = 0.01
decay = 1e-4
batch_size = 32
n_steps_per_epoch = len(X_train) // batch_size
epochs = np.arange(n_epochs)
lrs = learning_rate / (1 + decay * epochs * n_steps_per_epoch)
plt.plot(epochs, lrs, "o-")
plt.axis([0, n_epochs - 1, 0, 0.01])
plt.xlabel("Epoch")
plt.ylabel("Learning Rate")
plt.title("Power Scheduling", fontsize=14)
plt.grid(True)
plt.show()
```
### Exponential Scheduling
```lr = lr0 * 0.1**(epoch / s)```
```
def exponential_decay_fn(epoch):
    """Exponential schedule: lr0 = 0.01, divided by 10 every 20 epochs."""
    exponent = epoch / 20
    return 0.01 * 0.1 ** exponent

def exponential_decay(lr0, s):
    """Factory for an exponential schedule with base *lr0* and span *s*
    (the lr is divided by 10 every *s* epochs)."""
    def exponential_decay_fn(epoch):
        return lr0 * 0.1 ** (epoch / s)
    return exponential_decay_fn
exponential_decay_fn = exponential_decay(lr0=0.01, s=20)
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28]),
keras.layers.Dense(300, activation="selu", kernel_initializer="lecun_normal"),
keras.layers.Dense(100, activation="selu", kernel_initializer="lecun_normal"),
keras.layers.Dense(10, activation="softmax")
])
model.compile(loss="sparse_categorical_crossentropy", optimizer="nadam", metrics=["accuracy"])
n_epochs = 25
lr_scheduler = keras.callbacks.LearningRateScheduler(exponential_decay_fn)
history = model.fit(X_train_scaled, y_train, epochs=n_epochs,
validation_data=(X_valid_scaled, y_valid),
callbacks=[lr_scheduler])
plt.plot(history.epoch, history.history["lr"], "o-")
plt.axis([0, n_epochs - 1, 0, 0.011])
plt.xlabel("Epoch")
plt.ylabel("Learning Rate")
plt.title("Exponential Scheduling", fontsize=14)
plt.grid(True)
plt.show()
```
The schedule function can take the current learning rate as a second argument:
```
def exponential_decay_fn(epoch, lr):
    """Multiplicative schedule: shrink the current lr by a constant
    per-epoch factor (equivalent to dividing by 10 every 20 epochs)."""
    per_epoch_factor = 0.1 ** (1 / 20)
    return lr * per_epoch_factor
```
If you want to update the learning rate at each iteration rather than at each epoch, you must write your own callback class:
```
K = keras.backend

class ExponentialDecay(keras.callbacks.Callback):
    """Per-batch exponential learning-rate decay.

    Multiplies the optimizer's lr by 0.1**(1/s) before every batch, so the
    lr is divided by 10 after every s batches, and records the current lr
    into the epoch logs so it appears in ``history.history['lr']``.
    """
    def __init__(self, s=40000):
        super().__init__()
        self.s = s
    def on_batch_begin(self, batch, logs=None):
        # Note: the `batch` argument is reset at each epoch
        lr = K.get_value(self.model.optimizer.lr)
        # BUG FIX: the original used the *global* name `s` here instead of
        # `self.s`, silently ignoring the constructor argument (it only ran
        # because a global `s` happened to exist in the notebook).
        K.set_value(self.model.optimizer.lr, lr * 0.1 ** (1 / self.s))
    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        logs['lr'] = K.get_value(self.model.optimizer.lr)
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28]),
keras.layers.Dense(300, activation="selu", kernel_initializer="lecun_normal"),
keras.layers.Dense(100, activation="selu", kernel_initializer="lecun_normal"),
keras.layers.Dense(10, activation="softmax")
])
lr0 = 0.01
optimizer = keras.optimizers.Nadam(lr=lr0)
model.compile(loss="sparse_categorical_crossentropy", optimizer=optimizer, metrics=["accuracy"])
n_epochs = 25
s = 20 * len(X_train) // 32 # number of steps in 20 epochs (batch size = 32)
exp_decay = ExponentialDecay(s)
history = model.fit(X_train_scaled, y_train, epochs=n_epochs,
validation_data=(X_valid_scaled, y_valid),
callbacks=[exp_decay])
n_steps = n_epochs * len(X_train) // 32
steps = np.arange(n_steps)
lrs = lr0 * 0.1**(steps / s)
plt.plot(steps, lrs, "-", linewidth=2)
plt.axis([0, n_steps - 1, 0, lr0 * 1.1])
plt.xlabel("Batch")
plt.ylabel("Learning Rate")
plt.title("Exponential Scheduling (per batch)", fontsize=14)
plt.grid(True)
plt.show()
```
### Piecewise Constant Scheduling
```
def piecewise_constant_fn(epoch):
    """Hard-coded three-step schedule: 0.01 -> 0.005 -> 0.001."""
    if epoch < 5:
        return 0.01
    if epoch < 15:
        return 0.005
    return 0.001

def piecewise_constant(boundaries, values):
    """Factory: step schedule returning values[i] for epochs in
    [boundaries[i-1], boundaries[i]); the last value applies forever."""
    edges = np.array([0] + boundaries)
    levels = np.array(values)
    def piecewise_constant_fn(epoch):
        # index of the last edge <= epoch; argmax == 0 when no edge is
        # greater, which wraps to the final level via index -1
        return levels[np.argmax(edges > epoch) - 1]
    return piecewise_constant_fn
piecewise_constant_fn = piecewise_constant([5, 15], [0.01, 0.005, 0.001])
lr_scheduler = keras.callbacks.LearningRateScheduler(piecewise_constant_fn)
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28]),
keras.layers.Dense(300, activation="selu", kernel_initializer="lecun_normal"),
keras.layers.Dense(100, activation="selu", kernel_initializer="lecun_normal"),
keras.layers.Dense(10, activation="softmax")
])
model.compile(loss="sparse_categorical_crossentropy", optimizer="nadam", metrics=["accuracy"])
n_epochs = 25
history = model.fit(X_train_scaled, y_train, epochs=n_epochs,
validation_data=(X_valid_scaled, y_valid),
callbacks=[lr_scheduler])
plt.plot(history.epoch, [piecewise_constant_fn(epoch) for epoch in history.epoch], "o-")
plt.axis([0, n_epochs - 1, 0, 0.011])
plt.xlabel("Epoch")
plt.ylabel("Learning Rate")
plt.title("Piecewise Constant Scheduling", fontsize=14)
plt.grid(True)
plt.show()
```
### Performance Scheduling
```
tf.random.set_seed(42)
np.random.seed(42)
lr_scheduler = keras.callbacks.ReduceLROnPlateau(factor=0.5, patience=5)
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28]),
keras.layers.Dense(300, activation="selu", kernel_initializer="lecun_normal"),
keras.layers.Dense(100, activation="selu", kernel_initializer="lecun_normal"),
keras.layers.Dense(10, activation="softmax")
])
optimizer = keras.optimizers.SGD(lr=0.02, momentum=0.9)
model.compile(loss="sparse_categorical_crossentropy", optimizer=optimizer, metrics=["accuracy"])
n_epochs = 25
history = model.fit(X_train_scaled, y_train, epochs=n_epochs,
validation_data=(X_valid_scaled, y_valid),
callbacks=[lr_scheduler])
plt.plot(history.epoch, history.history["lr"], "bo-")
plt.xlabel("Epoch")
plt.ylabel("Learning Rate", color='b')
plt.tick_params('y', colors='b')
plt.gca().set_xlim(0, n_epochs - 1)
plt.grid(True)
ax2 = plt.gca().twinx()
ax2.plot(history.epoch, history.history["val_loss"], "r^-")
ax2.set_ylabel('Validation Loss', color='r')
ax2.tick_params('y', colors='r')
plt.title("Reduce LR on Plateau", fontsize=14)
plt.show()
```
### tf.keras schedulers
```
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28]),
keras.layers.Dense(300, activation="selu", kernel_initializer="lecun_normal"),
keras.layers.Dense(100, activation="selu", kernel_initializer="lecun_normal"),
keras.layers.Dense(10, activation="softmax")
])
s = 20 * len(X_train) // 32 # number of steps in 20 epochs (batch size = 32)
learning_rate = keras.optimizers.schedules.ExponentialDecay(0.01, s, 0.1)
optimizer = keras.optimizers.SGD(learning_rate)
model.compile(loss="sparse_categorical_crossentropy", optimizer=optimizer, metrics=["accuracy"])
n_epochs = 25
history = model.fit(X_train_scaled, y_train, epochs=n_epochs,
validation_data=(X_valid_scaled, y_valid))
```
For piecewise constant scheduling, try this:
```
learning_rate = keras.optimizers.schedules.PiecewiseConstantDecay(
boundaries=[5. * n_steps_per_epoch, 15. * n_steps_per_epoch],
values=[0.01, 0.005, 0.001])
```
### 1Cycle scheduling
```
#collapse-show
K = keras.backend
class ExponentialLearningRate(keras.callbacks.Callback):
    """Callback for the "LR range test".

    Multiplies the optimizer's learning rate by `factor` at the end of every
    batch and records the (rate, loss) pairs so they can be plotted afterwards
    to pick a good maximum learning rate.
    """
    def __init__(self, factor):
        super().__init__()  # initialize Keras Callback internals (was missing)
        self.factor = factor    # multiplicative LR growth applied per batch
        self.rates = []         # learning rate observed at the end of each batch
        self.losses = []        # training loss at the end of each batch
    def on_batch_end(self, batch, logs=None):
        # Record the current state, then bump the LR for the next batch.
        self.rates.append(K.get_value(self.model.optimizer.lr))
        self.losses.append(logs["loss"])
        K.set_value(self.model.optimizer.lr, self.model.optimizer.lr * self.factor)
def find_learning_rate(model, X, y, epochs=1, batch_size=32, min_rate=10**-5, max_rate=10):
    """Sweep the learning rate exponentially from min_rate to max_rate.

    Trains for `epochs` while an ExponentialLearningRate callback records the
    loss at each rate, then restores the model's original weights and LR.

    Returns:
        a tuple (rates, losses) recorded per batch.
    """
    # Snapshot everything we are about to perturb.
    init_weights = model.get_weights()
    init_lr = K.get_value(model.optimizer.lr)
    # Growth factor so the LR reaches max_rate after `iterations` batches.
    iterations = len(X) // batch_size * epochs
    growth = np.exp(np.log(max_rate / min_rate) / iterations)
    tracker = ExponentialLearningRate(growth)
    K.set_value(model.optimizer.lr, min_rate)
    model.fit(X, y, epochs=epochs, batch_size=batch_size,
              callbacks=[tracker])
    # Restore the model exactly as it was before the sweep.
    K.set_value(model.optimizer.lr, init_lr)
    model.set_weights(init_weights)
    return tracker.rates, tracker.losses
def plot_lr_vs_loss(rates, losses):
    """Plot loss vs learning rate (log x-axis) from the LR range test.

    Args:
        rates (list): learning rates recorded per batch.
        losses (list): corresponding training losses.
    """
    plt.plot(rates, losses)
    plt.gca().set_xscale('log')
    # horizontal reference line at the minimum loss reached
    plt.hlines(min(losses), min(rates), max(rates))
    # clip the y-axis so the initial (large) losses don't dwarf the curve
    plt.axis([min(rates), max(rates), min(losses), (losses[0] + min(losses)) / 2])
    plt.xlabel("Learning rate")
    plt.ylabel("Loss")
# Run the LR range test (1 epoch, exponentially growing LR) on a fresh model,
# then plot loss vs learning rate to pick a good maximum.
tf.random.set_seed(42)
np.random.seed(42)
model = keras.models.Sequential([
    keras.layers.Flatten(input_shape=[28, 28]),
    keras.layers.Dense(300, activation="selu", kernel_initializer="lecun_normal"),
    keras.layers.Dense(100, activation="selu", kernel_initializer="lecun_normal"),
    keras.layers.Dense(10, activation="softmax")
])
model.compile(loss="sparse_categorical_crossentropy",
              optimizer=keras.optimizers.SGD(lr=1e-3),
              metrics=["accuracy"])
batch_size = 128
rates, losses = find_learning_rate(model, X_train_scaled, y_train, epochs=1, batch_size=batch_size)
plot_lr_vs_loss(rates, losses)
#collapse-show
class OneCycleScheduler(keras.callbacks.Callback):
    """1Cycle learning-rate schedule.

    The rate ramps linearly from `start_rate` up to `max_rate` over the first
    half of training, back down to `start_rate` over the second half, then
    decays linearly toward `last_rate` for the final `last_iterations` batches.
    """
    def __init__(self, iterations, max_rate, start_rate=None,
                 last_iterations=None, last_rate=None):
        super().__init__()  # initialize Keras Callback internals (was missing)
        self.iterations = iterations
        self.max_rate = max_rate
        self.start_rate = start_rate or max_rate / 10
        self.last_iterations = last_iterations or iterations // 10 + 1
        self.half_iteration = (iterations - self.last_iterations) // 2
        self.last_rate = last_rate or self.start_rate / 1000
        self.iteration = 0  # global batch counter across all epochs
    def _interpolate(self, iter1, iter2, rate1, rate2):
        """Linearly interpolate the rate for the current iteration between two marks."""
        return ((rate2 - rate1) * (self.iteration - iter1)
                / (iter2 - iter1) + rate1)
    def on_batch_begin(self, batch, logs=None):
        if self.iteration < self.half_iteration:
            # phase 1: ramp up from start_rate to max_rate
            rate = self._interpolate(0, self.half_iteration, self.start_rate, self.max_rate)
        elif self.iteration < 2 * self.half_iteration:
            # phase 2: ramp back down to start_rate
            rate = self._interpolate(self.half_iteration, 2 * self.half_iteration,
                                     self.max_rate, self.start_rate)
        else:
            # phase 3: final decay, clamped so it never drops below last_rate
            rate = self._interpolate(2 * self.half_iteration, self.iterations,
                                     self.start_rate, self.last_rate)
            rate = max(rate, self.last_rate)
        self.iteration += 1
        K.set_value(self.model.optimizer.lr, rate)
# Train with the 1cycle schedule sized to the whole run (batches/epoch * epochs).
n_epochs = 25
onecycle = OneCycleScheduler(len(X_train) // batch_size * n_epochs, max_rate=0.05)
history = model.fit(X_train_scaled, y_train, epochs=n_epochs, batch_size=batch_size,
                    validation_data=(X_valid_scaled, y_valid),
                    callbacks=[onecycle])
```
# Avoiding Overfitting Through Regularization
## $\ell_1$ and $\ell_2$ regularization
```
# L2 weight regularization (weight decay) applied to every Dense layer.
layer = keras.layers.Dense(100, activation="elu",
                           kernel_initializer="he_normal",
                           kernel_regularizer=keras.regularizers.l2(0.01))
# or l1(0.1) for ℓ1 regularization with a factor of 0.1
# or l1_l2(0.1, 0.01) for both ℓ1 and ℓ2 regularization, with factors 0.1 and 0.01 respectively
model = keras.models.Sequential([
    keras.layers.Flatten(input_shape=[28, 28]),
    keras.layers.Dense(300, activation="elu",
                       kernel_initializer="he_normal",
                       kernel_regularizer=keras.regularizers.l2(0.01)),
    keras.layers.Dense(100, activation="elu",
                       kernel_initializer="he_normal",
                       kernel_regularizer=keras.regularizers.l2(0.01)),
    keras.layers.Dense(10, activation="softmax",
                       kernel_regularizer=keras.regularizers.l2(0.01))
])
model.compile(loss="sparse_categorical_crossentropy", optimizer="nadam", metrics=["accuracy"])
n_epochs = 2
history = model.fit(X_train_scaled, y_train, epochs=n_epochs,
                    validation_data=(X_valid_scaled, y_valid))
# functools.partial avoids repeating the same layer arguments for every layer.
from functools import partial
RegularizedDense = partial(keras.layers.Dense,
                           activation="elu",
                           kernel_initializer="he_normal",
                           kernel_regularizer=keras.regularizers.l2(0.01))
model = keras.models.Sequential([
    keras.layers.Flatten(input_shape=[28, 28]),
    RegularizedDense(300),
    RegularizedDense(100),
    RegularizedDense(10, activation="softmax")
])
model.compile(loss="sparse_categorical_crossentropy", optimizer="nadam", metrics=["accuracy"])
n_epochs = 2
history = model.fit(X_train_scaled, y_train, epochs=n_epochs,
                    validation_data=(X_valid_scaled, y_valid))
```
## Dropout
```
# Standard dropout (rate 0.2) before each Dense layer; dropout is only active
# during training, so validation metrics are computed without it.
model = keras.models.Sequential([
    keras.layers.Flatten(input_shape=[28, 28]),
    keras.layers.Dropout(rate=0.2),
    keras.layers.Dense(300, activation="elu", kernel_initializer="he_normal"),
    keras.layers.Dropout(rate=0.2),
    keras.layers.Dense(100, activation="elu", kernel_initializer="he_normal"),
    keras.layers.Dropout(rate=0.2),
    keras.layers.Dense(10, activation="softmax")
])
model.compile(loss="sparse_categorical_crossentropy", optimizer="nadam", metrics=["accuracy"])
n_epochs = 2
history = model.fit(X_train_scaled, y_train, epochs=n_epochs,
                    validation_data=(X_valid_scaled, y_valid))
```
## Alpha Dropout
```
# SELU + AlphaDropout: dropout variant designed to preserve the
# self-normalizing property of SELU networks.
tf.random.set_seed(42)
np.random.seed(42)
model = keras.models.Sequential([
    keras.layers.Flatten(input_shape=[28, 28]),
    keras.layers.AlphaDropout(rate=0.2),
    keras.layers.Dense(300, activation="selu", kernel_initializer="lecun_normal"),
    keras.layers.AlphaDropout(rate=0.2),
    keras.layers.Dense(100, activation="selu", kernel_initializer="lecun_normal"),
    keras.layers.AlphaDropout(rate=0.2),
    keras.layers.Dense(10, activation="softmax")
])
optimizer = keras.optimizers.SGD(lr=0.01, momentum=0.9, nesterov=True)
model.compile(loss="sparse_categorical_crossentropy", optimizer=optimizer, metrics=["accuracy"])
n_epochs = 20
history = model.fit(X_train_scaled, y_train, epochs=n_epochs,
                    validation_data=(X_valid_scaled, y_valid))
model.evaluate(X_test_scaled, y_test)
model.evaluate(X_train_scaled, y_train)
# One extra epoch of training (no validation_data this time).
history = model.fit(X_train_scaled, y_train)
```
## MC Dropout
```
# MC Dropout: run 100 stochastic forward passes (training=True keeps dropout
# active) and average the softmax outputs; the std across passes gives an
# uncertainty estimate per class.
tf.random.set_seed(42)
np.random.seed(42)
y_probas = np.stack([model(X_test_scaled, training=True)
                     for sample in range(100)])
y_proba = y_probas.mean(axis=0)
y_std = y_probas.std(axis=0)
# Compare the single deterministic prediction against the MC ensemble.
np.round(model.predict(X_test_scaled[:1]), 2)
np.round(y_probas[:, :1], 2)
np.round(y_proba[:1], 2)
y_std = y_probas.std(axis=0)
np.round(y_std[:1], 2)
# Accuracy of the averaged predictions on the test set.
y_pred = np.argmax(y_proba, axis=1)
accuracy = np.sum(y_pred == y_test) / len(y_test)
accuracy
class MCDropout(keras.layers.Dropout):
    """Dropout that stays active at inference time (Monte Carlo Dropout)."""
    def call(self, inputs):
        # Force training=True so dropout is applied even inside predict().
        return super().call(inputs, training=True)
class MCAlphaDropout(keras.layers.AlphaDropout):
    """AlphaDropout variant that stays active at inference time."""
    def call(self, inputs):
        return super().call(inputs, training=True)
# Clone the trained model, swapping each AlphaDropout layer for the MC variant
# that stays active at inference time; then copy the trained weights over.
tf.random.set_seed(42)
np.random.seed(42)
mc_model = keras.models.Sequential([
    MCAlphaDropout(layer.rate) if isinstance(layer, keras.layers.AlphaDropout) else layer
    for layer in model.layers
])
mc_model.summary()
optimizer = keras.optimizers.SGD(lr=0.01, momentum=0.9, nesterov=True)
mc_model.compile(loss="sparse_categorical_crossentropy", optimizer=optimizer, metrics=["accuracy"])
# Reuse the already-trained weights; no retraining needed.
mc_model.set_weights(model.get_weights())
```
Now we can use the model with MC Dropout:
```
# MC Dropout prediction: average 100 stochastic forward passes for one sample.
np.round(np.mean([mc_model.predict(X_test_scaled[:1]) for sample in range(100)], axis=0), 2)
```
## Max norm
```
# Max-norm constraint: after each update, each unit's weight vector is rescaled
# if its L2 norm exceeds 1.
layer = keras.layers.Dense(100, activation="selu", kernel_initializer="lecun_normal",
                           kernel_constraint=keras.constraints.max_norm(1.))
MaxNormDense = partial(keras.layers.Dense,
                       activation="selu", kernel_initializer="lecun_normal",
                       kernel_constraint=keras.constraints.max_norm(1.))
model = keras.models.Sequential([
    keras.layers.Flatten(input_shape=[28, 28]),
    MaxNormDense(300),
    MaxNormDense(100),
    keras.layers.Dense(10, activation="softmax")
])
model.compile(loss="sparse_categorical_crossentropy", optimizer="nadam", metrics=["accuracy"])
n_epochs = 2
history = model.fit(X_train_scaled, y_train, epochs=n_epochs,
                    validation_data=(X_valid_scaled, y_valid))
```
# Exercises
## 1. to 7.
See appendix A.
## 8. Deep Learning
### 8.1.
_Exercise: Build a DNN with five hidden layers of 100 neurons each, He initialization, and the ELU activation function._
### 8.2.
_Exercise: Using Adam optimization and early stopping, try training it on MNIST but only on digits 0 to 4, as we will use transfer learning for digits 5 to 9 in the next exercise. You will need a softmax output layer with five neurons, and as always make sure to save checkpoints at regular intervals and save the final model so you can reuse it later._
### 8.3.
_Exercise: Tune the hyperparameters using cross-validation and see what precision you can achieve._
### 8.4.
_Exercise: Now try adding Batch Normalization and compare the learning curves: is it converging faster than before? Does it produce a better model?_
### 8.5.
_Exercise: is the model overfitting the training set? Try adding dropout to every layer and try again. Does it help?_
## 9. Transfer learning
### 9.1.
_Exercise: create a new DNN that reuses all the pretrained hidden layers of the previous model, freezes them, and replaces the softmax output layer with a new one._
### 9.2.
_Exercise: train this new DNN on digits 5 to 9, using only 100 images per digit, and time how long it takes. Despite this small number of examples, can you achieve high precision?_
### 9.3.
_Exercise: try caching the frozen layers, and train the model again: how much faster is it now?_
### 9.4.
_Exercise: try again reusing just four hidden layers instead of five. Can you achieve a higher precision?_
### 9.5.
_Exercise: now unfreeze the top two hidden layers and continue training: can you get the model to perform even better?_
## 10. Pretraining on an auxiliary task
In this exercise you will build a DNN that compares two MNIST digit images and predicts whether they represent the same digit or not. Then you will reuse the lower layers of this network to train an MNIST classifier using very little training data.
### 10.1.
Exercise: _Start by building two DNNs (let's call them DNN A and B), both similar to the one you built earlier but without the output layer: each DNN should have five hidden layers of 100 neurons each, He initialization, and ELU activation. Next, add one more hidden layer with 10 units on top of both DNNs. You should use the `keras.layers.concatenate()` function to concatenate the outputs of both DNNs, then feed the result to the hidden layer. Finally, add an output layer with a single neuron using the logistic activation function._
### 10.2.
_Exercise: split the MNIST training set in two sets: split #1 should contain 55,000 images, and split #2 should contain 5,000 images. Create a function that generates a training batch where each instance is a pair of MNIST images picked from split #1. Half of the training instances should be pairs of images that belong to the same class, while the other half should be images from different classes. For each pair, the training label should be 0 if the images are from the same class, or 1 if they are from different classes._
### 10.3.
_Exercise: train the DNN on this training set. For each image pair, you can simultaneously feed the first image to DNN A and the second image to DNN B. The whole network will gradually learn to tell whether two images belong to the same class or not._
### 10.4.
_Exercise: now create a new DNN by reusing and freezing the hidden layers of DNN A and adding a softmax output layer on top with 10 neurons. Train this network on split #2 and see if you can achieve high performance despite having only 500 images per class._
| github_jupyter |
# Exploratory Data Analysis
ALS Hiring
A dataset simulating CRM data is available in these public AWS S3 files:
Constituent Information: https://als-hiring.s3.amazonaws.com/fake_data/2020-07-01_17%3A11%3A00/cons.csv
Constituent Email Addresses: https://als-hiring.s3.amazonaws.com/fake_data/2020-07-01_17%3A11%3A00/cons_email.csv
Boolean columns (including is_primary) in all of these datasets are 1/0 numeric values. 1 means True, 0 means False.
Constituent Subscription Status: https://als-hiring.s3.amazonaws.com/fake_data/2020-07-01_17%3A11%3A00/cons_email_chapter_subscription.csv
We only care about subscription statuses where chapter_id is 1.
If an email is not present in this table, it is assumed to still be subscribed where chapter_id is 1.
## IMPORTS
```
import pandas as pd
from pandas_profiling import ProfileReport
```
## Load CSVs into Dataframes
```
# Load the three CRM tables straight from the public S3 bucket.
df_cons = pd.read_csv('https://als-hiring.s3.amazonaws.com/fake_data/2020-07-01_17%3A11%3A00/cons.csv')
df_cons_email = pd.read_csv('https://als-hiring.s3.amazonaws.com/fake_data/2020-07-01_17%3A11%3A00/cons_email.csv')
df_cons_email_sub = pd.read_csv('https://als-hiring.s3.amazonaws.com/fake_data/2020-07-01_17%3A11%3A00/cons_email_chapter_subscription.csv')
```
## Description, Summary Info, and nulls
### Constituents dataframe
```
# Structure, summary statistics, and null counts for the constituents table.
df_cons.info()
df_cons.describe()
df_cons.isnull().sum()
```
### Email Info dataframe
```
# Structure, summary statistics, and null counts for the email table.
df_cons_email.info()
df_cons_email.describe()
df_cons_email.isnull().sum()
```
### Constituents Email Subscriptions dataframe
```
# Structure, summary statistics, and null counts for the subscriptions table.
df_cons_email_sub.info()
df_cons_email_sub.describe()
df_cons_email_sub.isnull().sum()
```
### Profile Reports
```
# pandas-profiling reports — left commented out deliberately (slow to generate).
# ProfileReport(df_cons,
#               title='Constituents')
# ProfileReport(df_cons_email,
#               title='Constituent Email Info')
# ProfileReport(df_cons_email_sub,
#               title='Constituent Email Subscription Info')
```
### Features List
```
# Recap of the available columns across the three tables.
df_cons.info()
df_cons_email.info()
df_cons_email_sub.info()
```
## Create people.csv
### Create smaller dataframes with only necessary features
```
# Keep only the columns needed to build people.csv.
people_df = pd.DataFrame(df_cons[['cons_id', 'source','create_dt','modified_dt']])
people_df
email_df = pd.DataFrame(df_cons_email[['cons_id', 'email', 'cons_email_id' ]])
email_df
sub_df = pd.DataFrame(df_cons_email_sub[['isunsub', 'chapter_id', 'cons_email_id']])
# "We only care about subscription statuses where chapter_id = 1 . "
# This also removes duplicate emails per constituent
sub_df = sub_df[sub_df['chapter_id'] == 1]
sub_df.info()
```
### Join the dataframes
```
## left outer join people_df to email_df
# one row per email address, carrying the owning constituent's metadata
df2 = email_df.join(people_df.set_index('cons_id'), on='cons_id' )
df2
# Join subscription df to people/email df2.
# Emails absent from sub_df get NaN isunsub (per spec: assumed still subscribed).
ppl_df = df2.join(sub_df.set_index('cons_email_id'), on='cons_email_id' )
print(ppl_df.info())
pd.set_option('display.max_rows', 100)
ppl_df.head(100)
def EncodeTF(val_ue):
    """Map a subscription flag to a plain boolean.

    Args:
        val_ue: value from the joined `isunsub` column (1.0, 0.0, or NaN).

    Returns:
        bool: True only when the value equals 1.0; 0.0, NaN, and None all
        map to False (NaN never compares equal to 1.0).
    """
    # The original had a duplicated, unreachable `return (False)`; a single
    # comparison expresses the same mapping. bool() keeps the return type a
    # native Python bool even for numpy scalars.
    return bool(val_ue == 1.0)
# Convert isunsub (1.0/0.0/NaN after the join) into a real boolean column.
ppl_df['isunsub']=ppl_df['isunsub'].apply(EncodeTF)
# drop unnecessary columns
ppl_df.drop(columns=['cons_id', 'cons_email_id', 'chapter_id'], inplace=True)
ppl_df
## CONVERT to DATETIME (this can take a long time)
ppl_df[['create_dt','modified_dt']] = ppl_df[['create_dt','modified_dt']].apply(pd.to_datetime)
ppl_df.info()
ppl_df
# Write to CSV file
ppl_df.to_csv('people.csv', index=False, header=ppl_df.columns)
ppl_df.info()
```
## Create AQUISITION_FACTS.csv
```
# Build the acquisitions-per-day table from each person's create date.
# NOTE(review): "aquisition" spelling is kept as-is — it appears to be the
# required output column/file name; confirm against the spec before changing.
temp_df = pd.DataFrame(ppl_df['create_dt']) # Start a df with just the create date
temp_df.rename(columns = {'create_dt': 'aquisition_date'}, inplace=True) # rename the new column (for csv heading requirement)
temp_df['aquisitions']=1 # add a column of 1's to count while resampling
temp_df
aq_df = temp_df.resample('D', on='aquisition_date').sum() # resample rows by 'D'ay and sum the 1's
aq_df.reset_index(inplace=True) # reset the index to get rid of extra id column
aq_df
# Write to CSV file
aq_df.to_csv('aquisition_facts.csv', index=False, header=aq_df.columns)
aq_df['aquisitions'].sum()
```
# UNUSED Cells
```
# Scratch cells kept for reference (not part of the pipeline).
# Spot-check a single email id across the three frames.
search_id = 57
print(email_df[email_df['cons_email_id'] == search_id])
print(sub_df[sub_df['cons_email_id'] == search_id])
search_email_id = int(email_df[email_df['cons_email_id'] == search_id]['cons_id'])
people_df[people_df['cons_id'] == search_email_id]
## pandas JOIN example
# NOTE: re-binds the df2 name used earlier in the notebook.
df2 = pd.DataFrame({'Reg_no': ['11', '12', '13', '14', '15', '16'],
                    'Result1': ['77', '79', '96', '38', '54', '69']})
df1 = pd.DataFrame({'Reg_no': ['11', '12', '13'],
                    'Result2': ['72', '82', '92']})
final_df = df1.join(df2.set_index('Reg_no'), on="Reg_no")
print(df1)
print(df2)
print(final_df)
```
| github_jupyter |
```
import os
from argparse import Namespace
from collections import Counter
import json
import re
import string
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
from torch.nn import functional as F
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
from tqdm import tqdm_notebook
class Vocabulary(object):
    """Bijective mapping between text tokens and integer indices."""

    def __init__(self, token_to_idx=None):
        """
        Args:
            token_to_idx (dict): a pre-existing map of tokens to indices
        """
        self._token_to_idx = {} if token_to_idx is None else token_to_idx
        # Inverse map, kept in sync by add_token().
        self._idx_to_token = {i: t for t, i in self._token_to_idx.items()}

    def to_serializable(self):
        """Return a JSON-friendly dict capturing this vocabulary's state."""
        return {'token_to_idx': self._token_to_idx}

    @classmethod
    def from_serializable(cls, contents):
        """Rebuild a Vocabulary from a dict produced by to_serializable()."""
        return cls(**contents)

    def add_token(self, token):
        """Insert *token* if unseen and return its index.

        Args:
            token (str): the item to add into the Vocabulary
        Returns:
            index (int): the integer corresponding to the token
        """
        existing = self._token_to_idx.get(token)
        if existing is not None:
            return existing
        new_index = len(self._token_to_idx)
        self._token_to_idx[token] = new_index
        self._idx_to_token[new_index] = token
        return new_index

    def add_many(self, tokens):
        """Insert every token in *tokens*; return their indices in order.

        Args:
            tokens (list): a list of string tokens
        Returns:
            indices (list): indices corresponding to the tokens
        """
        return [self.add_token(t) for t in tokens]

    def lookup_token(self, token):
        """Return the index for *token* (raises KeyError if absent)."""
        return self._token_to_idx[token]

    def lookup_index(self, index):
        """Return the token stored at *index*.

        Raises:
            KeyError: if the index is not in the Vocabulary
        """
        try:
            return self._idx_to_token[index]
        except KeyError:
            raise KeyError("the index (%d) is not in the Vocabulary" % index)

    def __str__(self):
        return f"<Vocabulary(size={len(self)})>"

    def __len__(self):
        return len(self._token_to_idx)
class SequenceVocabulary(Vocabulary):
    """Vocabulary pre-seeded with the four special tokens used by sequence
    models: <MASK> (padding), <UNK> (out-of-vocabulary), <BEGIN>, and <END>.
    """

    def __init__(self, token_to_idx=None, unk_token="<UNK>",
                 mask_token="<MASK>", begin_seq_token="<BEGIN>",
                 end_seq_token="<END>"):
        super(SequenceVocabulary, self).__init__(token_to_idx)
        self._mask_token = mask_token
        self._unk_token = unk_token
        self._begin_seq_token = begin_seq_token
        self._end_seq_token = end_seq_token
        # Registration order fixes the indices for a fresh vocabulary:
        # mask=0, unk=1, begin=2, end=3.
        self.mask_index = self.add_token(self._mask_token)
        self.unk_index = self.add_token(self._unk_token)
        self.begin_seq_index = self.add_token(self._begin_seq_token)
        self.end_seq_index = self.add_token(self._end_seq_token)

    def to_serializable(self):
        """Extend the base serialization with the special-token strings."""
        contents = super(SequenceVocabulary, self).to_serializable()
        contents.update({'unk_token': self._unk_token,
                         'mask_token': self._mask_token,
                         'begin_seq_token': self._begin_seq_token,
                         'end_seq_token': self._end_seq_token})
        return contents

    def lookup_token(self, token):
        """Return the index for *token*, or unk_index for unseen tokens.

        Notes:
            `unk_index` needs to be >=0 (having been added into the Vocabulary)
            for the UNK functionality
        """
        if self.unk_index < 0:
            return self._token_to_idx[token]
        return self._token_to_idx.get(token, self.unk_index)
class NMTVectorizer(object):
    """ The Vectorizer which coordinates the Vocabularies and puts them to use"""
    def __init__(self, source_vocab, target_vocab, max_source_length, max_target_length):
        """
        Args:
            source_vocab (SequenceVocabulary): maps source words to integers
            target_vocab (SequenceVocabulary): maps target words to integers
            max_source_length (int): the longest sequence in the source dataset
            max_target_length (int): the longest sequence in the target dataset
        """
        self.source_vocab = source_vocab
        self.target_vocab = target_vocab
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length

    def _vectorize(self, indices, vector_length=-1, mask_index=0):
        """Vectorize the provided indices

        Args:
            indices (list): a list of integers that represent a sequence
            vector_length (int): an argument for forcing the length of index vector;
                -1 means "exactly len(indices)" (no padding)
            mask_index (int): the mask_index to use; almost always 0
        Returns:
            numpy.ndarray (int64): the indices right-padded with mask_index
        """
        if vector_length < 0:
            vector_length = len(indices)
        vector = np.zeros(vector_length, dtype=np.int64)
        vector[:len(indices)] = indices
        vector[len(indices):] = mask_index  # pad the tail with the mask token
        return vector

    def _get_source_indices(self, text):
        """Return the vectorized source text

        Args:
            text (str): the source text; tokens should be separated by spaces
        Returns:
            indices (list): list of integers representing the text,
                wrapped in <BEGIN> ... <END> markers
        """
        indices = [self.source_vocab.begin_seq_index]
        indices.extend(self.source_vocab.lookup_token(token) for token in text.split(" "))
        indices.append(self.source_vocab.end_seq_index)
        return indices

    def _get_target_indices(self, text):
        """Return the vectorized target text

        Args:
            text (str): the target text; tokens should be separated by spaces
        Returns:
            a tuple: (x_indices, y_indices)
                x_indices (list): list of integers representing the observations in target decoder
                y_indices (list): list of integers representing predictions in target decoder
        """
        indices = [self.target_vocab.lookup_token(token) for token in text.split(" ")]
        # x starts with <BEGIN>; y ends with <END> (teacher-forcing pair).
        x_indices = [self.target_vocab.begin_seq_index] + indices
        y_indices = indices + [self.target_vocab.end_seq_index]
        return x_indices, y_indices

    def vectorize(self, source_text, target_text, use_dataset_max_lengths=True):
        """Return the vectorized source and target text

        The vectorized source text is just a single vector.
        The vectorized target text is split into two vectors in a similar style to
        the surname modeling in Chapter 7.
        At each timestep, the first vector is the observation and the second vector is the target.

        Args:
            source_text (str): text from the source language
            target_text (str): text from the target language
            use_dataset_max_lengths (bool): whether to use the global max vector lengths
        Returns:
            The vectorized data point as a dictionary with the keys:
                source_vector, target_x_vector, target_y_vector, source_length
        """
        source_vector_length = -1
        target_vector_length = -1
        if use_dataset_max_lengths:
            # +2 for <BEGIN>/<END> on the source; +1 for the single marker
            # added to each of the two target vectors
            source_vector_length = self.max_source_length + 2
            target_vector_length = self.max_target_length + 1
        source_indices = self._get_source_indices(source_text)
        source_vector = self._vectorize(source_indices,
                                        vector_length=source_vector_length,
                                        mask_index=self.source_vocab.mask_index)
        target_x_indices, target_y_indices = self._get_target_indices(target_text)
        target_x_vector = self._vectorize(target_x_indices,
                                          vector_length=target_vector_length,
                                          mask_index=self.target_vocab.mask_index)
        target_y_vector = self._vectorize(target_y_indices,
                                          vector_length=target_vector_length,
                                          mask_index=self.target_vocab.mask_index)
        return {"source_vector": source_vector,
                "target_x_vector": target_x_vector,
                "target_y_vector": target_y_vector,
                "source_length": len(source_indices)}

    @classmethod
    def from_dataframe(cls, bitext_df):
        """Instantiate the vectorizer from the dataset dataframe

        Args:
            bitext_df (pandas.DataFrame): the parallel text dataset
        Returns:
            an instance of the NMTVectorizer
        """
        source_vocab = SequenceVocabulary()
        target_vocab = SequenceVocabulary()
        max_source_length = 0
        max_target_length = 0
        # Single pass: grow both vocabularies and track the longest sequences.
        for _, row in bitext_df.iterrows():
            source_tokens = row["source_language"].split(" ")
            if len(source_tokens) > max_source_length:
                max_source_length = len(source_tokens)
            for token in source_tokens:
                source_vocab.add_token(token)
            target_tokens = row["target_language"].split(" ")
            if len(target_tokens) > max_target_length:
                max_target_length = len(target_tokens)
            for token in target_tokens:
                target_vocab.add_token(token)
        return cls(source_vocab, target_vocab, max_source_length, max_target_length)

    @classmethod
    def from_serializable(cls, contents):
        """Rebuild the vectorizer from a dict produced by to_serializable()."""
        source_vocab = SequenceVocabulary.from_serializable(contents["source_vocab"])
        target_vocab = SequenceVocabulary.from_serializable(contents["target_vocab"])
        return cls(source_vocab=source_vocab,
                   target_vocab=target_vocab,
                   max_source_length=contents["max_source_length"],
                   max_target_length=contents["max_target_length"])

    def to_serializable(self):
        """Return a JSON-serializable dict of the vectorizer state."""
        return {"source_vocab": self.source_vocab.to_serializable(),
                "target_vocab": self.target_vocab.to_serializable(),
                "max_source_length": self.max_source_length,
                "max_target_length": self.max_target_length}
class NMTDataset(Dataset):
    """PyTorch Dataset over a parallel-text dataframe with train/val/test splits."""
    def __init__(self, text_df, vectorizer):
        """
        Args:
            text_df (pandas.DataFrame): the dataset
            vectorizer (NMTVectorizer): vectorizer instantiated from the dataset
        """
        self.text_df = text_df
        self._vectorizer = vectorizer
        # Pre-slice the three splits once; set_split() just switches pointers.
        self.train_df = self.text_df[self.text_df.split=='train']
        self.train_size = len(self.train_df)
        self.val_df = self.text_df[self.text_df.split=='val']
        self.validation_size = len(self.val_df)
        self.test_df = self.text_df[self.text_df.split=='test']
        self.test_size = len(self.test_df)
        self._lookup_dict = {'train': (self.train_df, self.train_size),
                             'val': (self.val_df, self.validation_size),
                             'test': (self.test_df, self.test_size)}
        self.set_split('train')

    @classmethod
    def load_dataset_and_make_vectorizer(cls, dataset_csv):
        """Load dataset and make a new vectorizer from scratch

        Args:
            dataset_csv (str): location of the dataset
        Returns:
            an instance of NMTDataset
        """
        text_df = pd.read_csv(dataset_csv)
        # The vocabularies are built from the training split only.
        train_subset = text_df[text_df.split=='train']
        return cls(text_df, NMTVectorizer.from_dataframe(train_subset))

    @classmethod
    def load_dataset_and_load_vectorizer(cls, dataset_csv, vectorizer_filepath):
        """Load dataset and the corresponding vectorizer.
        Used in the case the vectorizer has been cached for re-use

        Args:
            dataset_csv (str): location of the dataset
            vectorizer_filepath (str): location of the saved vectorizer
        Returns:
            an instance of NMTDataset
        """
        text_df = pd.read_csv(dataset_csv)
        vectorizer = cls.load_vectorizer_only(vectorizer_filepath)
        return cls(text_df, vectorizer)

    @staticmethod
    def load_vectorizer_only(vectorizer_filepath):
        """a static method for loading the vectorizer from file

        Args:
            vectorizer_filepath (str): the location of the serialized vectorizer
        Returns:
            an instance of NMTVectorizer
        """
        with open(vectorizer_filepath) as fp:
            return NMTVectorizer.from_serializable(json.load(fp))

    def save_vectorizer(self, vectorizer_filepath):
        """saves the vectorizer to disk using json

        Args:
            vectorizer_filepath (str): the location to save the vectorizer
        """
        with open(vectorizer_filepath, "w") as fp:
            json.dump(self._vectorizer.to_serializable(), fp)

    def get_vectorizer(self):
        """ returns the vectorizer """
        return self._vectorizer

    def set_split(self, split="train"):
        """Select which split ('train', 'val', or 'test') __getitem__ reads from."""
        self._target_split = split
        self._target_df, self._target_size = self._lookup_dict[split]

    def __len__(self):
        return self._target_size

    def __getitem__(self, index):
        """the primary entry point method for PyTorch datasets

        Args:
            index (int): the index to the data point
        Returns:
            a dictionary holding the data point with keys:
                x_source, x_target, y_target, x_source_length
        """
        row = self._target_df.iloc[index]
        vector_dict = self._vectorizer.vectorize(row.source_language, row.target_language)
        return {"x_source": vector_dict["source_vector"],
                "x_target": vector_dict["target_x_vector"],
                "y_target": vector_dict["target_y_vector"],
                "x_source_length": vector_dict["source_length"]}

    def get_num_batches(self, batch_size):
        """Given a batch size, return the number of batches in the dataset

        Args:
            batch_size (int)
        Returns:
            number of batches in the dataset (drop-last integer division)
        """
        return len(self) // batch_size
def generate_nmt_batches(dataset, batch_size, shuffle=True,
                         drop_last=True, device="cpu"):
    """A generator function which wraps the PyTorch DataLoader. The NMT Version.

    Yields dict batches whose tensors are reordered by descending
    'x_source_length' (the order pack_padded_sequence expects) and moved
    to *device*.
    """
    dataloader = DataLoader(dataset=dataset, batch_size=batch_size,
                            shuffle=shuffle, drop_last=drop_last)
    for data_dict in dataloader:
        # Sort every tensor in the batch by descending source length.
        lengths = data_dict['x_source_length'].numpy()
        sorted_length_indices = lengths.argsort()[::-1].tolist()
        # Use the tensor from items() directly instead of the original's
        # redundant second data_dict[name] lookup.
        out_data_dict = {name: tensor[sorted_length_indices].to(device)
                         for name, tensor in data_dict.items()}
        yield out_data_dict
```
## Neural Machine Translation Model
Components:
1. NMTEncoder
- accepts as input a source sequence to be embedded and fed through a bi-directional GRU
2. NMTDecoder
- using the encoder state and attention, the decoder generates a new sequence
- the ground truth target sequence is used as input to the decoder at each time step
- an alternative formulation would allow some of the decoder's own choices to be used as input
- this is referred to as curriculum learning, learning to search
- TODO: Look up references for this. I believe Bengio has a paper from the image captioning competitions. Hal Daume has tons on this and is the main NLP guy for it.
3. NMTModel
- Combines the encoder and decoder into a single class.
```
class NMTEncoder(nn.Module):
    """Embeds a source token sequence and encodes it with a bidirectional GRU."""
    def __init__(self, num_embeddings, embedding_size, rnn_hidden_size):
        """
        Args:
            num_embeddings (int): number of embeddings is the size of source vocabulary
            embedding_size (int): size of the embedding vectors
            rnn_hidden_size (int): size of the RNN hidden state vectors
        """
        super(NMTEncoder, self).__init__()
        # padding_idx=0 — presumably matches the vocabulary's <MASK>/pad index,
        # so padded positions embed to a constant zero vector.
        self.source_embedding = nn.Embedding(num_embeddings, embedding_size, padding_idx=0)
        self.birnn = nn.GRU(embedding_size, rnn_hidden_size, bidirectional=True, batch_first=True)

    def forward(self, x_source, x_lengths):
        """The forward pass of the model

        Args:
            x_source (torch.Tensor): the input data tensor.
                x_source.shape is (batch, seq_size)
            x_lengths (torch.Tensor): a vector of lengths for each item in the batch
                NOTE(review): pack_padded_sequence is called without
                enforce_sorted=False, so the batch must already be sorted by
                descending length — confirm the batch generator does this.
        Returns:
            a tuple: x_unpacked (torch.Tensor), x_birnn_h (torch.Tensor)
                x_unpacked.shape = (batch, seq_size, rnn_hidden_size * 2)
                x_birnn_h.shape = (batch, rnn_hidden_size * 2)
        """
        x_embedded = self.source_embedding(x_source)
        # create PackedSequence; x_packed.data.shape=(number_items, embedding_size)
        x_packed = pack_padded_sequence(x_embedded, x_lengths.detach().cpu().numpy(),
                                        batch_first=True)
        # x_birnn_h.shape = (num_rnn, batch_size, feature_size)
        x_birnn_out, x_birnn_h = self.birnn(x_packed)
        # permute to (batch_size, num_rnn, feature_size)
        x_birnn_h = x_birnn_h.permute(1, 0, 2)
        # flatten features; reshape to (batch_size, num_rnn * feature_size)
        # (recall: -1 takes the remaining positions,
        # flattening the two RNN hidden vectors into 1)
        x_birnn_h = x_birnn_h.contiguous().view(x_birnn_h.size(0), -1)
        x_unpacked, _ = pad_packed_sequence(x_birnn_out, batch_first=True)
        return x_unpacked, x_birnn_h
def verbose_attention(encoder_state_vectors, query_vector):
    """A descriptive version of the neural attention mechanism

    Args:
        encoder_state_vectors (torch.Tensor): 3dim tensor from bi-GRU in encoder
        query_vector (torch.Tensor): hidden state in decoder GRU
    Returns:
        a tuple: (context_vectors, vector_probabilities, vector_scores)
    """
    batch_size, num_vectors, vector_size = encoder_state_vectors.size()
    # Dot product of the query against every encoder state.
    expanded_query = query_vector.view(batch_size, 1, vector_size)
    vector_scores = (encoder_state_vectors * expanded_query).sum(dim=2)
    vector_probabilities = F.softmax(vector_scores, dim=1)
    # Probability-weighted sum of the encoder states -> context vector.
    weights = vector_probabilities.view(batch_size, num_vectors, 1)
    context_vectors = (encoder_state_vectors * weights).sum(dim=1)
    return context_vectors, vector_probabilities, vector_scores
def terse_attention(encoder_state_vectors, query_vector):
    """A shorter and more optimized version of the neural attention mechanism

    Args:
        encoder_state_vectors (torch.Tensor): 3dim tensor from bi-GRU in encoder,
            shape (batch, num_vectors, vector_size)
        query_vector (torch.Tensor): hidden state, shape (batch, vector_size)
    Returns:
        a tuple: (context_vectors, vector_probabilities)
    """
    # (batch, num_vectors, 1) -> (batch, num_vectors). The original used a bare
    # .squeeze(), which also collapsed the batch (or num_vectors) dimension
    # when it was 1 and then crashed on unsqueeze(dim=2) below; squeezing the
    # explicit dim fixes batch_size == 1 while matching the old behavior for
    # larger batches.
    vector_scores = torch.matmul(encoder_state_vectors,
                                 query_vector.unsqueeze(dim=2)).squeeze(dim=2)
    vector_probabilities = F.softmax(vector_scores, dim=-1)
    # (batch, vector_size, num_vectors) @ (batch, num_vectors, 1) -> (batch, vector_size)
    context_vectors = torch.matmul(encoder_state_vectors.transpose(-2, -1),
                                   vector_probabilities.unsqueeze(dim=2)).squeeze(dim=2)
    return context_vectors, vector_probabilities
class NMTDecoder(nn.Module):
    """GRU decoder with Luong-style attention and scheduled sampling."""
    def __init__(self, num_embeddings, embedding_size, rnn_hidden_size, bos_index):
        """
        Args:
            num_embeddings (int): number of embeddings is also the number of
                unique words in target vocabulary
            embedding_size (int): the embedding vector size
            rnn_hidden_size (int): size of the hidden rnn state
            bos_index (int): begin-of-sequence index
        """
        super(NMTDecoder, self).__init__()
        self._rnn_hidden_size = rnn_hidden_size
        self.target_embedding = nn.Embedding(num_embeddings=num_embeddings,
                                             embedding_dim=embedding_size,
                                             padding_idx=0)
        # GRU input = previous word embedding concatenated with the
        # attention context from the previous step
        self.gru_cell = nn.GRUCell(embedding_size + rnn_hidden_size,
                                   rnn_hidden_size)
        # maps the encoder's final hidden state into the decoder's space
        self.hidden_map = nn.Linear(rnn_hidden_size, rnn_hidden_size)
        # classifies over the target vocabulary from [context; hidden]
        self.classifier = nn.Linear(rnn_hidden_size * 2, num_embeddings)
        self.bos_index = bos_index
        # sharpens the softmax when sampling the next token
        self._sampling_temperature = 3
    def _init_indices(self, batch_size):
        """ return the BEGIN-OF-SEQUENCE index vector """
        return torch.ones(batch_size, dtype=torch.int64) * self.bos_index
    def _init_context_vectors(self, batch_size):
        """ return a zeros vector for initializing the context """
        return torch.zeros(batch_size, self._rnn_hidden_size)
    def forward(self, encoder_state, initial_hidden_state, target_sequence, sample_probability=0.0):
        """The forward pass of the model
        Args:
            encoder_state (torch.Tensor): the output of the NMTEncoder
            initial_hidden_state (torch.Tensor): The last hidden state in the NMTEncoder
            target_sequence (torch.Tensor): the target text data tensor
            sample_probability (float): the schedule sampling parameter
                probability of using model's predictions at each decoder step
        Returns:
            output_vectors (torch.Tensor): prediction vectors at each output step
        Raises:
            ValueError: if target_sequence is None
        """
        if target_sequence is None:
            # BUGFIX: the original set sample_probability = 1.0 here and then
            # crashed with a NameError because output_sequence_size was never
            # assigned; fail fast with a clear message instead.
            raise ValueError("target_sequence is required to size the output; "
                             "got None")
        # The batch comes in as (Batch, Seq); we iterate over time steps,
        # so permute it to (Seq, Batch)
        target_sequence = target_sequence.permute(1, 0)
        output_sequence_size = target_sequence.size(0)
        # use the provided encoder hidden state as the initial hidden state
        h_t = self.hidden_map(initial_hidden_state)
        batch_size = encoder_state.size(0)
        # initialize context vectors to zeros
        context_vectors = self._init_context_vectors(batch_size)
        # initialize first y_t word as BOS
        y_t_index = self._init_indices(batch_size)
        h_t = h_t.to(encoder_state.device)
        y_t_index = y_t_index.to(encoder_state.device)
        context_vectors = context_vectors.to(encoder_state.device)
        output_vectors = []
        self._cached_p_attn = []
        self._cached_ht = []
        self._cached_decoder_state = encoder_state.cpu().detach().numpy()
        for i in range(output_sequence_size):
            # Scheduled sampling: with probability sample_probability, feed
            # the model's own previous prediction instead of the gold token
            use_sample = np.random.random() < sample_probability
            if not use_sample:
                y_t_index = target_sequence[i]
            # Step 1: Embed word and concat with previous context
            y_input_vector = self.target_embedding(y_t_index)
            rnn_input = torch.cat([y_input_vector, context_vectors], dim=1)
            # Step 2: Make a GRU step, getting a new hidden vector
            h_t = self.gru_cell(rnn_input, h_t)
            self._cached_ht.append(h_t.cpu().detach().numpy())
            # Step 3: Use the current hidden to attend to the encoder state
            context_vectors, p_attn, _ = verbose_attention(encoder_state_vectors=encoder_state,
                                                           query_vector=h_t)
            # auxiliary: cache the attention probabilities for visualization
            self._cached_p_attn.append(p_attn.cpu().detach().numpy())
            # Step 4: Use the current hidden and context vectors to make a
            # prediction for the next word
            prediction_vector = torch.cat((context_vectors, h_t), dim=1)
            # BUGFIX: F.dropout defaults to training=True, which kept dropout
            # active at evaluation time; gate it on self.training instead.
            score_for_y_t_index = self.classifier(
                F.dropout(prediction_vector, 0.3, training=self.training))
            if use_sample:
                # temperature-sharpened multinomial sampling of the next token
                p_y_t_index = F.softmax(score_for_y_t_index * self._sampling_temperature, dim=1)
                # _, y_t_index = torch.max(p_y_t_index, 1)
                y_t_index = torch.multinomial(p_y_t_index, 1).squeeze()
            # auxiliary: collect the prediction scores
            output_vectors.append(score_for_y_t_index)
        # (Seq, Batch, Vocab) -> (Batch, Seq, Vocab)
        output_vectors = torch.stack(output_vectors).permute(1, 0, 2)
        return output_vectors
class NMTModel(nn.Module):
    """ The Neural Machine Translation Model: encoder + attentive decoder. """
    def __init__(self, source_vocab_size, source_embedding_size,
                 target_vocab_size, target_embedding_size, encoding_size,
                 target_bos_index):
        """
        Args:
            source_vocab_size (int): number of unique words in source language
            source_embedding_size (int): size of the source embedding vectors
            target_vocab_size (int): number of unique words in target language
            target_embedding_size (int): size of the target embedding vectors
            encoding_size (int): the size of the encoder RNN
            target_bos_index (int): index of the target begin-of-sequence token
        """
        super(NMTModel, self).__init__()
        # the bi-directional encoder yields states of twice its hidden size,
        # so the decoder operates at 2 * encoding_size
        decoder_size = encoding_size * 2
        self.encoder = NMTEncoder(num_embeddings=source_vocab_size,
                                  embedding_size=source_embedding_size,
                                  rnn_hidden_size=encoding_size)
        self.decoder = NMTDecoder(num_embeddings=target_vocab_size,
                                  embedding_size=target_embedding_size,
                                  rnn_hidden_size=decoder_size,
                                  bos_index=target_bos_index)
    def forward(self, x_source, x_source_lengths, target_sequence, sample_probability=0.0):
        """Encode the source batch, then decode with attention.

        Args:
            x_source (torch.Tensor): source text tensor,
                shape (batch, vectorizer.max_source_length)
            x_source_lengths (torch.Tensor): lengths of sequences in x_source
            target_sequence (torch.Tensor): target text tensor
            sample_probability (float): scheduled-sampling probability of
                using the model's own predictions at each decoder step
        Returns:
            decoded_states (torch.Tensor): prediction vectors per output step
        """
        enc_states, final_hidden = self.encoder(x_source, x_source_lengths)
        return self.decoder(encoder_state=enc_states,
                            initial_hidden_state=final_hidden,
                            target_sequence=target_sequence,
                            sample_probability=sample_probability)
```
## Training Routine and Bookkeeping Functions
```
def set_seed_everywhere(seed, cuda):
    """Seed every RNG used here for reproducibility.

    Args:
        seed (int): the seed value
        cuda (bool): also seed all CUDA devices when True
    """
    for seeder in (np.random.seed, torch.manual_seed):
        seeder(seed)
    if cuda:
        torch.cuda.manual_seed_all(seed)
def handle_dirs(dirpath):
    """Ensure the output directory *dirpath* (and its parents) exists."""
    if os.path.exists(dirpath):
        return
    os.makedirs(dirpath)
def make_train_state(args):
    """Build the fresh bookkeeping dict used by the training loop.

    Args:
        args: main argument namespace (reads learning_rate, model_state_file)
    Returns:
        dict: initial training-state values
    """
    state = {
        'stop_early': False,
        'early_stopping_step': 0,
        'early_stopping_best_val': 1e8,
        'learning_rate': args.learning_rate,
        'epoch_index': 0,
        'model_filename': args.model_state_file,
    }
    # per-epoch loss/accuracy histories for each split
    for split in ('train', 'val'):
        state[split + '_loss'] = []
        state[split + '_acc'] = []
    # filled in once at the end of training
    state['test_loss'] = -1
    state['test_acc'] = -1
    return state
def update_train_state(args, model, train_state):
    """Handle the training state updates.

    Saves a checkpoint on the first epoch and whenever validation loss
    reaches a new best; otherwise advances an early-stopping counter.

    :param args: main arguments (reads early_stopping_criteria)
    :param model: model to train
    :param train_state: a dictionary representing the training state values
    :returns: the updated train_state
    """
    epoch = train_state['epoch_index']
    if epoch == 0:
        # always keep at least one checkpoint
        torch.save(model.state_dict(), train_state['model_filename'])
        train_state['stop_early'] = False
        return train_state

    # compare the last two validation losses
    prev_loss, curr_loss = train_state['val_loss'][-2:]
    if curr_loss >= prev_loss:
        # no improvement over the previous epoch
        train_state['early_stopping_step'] += 1
    else:
        if curr_loss < train_state['early_stopping_best_val']:
            # new best: checkpoint the model and remember the loss
            torch.save(model.state_dict(), train_state['model_filename'])
            train_state['early_stopping_best_val'] = curr_loss
        # any improvement resets the early-stopping counter
        train_state['early_stopping_step'] = 0

    train_state['stop_early'] = \
        train_state['early_stopping_step'] >= args.early_stopping_criteria
    return train_state
def normalize_sizes(y_pred, y_true):
    """Flatten sequence dims so losses/metrics see 2D preds and 1D targets.

    Args:
        y_pred (torch.Tensor): model output; (B, S, C) is reshaped to (B*S, C)
        y_true (torch.Tensor): targets; (B, S) is reshaped to (B*S,)
    Returns:
        tuple: (y_pred, y_true) with normalized shapes
    """
    if y_pred.dim() == 3:
        y_pred = y_pred.contiguous().view(-1, y_pred.size(2))
    if y_true.dim() == 2:
        y_true = y_true.contiguous().view(-1)
    return y_pred, y_true
def compute_accuracy(y_pred, y_true, mask_index):
    """Percentage of non-masked positions predicted correctly.

    Args:
        y_pred (torch.Tensor): class scores, (B, S, C) or (N, C)
        y_true (torch.Tensor): target indices, (B, S) or (N,)
        mask_index (int): target value to ignore (padding)
    Returns:
        float: accuracy in [0, 100]; 0.0 when every position is masked
    """
    y_pred, y_true = normalize_sizes(y_pred, y_true)
    _, y_pred_indices = y_pred.max(dim=1)
    correct_indices = torch.eq(y_pred_indices, y_true).float()
    valid_indices = torch.ne(y_true, mask_index).float()
    n_correct = (correct_indices * valid_indices).sum().item()
    n_valid = valid_indices.sum().item()
    # BUGFIX: an all-masked batch previously raised ZeroDivisionError
    if n_valid == 0:
        return 0.0
    return n_correct / n_valid * 100
def sequence_loss(y_pred, y_true, mask_index):
    """Masked cross-entropy over a whole sequence batch.

    Args:
        y_pred (torch.Tensor): class scores, (B, S, C) or (N, C)
        y_true (torch.Tensor): target indices, (B, S) or (N,)
        mask_index (int): target value to ignore (padding)
    Returns:
        torch.Tensor: scalar cross-entropy loss
    """
    preds, targets = normalize_sizes(y_pred, y_true)
    return F.cross_entropy(preds, targets, ignore_index=mask_index)
# Hyperparameters and bookkeeping for the NMT training run.
# NOTE(review): `Namespace`, `NMTDataset`, `generate_nmt_batches`, `optim`
# and `tqdm_notebook` are assumed to be imported/defined earlier in the
# notebook -- only `tqdm` appears in the visible import header; confirm.
args = Namespace(dataset_csv="data/nmt/simplest_eng_fra.csv",
                 vectorizer_file="vectorizer.json",
                 model_state_file="model.pth",
                 save_dir="model_storage/ch8/nmt_luong_sampling",
                 reload_from_files=False,
                 expand_filepaths_to_save_dir=True,
                 cuda=True,
                 seed=1337,
                 learning_rate=5e-4,
                 batch_size=32,
                 num_epochs=100,
                 early_stopping_criteria=5,
                 source_embedding_size=24,
                 target_embedding_size=24,
                 encoding_size=32,
                 catch_keyboard_interrupt=True)
if args.expand_filepaths_to_save_dir:
    args.vectorizer_file = os.path.join(args.save_dir,
                                        args.vectorizer_file)
    args.model_state_file = os.path.join(args.save_dir,
                                         args.model_state_file)
    print("Expanded filepaths: ")
    print("\t{}".format(args.vectorizer_file))
    print("\t{}".format(args.model_state_file))
# Check CUDA
if not torch.cuda.is_available():
    args.cuda = False
args.device = torch.device("cuda" if args.cuda else "cpu")
print("Using CUDA: {}".format(args.cuda))
# Set seed for reproducibility
set_seed_everywhere(args.seed, args.cuda)
# handle dirs
handle_dirs(args.save_dir)
if args.reload_from_files and os.path.exists(args.vectorizer_file):
    # training from a checkpoint
    dataset = NMTDataset.load_dataset_and_load_vectorizer(args.dataset_csv,
                                                          args.vectorizer_file)
else:
    # create dataset and vectorizer
    dataset = NMTDataset.load_dataset_and_make_vectorizer(args.dataset_csv)
    dataset.save_vectorizer(args.vectorizer_file)
vectorizer = dataset.get_vectorizer()
model = NMTModel(source_vocab_size=len(vectorizer.source_vocab),
                 source_embedding_size=args.source_embedding_size,
                 target_vocab_size=len(vectorizer.target_vocab),
                 target_embedding_size=args.target_embedding_size,
                 encoding_size=args.encoding_size,
                 target_bos_index=vectorizer.target_vocab.begin_seq_index)
if args.reload_from_files and os.path.exists(args.model_state_file):
    model.load_state_dict(torch.load(args.model_state_file))
    print("Reloaded model")
else:
    print("New model")
model = model.to(args.device)
optimizer = optim.Adam(model.parameters(), lr=args.learning_rate)
# halve the learning rate when validation loss plateaus
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer=optimizer,
                                                 mode='min', factor=0.5,
                                                 patience=1)
mask_index = vectorizer.target_vocab.mask_index
train_state = make_train_state(args)
# progress bars: one for epochs, one per split
epoch_bar = tqdm_notebook(desc='training routine',
                          total=args.num_epochs,
                          position=0)
dataset.set_split('train')
train_bar = tqdm_notebook(desc='split=train',
                          total=dataset.get_num_batches(args.batch_size),
                          position=1,
                          leave=True)
dataset.set_split('val')
val_bar = tqdm_notebook(desc='split=val',
                        total=dataset.get_num_batches(args.batch_size),
                        position=1,
                        leave=True)
try:
    for epoch_index in range(args.num_epochs):
        # scheduled-sampling probability ramps with epoch
        # NOTE(review): (20 + epoch) / num_epochs exceeds 1.0 after 80% of
        # the epochs -- harmless (compared against a uniform draw) but odd.
        sample_probability = (20 + epoch_index) / args.num_epochs
        train_state['epoch_index'] = epoch_index
        # Iterate over training dataset
        # setup: batch generator, set loss and acc to 0, set train mode on
        dataset.set_split('train')
        batch_generator = generate_nmt_batches(dataset,
                                               batch_size=args.batch_size,
                                               device=args.device)
        running_loss = 0.0
        running_acc = 0.0
        model.train()
        for batch_index, batch_dict in enumerate(batch_generator):
            # the training routine is these 5 steps:
            # --------------------------------------
            # step 1. zero the gradients
            optimizer.zero_grad()
            # step 2. compute the output
            y_pred = model(batch_dict['x_source'],
                           batch_dict['x_source_length'],
                           batch_dict['x_target'],
                           sample_probability=sample_probability)
            # step 3. compute the loss
            loss = sequence_loss(y_pred, batch_dict['y_target'], mask_index)
            # step 4. use loss to produce gradients
            loss.backward()
            # step 5. use optimizer to take gradient step
            optimizer.step()
            # -----------------------------------------
            # compute the running loss and running accuracy
            running_loss += (loss.item() - running_loss) / (batch_index + 1)
            acc_t = compute_accuracy(y_pred, batch_dict['y_target'], mask_index)
            running_acc += (acc_t - running_acc) / (batch_index + 1)
            # update bar
            train_bar.set_postfix(loss=running_loss, acc=running_acc,
                                  epoch=epoch_index)
            train_bar.update()
        train_state['train_loss'].append(running_loss)
        train_state['train_acc'].append(running_acc)
        # Iterate over val dataset
        # setup: batch generator, set loss and acc to 0; set eval mode on
        dataset.set_split('val')
        batch_generator = generate_nmt_batches(dataset,
                                               batch_size=args.batch_size,
                                               device=args.device)
        running_loss = 0.
        running_acc = 0.
        model.eval()
        for batch_index, batch_dict in enumerate(batch_generator):
            # compute the output
            y_pred = model(batch_dict['x_source'],
                           batch_dict['x_source_length'],
                           batch_dict['x_target'],
                           sample_probability=sample_probability)
            # step 3. compute the loss
            loss = sequence_loss(y_pred, batch_dict['y_target'], mask_index)
            # compute the running loss and accuracy
            running_loss += (loss.item() - running_loss) / (batch_index + 1)
            acc_t = compute_accuracy(y_pred, batch_dict['y_target'], mask_index)
            running_acc += (acc_t - running_acc) / (batch_index + 1)
            # Update bar
            val_bar.set_postfix(loss=running_loss, acc=running_acc,
                                epoch=epoch_index)
            val_bar.update()
        train_state['val_loss'].append(running_loss)
        train_state['val_acc'].append(running_acc)
        # checkpoint / early-stopping bookkeeping, then LR scheduling
        train_state = update_train_state(args=args, model=model,
                                         train_state=train_state)
        scheduler.step(train_state['val_loss'][-1])
        if train_state['stop_early']:
            break
        train_bar.n = 0
        val_bar.n = 0
        epoch_bar.set_postfix(best_val=train_state['early_stopping_best_val'])
        epoch_bar.update()
except KeyboardInterrupt:
    print("Exiting loop")
from nltk.translate import bleu_score
import seaborn as sns
import matplotlib.pyplot as plt
# shared smoothing function for sentence-level BLEU (avoids zero n-gram counts)
chencherry = bleu_score.SmoothingFunction()
def sentence_from_indices(indices, vocab, strict=True, return_string=True):
    """Convert a sequence of vocabulary indices back into a sentence.

    Args:
        indices (iterable of int): token indices to decode
        vocab: vocabulary exposing lookup_index, begin_seq_index, end_seq_index
        strict (bool): skip BOS tokens and stop at the first EOS when True
        return_string (bool): join tokens with spaces when True
    Returns:
        str or list of str: the decoded sentence
    """
    # NOTE: the original built an unused `ignore_indices` set; mask/padding
    # tokens are still emitted here -- only BOS/EOS are special-cased.
    out = []
    for index in indices:
        if index == vocab.begin_seq_index and strict:
            continue
        elif index == vocab.end_seq_index and strict:
            break
        else:
            out.append(vocab.lookup_index(index))
    if return_string:
        return " ".join(out)
    else:
        return out
class NMTSampler:
    """Runs a trained NMTModel on batches and decodes/scores its outputs."""
    def __init__(self, vectorizer, model):
        # vectorizer: supplies source/target vocabularies for decoding
        # model: a trained NMTModel
        self.vectorizer = vectorizer
        self.model = model
    def apply_to_batch(self, batch_dict):
        """Run the model on one batch, caching predictions and attention."""
        self._last_batch = batch_dict
        y_pred = self.model(x_source=batch_dict['x_source'],
                            x_source_lengths=batch_dict['x_source_length'],
                            target_sequence=batch_dict['x_target'])
        self._last_batch['y_pred'] = y_pred
        # decoder caches per-step attention; stack to (batch, step, src_pos)
        attention_batched = np.stack(self.model.decoder._cached_p_attn).transpose(1, 0, 2)
        self._last_batch['attention'] = attention_batched
    def _get_source_sentence(self, index, return_string=True):
        """Decode the source sentence for one batch item."""
        indices = self._last_batch['x_source'][index].cpu().detach().numpy()
        vocab = self.vectorizer.source_vocab
        return sentence_from_indices(indices, vocab, return_string=return_string)
    def _get_reference_sentence(self, index, return_string=True):
        """Decode the gold target sentence for one batch item."""
        indices = self._last_batch['y_target'][index].cpu().detach().numpy()
        vocab = self.vectorizer.target_vocab
        return sentence_from_indices(indices, vocab, return_string=return_string)
    def _get_sampled_sentence(self, index, return_string=True):
        """Decode the model's argmax prediction for one batch item."""
        _, all_indices = torch.max(self._last_batch['y_pred'], dim=2)
        sentence_indices = all_indices[index].cpu().detach().numpy()
        vocab = self.vectorizer.target_vocab
        return sentence_from_indices(sentence_indices, vocab, return_string=return_string)
    def get_ith_item(self, index, return_string=True):
        """Bundle source/reference/sampled/attention plus a BLEU-4 score.

        NOTE(review): the token lists are joined into single strings before
        sentence_bleu, so BLEU is computed over characters rather than word
        tokens -- confirm this is intended.
        """
        output = {"source": self._get_source_sentence(index, return_string=return_string),
                  "reference": self._get_reference_sentence(index, return_string=return_string),
                  "sampled": self._get_sampled_sentence(index, return_string=return_string),
                  "attention": self._last_batch['attention'][index]}
        reference = output['reference']
        hypothesis = output['sampled']
        if not return_string:
            reference = " ".join(reference)
            hypothesis = " ".join(hypothesis)
        output['bleu-4'] = bleu_score.sentence_bleu(references=[reference],
                                                    hypothesis=hypothesis,
                                                    smoothing_function=chencherry.method1)
        return output
# Evaluate on the test split: BLEU for every item of every batch.
model = model.eval().to(args.device)
sampler = NMTSampler(vectorizer, model)
dataset.set_split('test')
batch_generator = generate_nmt_batches(dataset,
                                       batch_size=args.batch_size,
                                       device=args.device)
test_results = []
for batch_dict in batch_generator:
    sampler.apply_to_batch(batch_dict)
    for i in range(args.batch_size):
        test_results.append(sampler.get_ith_item(i, False))
plt.hist([r['bleu-4'] for r in test_results], bins=100);
np.mean([r['bleu-4'] for r in test_results]), np.median([r['bleu-4'] for r in test_results])
# Inspect one validation batch and keep the high-BLEU items.
dataset.set_split('val')
batch_generator = generate_nmt_batches(dataset,
                                       batch_size=args.batch_size,
                                       device=args.device)
batch_dict = next(batch_generator)
model = model.eval().to(args.device)
sampler = NMTSampler(vectorizer, model)
sampler.apply_to_batch(batch_dict)
all_results = []
for i in range(args.batch_size):
    all_results.append(sampler.get_ith_item(i, False))
top_results = [x for x in all_results if x['bleu-4']>0.5]
len(top_results)
# Attention heatmaps for the best-scoring items
for sample in top_results:
    plt.figure()
    target_len = len(sample['sampled'])
    source_len = len(sample['source'])
    attention_matrix = sample['attention'][:target_len, :source_len+2].transpose()#[::-1]
    ax = sns.heatmap(attention_matrix, center=0.0)
    ylabs = ["<BOS>"]+sample['source']+["<EOS>"]
    #ylabs = sample['source']
    #ylabs = ylabs[::-1]
    ax.set_yticklabels(ylabs, rotation=0)
    ax.set_xticklabels(sample['sampled'], rotation=90)
    ax.set_xlabel("Target Sentence")
    ax.set_ylabel("Source Sentence\n\n")
def get_source_sentence(vectorizer, batch_dict, index):
    """Decode the source sentence at *index* from a batch."""
    row = batch_dict['x_source'][index].cpu().data.numpy()
    return sentence_from_indices(row, vectorizer.source_vocab)
def get_true_sentence(vectorizer, batch_dict, index):
    """Decode the reference (gold) target sentence at *index*."""
    targets = batch_dict['y_target'].cpu().data.numpy()
    return sentence_from_indices(targets[index], vectorizer.target_vocab)
def get_sampled_sentence(vectorizer, batch_dict, index):
    """Decode the model's sampled output at *index* (uses global `model`)."""
    # sample_probability=1.0: ignore teacher forcing entirely
    y_pred = model(x_source=batch_dict['x_source'],
                   x_source_lengths=batch_dict['x_source_length'],
                   target_sequence=batch_dict['x_target'],
                   sample_probability=1.0)
    sampled = torch.max(y_pred, dim=2)[1].cpu().data.numpy()
    return sentence_from_indices(sampled[index], vectorizer.target_vocab)
def get_all_sentences(vectorizer, batch_dict, index):
    """Gather source, gold, and sampled decodes for one batch item."""
    return {
        "source": get_source_sentence(vectorizer, batch_dict, index),
        "truth": get_true_sentence(vectorizer, batch_dict, index),
        "sampled": get_sampled_sentence(vectorizer, batch_dict, index),
    }
def sentence_from_indices(indices, vocab, strict=True):
    """Decode indices to a space-joined sentence, stopping at EOS.

    Note: this redefines the earlier four-argument version for the cells
    below; it always returns a string.

    Args:
        indices (iterable of int): token indices to decode
        vocab: vocabulary exposing lookup_index, begin_seq_index, end_seq_index
        strict (bool): skip BOS tokens and stop at the first EOS when True
    Returns:
        str: the decoded sentence
    """
    # The original computed an unused `ignore_indices` set and returned from
    # inside the loop at EOS; this version has a single exit point.
    out = []
    for index in indices:
        if index == vocab.begin_seq_index and strict:
            continue
        if index == vocab.end_seq_index and strict:
            break
        out.append(vocab.lookup_index(index))
    return " ".join(out)
# decode item 1 of the current validation batch; the bare name on the last
# line displays the dict as the notebook cell's output
results = get_all_sentences(vectorizer, batch_dict, 1)
results
```
| github_jupyter |
```
import jax.numpy as np
# from jax.config import config; config.update("jax_enable_x64", True)
from jax import jacfwd, jacrev, hessian
import numpy as onp
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import xara
import xaosim as xs
from xaosim.pupil import PHARO
from scipy.ndimage import fourier_shift
import morphine
import astropy.io.fits as fits
from tqdm import tqdm
import pickle, gzip, glob
%matplotlib inline
import matplotlib as mpl
mpl.style.use('seaborn-colorblind')
# colormap for phase maps; masked/NaN pixels rendered black
phasemap = mpl.cm.rainbow
phasemap.set_bad(color='k')
#To make sure we have always the same matplotlib settings
#(the ones in comments are the ipython notebook settings)
mpl.rcParams['figure.figsize']=(12.0,9.0) #(6.0,4.0)
mpl.rcParams['font.size']=20 #10
mpl.rcParams['savefig.dpi']= 200 #72
mpl.rcParams['axes.labelsize'] = 18
mpl.rcParams['axes.labelsize'] = 18
mpl.rcParams['xtick.labelsize'] = 14
mpl.rcParams['ytick.labelsize'] = 14
from matplotlib import rc
mpl.rcParams["font.family"] = "Times New Roman"
colours = mpl.rcParams['axes.prop_cycle'].by_key()['color']
from astropy import units as units
# short aliases for the jax.numpy FFT helpers used below
fftshift = np.fft.fftshift
fft = np.fft.fft2
ifft = np.fft.ifft2
fftfreq = np.fft.fftfreq
dtor = np.pi/180.0
import warnings
warnings.filterwarnings("ignore")
import os
# enable 64-bit floats in jax (default is 32-bit)
from jax.config import config; config.update("jax_enable_x64", True)
# ddir = os.path.abspath(os.path.dirname("__file__"))
ddir = '/Users/benjaminpope/code/xara/xara/'
# -------------------------------
# 1. create the KP info structure
# -------------------------------
# once saved, the kpi.gz structure can be directly reloaded when
# creating a KPO instance, such as done in step #2.
a = xara.KPI(ddir+"hst.txt")
a.name = "HST - NIC1" # # add a label to the template
a.save_to_file('./hst.kpi.gz')
ls ../data/
fits.getheader('HSTbinA.fits')
def shift(im, dx, dy):
    """Shift a 2D image by (dx, dy) pixels via the Fourier-shift theorem.

    Args:
        im (ndarray): 2D real-valued image
        dx (float): shift along axis 0 (sub-pixel values allowed)
        dy (float): shift along axis 1 (sub-pixel values allowed)
    Returns:
        ndarray: the shifted image (real part of the inverse FFT)
    """
    spectrum = onp.fft.fft2(im)
    shifted_spectrum = fourier_shift(spectrum, shift=(dx, dy))
    return onp.real(onp.fft.ifft2(shifted_spectrum))
def sim_binary(im, sep, th, con, pscale):
    """Inject a synthetic companion into a stellar image.

    Args:
        im (ndarray): image of the primary star
        sep (float): separation in mas
        th (float): position angle in degrees
        con (float): contrast ratio (primary flux / companion flux)
        pscale (float): plate scale in mas per pixel
    Returns:
        ndarray: image with a shifted, flux-scaled copy of itself added
    """
    radius_pix = sep / pscale
    angle = np.pi * th / 180.
    offset_x = radius_pix * np.cos(-angle)
    offset_y = radius_pix * np.sin(-angle)
    companion = shift(im, offset_x, offset_y)
    return im + companion / con
def preprocess_like(data,kpo):
    """Recenter and crop *data* to the 128x128 frame the KPO pipeline expects.

    Args:
        data: 2D image array (assumed larger than 128x128 -- TODO confirm)
        kpo: xara KPO object (reads CWAVEL)
    Returns:
        128x128 recentered image crop
    """
    isz = 128
    xsz,ysz = data.shape
    wrad = 50
    pscale = 43.1 # plate scale (mas)
    cwavel = kpo.CWAVEL # central wavelength
    isz = 128 # chop image
    m2pix = xara.core.mas2rad(pscale)*isz/cwavel # Fourier scaling
    tdiam = 2.4 # telescope diameter (m)
    spix = xara.core.rad2mas(cwavel/tdiam)/pscale # image sampling (pixels)
    (x0, y0) = xara.core.determine_origin(data, mask=None,
                                          algo="BCEN", verbose=False,
                                          wmin=2.0*spix)
    x1, y1 = int(x0-isz/2), int(y0-isz/2)
    img = data[y1:y1+isz, x1:x1+isz] # image is now (isz x isz)
    dy, dx = (y0-ysz/2), (x0-xsz/2)
    sgmask = xara.core.super_gauss(isz, isz, isz/2, isz/2, wrad)
    (x0, y0) = xara.core.determine_origin(img, mask=sgmask,
                                          algo="BCEN", verbose=False,
                                          wmin=2.0*spix)
    # NOTE(review): recenter is applied to the original full frame `data`,
    # not the crop `img` computed above, so the earlier crop/origin work is
    # effectively discarded -- confirm this is intentional
    img = xara.core.recenter(data,verbose=False)
    nx,ny = img.shape
    limsx = int(nx/2-64), int(nx/2+64)
    limsy = int(ny/2-64), int(ny/2+64)
    img = img[limsx[0]:limsx[1],limsy[0]:limsy[1]] # from 512x512 -> 128x128
    return img
# -------------------
# 2. load the dataset
# -------------------
# load the FITS frame, and extract the Kernel-phases using the
# HST KPI template calculated at the previous step.
# Two data sets are provided:
# n8yj59010_mos.fits.gz and 'n8yj59020_mos.fits.gz
fname = 'HSTbinA.fits'
tgt_cube = fits.getdata(fname) # alpha Ophiuchi
band = 'H'
# successive reassignments below mean only the LAST fname_b / fname_source /
# fname_kerphi in each branch takes effect (the rest document alternatives)
if band == 'J':
    fname_b = "../data/n8yj67020/n8yj67020_mos.fits" # pope 2013 - calibrator - J band
    fname_b = '../data/n8yj66020_mos.fits.gz' # pope 2013 - calibrator - 2M 1221
    fname_kerphi = 'hstmorphine_32bit_64pix_J.kpi.gz'
elif band == 'H':
    fname_b = "../data/n8yj67010/n8yj67010_mos.fits" # pope 2013 - calibrator - H band - 2M1705
    # fname_b = "../data/n8yj56010/n8yj56010_mos.fits.gz" # pope 2013 - calibrator - 2M 1105
    # fname_source = '../data/n8yj66010_mos.fits.gz' # pope 2013 - calibrator - 2M 1221 - has a faint binary
    fname_source = '../data/n8yj52010_mos.fits.gz' # pope 2013 - calibrator - 2M 0911 - clipped
    fname_source = '../data/n9nk02010_mos.fits.gz' # pope 2013 - calibrator - 2M 0228 - ok
    # fname_source = '../data/n9nk28010_mos.fits.gz' # pope 2013 - calibrator - 2M 1421
    # fname_b = '../data/n8yj17010_mos.fits.gz' # pope 2013 - calibrator - 2M 1731 - has a faint binary
    fname_kerphi = 'hstmorphine_32bit_64pix.kpi.gz'
    fname_kerphi = 'hstmorphine_32bit_64pix_H.kpi.gz'
    fname_kerphi = 'hstmorphine_32bit_96pix_H.kpi.gz'
    # fname_kerphi = 'hstmorphine_32bit_128pix_H.kpi.gz'
    # fname_kerphi = 'hstmorphine_64bit_128pix_H.kpi.gz'
    # fname_kerphi = 'hstmorphine_32bit_128pix_H.kpi.gz'
# fname = ddir+'/n8yj59010_mos.fits.gz' # frantz example
# fname_kerphi = './hst.kpi.gz'
# a = xara.KPO(fname_kerphi,offset=0.5)#xara.KPO(fname_kerphi,offset=0.5)
c = xara.KPO(fname_kerphi,offset=0.5)#xara.KPO(fname_kerphi,offset=0.5)
# load the pickled kernel-phase structure and patch the discrete FT matrix
myf = gzip.open(fname_kerphi, "r")
stuff = pickle.load(myf)
myf.close()
c.M2PIX = xara.core.mas2rad(43.1) * 128 / 1.7057900000000002e-06
c.FF = stuff['dft']
b = c.copy()
# extract kernel phases for every calibrator frame in the directory
dir_b = '../data/hst/cal/f170/*'
fnames_b = glob.glob(dir_b)
calibrators = []
otfs = []
b = c.copy()
for fb in fnames_b:
    if fname_source[8:16] in fb:
        continue
    try:
        b = c.copy()
        # b.extract_KPD(fname_b, wrad=50) # calibrator
        b.extract_KPD(fb,wrad=50)
        calibrators.append(b.KPDT[0][0])
        otf = np.abs(b.CVIS[0][0])
        otfs.append(otf/otf.max())
    # NOTE(review): bare except silently hides any failure mode (including
    # typos); consider catching a specific exception type
    except:
        print('failed on',fb)
calibrators = np.array(calibrators)
otfs = np.array(otfs)
pscale=b.PSCALE
wl = b.CWAVEL
print('Done calibrator')
cal = fits.getdata(fname_source).astype('float64')
# good_calibs = (onp.nanstd(calibrators,axis=1)<1)*(onp.nanstd(calibrators,axis=1)<0.02)*(onp.nanstd(otfs,axis=1)>0.1)
# calibrators = calibrators[good_calibs,:]
# otfs = otfs[good_calibs,:]
# print(calibrators.shape)
# NOTE(review): the mask on the next two lines is recomputed from the
# ALREADY-filtered `calibrators` but applied to the unfiltered `otfs`
# (and vice versa below) -- the commented-out `good_calibs` approach of
# computing all masks first looks like the intended behavior; confirm
calibrators = calibrators[onp.nanstd(calibrators,axis=1)<1]
otfs = otfs[onp.nanstd(calibrators,axis=1)<1]
calibrators = calibrators[onp.nanstd(otfs,axis=1)>0.1]
otfs = otfs[onp.nanstd(otfs,axis=1)>0.1]
calibrators.shape
# calib = np.median(np.array(calibrators),axis=0)
# calib = b.KPDT[0][0]
# coeffs = onp.linalg.lstsq(np.array(calibrators).T,a.KPDT[0][0], rcond=None)[0]
# calib = np.dot(coeffs,np.array(calibrators))
plt.plot(onp.nanstd(calibrators,axis=1),'.')
# dots = np.array([np.dot(cc,a.KPDT[0][0]) for cc in calibrators])
# plt.plot(dots,'.')
plt.plot(onp.nanstd(otfs,axis=1),'.')
# visualize the calibrator OTF scatter against the redundancy profile
fig = plt.figure(figsize=(12.0,8.0))
plt.fill_between(np.arange(len(otfs[0,:])),np.median(otfs,axis=0)-np.std(otfs,axis=0),
                 np.median(otfs,axis=0)+np.std(otfs,axis=0),alpha=0.2)
plt.plot(b.kpi.RED/b.kpi.RED.max())
plt.plot(np.mean(otfs,axis=0))
plt.yscale('log')
from sklearn.decomposition import PCA
fig = plt.figure(figsize=(12.0,8.0))
for j in range(20):
    plt.plot(otfs[j,:],'-')
# PCA decomposition of the calibrator kernel phases (5 components)
pca = PCA(n_components=5)
pca.fit(calibrators)
pca.components_.shape
pca.explained_variance_ratio_
plt.plot(pca.explained_variance_ratio_)
plt.yscale('log')
new_calibs = pca.fit_transform(calibrators)
reduced = pca.inverse_transform(new_calibs)
print(fits.getheader(fname_source)['FILTER'],fits.getheader(fname_b)['FILTER'],b.CWAVEL)
pscale
# j = 15
# img = preprocess_like(tgt_cube[:,:,j],b)
# inject a synthetic binary (sep mas, PA deg, contrast) and extract its
# kernel phases with the same pipeline used for real data
truth = [150.,90,20]
binary = sim_binary(cal,*truth,43.1)
img_sim = preprocess_like(binary,b)
a = c.copy()
a.extract_KPD_single_frame(
    img_sim, pscale, wl, recenter=True,method='LDFT1')
# calib = pca.inverse_transform(pca.transform(a.KPDT[0]))[0]
calib = np.median(calibrators,axis=0)
mydata = a.KPDT[0][0]#-calib
# uniform 10%-of-median uncertainty placeholder
myerr = np.ones_like(mydata)*0.1*(onp.nanmedian(np.abs(mydata)))
a.kpi.name = "2M XXXX-XX" # # labels the data
print('Done source')
mydata.shape
plt.imshow((img_sim)**0.25,interpolation='none')
# ls ../data/n8yj56010
# fits.getheader("../data/n8yj56010/n8yj56010_mos.fits.gz")
# a.plot_KPD()
print("\ncomputing colinearity map...")
gsize = 150 # gsize x gsize grid
gstep = 10 # grid step in mas
xx, yy = np.meshgrid(
    np.arange(gsize) - gsize/2, np.arange(gsize) - gsize/2)
azim = -np.arctan2(xx, yy) * 180.0 / np.pi
dist = np.hypot(xx, yy) * gstep
#mmap = kpo1.kpd_binary_match_map(100, 10, mydata/myerr, norm=True)
mmap = a.kpd_binary_match_map(gsize, gstep, mydata, norm=True)
# convert the flat argmax back to (column, row) grid coordinates
x0, y0 = np.argmax(mmap) % gsize, np.argmax(mmap) // gsize
print("max colinearity found for sep = %.2f mas and ang = %.2f deg" % (
    dist[y0, x0], azim[y0, x0]))
f1 = plt.figure(figsize=(5,5))
ax1 = f1.add_subplot(111)
ax1.imshow(mmap, extent=(
    gsize/2*gstep, -gsize/2*gstep, -gsize/2*gstep, gsize/2*gstep))
ax1.set_xlabel("right ascension (mas)")
ax1.set_ylabel("declination (mas)")
ax1.plot([0,0], [0,0], "w*", ms=16)
ax1.set_title("Calibrated signal colinearity map")
ax1.grid()
f1.set_tight_layout(True)
f1.canvas.draw()
from scipy.optimize import leastsq, minimize
def binary_model(params, kpo):
    """Complex visibilities of a binary star for a KPO's uv coverage.

    Args:
        params: (separation_mas, position_angle_deg, contrast) parameters
        kpo: xara KPO object providing uv coordinates and central wavelength
    Returns:
        complex visibility array from xara's jax binary model
    """
    uv = kpo.kpi.UVC
    wl = kpo.CWAVEL
    detpa = 0
    return xara.core.cvis_binary_jax(uv[:, 0], uv[:, 1], wl, params, detpa)
def ben_binary_model_fit_residuals(params,kpo=a,index=0,obs="KERNEL",err=1.0):
    """Error-normalized residuals between the observed kernel phases
    (global `mydata`) and a binary model's kernel phases.

    Args:
        params: (sep, PA, contrast) binary parameters
        kpo: xara KPO object (default: global `a`)
        index, obs: kept for signature compatibility (unused here)
        err: per-kernel-phase uncertainty (scalar or array)
    Returns:
        ndarray: residual vector (data - model) / err
    """
    cvis = binary_model(params, kpo)
    kp_model = np.array(kpo.kpi.KPM).dot(np.angle(cvis))
    residuals = (mydata - kp_model) / err
    return np.array(residuals)
def ben_binary_model_fit_chi2(params):
    """Chi-squared of the kernel-phase binary fit (uses globals `a`, `myerr`)."""
    res = ben_binary_model_fit_residuals(params, kpo=a, index=0,
                                         obs="KERNEL", err=myerr)
    return np.sum(np.abs(res) ** 2)
from jax import jit
# reverse-mode Jacobian of the scalar chi^2 (i.e. its gradient), jit-compiled
# and converted to a plain numpy array for scipy's optimizers
chi2_jac = jacrev(ben_binary_model_fit_chi2)
chi2_jac_np = lambda x:onp.array(jit(chi2_jac)(x))
def ben_binary_model_fit(p0,kpo=a,index=0,obs='KERNEL',err=myerr):
    """Least-squares fit of the binary parameters starting from p0.

    Args:
        p0: starting (sep, PA, contrast) vector
        kpo, index, obs, err: forwarded to the residuals function
    Returns:
        tuple: full scipy.optimize.leastsq output; element [0] is the
        best-fit parameter vector
    """
    # a BFGS alternative using the jax gradient was tried here:
    # minimize(ben_binary_model_fit_chi2, p0, method='BFGS', jac=chi2_jac_np)
    fit = leastsq(ben_binary_model_fit_residuals, p0,
                  args=(kpo, index, obs, err), full_output=1)
    return fit
def get_chi2(addederror):
    # reduced chi^2 after adding `addederror` in quadrature to the base
    # uncertainty (uses globals mydata, ker_theo, myerr, a)
    return np.sum(((mydata - np.array(ker_theo))/np.sqrt(myerr**2+addederror**2))**2) / (1.0*a.kpi.nbkp)
from jax import grad
def add_error():
    """Solve for the extra error term that drives the reduced chi^2 to 1."""
    def get_objective(addederror):
        return np.abs(get_chi2(addederror)-1.)
    # NOTE(review): this gradient is computed but never passed to minimize
    jac = grad(get_objective)
    return minimize(get_objective,0.0025,method='BFGS')['x']
print("\nbinary model fitting...")
# p0 = params0 # good starting point
# p0 = [dist[y0, x0], azim[y0, x0], mmap.max()] # good starting point
# start the fit from the injected truth parameters
p0 = truth
p0[1] = np.mod(p0[1],360.)
wl = a.CWAVEL
# mfit = a.binary_model_fit(p0)
mfit = ben_binary_model_fit(p0,kpo=a,err=myerr)
p1 = mfit[0] # the best fit parameter vector (sep, P.A., contrast)
# p1 = mfit['x']
p1[1] = np.mod(p1[1],360.)
# p1 = p0
# p1 = p0
cvis_b = xara.core.cvis_binary(
    a.kpi.UVC[:,0], a.kpi.UVC[:,1], wl, p1) # binary
ker_theo = a.kpi.KPM.dot(np.angle(cvis_b))
# inflate the error bars so reduced chi^2 = 1, then refit
added_error = add_error()
this_error = np.sqrt(myerr**2+added_error**2)
mfit = ben_binary_model_fit(p0,kpo=a,err=this_error)
p2 = np.array(p1)#+np.sqrt(np.diag(mfit[1]))
cvis_b = xara.core.cvis_binary(
    a.kpi.UVC[:,0], a.kpi.UVC[:,1], wl, p2) # binary
ker_theo = a.kpi.KPM.dot(np.angle(cvis_b))
# correlation diagram: model vs data kernel phases
fig = plt.figure(figsize=(6,6))
ax = fig.add_subplot(111)
ax.errorbar(ker_theo, mydata, yerr=myerr, fmt="none", ecolor='c')
ax.plot(ker_theo, mydata, 'b.')
mmax = (np.abs(mydata).max())
ax.plot([-mmax,mmax],[-mmax,mmax], 'r')
ax.set_ylabel("data kernel-phase")
ax.set_xlabel("model kernel-phase")
ax.set_title('kernel-phase correlation diagram')
ax.axis("equal")
# ax.axis([-11, 11, -11, 11])
fig.set_tight_layout(True)
if myerr is not None:
    chi2 = np.sum(((mydata - ker_theo)/(this_error))**2) / a.kpi.nbkp
else:
    chi2 = np.sum(((mydata - ker_theo))**2) / a.kpi.nbkp
print("sep = %3f, ang=%3f, con=%3f => chi2 = %.3f" % (p1[0], p1[1], p1[2], chi2))
print("correlation matrix of parameters")
# hess_inv = mfit['hess_inv']
# leastsq's full_output element [1] is the covariance estimate
hess_inv = mfit[1]
print(np.round(hess_inv, 2))
print('Estimated Uncertainty')
print(np.sqrt(np.diag(hess_inv)))
def negative_log_posterior(params):
    """Chi^2 of the binary model vs the observed kernel phases.

    Closes over globals `a`, `mydata`, `this_error`; up to constants this is
    twice the negative log-likelihood for Gaussian errors.
    """
    temp = binary_model(params,a)
    model = np.array(a.kpi.KPM).dot(np.angle(temp))
    error = mydata-model
    error /= (this_error)
    chi2 = np.sum((error)**2)
    return chi2
def grad_negative_log_posterior(params):
    # NOTE(review): the extra -1 factor makes this the gradient of the
    # POSITIVE log posterior -- confirm the intended sign convention
    return -1*jacrev(negative_log_posterior)(params)
def approx_covariance_matrix(params):
    # evaluate the covariance matrix of the approximate normal
    # (inverse Hessian of the negative log posterior; Laplace approximation)
    return np.linalg.inv(hessian(negative_log_posterior)(params))
covar = approx_covariance_matrix(1.0*np.array(p1))
uncertainty = (np.sqrt(np.diag(covar)))
print('Covariance matrix',covar)
print('Sigma',uncertainty)
```
### Now do a cube
```
# Sweep injected separations and record the recovered parameters and their
# Laplace-approximation uncertainties.
results = []
uncertainties = []
# myerr = np.ones_like(mydata)*0.01*(onp.nanmedian(np.abs(mydata)))
seps_in = np.linspace(90,500,20)
calib = np.median(calibrators,axis=0)
for j, sep_in in enumerate(tqdm(seps_in)):
    # img = preprocess_like(tgt_cube[:,:,j],b)
    truth = [sep_in,90,25]
    binary = sim_binary(cal,*truth,43.1)
    img_sim = preprocess_like(binary,b)
    a = c.copy()
    a.extract_KPD_single_frame(
        img_sim, pscale, wl, recenter=True,method='LDFT1')
    a.kpi.name = "simulation" # # labels the data
    # calib = pca.inverse_transform(pca.transform(a.KPDT[0]))[0]
    mydata = a.KPDT[0][0]-calib
    p0 = truth
    p0[1] = np.mod(p0[1],360.)
    # mfit = a.binary_model_fit(p0)
    mfit = ben_binary_model_fit(p0,kpo=a)
    # NOTE(review): `p2` and `this_error` here are stale values left over
    # from the previous cell / previous loop iteration, not this fit's
    # result -- confirm whether p1/mfit[0] was intended for ker_theo
    cvis_b = xara.core.cvis_binary(
        a.kpi.UVC[:,0], a.kpi.UVC[:,1], wl, p2) # binary
    ker_theo = a.kpi.KPM.dot(np.angle(cvis_b))
    if myerr is not None:
        chi2 = np.sum(((mydata - ker_theo)/(this_error))**2) / a.kpi.nbkp
    else:
        chi2 = np.sum(((mydata - ker_theo))**2) / a.kpi.nbkp
    added_error = add_error()
    this_error = np.sqrt(myerr**2+added_error**2)
    p1 = mfit[0] # the best fit parameter vector (sep, P.A., contrast)
    p1[1] = np.mod(p1[1],360.)
    results.append(p1)
    def negative_log_posterior(params):
        temp = binary_model(params,a)
        model = np.array(a.kpi.KPM).dot(np.angle(temp))
        error = mydata-model
        error /= (this_error)
        chi2 = np.sum((error)**2)
        return chi2
    def approx_covariance_matrix(params):
        # evaluate the covariance matrix of the approximate normal
        return np.linalg.inv(hessian(negative_log_posterior)(params))
    covar = approx_covariance_matrix(1.0*np.array(p1))
    uncertainties.append(np.sqrt(np.diag(covar)))
# unpack fitted parameters and 1-sigma uncertainties into flat arrays
seps_out = np.array([result[0] for result in results])
thetas_out = np.array([result[1] for result in results])
cons_out = np.array([result[2] for result in results])
dseps_out = np.array([uncertainty[0] for uncertainty in uncertainties])
dthetas_out = np.array([uncertainty[1] for uncertainty in uncertainties])
dcons_out = np.array([uncertainty[2] for uncertainty in uncertainties])
pscale
# Three panels: separation residual, position angle and contrast vs the
# injected separation, each with its 1-sigma error bars.
fig, (ax1,ax2,ax3) = plt.subplots(1,3,figsize=(16.0,4.0))
inds = np.arange(len(seps_in))
# Separation recovery: residual (fit - injected) vs injected separation.
ax1.plot(seps_in,seps_out-seps_in,'.')
ax1.axhline(0,color='k',linestyle='--',alpha=0.5)
ax1.errorbar(seps_in,seps_out-seps_in,yerr=dseps_out,ls='none',color=colours[0])
# ax1.plot(seps_in,seps_in,'--k',alpha=0.5)
# Position-angle recovery against the injected value.
ax2.plot(seps_in,thetas_out,'.')
ax2.axhline(truth[1],color='k',linestyle='--',alpha=0.5)
ax2.errorbar(seps_in,thetas_out,yerr=dthetas_out,ls='none',color=colours[0])
# Contrast recovery against the injected value.
ax3.plot(seps_in,cons_out,'.')
# BUG FIX: the error bars were drawn at y=dcons_out (the uncertainties)
# instead of the fitted contrasts; draw them on cons_out so they sit on
# the plotted points.
ax3.errorbar(seps_in,cons_out,yerr=dcons_out,ls='none',color=colours[0])
ax3.axhline(truth[2],color='k',linestyle='--',alpha=0.5)
```
### Now try Peter's
```
# Per-frame noise estimate and calibration signal from the calibrator cube
# (the first myerr assignment is immediately overwritten by the std).
myerr = np.ones_like(mydata)*0.01*(onp.nanmedian(np.abs(mydata)))
myerr = np.std(calibrators,axis=0)
calib = np.median(calibrators,axis=0)
chi2s = []
# Run the recovery twice: without and with calibrator subtraction.
for do_cal in [False,True]:
    results = []
    uncertainties = []
    ssteps, thsteps = [], []
    # plt.plot(0,0,'*')
    # Build the injected companion trajectory (separation / PA per epoch).
    for i in range(0,20):
        step=np.array((-0.17,0.28))*i + np.array((3.0,2.0))
        # print(step)
        sep_step = 43.1 * (np.hypot(*step))
        th_step = 180./np.pi*(np.angle(step[1]-1.j*step[0]))
        # plt.plot(step[0],step[1],'.')
        ssteps.append(sep_step)
        thsteps.append(th_step)
    ssteps = np.array(ssteps)
    thsteps = np.mod(np.array(thsteps),360)
    # plt.axis('square')
    for j in tqdm(range(20)):
        img = preprocess_like(tgt_cube[:,:,j],b)
        a = c.copy()
        a.extract_KPD_single_frame(
            img, pscale, wl, recenter=True,method='LDFT1')
        a.kpi.name = "simulation" # # labels the data
        if do_cal:
            mydata = a.KPDT[0][0]-calib#b.KPDT[0][0]
        else:
            mydata = a.KPDT[0][0]
        # print("\ncomputing colinearity map...")
        # gsize = 100 # gsize x gsize grid
        # gstep = 5 # grid step in mas
        # xx, yy = np.meshgrid(
        #     np.arange(gsize) - gsize/2, np.arange(gsize) - gsize/2)
        # azim = -np.arctan2(xx, yy) * 180.0 / np.pi
        # dist = np.hypot(xx, yy) * gstep
        # # mmap = kpo1.kpd_binary_match_map(100, 10, mydata/myerr, norm=True)
        # mmap = a.kpd_binary_match_map(gsize, gstep, mydata, norm=True)
        # x0, y0 = np.argmax(mmap) % gsize, np.argmax(mmap) // gsize
        # print("max colinearity found for sep = %.2f mas and ang = %.2f deg" % (
        #     dist[y0, x0], azim[y0, x0]))
        # p0 = [dist[y0, x0], azim[y0, x0], mmap.max()] # good starting point
        # Start the fit at the known injected separation/PA, contrast 20.
        p0 = [ssteps[j], thsteps[j],20]
        p0[1] = np.mod(p0[1],360.)
        # p0[0] = 150+j*10
        # mfit = a.binary_model_fit(p0)
        mfit = ben_binary_model_fit(p0,kpo=a)
        p1 = mfit[0] # the best fit parameter vector (sep, P.A., contrast)
        p1[1] = np.mod(p1[1],360.)
        p1[0] = np.abs(p1[0])
        results.append(p1)
        cvis_b = xara.core.cvis_binary(a.kpi.UVC[:,0], a.kpi.UVC[:,1], wl, p1) # binary
        ker_theo = a.kpi.KPM.dot(np.angle(cvis_b))
        def get_chi2(addederror):
            # Reduced chi^2 after inflating the errors by `addederror`.
            return np.sum(((mydata - np.array(ker_theo))/np.sqrt(myerr**2+addederror**2))**2) / (1.0*a.kpi.nbkp)
        chi2 = get_chi2(0)
        chi2s.append(chi2)
        if chi2 > 1.0:
            from jax import grad
            def add_error():
                # Solve for the extra error term that brings chi^2 to 1.
                def get_objective(addederror):
                    return np.abs(get_chi2(addederror)-1.)
                jac = grad(get_objective)
                return minimize(get_objective,0.0025,method='BFGS')['x']
            added_error = add_error()
            this_error = np.sqrt(myerr**2+added_error**2)
        else:
            this_error = myerr
        def negative_log_posterior(params):
            # Chi-squared of the binary model given this frame's data.
            temp = binary_model(params,a)
            model = np.array(a.kpi.KPM).dot(np.angle(temp))
            error = mydata-model
            error /= (this_error)
            chi2 = np.sum((error)**2)
            return chi2
        def grad_negative_log_posterior(params):
            # NOTE(review): -1 * jacrev makes this the gradient of the
            # *positive* log posterior -- confirm the intended sign.
            return -1*jacrev(negative_log_posterior)(params)
        def approx_covariance_matrix(params):
            # evaluate the covariance matrix of the approximate normal
            return np.linalg.inv(hessian(negative_log_posterior)(params))
        covar = approx_covariance_matrix(1.0*np.array(p1))
        uncertainties.append(np.sqrt(np.diag(covar)))
print('chi2s:',(np.array(chi2s)))
# Unpack this pass's results into the arrays the plotting cell reads.
# BUG FIX: the branches were swapped -- with calibration applied
# (do_cal True) the results were stored in the *_nocal arrays and vice
# versa, so the 'Calibrated'/'Uncalibrated' plot labels were inverted.
if do_cal:
    seps_out = np.array([result[0] for result in results])
    thetas_out = np.array([result[1] for result in results])
    cons_out = np.array([result[2] for result in results])
    dseps_out = np.array([uncertainty[0] for uncertainty in uncertainties])
    dthetas_out = np.array([uncertainty[1] for uncertainty in uncertainties])
    dcons_out = np.array([uncertainty[2] for uncertainty in uncertainties])
else:
    seps_out_nocal = np.array([result[0] for result in results])
    thetas_out_nocal = np.array([result[1] for result in results])
    cons_out_nocal = np.array([result[2] for result in results])
    dseps_out_nocal = np.array([uncertainty[0] for uncertainty in uncertainties])
    dthetas_out_nocal = np.array([uncertainty[1] for uncertainty in uncertainties])
    dcons_out_nocal = np.array([uncertainty[2] for uncertainty in uncertainties])
# Compare injected vs recovered parameters for both runs, one panel per
# parameter (separation, position angle, contrast) sharing the x-axis.
fig, (ax1,ax2,ax3) = plt.subplots(3,1,figsize=(6,9),sharex=True)
inds = np.arange(len(ssteps))
ax1.plot(ssteps,'.',label='Injected')
ax1.plot(seps_out,'.',label='Calibrated',color=colours[1])
ax1.errorbar(inds,seps_out,yerr=dseps_out,ls='none',color=colours[1])
ax1.plot(seps_out_nocal,'.',label='Uncalibrated')
ax1.errorbar(inds,seps_out_nocal,yerr=dseps_out_nocal,ls='none',color=colours[2])
ax1.set_ylabel('Separation (mas)')
# ax1.plot(seps_in,seps_in,'--k',alpha=0.5)
ax1.legend(fontsize=16,frameon=False)
ax1.set_xticks([])
ax2.plot(thsteps,'.')
# ax2.plot(thetas_out,'.')
# ax2.plot(thetas_out_nocal,'.')
ax2.plot(thetas_out,'.',label='Calibrated',color=colours[1])
ax2.errorbar(inds,thetas_out,yerr=dthetas_out,ls='none',color=colours[1])
ax2.plot(thetas_out_nocal,'.',label='Uncalibrated')
ax2.errorbar(inds,thetas_out_nocal,yerr=dthetas_out_nocal,ls='none',color=colours[2])
ax2.set_ylabel('Position Angle (deg)')
ax2.set_xticks([])
# ax2.axhline(truth[1],color='k',linestyle='--',alpha=0.5)
# ax3.plot(cons_out,'.',color=colours[1])
# ax3.plot(cons_out_nocal,'.',color=colours[2])
ax3.plot(cons_out,'.',label='Calibrated',color=colours[1])
ax3.errorbar(inds,cons_out,yerr=dcons_out,ls='none',color=colours[1])
ax3.plot(cons_out_nocal,'.',label='Uncalibrated')
ax3.errorbar(inds,cons_out_nocal,yerr=dcons_out_nocal,ls='none',color=colours[2])
# Injected contrast was 20 at every epoch.
ax3.axhline(20)
ax3.set_ylabel('Contrast')
ax3.set_xlabel('Epoch')
ax2.set_xticks(np.arange(20)[::5])
plt.subplots_adjust(wspace=0, hspace=0)
# ax3.axhline(truth[2],color='k',linestyle='--',alpha=0.5)
# plt.savefig('hare_hounds.png',bbox_inches='tight')
# paramlimits=[p1[0]*0.9,p1[0]*1.1,0,360,15,30.]
# def binary_model(params,kpo):
# u = kpo.kpi.UVC[:,0]
# v = kpo.kpi.UVC[:,1]
# wl = kpo.CWAVEL
# detpa = 0
# return(xara.core.cvis_binary_jax(u,v,wl, params, detpa))
# @jit
# def kp_loglikelihood(params,kpo):
# temp = binary_model(params,kpo)
# model = np.array(kpo.kpi.KPM).dot(np.angle(temp))
# error = mydata-model
# error /= (myerr)
# chi2 = -np.sum((error)**2)
# return chi2
# def negative_log_posterior(params):
# temp = binary_model(params,a)
# model = np.array(a.kpi.KPM).dot(np.angle(temp))
# error = mydata-model
# error /= (myerr)
# chi2 = np.sum((error)**2)
# return chi2
# def grad_negative_log_posterior(params):
# return -1*jacrev(negative_log_posterior)(params)
# @jit
# def approx_covariance_matrix(params):
# # evaluate the covariance matrix of the approximate normal
# return np.linalg.inv(hessian(negative_log_posterior)(params))
# def lnprior(params):
# if paramlimits[0] < params[0] < paramlimits[1] and paramlimits[2] < params[1] < paramlimits[3] and paramlimits[4] < params[2] < paramlimits[5]:
# return -np.log(params[0]) -np.log(params[2])
# return -np.inf
# def lnprob(params,kpo):
# return lnprior(params) + kp_loglikelihood(params,kpo)
# ndim=3
# nwalkers=100
# plot=False
# burnin=100
# nsteps=1000
# import time
# np.sqrt(np.diag(approx_covariance_matrix(1.0*np.array(p1))))
# import emcee
# ivar = np.array(p1) # initial parameters for model-fit
# ball = np.array([ivar + 0.01*ivar*onp.random.rand(ndim) for i in range(nwalkers)]) # initialise walkers in a ball
# print('Running emcee now!')
# t0 = time.time()
# sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob, args=[a])
# # burn in
# pos,prob,state = sampler.run_mcmc(ball, burnin)
# sampler.reset()
# t1 = time.time()
# print('Burnt in! Took %.3f seconds' %(t1-t0))
# # restart
# sampler.run_mcmc(pos,nsteps)
# tf = time.time()
# print('Time elapsed = %.3f s' %(tf-t0))
# seps = sampler.flatchain[:,0]
# ths = sampler.flatchain[:,1]
# meansep = np.mean(seps)
# dsep = np.std(seps)
# meanth = np.mean(ths)
# dth = np.std(ths)
# cs = sampler.flatchain[:,2]
# bestcon = np.mean(cs)
# conerr = np.std(cs)
# print('Separation %.3f pm %.3f mas' % (meansep,dsep))
# print('Position angle %.3f pm %.3f deg' % (meanth,dth))
# print('Contrast at',wl,'um %.3f pm %.3f' % (bestcon,conerr))
# import corner
# # Plot it.
# figure = corner.corner(sampler.flatchain, labels=[r"$\rho$", r"$\theta$", r"$c$",],
# quantiles=[0.16, 0.5, 0.84],
# show_titles=True, title_kwargs={"fontsize": 12},truths=p0)
p0
```
| github_jupyter |
# Fickian Diffusion
In this example, we will learn how to perform Fickian diffusion on a `Cubic` network. The algorithm works fine with every other network type, but for now we want to keep it simple. [See here](/examples/notebooks/networks/generation) for more details on different network types.
```
import numpy as np
import openpnm as op
%config InlineBackend.figure_formats = ['svg']
import matplotlib.pyplot as plt
%matplotlib inline
# Fix the RNG seed so the randomly generated geometry is reproducible.
np.random.seed(10)
ws = op.Workspace()
# Loglevel 40 = errors only, to keep the notebook output clean.
ws.settings["loglevel"] = 40
np.set_printoptions(precision=5)
```
## Generating network
First, we need to generate a `Cubic` network. For now, we stick to a 2d network, but you might as well try it in 3d!
```
# 1 x 10 x 10 lattice (effectively 2-D) with 10 micron pore spacing.
shape = [1, 10, 10]
spacing = 1e-5
net = op.network.Cubic(shape=shape, spacing=spacing)
```
## Adding geometry
Next, we need to add a geometry to the generated network. A geometry contains information about size of the pores/throats in a network. `OpenPNM` has tons of prebuilt geometries that represent the microstructure of different materials such as Toray090 carbon papers, sand stone, electrospun fibers, etc. For now, we'll stick to a simple geometry called `SpheresAndCylinders` that assigns random values to pore/throat diameters.
```
geom = op.geometry.SpheresAndCylinders(network=net, pores=net.Ps, throats=net.Ts)
```
## Adding phase
Next, we need to add a phase to our simulation. A phase object(s) contain(s) thermophysical information about the working fluid(s) in the simulation. `OpenPNM` has tons of prebuilt phases as well! For this simulation, we use air as our working fluid.
```
air = op.phases.Air(network=net)
```
## Adding physics
Finally, we need to add a physics. A physics object contains information about the working fluid in the simulation that depend on the geometry of the network. A good example is diffusive conductance, which not only depends on the thermophysical properties of the working fluid, but also depends on the geometry of pores/throats. OpenPNM includes a pre-defined physics class called ``Standard`` which as the name suggests contains all the standard pore-scale models to get you going:
```
phys_air = op.physics.Standard(network=net, phase=air, geometry=geom)
```
# Performing Fickian diffusion
Now that everything's set up, it's time to perform our Fickian diffusion simulation. For this purpose, we need to add the `FickianDiffusion` algorithm to our simulation. Here's how we do it:
```
fd = op.algorithms.FickianDiffusion(network=net, phase=air)
```
Note that `network` and `phase` are required parameters for pretty much every algorithm we add, since we need to specify on which network and for which phase we want to run the algorithm.
## Adding boundary conditions
Next, we need to add some boundary conditions to the simulation. By default, `OpenPNM` assumes zero flux for the boundary pores.
```
# Fixed-concentration (Dirichlet) boundaries on two opposite faces.
inlet = net.pores('front')
outlet = net.pores('back')
C_in = 1.0
C_out = 0.0
fd.set_value_BC(pores=inlet, values=C_in)
fd.set_value_BC(pores=outlet, values=C_out)
```
`set_value_BC` applies the so-called "Dirichlet" boundary condition to the specified pores. Note that unless you want to apply a single value to all of the specified pores (like we just did), you must pass a list (or `ndarray`) as the `values` parameter.
## Running the algorithm
Now, it's time to run the algorithm. This is done by calling the `run` method attached to the algorithm object.
```
fd.run()
```
# Post processing
When an algorithm is successfully run, the results are attached to the same object. To access the results, you need to know the quantity for which the algorithm was solving. For instance, `FickianDiffusion` solves for the quantity `pore.concentration`, which is somewhat intuitive. However, if you ever forget it, or wanted to manually check the quantity, you can take a look at the algorithm `settings`:
```
print(fd.settings)
```
## Visualizing
Now that we know the quantity for which `FickianDiffusion` was solved, let's take a look at the results:
```
c = fd['pore.concentration']
r = fd.rate(throats=net.Ts, mode='single')
d = net['pore.diameter']
fig, ax = plt.subplots(figsize=[30, 15])
op.topotools.plot_coordinates(network=net, color_by=c, size_by=d, markersize=4000, ax=ax)
op.topotools.plot_connections(network=net, color_by=r, linewidth=3, ax=ax)
_ = plt.axis('off')
```
## Calculating flux
You might as well be interested in calculating the mass flux from a boundary! This is easily done in `OpenPNM` via calling the `rate` method attached to the algorithm. Let's see how it works:
```
rate_inlet = fd.rate(pores=inlet)[0]
print(f'Mass flow rate from inlet: {rate_inlet:.5e} mol/s')
```
We can determine the effective diffusivity of the network by solving Fick's law:
$$ D_{eff} = \frac{N_A L}{ A \Delta C} $$
```
# Cross-sectional area normal to the transport direction, and sample length.
# NOTE(review): this takes the 'front'->'back' direction to lie along the
# last axis; since shape[1] == shape[2] the numbers agree either way, but
# confirm the axis convention before changing the shape.
A = (shape[0] * shape[1])*(spacing**2)
L = shape[2]*spacing
# Fick's law rearranged for the effective diffusivity: D_eff = N_A L / (A dC).
D_eff = rate_inlet * L / (A * (C_in - C_out))
print("{0:.6E}".format(D_eff))
```
And the formation factor can be found since the diffusion coefficient of open air is known:
$$ F = \frac{D_{AB}}{D_{eff}} $$
```
# Open-air diffusivity (uniform across pores, so any element works).
D_AB = air['pore.diffusivity'][0]
# Formation factor F = D_AB / D_eff.
F = D_AB / D_eff
print('The formation factor is: ', "{0:.6E}".format(F))
```
The tortuosity is defined as follows:
$$ \frac{D_{eff}}{D_{AB}} = \frac{\varepsilon}{\tau} \rightarrow \tau = \varepsilon \frac{ D_{AB}}{D_{eff}} $$
Note that finding the tortuosity requires knowing the porosity, which is annoyingly difficult to calculate accurately, so here we will just gloss over the details.
```
# Porosity = (total pore + throat volume) / bulk sample volume.
V_p = geom['pore.volume'].sum()
V_t = geom['throat.volume'].sum()
V_bulk = np.prod(shape)*(spacing**3)
e = (V_p + V_t) / V_bulk
print('The porosity is: ', "{0:.6E}".format(e))
# Tortuosity from D_eff/D_AB = porosity/tortuosity.
tau = e * D_AB / D_eff
print('The tortuosity is:', "{0:.6E}".format(tau))
```
| github_jupyter |
## Part 1: LLE
Implement Locally Linear Embedding function
```
from sklearn.neighbors import kneighbors_graph
from scipy.sparse import csr_matrix
from numpy import matlib
import numpy as np
def csr_from_mat(W, NI):
    """Scatter the dense (n, k) weight matrix into a sparse (n, n) CSR matrix.

    Row i of the result holds W[i, :] in the columns given by NI[i, :]
    (the neighbour indices of sample i); all other entries are zero.
    """
    n, k = W.shape
    data = np.reshape(W, n*k)
    cols = np.reshape(NI, n*k)
    # Each of the n rows contributes exactly k entries.  np.repeat gives
    # exact integer row indices; the original np.floor(np.arange(0, n, 1/k))
    # relied on accumulated float steps rounding the right way.
    rows = np.repeat(np.arange(n), k)
    return csr_matrix((data, (rows, cols)), shape=(n, n))
def lle_neighborhood(X, k):
    """Return the indices of the k nearest neighbours of every sample in X.

    Output shape is (n_samples, k); row i lists the neighbours of X[i].
    """
    num_samples = X.shape[0]
    graph = kneighbors_graph(X, k, mode='connectivity')
    # The CSR `indices` array stores k column indices per row, in row order.
    return graph.indices.reshape(num_samples, k)
def lle_weights(X, NI):
    """Reconstruction weights for each sample from its k neighbours.

    Solves the constrained least-squares problem of LLE (Roweis & Saul):
    each row of the returned (n, k) array sums to 1 and best reconstructs
    X[i] as a combination of its neighbours X[NI[i]].
    """
    n, d = X.shape
    n, k = NI.shape
    # Regularise the local Gram matrix when it can be singular (k > d).
    tol = 1e-3 if k > d else 0
    W = np.zeros((n, k))
    for i in range(n):
        # Neighbour offsets from the current point, shape (d, k);
        # broadcasting replaces the deprecated numpy.matlib.repmat.
        Z = (X[NI[i, :], :] - X[i, :]).T
        C = Z.T.dot(Z)
        C = C + tol * np.trace(C) * np.identity(k)
        # Solve C w = 1 directly -- cheaper and numerically more stable
        # than forming the explicit inverse.
        w = np.linalg.solve(C, np.ones(k))
        # Enforce the sum-to-one constraint.
        W[i, :] = w / np.sum(w)
    return W
def lle_embedding(W, m):
    """Embedding from the bottom eigenvectors of M = (I - W)^T (I - W).

    Returns the m eigenvectors with the smallest non-zero eigenvalues,
    shape (n, m); the constant eigenvector (eigenvalue ~ 0) is dropped.
    """
    n = W.shape[0]
    I = np.identity(n)
    # np.asarray: W may arrive sparse, which would make I - W an np.matrix.
    M = np.asarray((I - W).T.dot(I - W))
    # M is symmetric positive semi-definite, so use the stable symmetric
    # solver; eigh returns real eigenvalues already sorted ascending
    # (the original eig + argsort could suffer from complex round-off).
    w, v = np.linalg.eigh(M)
    return v[:, 1:m + 1]
"""Args:
X: input samples, array (num, dim)
n_components: dimension of output data
n_neighbours: neighborhood size
Returns:
Y: output samples, array (num, n_components)
"""
def LLE(X, n_components=2, n_neighbours=10):
NI = lle_neighborhood(X, n_neighbours)
W = lle_weights(X, NI)
W = csr_from_mat(W, NI)
Y = lle_embedding(W, n_components)
return Y
```
## Part 2: Manifold Visualization
Visualize the S-shaped 3-d dataset using the LLE.
```
from sklearn import manifold, datasets
SX, St = datasets.make_s_curve(n_samples=1000, random_state=1337)
# SX: input data [n_samples, 3]
# St: univariate position along manifold [n_samples], use for coloring the plots
```
The code in the next cell should draw a single plot with the following subplots:
1. 3D S-shaped dataset
2. 2D Manifold learnt using LLE
Use the `St` variable to color the points in your visualizations. Use a color spectrum, and the position along the manifold to assign the color.
```
# Visualization code here
from matplotlib import pyplot as plt
%matplotlib notebook
LX = LLE(SX, 2)
fig = plt.figure(figsize=(6, 10))
axi = fig.add_subplot(211, projection='3d')
colorize = dict(c=St, cmap=plt.cm.get_cmap('rainbow', 4))
axi.scatter3D(SX[:, 0], SX[:, 1], SX[:, 2], **colorize)
axi.title.set_text('3D S-shaped dataset')
axi = fig.add_subplot(212)
axi.scatter([LX[:, 0]], [LX[:, 1]], **colorize)
axi.title.set_text('2D Manifold learnt using LLE')
```
## Part 3: Visualizing high-dimensional data
Visualize the Swiss roll dataset using LLE.
```
# Swiss roll dataset loading here
import numpy
d = []
with open('./swissroll.dat', 'r') as dat_file:
for line in dat_file:
line = line.strip().split()
line = [float(x.strip()) for x in line]
d.append(line)
swissroll = numpy.array(d)
print (swissroll.shape)
```
The code in the next cell should draw a single plot with the following subplots:
1. Visualize Swiss roll.
2. Unwrap the manifold in 2D and visualize using LLE.
```
import numpy as np
from matplotlib import pyplot as plt
%matplotlib notebook
X = swissroll
Xc = np.linspace(0, 1, X.shape[0])
LX = LLE(X, 2)
fig = plt.figure(figsize=(6, 10))
axi = fig.add_subplot(211, projection='3d')
colorize = dict(c=Xc, cmap=plt.cm.get_cmap('rainbow', 4))
axi.scatter3D(X[:, 0], X[:, 1], X[:, 2], **colorize)
axi.title.set_text('3D Swiss roll dataset')
axi = fig.add_subplot(212)
axi.scatter([LX[:, 0]], [LX[:, 1]], **colorize)
axi.title.set_text('Unwrap the manifold in 2D using LLE')
```
| github_jupyter |
```
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from matplotlib.lines import Line2D
from TimeSeriesCrossValidation import splitTrain, splitTrainVal, splitTrainValTest
# Toy time series (0..26) used by every demo split and plot below.
timeSeries = np.arange(27)
timeSeries
def show_train(X, y, num):
    """Print the first ``num`` training input/output pairs."""
    for idx in range(num):
        print("--------- SET %d ---------" % (idx+1))
        print("X[%d] ="% (idx+1), X[idx])
        print("y[%d] ="% (idx+1), y[idx])
def show_train_val(X, y, Xcv, ycv, num):
    """Print the first ``num`` train/validation splits (one window per line)."""
    for idx in range(num):
        print("--------- SET %d ---------" % (idx+1))
        print("X[%d] ="% (idx+1), *X[idx])
        print("y[%d] ="% (idx+1), *y[idx])
        print("Xcv[%d] ="% (idx+1), *Xcv[idx])
        print("ycv[%d] ="% (idx+1), *ycv[idx])
def show_train_val_test(X, y, Xcv, ycv, Xtest, ytest, num):
    """Print the first ``num`` train/validation/test splits."""
    for idx in range(num):
        print("--------- SET %d ---------" % (idx+1))
        print("X[%d] ="% (idx+1), *X[idx])
        print("y[%d] ="% (idx+1), *y[idx])
        print("Xcv[%d] ="% (idx+1), *Xcv[idx])
        print("ycv[%d] ="% (idx+1), *ycv[idx])
        print("Xtest[%d] ="% (idx+1), *Xtest[idx])
        print("ytest[%d] ="% (idx+1), *ytest[idx])
def plot_train(X, y, algo):
    """Visualise the train-only splits against the original series."""
    fig, ax = plt.subplots(figsize=(20, 13))
    # Full time series drawn as a horizontal reference line on top.
    ax.plot(timeSeries, len(timeSeries) * [len(X)-0.75], lw=3, c='k')
    k = 1./max([len(x)+2 for x in X])
    # NOTE(review): k is immediately overwritten with 0, so the k*level
    # vertical offset below is always zero -- confirm this is intentional.
    k = 0
    numSets = len(X)
    level = 10
    for i in np.arange(len(X)):
        # One horizontal segment per split: dark blue = input window,
        # light blue = forecast window, thin line bridges the gap.
        plt.plot(X[i], len(X[i]) * [numSets-i-1-k*level], lw=4, c='blue')
        plt.plot([X[i][-1], y[i][0]], 2*[numSets-i-1-k*level], lw=2, c='blue')
        plt.plot(y[i], len(y[i]) * [numSets-i-1-k*level], lw=4, c='lightblue')
    ax.grid(which='minor')
    ax.set_yticks(np.arange(len(X), 0, -1)-1)
    ax.set_yticklabels(np.arange(1, len(X)+1, 1))
    ax.set_ylabel("Train set", size=16)
    custom_lines = [Line2D([0], [0], color='k', lw=4),
    Line2D([0], [0], color='blue', lw=4), Line2D([0], [0], color='lightblue', lw=4)]
    ax.legend(custom_lines, ['Original Time-Series',
    'X (Train input)', 'y (Train output)' ],
    loc='upper center', ncol=3, handleheight=2.4, labelspacing=0.05)
    ax.set_ylim([-.5, len(X)])
    ax.set_title(algo, size=18);
    plt.grid('minor')
    plt.show()
def plot_train_val(X, y, Xcv, ycv, algo):
    """Visualise train (blue) and validation (red) windows for each split."""
    fig, ax = plt.subplots(figsize=(20, 13))
    ax.plot(timeSeries, len(timeSeries) * [len(X)-0.75], lw=3, c='k')
    # Vertical offset per sub-window so stacked windows do not overlap.
    k = 1./max([len(x)+2 for x in X.values()])
    numSets = len(X)
    for i in np.arange(numSets):
        # Window start positions, sorted, to assign each window a level.
        sorted_arr = np.sort(np.array([x[0] for x in np.concatenate((X[i], Xcv[i]), axis=0)]))
        for j in np.arange(len(X[i])):
            level = sum(sorted_arr<X[i][j][0])
            plt.plot(X[i][j], len(X[i][j]) * [numSets-i-1-k*level], lw=3, c='blue')
            plt.plot([X[i][j][-1], y[i][j][0]], 2*[numSets-i-1-k*level], lw=1, c='blue')
            plt.plot(y[i][j], len(y[i][j]) * [numSets-i-1-k*level], lw=3, c='lightblue')
        for j in np.arange(len(Xcv[i])):
            level = sum(sorted_arr<Xcv[i][j][0])
            plt.plot(Xcv[i][j], len(Xcv[i][j]) * [numSets-i-1-k*level], lw=3, c='red')
            plt.plot([Xcv[i][j][-1], ycv[i][j][0]], 2*[numSets-i-1-k*level], lw=1, c='red')
            plt.plot(ycv[i][j], len(ycv[i][j]) * [numSets-i-1-k*level], lw=3, c='tomato')
        # Shaded rectangle groups all windows belonging to one split.
        rect = patches.Rectangle((0, len(X)-i-1), len(timeSeries)-1, k*(-len(sorted_arr)+1),
        ls='-.', linewidth=1, edgecolor='k',facecolor='lavender')
        ax.add_patch(rect)
    ax.grid(which='minor')
    ax.set_yticks(np.arange(len(X), 0, -1)-1)
    ax.set_yticklabels(np.arange(1, len(X)+1, 1))
    ax.set_ylabel("Train and validation set", size=16)
    custom_lines = [Line2D([0], [0], color='k', lw=4), patches.Patch(facecolor='lavender', edgecolor='k'),
    Line2D([0], [0], color='blue', lw=4), Line2D([0], [0], color='lightblue', lw=4),
    Line2D([0], [0], color='red', lw=4), Line2D([0], [0], color='tomato', lw=4)]
    ax.legend(custom_lines, ['Original Time-Series', 'Train and Validation set',
    'X (Train input)', 'y (Train output)',
    'Xcv (Cross-validation input)', 'ycv (Cross-validation output)'],
    loc='upper center', ncol=3, handleheight=2.4, labelspacing=0.05)
    ax.set_ylim([-1.5, len(X)])
    ax.set_title(algo, size=18);
    plt.show()
def plot_train_val_test(X, y, Xcv, ycv, Xtest, ytest, algo):
    """Visualise train (blue), validation (red) and test (green) windows."""
    fig, ax = plt.subplots(figsize=(20, 13))
    ax.plot(timeSeries, len(timeSeries) * [len(X)-0.75], lw=3, c='k')
    # Vertical offset per sub-window so stacked windows do not overlap.
    k = 1./max([len(x)+2 for x in X.values()])
    numSets = len(X)
    for i in np.arange(numSets):
        # Window start positions, sorted, to assign each window a level.
        sorted_arr = np.sort(np.array([x[0] for x in np.concatenate((X[i], Xcv[i], Xtest[i]), axis=0)]))
        for j in np.arange(len(X[i])):
            level = sum(sorted_arr<X[i][j][0])
            plt.plot(X[i][j], len(X[i][j]) * [numSets-i-1-k*level], lw=3, c='blue')
            plt.plot([X[i][j][-1], y[i][j][0]], 2*[numSets-i-1-k*level], lw=1, c='blue')
            plt.plot(y[i][j], len(y[i][j]) * [numSets-i-1-k*level], lw=3, c='lightblue')
        for j in np.arange(len(Xcv[i])):
            level = sum(sorted_arr<Xcv[i][j][0])
            plt.plot(Xcv[i][j], len(Xcv[i][j]) * [numSets-i-1-k*level], lw=3, c='red')
            plt.plot([Xcv[i][j][-1], ycv[i][j][0]], 2*[numSets-i-1-k*level], lw=1, c='red')
            plt.plot(ycv[i][j], len(ycv[i][j]) * [numSets-i-1-k*level], lw=3, c='tomato')
        for j in np.arange(len(Xtest[i])):
            level = sum(sorted_arr<Xtest[i][j][0])
            plt.plot(Xtest[i][j], len(Xtest[i][j]) * [numSets-i-1-k*level], lw=3, c='green')
            plt.plot([Xtest[i][j][-1], ytest[i][j][0]], 2*[numSets-i-1-k*level], lw=1, c='green')
            plt.plot(ytest[i][j], len(ytest[i][j]) * [numSets-i-1-k*level], lw=3, c='lightgreen')
        # Shaded rectangle groups all windows belonging to one split.
        rect = patches.Rectangle((0, len(X)-i-1), len(timeSeries)-1, k*(-len(sorted_arr)+1),
        ls='-.', linewidth=1, edgecolor='k',facecolor='lavender')
        ax.add_patch(rect)
    ax.grid(which='minor')
    ax.set_yticks(np.arange(len(X), 0, -1)-1)
    ax.set_yticklabels(np.arange(1, len(X)+1, 1))
    ax.set_ylabel("Train, validation and test set", size=16)
    custom_lines = [Line2D([0], [0], color='k', lw=4), patches.Patch(facecolor='lavender', edgecolor='k'),
    Line2D([0], [0], color='blue', lw=4), Line2D([0], [0], color='lightblue', lw=4),
    Line2D([0], [0], color='red', lw=4), Line2D([0], [0], color='tomato', lw=4),
    Line2D([0], [0], color='green', lw=4), Line2D([0], [0], color='lightgreen', lw=4)]
    ax.legend(custom_lines, ['Original Time-Series', 'Train, validation and test set',
    'X (Train input)', 'y (Train output)',
    'Xcv (Cross-validation input)', 'ycv (Cross-validation output)',
    'Xtest (Test input)', 'ytest (Test output)'],
    loc='upper center', ncol=4, handleheight=2.4, labelspacing=0.05)
    ax.set_ylim([-1.5, len(X)])
    ax.set_title(algo, size=18);
    plt.show()
# Demo: exercise each splitting strategy on the toy series and plot it.
# Train-only splits.
X, y = splitTrain.split_train(timeSeries, n_steps_input=4, n_steps_forecast=3, n_steps_jump=3)
show_train(X, y, 5)
plot_train(X, y, "Split train data")
X, y = splitTrain.split_train_variableInput(timeSeries, minSamplesTrain=10, n_steps_forecast=3, n_steps_jump=3)
show_train(X, y, 5)
plot_train(X, y, "Split train data with variable input")
# Train + validation splits.
X, y, Xcv, ycv = splitTrainVal.split_train_val_forwardChaining(timeSeries, n_steps_input=4, n_steps_forecast=3, n_steps_jump=2)
show_train_val(X, y, Xcv, ycv, 5)
plot_train_val(X, y, Xcv, ycv, "Forward Chaining")
X, y, Xcv, ycv = splitTrainVal.split_train_val_kFold(timeSeries, n_steps_input=4, n_steps_forecast=3, n_steps_jump=2)
show_train_val(X, y, Xcv, ycv, 5)
plot_train_val(X, y, Xcv, ycv, "K-Fold")
X, y, Xcv, ycv = splitTrainVal.split_train_val_groupKFold(timeSeries, n_steps_input=4, n_steps_forecast=3, n_steps_jump=2)
show_train_val(X, y, Xcv, ycv, 5)
plot_train_val(X, y, Xcv, ycv, "Group K-Fold")
# Train + validation + test splits.
X, y, Xcv, ycv, Xtest, ytest = splitTrainValTest.split_train_val_test_forwardChaining(timeSeries, n_steps_input=4, n_steps_forecast=3, n_steps_jump=2)
show_train_val_test(X, y, Xcv, ycv, Xtest, ytest, 5)
plot_train_val_test(X, y, Xcv, ycv, Xtest, ytest, "Forward Chaining")
X, y, Xcv, ycv, Xtest, ytest = splitTrainValTest.split_train_val_test_kFold(timeSeries, n_steps_input=4, n_steps_forecast=3, n_steps_jump=2)
show_train_val_test(X, y, Xcv, ycv, Xtest, ytest, 5)
plot_train_val_test(X, y, Xcv, ycv, Xtest, ytest, "K-Fold")
X, y, Xcv, ycv, Xtest, ytest = splitTrainValTest.split_train_val_test_groupKFold(timeSeries, n_steps_input=4, n_steps_forecast=3, n_steps_jump=2)
show_train_val_test(X, y, Xcv, ycv, Xtest, ytest, 5)
plot_train_val_test(X, y, Xcv, ycv, Xtest, ytest, "Group K-Fold")
```
| github_jupyter |
# Feature Extraction from Text
This notebook is divided into two sections:
* First, we'll find out what is necessary to build an NLP system that can turn a body of text into a numerical array of *features* by **manually calculating frequencies and building out TF-IDF**.
* Next we'll show how to perform these steps **using scikit-learn tools**.
---------
+ [Part One: Core Concepts on Feature Extraction (MANUALLY)](#partone)
+ [1) Start with some documents:](#1)
+ [2) Building a vocabulary (Creating a "Bag of Words")](#2)
+ [2.1) Getting the unique words only](#2.1)
+ [2.2) Get all unique words across all documents (both One and Two)](#2.2)
+ [2.3) Create vocab dictionary with related index](#2.3)
+ [3) Bag of Words to Frequency Counts](#3)
+ [3.1) Make A list of All Vocab (which will be used to map later)](#3.1)
+ [3.2) Add in counts per word per doc:](#3.2)
+ [3.3) Create the DataFrame:](#3.3)
---------
+ [Concepts to Consider:](#concept)
+ [Bag of Words and Tf-idf](#bow)
+ [Stop Words and Word Stems](#stopwordswordstems)
+ [Tokenization and Tagging](#tokenizationtagging)
--------
+ [Part Two: Feature Extraction with Scikit-Learn](#part2)
+ [1) CountVectorizer](#countvect)
+ [`stop_words` parameter](#stopwords)
+ [2) TfidfTransformer](#tfidf)
+ [3) Using Pipeline (combining two steps of CountVectorizer + Tfidf Transformer)](#pipeline)
+ [4) TfIdfVectorizer (same as step 1 + step 2)](#tfidfvector)
-----
# <a name=partone>Part One: Core Concepts on Feature Extraction (MANUALLY)</a>
In this section we'll use basic Python to build a rudimentary NLP system. We'll build a *corpus of documents* (two small text files), create a *vocabulary* from all the words in both documents, and then demonstrate a *Bag of Words* technique to extract features from each document.<br>
<div class="alert alert-info" style="margin: 20px">This first section is for illustration only!
<br>Don't worry about memorizing this code - later on we will let Scikit-Learn Preprocessing tools do this for us.</div>
# <a name=1>1) Start with some documents:</a>
For simplicity we won't use any punctuation in the text files One.txt and Two.txt. Let's quickly open them and read them. Keep in mind, you should avoid opening and reading entire files if they are very large, as Python could just display everything depending on how you open the file.
```
# Read the whole file into a single string.
with open('../Data/One.txt') as mytext:
    a = mytext.read()
print(a)
# readlines returns as list
with open('../Data/One.txt') as mytext:
    a = mytext.readlines()
print(a)
```
### Reading entire text as a string
```
with open('../Data/Two.txt') as mytext:
entire_text = mytext.read()
entire_text
print(entire_text)
```
### Reading Each Line as a List
```
with open('../Data/One.txt') as mytext:
lines = mytext.readlines()
lines
```
### Reading in Words Separately
```
with open('../Data/One.txt') as mytext:
words = mytext.read().lower().split()
words
```
-----
# <a name=2>2) Building a vocabulary (Creating a "Bag of Words")</a>
Let's create dictionaries that correspond to **unique mappings of the words in the documents**. We can begin to think of this as mapping out all the possible words available for all (both) documents.
#### Read in One.txt
```
with open('../Data/One.txt') as mytext:
words_one = mytext.read().lower().split()
words_one
len(words_one)
```
## <a name=2.1>2.1) Getting the unique words only</a>
```
unique_words_one = set(words_one)
unique_words_one
len(unique_words_one)
```
Now we only have 12 unique words instead of the original 13 words in Document One.
### Repeat for Two.txt
```
with open('../Data/Two.txt') as mytext:
words_two = mytext.read().lower().split()
unique_words_two = set(words_two)
len(words_two), len(unique_words_two)
```
## <a name=2.2>2.2) Get all unique words across all documents (both One and Two)</a>
```
all_unique_words = set()
all_unique_words.update(unique_words_one)
print(all_unique_words)
all_unique_words.update(unique_words_two)
print(all_unique_words)
```
## <a name=2.3>2.3) Create vocab dictionary with related index</a>
```
# Map each unique word to a stable integer index.
full_vocab = {}
# enumerate replaces the manually incremented counter variable.
for i, word in enumerate(all_unique_words):
    full_vocab[word] = i
```
Take note that a set is not ordered, so the words will not come out in any particular order.
The for loop goes through the set() in the most efficient way possible, not in alphabetical order!
```
full_vocab
```
-----
# <a name=3>3) Bag of Words to Frequency Counts</a>
Now that we've encapsulated our "entire language" in a dictionary, let's perform *feature extraction* on each of our original documents:
#### Empty counts per doc
```
# One zero-initialised slot per vocabulary word for each document vector.
one_freq = [0] * len(full_vocab)
two_freq = [0] * len(full_vocab)
all_words = [''] * len(full_vocab)
one_freq
```
## <a name=3.1>3.1) Make A list of All Vocab (which will be used to map later)</a>
```
# Invert the vocab mapping: place each word at its assigned index.
for word in full_vocab:
    word_index = full_vocab[word]
    all_words[word_index] = word
print(all_words)
```
## <a name=3.2>3.2) Add in counts per word per doc:</a>
```
# map the frequencies of each word in 1.txt to our vector:
with open('../Data/One.txt') as file:
    one_text = file.read().lower().split()
for word in one_text:
    word_index = full_vocab[word] #get the index of that specific word
    one_freq[word_index] += 1 # increase by one
one_freq
# Do the same for the second document:
with open('../Data/Two.txt') as file:
    two_text = file.read().lower().split()
for word in two_text:
    word_index = full_vocab[word]
    two_freq[word_index] += 1
two_freq
```
## <a name=3.3>3.3) Create the DataFrame:</a>
```
import pandas as pd
# One row per document, one column per vocab word: the bag-of-words matrix.
bow = pd.DataFrame(data=[one_freq, two_freq], columns=all_words)
bow
```
Now we can see how frequently each word appears in the documents.
By comparing the vectors we see that some words are common to both, some appear only in `One.txt`, others only in `Two.txt`. Extending this logic to tens of thousands of documents, we would see the vocabulary dictionary grow to hundreds of thousands of words. Vectors would contain mostly zero values, making them **sparse matrices**.
# <a name=concept>Concepts to Consider:</a>
## <a name=bow>Bag of Words and Tf-idf</a>
In the above examples, each vector can be considered a *bag of words*. By itself these may not be helpful until we consider *term frequencies*, or how often individual words appear in documents. A simple way to calculate term frequencies is to divide the number of occurrences of a word by the total number of words in the document. In this way, the number of times a word appears in large documents can be compared to that of smaller documents.
However, it may be hard to differentiate documents based on term frequency if a word shows up in a majority of documents. To handle this we also consider *inverse document frequency*, which is the total number of documents divided by the number of documents that contain the word. In practice we convert this value to a logarithmic scale, as described [here](https://en.wikipedia.org/wiki/Tf%E2%80%93idf#Inverse_document_frequency).
Together these terms become [**tf-idf**](https://en.wikipedia.org/wiki/Tf%E2%80%93idf).
## <a name=stopwordswordstems>Stop Words and Word Stems</a>
Some words like "the" and "and" appear so frequently, and in so many documents, that we needn't bother counting them. Also, it may make sense to only record the root of a word, say `cat` in place of both `cat` and `cats`. This will shrink our vocab array and improve performance.
## <a name=tokenizationtagging>Tokenization and Tagging</a>
When we created our vectors the first thing we did was split the incoming text on whitespace with `.split()`. This was a crude form of *tokenization* - that is, dividing a document into individual words. In this simple example we didn't worry about punctuation or different parts of speech. In the real world we rely on some fairly sophisticated *morphology* to parse text appropriately.
Once the text is divided, we can go back and *tag* our tokens with information about parts of speech, grammatical dependencies, etc. This adds more dimensions to our data and enables a deeper understanding of the context of specific documents. For this reason, vectors become ***high dimensional sparse matrices***.
-------
-------
# <a name=part2>Part Two: Feature Extraction with Scikit-Learn</a>
Let's explore the more realistic process of using sklearn to complete the tasks mentioned above!
# Scikit-Learn's Text Feature Extraction Options
```
# Toy corpus: each list element is treated as one document.
text = [
    'This is a line',
    'This is another line',
    'Completely different line',
]
```
## <a name=countvect>1) CountVectorizer</a>
```
from sklearn.feature_extraction.text import CountVectorizer
# CountVectorizer learns the vocabulary and produces bag-of-words counts.
cv = CountVectorizer()
```
+ cv will treat each value as one document
+ `fit_transform` first learns the unique vocabulary on fit, and then transforms the data by actually performing a frequency count on each document inside that list.
+ and it returns a `sparse matrix`. The reason is that when vectorizing and building out a bag-of-words model, most of the items in the matrix are going to be zeros. So when you are dealing with hundreds of thousands of documents with many, many words, you don't want to eat up too much of your PC's memory unnecessarily by storing a bunch of zeros — which is why we use a sparse matrix.
+ sparse matrix with 3x6 matrix. Why 3? because there are 3 documents in the list we passed. When we used `todense()` method, we can see the originally stored frequency count which is not in sparse matrix form (which stores information in memory efficient way). **NOTE: we don't want to call this method if we have a large values of words, which gonna take a lot of memory space**
```
# fit learns the vocabulary; transform counts each word per document.
# Result: a 3x6 scipy sparse matrix (3 documents x 6 unique words).
sparse_matrix = cv.fit_transform(text)
sparse_matrix
```
#### using todense() to see in original form
```
sparse_matrix.todense()
```
#### vocabulary_
```
cv.vocabulary_
```
If we look closely at the values, `another` is at index `0`. So if you look at the results of todense(), index 0 has value 1 in the second document,
which makes sense because **This is another line** is the second document and the only one containing the word another.
## <a name=stopwords>`stop_words` parameter</a>
+ with the use of this parameter, common English stop words are no longer part of the vocab.
```
# stop_words='english' drops common English words from the learned vocabulary.
cv = CountVectorizer(stop_words='english')
sparse_matrix = cv.fit_transform(text)
cv.vocabulary_
```
-------
## <a name=tfidf>2) TfidfTransformer</a>
+ TfidfVectorizer is used on sentences, while TfidfTransformer is used on an existing count matrix, such as one returned by CountVectorizer
+ using this, **we can transform Bag of Words into TF-IDF**.
```
from sklearn.feature_extraction.text import TfidfTransformer
# TfidfTransformer rescales an existing count matrix to TF-IDF weights.
tfidf = TfidfTransformer()
sparse_matrix
results = tfidf.fit_transform(sparse_matrix) # BOW ===> TF-IDF
results
results.todense()
```
## <a name=pipeline>3) Using Pipeline (combining two steps of CountVectorizer + Tfidf Transformer)</a>
```
from sklearn.pipeline import Pipeline
# Chain vectorization and TF-IDF weighting into a single transformer.
pipe = Pipeline([
    ('cv', CountVectorizer()),
    ('tfidf', TfidfTransformer())
])
results = pipe.fit_transform(text)
results
results.todense()
```
----
## <a name=tfidfvector>4) TfIdfVectorizer</a>
Does both above (step 1 and 2) in a single step!
```
from sklearn.feature_extraction.text import TfidfVectorizer
# TfidfVectorizer = CountVectorizer + TfidfTransformer in one step.
tv = TfidfVectorizer()
tv_results = tv.fit_transform(text)
tv_results.todense()
```
We can see that both methods yield the same results.
| github_jupyter |
```
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
import boto3
import os
from sagemaker.amazon.amazon_estimator import get_image_uri
import sagemaker
from sagemaker import get_execution_role
from sklearn.model_selection import train_test_split
import numpy as np
import sagemaker
from random import shuffle
import multiprocessing
from multiprocessing import Pool
import csv
import nltk
from sagemaker.tuner import IntegerParameter, CategoricalParameter, ContinuousParameter, HyperparameterTuner
# Column 0 holds the label; the remaining 88 columns are features.
train = pd.read_csv('../Data/train.csv', names = list(range(89)))
test = pd.read_csv('../Data/test.csv', names = list(range(89)))
# SageMaker built-in algorithms expect float32 arrays.
train_labels = np.array(train[0]).astype("float32")
train_features = np.array(train.drop(0, axis=1)).astype("float32")
test_labels = np.array(test[0]).astype("float32")
test_features = np.array(test.drop(0, axis=1)).astype("float32")
def get_base_estimator(clf, sess, role):
    """Return a generic SageMaker Estimator for the built-in `clf` container.

    Args:
        clf: name of a SageMaker built-in algorithm (e.g. 'xgboost').
        sess: sagemaker.Session for the training job.
        role: IAM execution role ARN.

    NOTE(review): reads the module-level `bucket` for the output path; it must
    be defined before this function is called.
    """
    container = get_image_uri(boto3.Session().region_name, clf)
    est = sagemaker.estimator.Estimator(container,
                                        role,
                                        train_instance_count=1,
                                        train_instance_type='ml.m4.xlarge',
                                        output_path='s3://{}/{}/output'.format(bucket, clf),
                                        sagemaker_session=sess)
    return est
def get_estimator(clf, sess, role):
    """Build a SageMaker estimator pre-configured for the given algorithm.

    Args:
        clf: algorithm name; one of 'xgboost', 'linear-learner', 'knn' or
            'factorization-machines'.
        sess: sagemaker.Session used for the training job (xgboost only).
        role: IAM execution role ARN (xgboost only; the other algorithms
            look the role up themselves).

    Returns:
        A configured SageMaker estimator.

    Raises:
        ValueError: if clf is not a supported algorithm name.  Previously an
            unknown name fell through and raised a confusing
            UnboundLocalError on the final return.
    """
    if clf == 'xgboost':
        # Only xgboost goes through the generic image-based estimator; the
        # others use first-class SDK classes.  (The unconditional
        # get_image_uri call that used to sit here was dead code.)
        est = get_base_estimator(clf, sess, role)
        est.set_hyperparameters(max_depth=5,
                                eta=0.2,
                                gamma=4,
                                min_child_weight=6,
                                subsample=0.8,
                                silent=0,
                                objective='binary:logistic',
                                num_round=100)
    elif clf == 'linear-learner':
        est = sagemaker.LinearLearner(role=sagemaker.get_execution_role(),
                                      train_instance_count=1,
                                      train_instance_type='ml.m4.xlarge',
                                      predictor_type='binary_classifier',
                                      num_classes=2)
    elif clf == 'knn':
        est = sagemaker.KNN(role=sagemaker.get_execution_role(),
                            k=10,
                            train_instance_count=1,
                            train_instance_type='ml.m4.xlarge',
                            predictor_type='classifier',
                            sample_size=200)
    elif clf == 'factorization-machines':
        est = sagemaker.FactorizationMachines(role=sagemaker.get_execution_role(),
                                              train_instance_count=1,
                                              train_instance_type='ml.m4.xlarge',
                                              predictor_type='binary_classifier',
                                              num_factors=2)
    else:
        raise ValueError("Unsupported classifier: {!r}".format(clf))
    return est
# add k-fold cross validation here
sess = sagemaker.Session()
role = get_execution_role()
client = boto3.client('sagemaker')
bucket = sess.default_bucket()
# Training/test CSVs must already be staged under these S3 prefixes.
s3_input_train = sagemaker.s3_input(s3_data='s3://{}/train'.format(bucket), content_type='csv')
s3_input_test = sagemaker.s3_input(s3_data='s3://{}/test/'.format(bucket), content_type='csv')
import sagemaker
from sagemaker.amazon.amazon_estimator import RecordSet
import boto3
# instantiate the LinearLearner estimator object
# NOTE(review): despite the variable name, predictor_type='binary_classifier'
# trains a binary model -- confirm whether num_classes is honored here or
# only for 'multiclass_classifier'.
multiclass_estimator = sagemaker.LinearLearner(role=sagemaker.get_execution_role(),
                                               train_instance_count=1,
                                               train_instance_type='ml.m4.xlarge',
                                               predictor_type='binary_classifier',
                                               num_classes=2)
# wrap data in RecordSet objects
train_records = multiclass_estimator.record_set(train_features, train_labels, channel='train')
test_records = multiclass_estimator.record_set(test_features, test_labels, channel='test')
# start a training job
multiclass_estimator.fit([train_records, test_records])
def get_tuner(clf, est):
    """Create a HyperparameterTuner with per-algorithm search ranges.

    Args:
        clf: algorithm name; one of 'xgboost', 'knn', 'linear-learner' or
            'factorization-machines'.
        est: the estimator returned by get_estimator for the same clf.

    Returns:
        A HyperparameterTuner configured with the algorithm's objective
        metric and hyperparameter ranges (30 jobs, 3 in parallel).

    Raises:
        ValueError: if clf is not a supported algorithm name.  Previously an
            unknown name left objective_metric_name/hyperparameter_ranges
            unbound and crashed with UnboundLocalError.
    """
    if clf == 'xgboost':
        objective_metric_name = 'validation:auc'
        hyperparameter_ranges = {'eta': ContinuousParameter(0, 1),
                                 'min_child_weight': ContinuousParameter(1, 10),
                                 'alpha': ContinuousParameter(0, 2),
                                 'max_depth': IntegerParameter(1, 10)}
    elif clf == 'knn':
        objective_metric_name = 'test:accuracy'
        hyperparameter_ranges = {'k': IntegerParameter(1, 1024),
                                 'sample_size': IntegerParameter(256, 20000000)}
    elif clf == 'linear-learner':
        objective_metric_name = 'test:recall'
        hyperparameter_ranges = {'l1': ContinuousParameter(0.0000001, 1),
                                 'use_bias': CategoricalParameter([True, False])}
    elif clf == 'factorization-machines':
        objective_metric_name = 'test:binary_classification_accuracy'
        hyperparameter_ranges = {'bias_wd': IntegerParameter(1, 1000)}
    else:
        raise ValueError("Unsupported classifier: {!r}".format(clf))
    tuner = HyperparameterTuner(est,
                                objective_metric_name,
                                hyperparameter_ranges,
                                max_jobs=30,
                                max_parallel_jobs=3)
    return tuner
def run_training_job(clf):
    """Kick off a hyperparameter tuning job for classifier `clf`.

    xgboost consumes the CSV S3 channels directly; the other algorithms use
    RecordSet protobuf channels built from the in-memory feature arrays.
    Relies on module-level sess, role, s3_input_train/test and the
    train/test feature arrays.
    """
    # build the estimator
    est = get_estimator(clf, sess, role)
    # get the hyperparameter tuner config
    # set this to look for recall somehow
    if clf == 'xgboost':
        tuner = get_tuner(clf, est)
        tuner.fit({'train': s3_input_train, 'test': s3_input_test})
    else:
        # set the records
        train_records = est.record_set(train_features, train_labels, channel='train')
        test_records = est.record_set(test_features, test_labels, channel='validation')
        tuner = get_tuner(clf, est)
        tuner.fit([train_records, test_records])
def magic_loop(models_to_run):
    """Launch one SageMaker tuning job per classifier name, in parallel.

    Args:
        models_to_run: iterable of classifier names understood by
            run_training_job (e.g. 'xgboost', 'knn').
    """
    # The context manager guarantees the pool is cleaned up even if a worker
    # raises; the original close()/join() pair leaked workers on error.
    # (run_training_job returns None, so the old `transformed_rows` result
    # was an unused list of Nones and is dropped.)
    with Pool(processes=multiprocessing.cpu_count()) as pool:
        pool.map(run_training_job, models_to_run)
# Tune all four algorithms in parallel.
clfs = ['xgboost', 'linear-learner', 'factorization-machines', 'knn']
magic_loop(clfs)
```
| github_jupyter |
# Constellation and Chain Analysis: Prebuilt Chains
<img src="chainPaths.jpg" alt="Drawing" style="width: 500px;"/>
**Terminology**
* Node = Object in STK
* Edge = Access between two objects in STK
* Strand = The sequence of nodes and edges to complete access in a chain
**This notebook shows how to:**
* Merge access strands from chain results into one combined list
* Example:
* Chain1: constellation of ground stations -> constellation of satellites -> constellation of ground stations
* Chain2: constellation of ground stations -> constellation of satellites -> constellation of satellites -> constellation of ground stations
* Find the intervals of access for each strand, node and edge
* Find the percent of time with access vs without for each strand, node and edge
* Compute the distance and minimum time delay over time for each strand
* Find the minimum N distance/time delay strands
* Summary statistics for minimum distance and number of hops
* Load the selected strands back into STK as object lines
* Perform other network metrics such as node with most accesses, nodes with fewest hops to other nodes, nodes which connect the most other nodes
* Show network degree distributions
* Find the minimum number of nodes to remove to lose connectivity/strand access to any or all of the starting and ending nodes. (This method removes nodes along the shortest path and continuing until connectivity is lost. In some cases there may be a set of fewer nodes that could be removed to lose access to all starting and ending nodes)
* Rank nodes over time based on different connectivity metrics
```
import numpy as np
import pandas as pd
import itertools
pd.set_option('max_colwidth', 90)
from comtypes.client import CreateObject
from comtypes.client import GetActiveObject
from comtypes.gen import STKObjects
from comtypes.gen import STKUtil
from comtypes.gen import AgSTKVgtLib
import seaborn as sns
import matplotlib.pyplot as plt
from chainPathLib import *
import time
import networkx as nx
```
## Chain Names, Computation Time and Metric
```
# Inputs for Chain Analysis
chainNames = ['Chain1Hop','Chain2Hop'] # Will merge results of multiple chains
start = 0 # EpSec
stop = 3600*1 # EpSec
metric = 'timeDelay' # 'distance','timeDelay' # Other options could be added in the future
nodeDelays = {'Sats':0.01} # Add in time delays. Provide the constellation name in STK and the associated node delays
stkVersion = 12 # 11 or 12
# Connect to STK: attach to an already-running STK desktop instance via COM.
stkApp = GetActiveObject('STK{}.Application'.format(stkVersion))
stkRoot = stkApp.Personality2
# Work in epoch seconds everywhere (both object model and Connect commands).
stkRoot.UnitPreferences.SetCurrentUnit('DateFormat','EpSec')
stkRoot.ExecuteCommand('Units_SetConnect / Date "EpochSeconds"');
stkRoot.ExecuteCommand('VO * ObjectLine DeleteAll'); # Clean up old object lines
# Build dict of node delays
nodeDelaysByNode = getNodeDelaysByNode(stkRoot,nodeDelays,chainNames=chainNames)
```
## Compute All Chain Strands
```
# Compute strands
# A strand is a complete node/edge path that closes access through a chain.
t1 = time.time()
strands,dfStrands = getAllStrands(stkRoot,chainNames,start,stop)
print(time.time()-t1)
dfStrands
```
## Network Utilization
### Convert Strands into Nodes and Edges
```
# Break each strand into its constituent nodes and edges with their access intervals.
dfNodesIntervals = getNodesIntervalsFromStrands(strands)
dfNodesIntervals
dfEdgesIntervals = getEdgesIntervalsFromStrands(strands)
dfEdgesIntervals
```
### Percent of time with Access
```
# Percent of the analysis window [start, stop] that each strand/node/edge has access.
dfStrandsActive = getActiveDuration(dfStrands,start,stop)
dfStrandsActive
dfNodeActive = getActiveDuration(dfNodesIntervals,start,stop)
dfNodeActive
dfEdgesActive = getActiveDuration(dfEdgesIntervals,start,stop)
dfEdgesActive
```
## Path Selection
### Fewest Strand Handoffs
```
# Finds the fewest strand switches, uses a greedy first come approach
dfFewestStrandSwitches = computeFewestStrandSwitches(dfStrands,start,stop)
dfFewestStrandSwitches
# Visualize the chosen strands back in STK as object lines.
addStrandsAsObjectLines(stkRoot,dfFewestStrandSwitches,color='yellow')
```
### Minimum Distance
```
# Compute minimum distance through a chain
# Pipeline: node positions -> strands per time step -> edge metrics ->
# minimum-distance strand.  Each stage is timed with time.time() deltas.
# Time resolution for calculating distance
step = 10 # sec
# Pull the node positions over time
t1 = time.time()
nodesTimesPos = computeNodesPosOverTime(stkRoot,strands,start,stop,step) # Pull node position over time
t2 = time.time()
print(t2-t1)
# Discretize intervals into strands at each time
t1 = time.time()
strandsAtTimes = getStrandsAtTimes(strands,start,stop,step) # Discretize strand intervals into times
t2 = time.time()
print(t2-t1)
# Get the position of the nodes in each strand
t1 = time.time()
timeNodePos = computeTimeNodePos(strandsAtTimes,nodesTimesPos) # Nodes and positions at each time
t2 = time.time()
print(t2-t1)
# Compute the edges and edge distances
t1 = time.time()
timesEdgesDistancesDelays = computeTimeEdgesDistancesDelays(strandsAtTimes,nodesTimesPos,nodeDelaysByNode) # Edges, distances and delays at each time
print(time.time()-t1)
# Compute the strand distances and the minimum distance strand
t1 = time.time()
timeStrandsDistances,dfTimesStrandsDistances,dfMinStrandsDistances = computeTimeStrandsDistancesDelays(strandsAtTimes,timesEdgesDistancesDelays,start,stop,step) # Strands and distances at each time and minimum strand distance at each time
t2 = time.time()
print(t2-t1)
dfMinStrandsDistances
# View intervals of minimum distance
dfIntervals = createDfIntervals(dfMinStrandsDistances,stop,step)
dfIntervals
# Add shortest path to STK as object lines
t1 = time.time()
addStrandsAsObjectLines(stkRoot,dfIntervals,color='yellow')
print(time.time()-t1)
```
### Analyze Results of Minimum Distance
```
# View minimum strand distance over time
dfMinStrandsDistances.plot('time','distance');
# View minimum strand num hops over time
dfMinStrandsDistances.plot('time','num hops');
# Summary statistics for distance and num hops
dfMinStrandsDistances.describe()
# Summary stats for strand durations and num hops
# An empty strand string marks a gap (no access) in the interval table.
dfIntervals[dfIntervals['strand'] != ''].describe()[['dur','num hops']]
# Summary stats for duration of strand gaps
dfIntervals[dfIntervals['strand'] == ''].describe()['dur']
```
### Find Minimum Time Delay = node delays + signal travel time
```
# Example adding node delays to look at constellation/chain latency
# Time delay = per-node processing delays + signal travel time.
dfMinNStrandsTimeDelay = computeNMinMetric(dfTimesStrandsDistances,metric='time delay')
dfMinNStrandsTimeDelay.plot('time','time delay');
dfIntervalsMinTimeDelay = createDfIntervals(dfMinNStrandsTimeDelay,stop,step)
addStrandsAsObjectLines(stkRoot,dfIntervalsMinTimeDelay,color='yellow')
```
### N Shortest
```
# Compute shortest N strands
# NOTE(review): metric='time delay' is passed here although the surrounding
# comments and variable names say distance -- confirm which metric is intended.
maxNumStrands = 2
dfMinNStrandsDistances = computeNMinMetric(dfTimesStrandsDistances,n=maxNumStrands,metric='time delay')
dfMinNStrandsDistances
# Add best N Paths
groups = dfMinNStrandsDistances.groupby('time')
strandsMaxNumStrands = []
for numStrands in range(maxNumStrands):
    # Collect the row index of the numStrands-th best strand at every time step
    indexes = []
    for name,group in groups:
        indexes.append(group.index[numStrands])
    dfIntervalsNumStrands = createDfIntervals(dfMinNStrandsDistances.loc[indexes],stop,step)
    strandsMaxNumStrands.append(dfIntervalsNumStrands[['strand','start','stop']].to_numpy())
strandsMaxNumStrands = mergeStrands(strandsMaxNumStrands)
dfStrandsMaxNumStrands = pd.DataFrame(strandsMaxNumStrands,columns=['strand','start','stop']).sort_values('start')
addStrandsAsObjectLines(stkRoot,dfStrandsMaxNumStrands,color='yellow')
dfIntervalsNumStrands
```
## Use NX for Network Metrics and Reliability Analysis
```
# Get starting nodes and ending nodes of the chains
# (needed for the betweenness-subset and connectivity metrics)
startingNodes,endingNodes = getStartingAndEndingNodes(stkRoot,chainNames)
startingNodes,endingNodes
```
### Network at Time Instant
```
# Identify important nodes at a time instant
t = start
G = generateNetwork(t,timesEdgesDistancesDelays,timeNodePos) # Build network in nx, only active nodes are added
# Nodes with most Accesses/edges, score is normalized by total num edges
topNDegrees = getDegreeCentrality(G,topN=2)
print(topNDegrees)
# Nodes with fewest hops to connect to all other nodes, score is normalized
topNCloseness = getClosenessCentrality(G,topN=2)
print(topNCloseness)
# Nodes which connect the most strands between starting and ending nodes, score is normalized by connections which exist with node
startingNodesSub = [node for node in startingNodes if node in G.nodes()] # Subset of starting nodes that are active
endingNodesSub = [node for node in endingNodes if node in G.nodes()] # Subset of ending nodes that are active
topNBetweeness = getBetweennessSubsetCentrality(G,startingNodesSub,endingNodesSub,topN=2)
print(topNBetweeness )
# Minimum nodes which must be removed to lose all strand accesses
nodesToRemoveAll = nodesToLoseAccess(G,startingNodes,endingNodes,loseAccessTo='all',topN=10) # Only consider the highest topN betweenness scores
print(nodesToRemoveAll)
# Minimum nodes which must be removed to lose any strand access
nodesToRemoveAny = nodesToLoseAccess(G,startingNodes,endingNodes,loseAccessTo='any')
print(nodesToRemoveAny)
# Shortest distance/time delay between starting and ending nodes. Already computed above, but is useful if nodes are removed
strand,metricValue = shortestStrandDistance(G,startingNodes,endingNodes,metric=metric)
print(strand,metricValue)
# Plot nodes based on position, red is stationary, blue is moving, the larger circles indicate more accesses
network_plot_3D(G,startingNodes=startingNodes,endingNodes=endingNodes)
```
### Network Metrics over Time
```
# Build new networks at each time and gather metrics
t1 = time.time()
centers = []
minNumHops = []
mostEdges = []
edgeCount = []
setsOfMinNodesToRemoveAll = []
setsOfMinNodesToRemoveAny = []
# NOTE(review): the two lists below are initialized but never appended to in
# this loop -- they look like leftovers from an earlier revision.
minNodesToRemoveAll = []
minNodesToRemoveAny = []
degreesOfNodes = []
times = np.arange(start,stop+step,step)
for t in times:
    # Generate Network at each time
    G = generateNetwork(t,timesEdgesDistancesDelays,timeNodePos)
    # Store degree counts
    nodeNames,degrees = zip(*G.degree())
    degreesOfNodes.append(degrees)
    # Find nodes with fewest hops to all other nodes
    # NOTE(review): nx.center/nx.radius raise on a disconnected graph --
    # confirm the generated network is always connected over the window.
    centers.append(nx.center(G))
    minNumHops.append(nx.radius(G)-1)
    # Nodes with most Accesses
    maxDeg, nodesWMaxDeg = getMaxDegrees(G)
    mostEdges.append(nodesWMaxDeg)
    edgeCount.append(maxDeg)
    # Could use centrality scores instead since these are normalized
    # nx.degree_centrality(G)
    # nx.closeness_centrality(G)
    # nx.betweenness_centrality_subset(G,startingNodes,endingNodes,normalized=True)
    # Min nodes to remove to lose access to all starting and ending nodes. This takes awhile to compute
    setsOfMinNodesToRemoveAll.append(nodesToLoseAccess(G,startingNodes,endingNodes,loseAccessTo='all',topN=10))
    # Min nodes to remove to lose access to any starting or ending node. This takes awhile to compute
    setsOfMinNodesToRemoveAny.append(nodesToLoseAccess(G,startingNodes,endingNodes,loseAccessTo='any'))
time.time()-t1
# Put network metrics over time into a DataFrame (one row per time step)
dfNetwork = pd.DataFrame([times]).T
dfNetwork.columns=['time']
dfNetwork['num centers'] = pd.Series(centers).apply(lambda x: len(x))
dfNetwork['min num hops'] = minNumHops
dfNetwork['num nodes w/ max degree'] = pd.Series(mostEdges).apply(lambda x: len(x))
dfNetwork['max num edges'] = edgeCount
# Bug fix: this guard previously tested setsOfMinNodesToRemoveAny while the
# body reads setsOfMinNodesToRemoveAll (copy-paste error).
if setsOfMinNodesToRemoveAll:
    numSets = [len(sets) for sets in setsOfMinNodesToRemoveAll]
    lengthOfSet = [len(sets[0]) if len(sets) > 0 else 0 for sets in setsOfMinNodesToRemoveAll]
    dfNetwork['min nodes to lose access to all'] = lengthOfSet
    dfNetwork['num of sets to lose access to all'] = numSets
if setsOfMinNodesToRemoveAny:
    numSets = [len(sets) for sets in setsOfMinNodesToRemoveAny]
    lengthOfSet = [len(sets[0]) if len(sets) > 0 else 0 for sets in setsOfMinNodesToRemoveAny]
    dfNetwork['min nodes to lose access to any'] = lengthOfSet
    dfNetwork['num of sets to lose access to any'] = numSets
# Totals per time step
numNodes = [len(val) for key,val in timeNodePos.items()]
numEdges = [len(val) for key,val in timesEdgesDistancesDelays.items()]
numStrands = [len(val) for key,val in timeStrandsDistances.items()]
dfNetwork['tot num nodes'] = numNodes
dfNetwork['tot num edges'] = numEdges
# Bug fix: 'tot num strands' was previously assigned numEdges (copy-paste error).
dfNetwork['tot num strands'] = numStrands
dfNetwork.describe()
# Plot distribution of active node degrees over time
plotDegDistribution(degreesOfNodes)
# Plot metric over time
dfNetwork.plot.line(x='time',y='tot num strands');
# Plot nodes to lose access over time (only if both metric sets were computed)
if setsOfMinNodesToRemoveAny and setsOfMinNodesToRemoveAll:
    dfNetwork.plot.line(x='time',y=['min nodes to lose access to any','min nodes to lose access to all']);
# Add data back into STK
t1 = time.time()
addDataToSTK(stkRoot,chainNames[0],dfNetwork) # Adds data to the first chain in the User Supplied data
print(time.time()-t1)
```
## Investigating important nodes and sets of nodes over time
```
# Find node with highest average betweenness score over time
t1 = time.time()
nodesAll = {}  # node name -> cumulative betweenness score over all time steps
for t in np.arange(start,stop+step,step):
    # Generate Network at each time
    G = generateNetwork(t,timesEdgesDistancesDelays,timeNodePos)
    # Remove nodes which are not present
    startingNodesSub = [node for node in startingNodes if node in G.nodes()]
    endingNodesSub = [node for node in endingNodes if node in G.nodes()]
    # Calculate score (assumes an iterable of (node, score) pairs -- confirm)
    nodeScores = getBetweennessSubsetCentrality(G,startingNodesSub,endingNodesSub)
    # Sum Scores
    for node,score in nodeScores:
        score=float(score)
        if node in nodesAll.keys():
            nodesAll.update({node:nodesAll[node]+score})
        else:
            nodesAll.update({node:score})
# Sort and print top scores (descending by cumulative score)
scores = np.asarray(list(nodesAll.items()))
scores = scores[np.argsort(scores[:,1].astype(float)),:][::-1]
print(time.time()-t1)
scores[0:10,:]
# Number of times a node has the most edges
countNodesOverTime(mostEdges,topN=10)
# Number of times a set of nodes appears in the minimum nodes to remove all list
countNodesOverTime(setsOfMinNodesToRemoveAll,topN=10)
# Number of times a set of nodes appears in the minimum nodes to remove any list
countNodesOverTime(setsOfMinNodesToRemoveAny,topN=10)
```
| github_jupyter |
# Importing modules
```
import numpy as np
import pandas as pd
```
# Data
```
# Full column list of the panel CSV (dtypes chosen per column below).
colunas = ['ANO_CINEMATOGRAFICO', 'SEMANA_CINEMATOGRAFICA', 'TIPO_SESSAO',
           'REGISTRO_COMPLEXO', 'REGISTRO_GRUPO','REGISTRO_SALA', 'CPB_ROE', 'ASSENTOS_INFERIDO',
           'OCUPAÇÃO_SALA_INFERIDA', 'd_t', 'id_NAC', 'xt_comp', 't_comp',
           'OBG_FINAL_COMP', 'SALAS_COMP', 'DIA_abs', 'COMP_CUMPRIU', 'xt_frac',
           'cump_frac', 'cpb_id', 'beta', 'HORA_ajustada'] # all cols with defined dtypes (see below)
remover = {'CPB_ROE','TIPO_SESSAO','ANO_CINEMATOGRAFICO','d_t','beta',
           'OBG_FINAL_COMP','OCUPAÇÃO_SALA_INFERIDA','t_comp','cpb_id','COMP_CUMPRIU','cump_frac','xt_comp',
           'SEMANA_CINEMATOGRAFICA','REGISTRO_SALA','REGISTRO_GRUPO'} # cols to remove
importar = list(set(colunas).difference(remover)) # cols to import
# Narrow integer/bool dtypes keep the panel's memory footprint small; the
# dtype dict also covers the excluded columns, which read_csv ignores.
painel = pd.read_csv('Painel 2018 final.csv', dtype={
    'ANO_CINEMATOGRAFICO':'int16', 'SEMANA_CINEMATOGRAFICA':'int8', 'REGISTRO_COMPLEXO':'uint16',
    'CPB_ROE':str, 'OCUPAÇÃO_SALA_INFERIDA':float, 'd_t':int, 'x_t':float,
    'id_NAC':bool, 'xt_comp':float, 't_comp':int, 'OBG_FINAL_COMP':float,
    'SALAS_COMP':'int8', 'DIA_abs':'int16', 'COMP_CUMPRIU':bool, 'cpb_id':'int16', 'cump_frac':float,
    'xt_frac':float, 'ASSENTOS_INFERIDO':'int16', 'TIPO_SESSAO':str, 'beta':float, 'HORA_ajustada':'int8',
    'REGISTRO_GRUPO':int,'REGISTRO_SALA':'int16'},usecols=importar)
# Free the helper collections once the panel is loaded.
del colunas
del importar
del remover
print(painel.shape)
print(painel.columns)
print(painel.dtypes)
print(painel.info())
```
## Export as pickle
```
painel.to_pickle('Painel_2018_pickle')
```
# Creating design matrix
```
painel = pd.read_pickle('Painel_2018_pickle')
painel.columns
import psutil
print(psutil.virtual_memory().available / 1024 / 1024) # available virtual memory in MBs
# NOTE(review): the two get_dummies calls below overlap -- after the first
# one, 'DIA_abs' and 'HORA_ajustada' no longer exist as columns, so running
# both in sequence raises KeyError; only one of them should be executed.
painel = pd.get_dummies(painel, columns=['DIA_abs','HORA_ajustada'],drop_first=True)
painel = pd.get_dummies(painel, columns=['DIA_abs','REGISTRO_COMPLEXO','HORA_ajustada'],drop_first=True)
print(len(painel.columns))
print(painel.info())
print(painel.info())
```
# Model fit
```
from sklearn import linear_model
cols = painel.columns
y = painel['id_NAC']
painel.drop('id_NAC',axis=1, inplace=True)
# scikit-learn logistic regression (point estimates only, no std. errors)
reg = linear_model.LogisticRegression(
    solver='lbfgs', max_iter=10000, fit_intercept=True).fit(
    painel,y) # model fit
import statsmodels.api as sm
painel = sm.add_constant(painel)
# NOTE(review): this refit overwrites the sklearn model above; `reg` ends up
# as the statsmodels Logit results object, which is what gets shelved below.
reg = sm.Logit(y, painel).fit()
import shelve
# Persist the fitted model and the original column names for later reuse.
with shelve.open(r'bin_logit') as bn:
    bn['bin_logit'] = reg
    bn['X_col_names'] = cols
```
# Ignore
from patsy import dmatrices
y, X = dmatrices('id_NAC ~ C(DIA_abs) + C(REGISTRO_COMPLEXO)*xt_frac + C(HORA_ajustada) + ASSENTOS_INFERIDO + SALAS_COMP',
data=painel, return_type='dataframe')
cols = painel.columns
for i, c in enumerate(cols[1150:]):
print(i, c)
if 'COMPLEXO' in c:
painel.loc[painel[c] != 0, 'xt:'+c] = np.multiply(
painel.loc[painel[c] != 0, 'xt_frac'], painel.loc[painel[c] != 0, c])
painel.loc[painel[c] == 0, 'xt:'+c] = 0
| github_jupyter |
# Introduction to Object Oriented Programming
## Lesson outline
- Object-oriented programming syntax
- Procedural vs. object-oriented programming
- Classes, objects, methods and attributes
- Coding a class
- Magic methods
- Inheritance
- Using object-oriented programming to make a Python package
- Making a package
- Tour of `scikit-learn` source code
- Putting your package on **PyPi**
## Class, object, method, and attribute
- Object-oriented programming (OOP) vocabulary
- `Class`: A blueprint consisting of methods and attributes.
- `Object`: An instance of a class. It can help to think of objects as something in the real world like a yellow pencil, a small dog, or a blue shirt. However, as you'll see later in the lesson, objects can be more abstract.
- `Attribute`: A descriptor or characteristic. Examples would be color, length, size, etc. These attributes can take on specific values like blue, 3 inches, large, etc.
- `Method`: An action that a class or object could take.
- `OOP`: A commonly used abbreviation for object-oriented programming.
- `Encapsulation`: One of the fundamental ideas behind object-oriented programming is called encapsulation: you can combine functions and data all into a single entity. In object-oriented programming, this single entity is called a class. Encapsulation allows you to hide implementation details, much like how the scikit-learn package hides the implementation of machine learning algorithms.
## OOP Syntax
```
class Shirt:
    """A simple shirt with color, size, style and price attributes."""

    def __init__(self, shirt_color, shirt_size, shirt_style, shirt_price):
        # Removed the stray C-style semicolons from the original version.
        self.color = shirt_color
        self.size = shirt_size
        self.style = shirt_style
        self.price = shirt_price

    def change_price(self, new_price):
        """Set the shirt's price to new_price."""
        self.price = new_price

    def discount(self, discount):
        """Return the discounted price.

        Args:
            discount: fraction to take off, e.g. 0.2 for 20% off.
        """
        return self.price * (1 - discount)
# Instantiating without assignment creates a Shirt that is immediately discarded.
Shirt('red', 's', 'short sleeve', 15);
new_shirt = Shirt('red', 's', 'short sleeve', 15);
print(new_shirt.color)
print(new_shirt.size)
print(new_shirt.style)
print(new_shirt.price)
# Mutate the price via the method, then show the new value.
new_shirt.change_price(10)
print(new_shirt.price)
# Build a small collection of Shirt objects and print each one's color.
tshirt_collection = []
shirt_one = Shirt('orange', 'M', 'short sleeve', 25)
shirt_two = Shirt('red', 'S', 'short sleeve', 15)
shirt_three = Shirt('purple', 'XL', 'short sleeve', 35)
tshirt_collection.append(shirt_one)
tshirt_collection.append(shirt_two)
tshirt_collection.append(shirt_three)
# Iterate the objects directly instead of indexing via range(len(...)).
for shirt in tshirt_collection:
    print(shirt.color)
```
| github_jupyter |
# Contrast Effects
### Authors
Ndèye Gagnessiry Ndiaye and Christin Seifert
### License
This work is licensed under the Creative Commons Attribution 3.0 Unported License https://creativecommons.org/licenses/by/3.0/
This notebook illustrates 3 contrast effects:
- Simultaneous Brightness Contrast
- Chevreul Illusion
- Contrast Crispening
## Simultaneous Brightness Contrast
Simultaneous Brightness Contrast is the general effect where a gray patch placed on a dark background looks lighter than the same gray patch on a light background (foreground and background affect each other). The effect is based on lateral inhibition.
Also see the following video as an example:
https://www.youtube.com/watch?v=ZYh4SxE7Xp8
```
import numpy as np
import matplotlib.pyplot as plt
```
The following image shows a gray square on different backgrounds. The inner square always has the same color (84% gray), and is successively shown on 0%, 50%, 100%, and 150% gray background patches. Note, how the inner squares are perceived differently (square on the right looks considerably darker than the square on the left).
Suggestion: Change the gray values of the inner and outer squares and see what happens.
```
# defining the inner square as 3x3 array with an initial gray value
inner_gray_value = 120
inner_square = np.full((3,3), inner_gray_value, np.double)
# defining the outer squares and overlaying the inner square
# Background gray levels: 0, 50, 100 and 150 (within the 0-255 display range).
a = np.zeros((5,5), np.double)
a[1:4, 1:4] = inner_square
b = np.full((5,5), 50, np.double)
b[1:4, 1:4] = inner_square
c = np.full((5,5), 100, np.double)
c[1:4, 1:4] = inner_square
d = np.full((5,5), 150, np.double)
d[1:4, 1:4] = inner_square
# Place the four patches side by side; vmin/vmax pin the gray scale to 0-255
# so the identical inner value renders comparably in every patch.
simultaneous=np.hstack((a,b,c,d))
im=plt.imshow(simultaneous, cmap='gray',interpolation='nearest',vmin=0, vmax=255)
#plt.rcParams["figure.figsize"] = (70,10)
plt.axis('off')
plt.colorbar(im, orientation='horizontal')
plt.show()
```
## Chevreul Illusion
The following image visualizes the Chevreul illusion. We use a sequence of gray bands (200%, 150%, 100%, 75% and 50% gray). Each band has a uniform gray value. When putting the bands next to each other, the gray values seem to be darker at the edges. This is due to lateral inhibition, a feature of our visual system that increases edge contrasts and helps us to better detect outlines of shapes.
```
# Five 9x5 gray bands with decreasing gray values.
band_values = (200, 150, 100, 75, 50)
bands = [np.full((9, 5), value, np.double) for value in band_values]

# First image: the bands placed directly next to each other.
# (np.hstack copies, so the later mutation does not affect image1.)
image1 = np.hstack(bands)

# Second image: the right-most column of every band is set to white,
# inserting a break between neighbouring bands.
for band in bands:
    band[:, 4] = 255
image2 = np.hstack(bands)

plt.subplot(1, 2, 1)
plt.imshow(image1, cmap='gray', vmin=0, vmax=255, interpolation='nearest', aspect=4)
plt.title('Bands')
plt.axis('off')

plt.subplot(1, 2, 2)
plt.imshow(image2, cmap='gray', vmin=0, vmax=255, interpolation='nearest', aspect=4)
plt.title('Bands with white breaks')
plt.axis('off')
plt.show()
```
## Contrast Crispening
The following images show the gray strips on a gray-scale background. Left image: All vertical gray bands are the same. Note how different parts of the vertical gray bands are enhanced (i.e., difference better perceivable) depending on the gray value of the background. In fact, differences are enhanced when the gray value in the foreground is closer to the gray value in the background. On the right, the same vertical bands are shown but without the background. In this image you can (perceptually) verify that all vertical gray bands are indeed the same.
```
# 10 evenly spaced gray values from black (0) to white (255), as a column.
# Bug fix: np.linspace's 4th *positional* argument is `endpoint`, not
# `dtype`, so `np.linspace(0, 255, 10, np.double)` silently passed the
# dtype as the endpoint flag; pass dtype by keyword instead.
strips = np.linspace(0, 255, 10, dtype=np.double)
strips = strips.reshape((-1, 1))

# Background gradient running the opposite way (white to black).
M = np.linspace(255, 0, 10, dtype=np.double)
n = np.ones((20, 10), np.double)
background = n[:, :] * M
# Overlay the strips on every second column of the middle rows.
background[5:15, ::2] = strips

# Same strips on a plain white background for comparison.
without_background = np.full((20, 10), 255, np.double)
without_background[5:15, ::2] = strips

plt.subplot(1, 2, 1)
plt.imshow(background, cmap='gray', vmin=0, vmax=255, interpolation='nearest')
# Use booleans to hide ticks/labels; the old 'off' strings are rejected
# by modern matplotlib.
plt.tick_params(axis='both', left=False, top=False, right=False, bottom=False,
                labelleft=False, labeltop=False, labelright=False, labelbottom=False)
plt.subplot(1, 2, 2)
plt.imshow(without_background, cmap='gray', vmin=0, vmax=255, interpolation='nearest')
plt.tick_params(axis='both', left=False, top=False, right=False, bottom=False,
                labelleft=False, labeltop=False, labelright=False, labelbottom=False)
plt.show()
```
| github_jupyter |
# Rasterio plotting of Landsat-8 scenes
In this notebook, we will download bands of a Landsat-8 scene, visualize them with [rasterio's plotting module]( https://rasterio.readthedocs.io/en/latest/topics/plotting.html), and write an RGB image as rendered GeoTIFF.
```
import os
import matplotlib.pyplot as plt
import numpy as np
import rasterio
from rasterio.plot import show
import requests
from skimage import exposure
%matplotlib inline
```
## Download and read bands
```
landsat_url_suffixes = {'red': 'B4', 'green': 'B3', 'blue': 'B2', 'qa': 'BQA'}
landsat_url_prefix = 'http://landsat-pds.s3.amazonaws.com/c1/L8/008/067/LC08_L1TP_008067_20190405_20190405_01_RT/LC08_L1TP_008067_20190405_20190405_01_RT_'
landsat_urls = {k: f'{landsat_url_prefix}{v}.TIF' for k, v in landsat_url_suffixes.items()}
def get_bands(band_urls, data_path='data', file_format='tif'):
    """Download and cache spectral bands of a satellite image.

    Parameters
    ----------
    band_urls : dict
        URLs of individual bands: {<band_name>: <url>}
    data_path : string (optional)
        Location to save the data
    file_format : string (optional)
        File format of band

    Returns
    -------
    bands : dict
        Band arrays keyed by band name, plus a 'profile' entry holding
        the rasterio profile of the first band read.
    """
    if not os.path.exists(data_path):
        os.makedirs(data_path)

    bands = {}
    for name, url in band_urls.items():
        print(os.path.basename(url))
        band_path = os.path.join(data_path, '{}.{}'.format(name, file_format))
        if not os.path.exists(band_path):
            print('Downloading...')
            r = requests.get(url)
            # Fail loudly on HTTP errors instead of silently caching an
            # error page as if it were image data.
            r.raise_for_status()
            with open(band_path, 'wb') as f:
                f.write(r.content)
        else:
            print('Already downloaded...')
        with rasterio.open(band_path) as src:
            print('Reading...\n')
            bands[name] = src.read(1)
            # Keep the geospatial profile of the first band only; all
            # bands of one scene share the same grid.
            if 'profile' not in bands:
                bands['profile'] = src.profile
    return bands

landsat_bands = get_bands(landsat_urls, data_path='data', file_format='tif')
```
## Plot individual bands
Use `rasterio.plot.show()` to plot individual bands. The `transform` argument changes the image extent to the spatial bounds of the image.
```
def plot_rgb_bands(bands):
    """Plot the red, green and blue bands side by side.

    Each band is clipped at its own 95th percentile so a few very bright
    pixels don't wash out the display stretch.
    """
    fig, (axr, axg, axb) = plt.subplots(1, 3, figsize=(21, 7))
    transform = bands['profile']['transform']
    # Bug fix: the green and blue panels previously used the *red* band's
    # 95th percentile for vmax (copy-paste error); each band now uses its own.
    show(bands['red'], ax=axr, cmap='Reds', title='Red band', transform=transform, vmax=np.percentile(bands['red'], 95))
    show(bands['green'], ax=axg, cmap='Greens', title='Green band', transform=transform, vmax=np.percentile(bands['green'], 95))
    show(bands['blue'], ax=axb, cmap='Blues', title='Blue band', transform=transform, vmax=np.percentile(bands['blue'], 95))
    plt.show()

plot_rgb_bands(landsat_bands)
```
## Create RGB stack
```
def create_rgb_stack(bands, method='rescaling', percentile=2, clip_limit=0.03):
    """Create RGB stack from RGB bands.

    Parameters
    ----------
    bands : dict
        Band arrays in {<band_name>: <array>} format, including 'red',
        'green', 'blue', and 'qa' (optional) keys
    method : string (optional)
        Method for modifying the band intensities. 'rescaling' stretches
        or shrinks the intensity range. 'clahe' applies Contrast Limited
        Adaptive Histogram Equalization, which is an algorithm for
        local contrast enhancement.
    percentile : int (optional)
        Shorthand for percentile range to compute (from percentile to
        100 - percentile) for intensity rescaling. Required when
        method='rescaling'.
    clip_limit : float (optional)
        Clipping limit. Required when method='clahe'.

    Returns
    -------
    ndarray
        RGB array (shape=(3, height, width), dtype='uint8')

    Raises
    ------
    ValueError
        If `method` is neither 'rescaling' nor 'clahe'.
    """
    if method not in ('rescaling', 'clahe'):
        # Fail early with a clear message instead of an unbound-variable
        # NameError further down.
        raise ValueError("method must be 'rescaling' or 'clahe', got {!r}".format(method))
    modified_bands = []
    for band in [bands['red'], bands['green'], bands['blue']]:
        if method == 'rescaling':
            # Calculate percentiles, excluding fill pixels (QA == 1
            # presumably marks fill -- TODO confirm against the BQA spec).
            try:
                fill_mask = bands['qa'] == 1
                masked_band = np.ma.masked_where(fill_mask, band)
                masked_band = np.ma.filled(masked_band.astype('float'), np.nan)
            except KeyError:
                # No QA band supplied: use the raw band as-is.
                masked_band = band
            vmin, vmax = np.nanpercentile(masked_band,
                                          (percentile, 100 - percentile))
            # Rescale to percentile range
            modified_band = exposure.rescale_intensity(
                band, in_range=(vmin, vmax), out_range='uint8')
            modified_band = modified_band.astype(np.uint8)
        else:  # method == 'clahe'
            # Apply local histogram equalization; result is float in [0, 1].
            modified_band = exposure.equalize_adapthist(
                band, clip_limit=clip_limit)
            modified_band = (modified_band * 255).astype(np.uint8)
        modified_bands.append(modified_band)
    return np.stack(modified_bands)

landsat_bands['rgb'] = create_rgb_stack(landsat_bands, method='clahe')
```
## Plot RGB image
```
def plot_rgb_image(bands):
    """Render the RGB stack as one geo-referenced 10x10-inch image."""
    transform = bands['profile']['transform']
    plt.figure(figsize=(10, 10))
    show(bands['rgb'], transform=transform)

plot_rgb_image(landsat_bands)
```
## Write RGB image
Write the RGB image as GeoTIFF and set 'RGB' color interpretation.
```
def write_rgb_image(bands, data_path='data'):
    """Write the RGB stack as a rendered GeoTIFF with RGB color interpretation."""
    profile = bands['profile']
    profile.update(driver='GTiff', dtype=rasterio.uint8, count=3, photometric='RGB')
    rgb_path = os.path.join(data_path, 'rgb.tif')
    with rasterio.open(rgb_path, 'w', **profile) as dst:
        # rasterio band indices are 1-based.
        for band_index, band in enumerate(bands['rgb'], start=1):
            dst.write_band(band_index, band)

write_rgb_image(landsat_bands, data_path='data')
```
| github_jupyter |
```
%load_ext autoreload
%autoreload 2
%aimport utils_1_1
import pandas as pd
import numpy as np
import altair as alt
from altair_saver import save
import datetime
import dateutil.parser
from os.path import join
from constants_1_1 import SITE_FILE_TYPES
from utils_1_1 import (
get_site_file_paths,
get_site_file_info,
get_site_ids,
get_visualization_subtitle,
get_country_color_map,
)
from theme import apply_theme
from web import for_website
alt.data_transformers.disable_max_rows();  # Allow using rows more than 5000

data_release='2021-04-27'

# Site-level length-of-stay summary table (deaths removed).
df = pd.read_csv(join("..", "data", "Phase2.1SurvivalRSummariesPublic", "ToShare", "table.stay.rmDead.toShare.csv"))
df = df.drop(columns=['Unnamed: 0'])
#df = df.rename(columns={"n.early": 'First', 'n.late': "Second", 'week.setting': 'week'})
colors = ['#E79F00', '#0072B2', '#D45E00', '#CB7AA7', '#029F73', '#57B4E9']
sites = ['META', 'APHP', 'FRBDX', 'ICSM', 'UKFR', 'NWU', 'BIDMC', 'MGB', 'UCLA', 'UMICH', 'UPENN', 'UPITT', 'VA1', 'VA2', 'VA3', 'VA4', 'VA5']
site_colors = ['black', '#0072B2', '#0072B2', '#0072B2', '#0072B2', '#CB7AA7', '#D45E00', '#D45E00', '#D45E00', '#D45E00', '#D45E00', '#D45E00', '#D45E00', '#D45E00', '#D45E00','#D45E00','#D45E00']
#df = pd.melt(df, id_vars=['siteid', 'week'], value_vars=['First', 'Second'], var_name='wave', value_name='n')
df.siteid = df.siteid.apply(lambda x: x.upper())
df.week = df.week.apply(lambda x: 'Week3' if x == 'week3' else x.capitalize())
# Map week codes to human-readable bucket labels.
# Bug fix: the middle bucket was labelled '1=2 Week'; given the
# neighbouring '<=1 Week' / '2+ Week' buckets it should read '1-2 Week'.
df['week'] = df['week'].apply(lambda x: {
    'Week1': '<=1 Week',
    'Week2': '1-2 Week',
    'Week3': '2+ Week'
}[x])
siteid = df.siteid.unique().tolist()
print(df.siteid.unique().tolist())
df
```
# All Sites
```
# Hollow point marker used when charts overlay points on lines.
point=alt.OverlayMarkDef(filled=False, fill='white', strokeWidth=2)

def plot_stay(df=None, wave='First'):
    """Faceted bar chart of length-of-stay proportions per site for one wave.

    Drops the pooled META row (site-level view only) and keeps only rows
    of the requested wave ('First' or 'Second').
    """
    d = df.copy()
    d = d[d.siteid != 'META']
    d = d[d.wave == wave]
    plot = alt.Chart(
        d
    ).mark_bar(
        size=30,
        strokeWidth=0,
        stroke='black'
    ).encode(
        # NOTE(review): the label toggle tests wave=='early' but this
        # function is called with 'First'/'Second', so x labels are
        # always drawn -- confirm whether the 'early' check is stale.
        x=alt.X("week:N", title=None, axis=alt.Axis(labelAngle=0, tickCount=10, labels=False if wave=='early' else True), scale=alt.Scale(padding=0), sort=['Week1', 'Week2', 'Week3']),
        y=alt.Y("n:Q", title=None, scale=alt.Scale(zero=True, domain=[0, 1]), axis=alt.Axis(format='%')),
        color=alt.Color("wave:N", scale=alt.Scale(domain=['First', 'Second'], range=['#D45E00', '#0072B2']), title="Wave")
    ).properties(
        width=150,
        height=100
    )
    # Percentage labels drawn just above each bar.
    text = plot.mark_text(
        size=16, dx=0, dy=-5, color='white', baseline='bottom', fontWeight=500
    ).encode(
        # x=alt.X('month:N'),
        # y=alt.Y('value:Q', stack='zero'),
        x=alt.X("week:N", title=None, axis=alt.Axis(labels=False)),
        y=alt.Y("n:Q", title=None),
        # detail='cat:N',
        text=alt.Text('n:Q', format='.0%'),
        # order="order:O",
        # opacity=alt.Opacity('visibility:N', scale=alt.Scale(domain=[True, False], range=[1, 0]))
    )
    # One facet column per site; site headers only on the 'First' row so
    # the vertically stacked waves share a single header line.
    plot = (plot + text).facet(
        column=alt.Column("siteid:N", header=alt.Header(title=None, labels=True if wave=='First' else False), sort=sites)
    ).resolve_scale(color='shared')
    # plot = plot.properties(
    #     title={
    #         "text": [
    #             f"Length Of Stay"
    #         ],
    #         "dx": 30,
    #         "subtitle": [
    #             get_visualization_subtitle(data_release=data_release, with_num_sites=False)
    #         ],
    #         "subtitleColor": "gray",
    #     }
    # )
    return plot
# Build one chart row per wave and stack them vertically.
e = plot_stay(df=df, wave='First')
l = plot_stay(df=df, wave='Second')
print(df)
plot = alt.vconcat(e, l, spacing=10).properties(
    title={
        "text": [
            f"Length Of Stay"
        ],
        "dx": 40,
        # "subtitle": [
        #     get_visualization_subtitle(data_release=data_release, with_num_sites=False)
        # ],
        "subtitleColor": "gray",
    }
)
# Apply the shared project theme (fonts, legend and header placement).
plot = apply_theme(
    plot,
    axis_y_title_font_size=16,
    title_anchor='start',
    header_label_orient='top',
    legend_orient='top',
    legend_title_orient='left',
    axis_label_font_size=14,
    header_label_font_size=16,
    point_size=100
)
plot
save(plot,join("..", "result", "final-stay-site-rmDead.png"), scalefactor=8.0)
point=alt.OverlayMarkDef(filled=False, fill='white', strokeWidth=2)

def plot_stay(df=None, wave='early'):
    """Bar chart of pooled (META) length-of-stay proportions, faceted by week.

    NOTE(review): the `wave` parameter is currently unused -- the wave
    filter below is commented out, so both waves are drawn side by side
    within each week facet.
    """
    d = df.copy()
    d = d[d.siteid == 'META']
    # d = d[d.wave == wave]
    plot = alt.Chart(
        d
    ).mark_bar(
        size=30,
        strokeWidth=0,
        stroke='black'
    ).encode(
        x=alt.X("wave:N", title=None, axis=alt.Axis(labelAngle=0, tickCount=10, labels=True), scale=alt.Scale(padding=0), sort=['Week1', 'Week2', 'Week3']),
        y=alt.Y("n:Q", title=None, scale=alt.Scale(zero=False, domain=[0, 0.65]), axis=alt.Axis(format="%")),
        color=alt.Color("wave:N", scale=alt.Scale(domain=['First', 'Second'], range=['#D45E00', '#0072B2']), title="Wave")
    ).properties(
        width=100,
        height=180
    )
    # Error bars from the precomputed confidence-interval columns.
    tick = plot.mark_errorbar(
        opacity=0.7 #, color='black'
    ).encode(
        x=alt.X("wave:N"),
        y=alt.Y("ci_l:Q"),
        y2=alt.Y2("ci_u:Q"),
        stroke=alt.value('black'),
        strokeWidth=alt.value(1)
    )
    # Percentage labels drawn just above each bar.
    text = plot.mark_text(
        size=16, dx=0, dy=-5, color='white', baseline='bottom', fontWeight=500
    ).encode(
        # x=alt.X('month:N'),
        # y=alt.Y('value:Q', stack='zero'),
        x=alt.X("wave:N", title=None, axis=alt.Axis(labels=False)),
        y=alt.Y("n:Q", title=None),
        # detail='cat:N',
        text=alt.Text('n:Q', format='.0%'),
        # order="order:O",
        # opacity=alt.Opacity('visibility:N', scale=alt.Scale(domain=[True, False], range=[1, 0]))
    )
    # One facet column per length-of-stay bucket.
    plot = (plot + text+tick).facet(
        column=alt.Column("week:N", header=alt.Header(title=None, labels=True), sort=sites)
    ).resolve_scale(color='shared')
    # plot = plot.properties(
    #     title={
    #         "text": [
    #             f"Length Of Stay"
    #         ],
    #         "dx": 30,
    #         "subtitle": [
    #             get_visualization_subtitle(data_release=data_release, with_num_sites=False)
    #         ],
    #         "subtitleColor": "gray",
    #     }
    # )
    return plot
e = plot_stay(df=df, wave='early')
# l = plot_stay(df=df, wave='late')
# plot = alt.hconcat(e, l, spacing=30).resolve_scale(y='shared').properties(
plot = e.properties(
    title={
        "text": [
            f"Length of Stay Distribution by Pandemic Wave"
        ],
        "dx": 30,
        # "subtitle": [
        #     get_visualization_subtitle(data_release=data_release, with_num_sites=False)
        # ],
        "subtitleColor": "gray",
    }
)
# Apply the shared project theme (fonts, legend and header placement).
plot = apply_theme(
    plot,
    axis_y_title_font_size=16,
    title_anchor='start',
    legend_orient='bottom',
    legend_title_orient='left',
    axis_label_font_size=14,
    header_label_orient='top',
    header_label_font_size=16,
    point_size=100
)
plot
save(plot,join("..", "result", "final-stay-meta-rmDead.png"), scalefactor=8.0)
```
## Country
```
print(df)
point=alt.OverlayMarkDef(filled=False, fill='white', strokeWidth=2)
#df['ci_l'] = df.n * 0.9
#df['ci_u'] = df.n * 1.1

def plot_stay(df=None, wave='early', country=None):
    """Bar chart of length-of-stay proportions for one country-level META row.

    `country` selects the pooled row (e.g. 'META-USA') and, stripped of
    its 'META-' prefix, becomes the panel title.
    NOTE(review): `wave` is unused here as well (filter commented out).
    """
    d = df.copy()
    d = d[d.siteid == country]
    # d = d[d.wave == wave]
    plot = alt.Chart(
        d
    ).mark_bar(
        size=30,
        strokeWidth=0,
        stroke='black'
    ).encode(
        x=alt.X("wave:N", title=None, axis=alt.Axis(labelAngle=0, tickCount=10, labels=True), scale=alt.Scale(padding=0), sort=['Week1', 'Week2', 'Week3']),
        y=alt.Y("n:Q", title=None, scale=alt.Scale(zero=True, domain=[0, 0.9]), axis=alt.Axis(format="%")),
        color=alt.Color("wave:N", scale=alt.Scale(domain=['First', 'Second'], range=['#D45E00', '#0072B2']), title="Wave")
    ).properties(
        width=100,
        height=180
    )
    # Error bars from the precomputed confidence-interval columns.
    tick = plot.mark_errorbar(
        opacity=0.7 #, color='black'
    ).encode(
        x=alt.X("wave:N"),
        y=alt.Y("ci_l:Q"),
        y2=alt.Y2("ci_u:Q"),
        stroke=alt.value('black'),
        strokeWidth=alt.value(1)
    )
    # Percentage labels drawn just above each bar.
    text = plot.mark_text(
        size=16, dx=0, dy=-5, color='white', baseline='bottom', fontWeight=500
    ).encode(
        # x=alt.X('month:N'),
        # y=alt.Y('value:Q', stack='zero'),
        x=alt.X("wave:N", title=None, axis=alt.Axis(labels=False)),
        y=alt.Y("n:Q", title=None),
        # detail='cat:N',
        text=alt.Text('n:Q', format='.0%'),
        # order="order:O",
        # opacity=alt.Opacity('visibility:N', scale=alt.Scale(domain=[True, False], range=[1, 0]))
    )
    plot = (plot + text+tick).facet(
        column=alt.Column("week:N", header=alt.Header(title=None, labels=True), sort=sites)
    ).resolve_scale(color='shared')
    # Strip the 'META-' prefix so the panel title is just the country name.
    plot = plot.properties(
        title={
            "text": [
                country.replace("META-", "")
            ],
            "dx": 30,
            # "subtitle": [
            #     get_visualization_subtitle(data_release=data_release, with_num_sites=False)
            # ],
            "subtitleColor": "gray",
        }
    )
    # plot = plot.properties(
    #     title={
    #         "text": [
    #             f"Length Of Stay"
    #         ],
    #         "dx": 30,
    #         "subtitle": [
    #             get_visualization_subtitle(data_release=data_release, with_num_sites=False)
    #         ],
    #         "subtitleColor": "gray",
    #     }
    # )
    return plot
# Two rows of country panels: USA/France on top, Italy/Germany/Spain below.
country_list1=["META-USA", "META-FRANCE"]
country_list2=["META-ITALY", "META-GERMANY", "META-SPAIN"]
e1 = alt.hconcat(*(plot_stay(df=df, wave='early', country=country) for country in country_list1))
e2 = alt.hconcat(*(plot_stay(df=df, wave='early', country=country) for country in country_list2))
e=alt.vconcat(e1,e2)
# l = plot_stay(df=df, wave='late')
# plot = alt.hconcat(e, l, spacing=30).resolve_scale(y='shared').properties(
# Apply the shared project theme (fonts, legend and header placement).
plot = apply_theme(
    e,
    axis_y_title_font_size=16,
    title_anchor='start',
    legend_orient='bottom',
    legend_title_orient='left',
    axis_label_font_size=14,
    header_label_orient='top',
    header_label_font_size=16,
    point_size=100
)
plot
save(plot,join("..", "result", "final-stay-country-rmDead.png"), scalefactor=8.0)
```
| github_jupyter |
# Example: CanvasXpress splom Chart No. 3
This example page demonstrates how to, using the Python package, create a chart that matches the CanvasXpress online example located at:
https://www.canvasxpress.org/examples/splom-3.html
This example is generated using the reproducible JSON obtained from the above page and the `canvasxpress.util.generator.generate_canvasxpress_code_from_json_file()` function.
Everything required for the chart to render is included in the code below. Simply run the code block.
```
from canvasxpress.canvas import CanvasXpress
from canvasxpress.js.collection import CXEvents
from canvasxpress.render.jupyter import CXNoteBook
cx = CanvasXpress(
render_to="splom3",
data={
"z": {
"Species": [
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica"
]
},
"y": {
"vars": [
"s1",
"s2",
"s3",
"s4",
"s5",
"s6",
"s7",
"s8",
"s9",
"s10",
"s11",
"s12",
"s13",
"s14",
"s15",
"s16",
"s17",
"s18",
"s19",
"s20",
"s21",
"s22",
"s23",
"s24",
"s25",
"s26",
"s27",
"s28",
"s29",
"s30",
"s31",
"s32",
"s33",
"s34",
"s35",
"s36",
"s37",
"s38",
"s39",
"s40",
"s41",
"s42",
"s43",
"s44",
"s45",
"s46",
"s47",
"s48",
"s49",
"s50",
"s51",
"s52",
"s53",
"s54",
"s55",
"s56",
"s57",
"s58",
"s59",
"s60",
"s61",
"s62",
"s63",
"s64",
"s65",
"s66",
"s67",
"s68",
"s69",
"s70",
"s71",
"s72",
"s73",
"s74",
"s75",
"s76",
"s77",
"s78",
"s79",
"s80",
"s81",
"s82",
"s83",
"s84",
"s85",
"s86",
"s87",
"s88",
"s89",
"s90",
"s91",
"s92",
"s93",
"s94",
"s95",
"s96",
"s97",
"s98",
"s99",
"s100",
"s101",
"s102",
"s103",
"s104",
"s105",
"s106",
"s107",
"s108",
"s109",
"s110",
"s111",
"s112",
"s113",
"s114",
"s115",
"s116",
"s117",
"s118",
"s119",
"s120",
"s121",
"s122",
"s123",
"s124",
"s125",
"s126",
"s127",
"s128",
"s129",
"s130",
"s131",
"s132",
"s133",
"s134",
"s135",
"s136",
"s137",
"s138",
"s139",
"s140",
"s141",
"s142",
"s143",
"s144",
"s145",
"s146",
"s147",
"s148",
"s149",
"s150"
],
"smps": [
"Sepal.Length",
"Sepal.Width",
"Petal.Length",
"Petal.Width"
],
"data": [
[
5.1,
3.5,
1.4,
0.2
],
[
4.9,
3,
1.4,
0.2
],
[
4.7,
3.2,
1.3,
0.2
],
[
4.6,
3.1,
1.5,
0.2
],
[
5,
3.6,
1.4,
0.2
],
[
5.4,
3.9,
1.7,
0.4
],
[
4.6,
3.4,
1.4,
0.3
],
[
5,
3.4,
1.5,
0.2
],
[
4.4,
2.9,
1.4,
0.2
],
[
4.9,
3.1,
1.5,
0.1
],
[
5.4,
3.7,
1.5,
0.2
],
[
4.8,
3.4,
1.6,
0.2
],
[
4.8,
3,
1.4,
0.1
],
[
4.3,
3,
1.1,
0.1
],
[
5.8,
4,
1.2,
0.2
],
[
5.7,
4.4,
1.5,
0.4
],
[
5.4,
3.9,
1.3,
0.4
],
[
5.1,
3.5,
1.4,
0.3
],
[
5.7,
3.8,
1.7,
0.3
],
[
5.1,
3.8,
1.5,
0.3
],
[
5.4,
3.4,
1.7,
0.2
],
[
5.1,
3.7,
1.5,
0.4
],
[
4.6,
3.6,
1,
0.2
],
[
5.1,
3.3,
1.7,
0.5
],
[
4.8,
3.4,
1.9,
0.2
],
[
5,
3,
1.6,
0.2
],
[
5,
3.4,
1.6,
0.4
],
[
5.2,
3.5,
1.5,
0.2
],
[
5.2,
3.4,
1.4,
0.2
],
[
4.7,
3.2,
1.6,
0.2
],
[
4.8,
3.1,
1.6,
0.2
],
[
5.4,
3.4,
1.5,
0.4
],
[
5.2,
4.1,
1.5,
0.1
],
[
5.5,
4.2,
1.4,
0.2
],
[
4.9,
3.1,
1.5,
0.2
],
[
5,
3.2,
1.2,
0.2
],
[
5.5,
3.5,
1.3,
0.2
],
[
4.9,
3.6,
1.4,
0.1
],
[
4.4,
3,
1.3,
0.2
],
[
5.1,
3.4,
1.5,
0.2
],
[
5,
3.5,
1.3,
0.3
],
[
4.5,
2.3,
1.3,
0.3
],
[
4.4,
3.2,
1.3,
0.2
],
[
5,
3.5,
1.6,
0.6
],
[
5.1,
3.8,
1.9,
0.4
],
[
4.8,
3,
1.4,
0.3
],
[
5.1,
3.8,
1.6,
0.2
],
[
4.6,
3.2,
1.4,
0.2
],
[
5.3,
3.7,
1.5,
0.2
],
[
5,
3.3,
1.4,
0.2
],
[
7,
3.2,
4.7,
1.4
],
[
6.4,
3.2,
4.5,
1.5
],
[
6.9,
3.1,
4.9,
1.5
],
[
5.5,
2.3,
4,
1.3
],
[
6.5,
2.8,
4.6,
1.5
],
[
5.7,
2.8,
4.5,
1.3
],
[
6.3,
3.3,
4.7,
1.6
],
[
4.9,
2.4,
3.3,
1
],
[
6.6,
2.9,
4.6,
1.3
],
[
5.2,
2.7,
3.9,
1.4
],
[
5,
2,
3.5,
1
],
[
5.9,
3,
4.2,
1.5
],
[
6,
2.2,
4,
1
],
[
6.1,
2.9,
4.7,
1.4
],
[
5.6,
2.9,
3.6,
1.3
],
[
6.7,
3.1,
4.4,
1.4
],
[
5.6,
3,
4.5,
1.5
],
[
5.8,
2.7,
4.1,
1
],
[
6.2,
2.2,
4.5,
1.5
],
[
5.6,
2.5,
3.9,
1.1
],
[
5.9,
3.2,
4.8,
1.8
],
[
6.1,
2.8,
4,
1.3
],
[
6.3,
2.5,
4.9,
1.5
],
[
6.1,
2.8,
4.7,
1.2
],
[
6.4,
2.9,
4.3,
1.3
],
[
6.6,
3,
4.4,
1.4
],
[
6.8,
2.8,
4.8,
1.4
],
[
6.7,
3,
5,
1.7
],
[
6,
2.9,
4.5,
1.5
],
[
5.7,
2.6,
3.5,
1
],
[
5.5,
2.4,
3.8,
1.1
],
[
5.5,
2.4,
3.7,
1
],
[
5.8,
2.7,
3.9,
1.2
],
[
6,
2.7,
5.1,
1.6
],
[
5.4,
3,
4.5,
1.5
],
[
6,
3.4,
4.5,
1.6
],
[
6.7,
3.1,
4.7,
1.5
],
[
6.3,
2.3,
4.4,
1.3
],
[
5.6,
3,
4.1,
1.3
],
[
5.5,
2.5,
4,
1.3
],
[
5.5,
2.6,
4.4,
1.2
],
[
6.1,
3,
4.6,
1.4
],
[
5.8,
2.6,
4,
1.2
],
[
5,
2.3,
3.3,
1
],
[
5.6,
2.7,
4.2,
1.3
],
[
5.7,
3,
4.2,
1.2
],
[
5.7,
2.9,
4.2,
1.3
],
[
6.2,
2.9,
4.3,
1.3
],
[
5.1,
2.5,
3,
1.1
],
[
5.7,
2.8,
4.1,
1.3
],
[
6.3,
3.3,
6,
2.5
],
[
5.8,
2.7,
5.1,
1.9
],
[
7.1,
3,
5.9,
2.1
],
[
6.3,
2.9,
5.6,
1.8
],
[
6.5,
3,
5.8,
2.2
],
[
7.6,
3,
6.6,
2.1
],
[
4.9,
2.5,
4.5,
1.7
],
[
7.3,
2.9,
6.3,
1.8
],
[
6.7,
2.5,
5.8,
1.8
],
[
7.2,
3.6,
6.1,
2.5
],
[
6.5,
3.2,
5.1,
2
],
[
6.4,
2.7,
5.3,
1.9
],
[
6.8,
3,
5.5,
2.1
],
[
5.7,
2.5,
5,
2
],
[
5.8,
2.8,
5.1,
2.4
],
[
6.4,
3.2,
5.3,
2.3
],
[
6.5,
3,
5.5,
1.8
],
[
7.7,
3.8,
6.7,
2.2
],
[
7.7,
2.6,
6.9,
2.3
],
[
6,
2.2,
5,
1.5
],
[
6.9,
3.2,
5.7,
2.3
],
[
5.6,
2.8,
4.9,
2
],
[
7.7,
2.8,
6.7,
2
],
[
6.3,
2.7,
4.9,
1.8
],
[
6.7,
3.3,
5.7,
2.1
],
[
7.2,
3.2,
6,
1.8
],
[
6.2,
2.8,
4.8,
1.8
],
[
6.1,
3,
4.9,
1.8
],
[
6.4,
2.8,
5.6,
2.1
],
[
7.2,
3,
5.8,
1.6
],
[
7.4,
2.8,
6.1,
1.9
],
[
7.9,
3.8,
6.4,
2
],
[
6.4,
2.8,
5.6,
2.2
],
[
6.3,
2.8,
5.1,
1.5
],
[
6.1,
2.6,
5.6,
1.4
],
[
7.7,
3,
6.1,
2.3
],
[
6.3,
3.4,
5.6,
2.4
],
[
6.4,
3.1,
5.5,
1.8
],
[
6,
3,
4.8,
1.8
],
[
6.9,
3.1,
5.4,
2.1
],
[
6.7,
3.1,
5.6,
2.4
],
[
6.9,
3.1,
5.1,
2.3
],
[
5.8,
2.7,
5.1,
1.9
],
[
6.8,
3.2,
5.9,
2.3
],
[
6.7,
3.3,
5.7,
2.5
],
[
6.7,
3,
5.2,
2.3
],
[
6.3,
2.5,
5,
1.9
],
[
6.5,
3,
5.2,
2
],
[
6.2,
3.4,
5.4,
2.3
],
[
5.9,
3,
5.1,
1.8
]
]
},
"m": {
"Name": "Anderson's Iris data set",
"Description": "The data set consists of 50 Ss from each of three species of Iris (Iris setosa, Iris virginica and Iris versicolor). Four features were measured from each S: the length and the width of the sepals and petals, in centimetres.",
"Reference": "R. A. Fisher (1936). The use of multiple measurements in taxonomic problems. Annals of Eugenics 7 (2): 179-188."
}
},
config={
"broadcast": True,
"colorBy": "Species",
"graphType": "Scatter2D",
"layoutAdjust": True,
"scatterPlotMatrix": True,
"scatterPlotMatrixType": "lower",
"theme": "CanvasXpress"
},
width=613,
height=613,
events=CXEvents(),
after_render=[],
other_init_params={
"version": 35,
"events": False,
"info": False,
"afterRenderInit": False,
"noValidate": True
}
)
# Render the configured splom chart in the notebook and also write it to HTML.
display = CXNoteBook(cx)
display.render(output_file="splom_3.html")
```
| github_jupyter |
(nm_heun_method)=
# Heun's method
```{index} Heun's method
```
{ref}`Euler's method <nm_euler_method>` is first-order accurate because it calculates the derivative using only the information available at the beginning of the time step. Higher-order convergence can be obtained if we also employ information from other points in the interval - the more points we employ, the more accurate the method for solving ODEs can be. [Heun's method](https://en.wikipedia.org/wiki/Heun%27s_method) uses 2 points compared to Euler's one single point, increasing accuracy.
Heun's method may be derived by attempting to use derivative information at both the start and the end of the interval:
\\[u(t+\Delta t)\approx u(t)+\frac{\Delta t}{2}\left(u'(t)+u'(t+\Delta t)\right),\\\\\\
u(t+\Delta t)\approx u(t)+\frac{\Delta t}{2}\big(f(u(t),t)+f(u(t+\Delta t),t+\Delta t)\big).\\]
The difficulty with this approach is that we now require \\(u(t+\Delta t)\\) in order to calculate the final term in the equation, and that's what we set out to calculate so we don't know it yet! So at this point we have an example of an implicit algorithm and at this stage the above ODE solver would be referred to as the trapezoidal method if we could solve it exactly for \\(u(t+\Delta t)\\).
The trapezoidal update above, unlike Euler's method, is implicit, meaning that we do not have all of the information needed: while we know \\(u(t)\\) and \\(f(u(t),t)\\), we lack \\(u(t+\Delta t)\\) and hence \\(f(u(t+\Delta t),t+\Delta t)\\), and we have to deal with not knowing these somehow. Heun's method turns this into an explicit scheme by first *predicting* \\(u(t+\Delta t)\\) and then using that prediction in place of the unknown value.
The simplest solution to this dilemma, the one adopted in Heun's method, is to use a first guess at \\(x(t+\Delta t)\\) calculated using Euler's method:
\\[ \tilde{u}(t+\Delta t)=u(t)+\Delta tf(u(t),t). \\]
This first guess is then used to solve for \\(u(t+\Delta t)\\) using:
\\[ u(t+\Delta t)\approx u(t)+\frac{\Delta t}{2}\big(f(u(t),t)+f(\tilde{u}(t+\Delta t),t+\Delta t)\big).\\]
The generic term for schemes of this type is **predictor-corrector**. The initial calculation of \\(\tilde{u}(t+\Delta t)\\) is used to predict the new value of \\(u\\) and then this is used in a more accurate calculation to produce a more correct value.
Note that Heun's method is \\(O(\Delta t^2)\\), i.e. **2nd order accurate**.
## Implementation
We will write a function `heun(f,u0,t0,t_max,dt)` that takes as arguments the function \\(f(u,t)\\) on the RHS of our ODE,
an initial value for \\(u\\), the start and end time of the integration, and the time step.
We will use it to integrate the following ODEs up to time \\(t=10\\)
\\[u'(t)=u(t),\quad u(0)=1,\\]
plot the results and compare them to Euler method.
First let's define the functions:
```
import matplotlib.pyplot as plt
import numpy as np
def euler(f, u0, t0, t_max, dt):
    """Integrate u'(t) = f(u, t) with the forward (explicit) Euler scheme.

    Starting from (u0, t0), steps by dt until t reaches t_max and returns
    the tuple (u_all, t_all) of solution values and matching time levels.
    """
    u_all = [u0]
    t_all = [t0]
    u, t = u0, t0
    while t < t_max:
        u += dt * f(u, t)
        t += dt
        u_all.append(u)
        t_all.append(t)
    return (u_all, t_all)
def heun(f, u0, t0, t_max, dt):
    """Integrate u'(t) = f(u, t) with Heun's predictor-corrector scheme.

    An Euler step predicts u(t + dt); the trapezoidal average of the slopes
    at both ends of the interval then corrects it (2nd-order accurate).
    Returns (u_all, t_all) like euler().
    """
    u_all = [u0]
    t_all = [t0]
    u, t = u0, t0
    while t < t_max:
        predictor = u + dt * f(u, t)  # Euler guess for u(t + dt)
        u = u + 0.5 * dt * (f(u, t) + f(predictor, t + dt))
        t += dt
        u_all.append(u)
        t_all.append(t)
    return (u_all, t_all)
def f1(u, t):
    """RHS of u' = u (exponential growth); t is unused."""
    return u
def f2(u, t):
    """RHS of u' = cos(u); t is unused."""
    return np.cos(u)
```
Plot the solution for the first function:
```
# Integration parameters: step size, initial condition, time window.
dt = 0.4
u0 = 1.0
t0 = 0.0
tf = 10.0

# set up figure
fig = plt.figure(figsize=(7, 5))
ax1 = plt.subplot(111)

# Numerical solutions of u' = u, u(0) = 1 with both schemes.
(u_all,t_all) = euler(f1,u0,t0,tf,dt)
ax1.plot(t_all, u_all,'b',label='euler')

(u_all,t_all) = heun(f1,u0,t0,tf,dt)
ax1.plot(t_all, u_all,'r',label='heun')

# exact solution: u(t) = exp(t)
ax1.plot(t_all, np.exp(t_all),'k',label='exact')

ax1.set_xlabel('t', fontsize=14)
ax1.set_ylabel('u(t)', fontsize=14)
ax1.grid(True)
ax1.legend(loc='best')
plt.show()
```
| github_jupyter |
# Temporal-Difference Methods
In this notebook, you will write your own implementations of many Temporal-Difference (TD) methods.
While we have provided some starter code, you are welcome to erase these hints and write your code from scratch.
---
### Part 0: Explore CliffWalkingEnv
We begin by importing the necessary packages.
```
import sys
import gym
import numpy as np
from collections import defaultdict, deque
import matplotlib.pyplot as plt
%matplotlib inline
import check_test
from plot_utils import plot_values
```
Use the code cell below to create an instance of the [CliffWalking](https://github.com/openai/gym/blob/master/gym/envs/toy_text/cliffwalking.py) environment.
```
# Create the CliffWalking environment (4x12 grid world).
env = gym.make('CliffWalking-v0')
```
The agent moves through a $4\times 12$ gridworld, with states numbered as follows:
```
[[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
[12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23],
[24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35],
[36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47]]
```
At the start of any episode, state `36` is the initial state. State `47` is the only terminal state, and the cliff corresponds to states `37` through `46`.
The agent has 4 potential actions:
```
UP = 0
RIGHT = 1
DOWN = 2
LEFT = 3
```
Thus, $\mathcal{S}^+=\{0, 1, \ldots, 47\}$, and $\mathcal{A} =\{0, 1, 2, 3\}$. Verify this by running the code cell below.
```
# Inspect the action and observation spaces of the environment.
print(env.action_space)
print(env.observation_space)
```
In this mini-project, we will build towards finding the optimal policy for the CliffWalking environment. The optimal state-value function is visualized below. Please take the time now to make sure that you understand _why_ this is the optimal state-value function.
_**Note**: You can safely ignore the values of the cliff "states" as these are not true states from which the agent can make decisions. For the cliff "states", the state-value function is not well-defined._
```
# define the optimal state-value function
V_opt = np.zeros((4,12))
# The original wrote these assignments as V_opt[0:13][r]: slicing rows
# first (the slice clamps to all 4 rows) and then taking row r of the
# view -- equivalent to plain V_opt[r], just confusing. Use direct row
# indexing instead; the assigned values are unchanged.
V_opt[0] = -np.arange(3, 15)[::-1]
V_opt[1] = -np.arange(3, 15)[::-1] + 1
V_opt[2] = -np.arange(3, 15)[::-1] + 2
V_opt[3][0] = -13

plot_values(V_opt)
```
### Part 1: TD Control: Sarsa
In this section, you will write your own implementation of the Sarsa control algorithm.
Your algorithm has four arguments:
- `env`: This is an instance of an OpenAI Gym environment.
- `num_episodes`: This is the number of episodes that are generated through agent-environment interaction.
- `alpha`: This is the step-size parameter for the update step.
- `gamma`: This is the discount rate. It must be a value between 0 and 1, inclusive (default value: `1`).
The algorithm returns as output:
- `Q`: This is a dictionary (of one-dimensional arrays) where `Q[s][a]` is the estimated action value corresponding to state `s` and action `a`.
Please complete the function in the code cell below.
(_Feel free to define additional functions to help you to organize your code._)
```
def epsilon_greedy_from_Q(env, Q, epsilon, state):
    """Pick the greedy action w.p. 1 - epsilon, otherwise sample uniformly."""
    if np.random.uniform() >= epsilon:
        return np.argmax(Q[state])
    return env.action_space.sample()
def sarsa(env, num_episodes, alpha, gamma=1.0):
    """On-policy TD control (Sarsa) with an epsilon = 1/episode schedule.

    Returns Q, a dict mapping each state to an array of estimated action
    values (one entry per action).
    """
    # action-value function: unseen states default to all-zero values
    Q = defaultdict(lambda: np.zeros(env.nA))
    for i_episode in range(1, num_episodes + 1):
        # monitor progress every 100 episodes
        if i_episode % 100 == 0:
            print("\rEpisode {}/{}".format(i_episode, num_episodes), end="")
            sys.stdout.flush()
        # exploration decays as episodes accumulate
        epsilon = 1.0 / i_episode
        state = env.reset()
        action = epsilon_greedy_from_Q(env, Q, epsilon, state)
        done = False
        while not done:
            next_state, reward, done, info = env.step(action)
            if done:
                # terminal transition: no bootstrap term
                Q[state][action] += alpha * (reward - Q[state][action])
            else:
                next_action = epsilon_greedy_from_Q(env, Q, epsilon, next_state)
                Q[state][action] += alpha * (
                    reward + gamma * Q[next_state][next_action] - Q[state][action])
                state, action = next_state, next_action
    return Q
```
Use the next code cell to visualize the **_estimated_** optimal policy and the corresponding state-value function.
If the code cell returns **PASSED**, then you have implemented the function correctly! Feel free to change the `num_episodes` and `alpha` parameters that are supplied to the function. However, if you'd like to ensure the accuracy of the unit test, please do not change the value of `gamma` from the default.
```
# obtain the estimated optimal policy and corresponding action-value function
Q_sarsa = sarsa(env, 5000, .01)

# print the estimated optimal policy (-1 marks states never visited)
policy_sarsa = np.array([np.argmax(Q_sarsa[key]) if key in Q_sarsa else -1 for key in np.arange(48)]).reshape(4,12)
check_test.run_check('td_control_check', policy_sarsa)
print("\nEstimated Optimal Policy (UP = 0, RIGHT = 1, DOWN = 2, LEFT = 3, N/A = -1):")
print(policy_sarsa)

# plot the estimated optimal state-value function
V_sarsa = ([np.max(Q_sarsa[key]) if key in Q_sarsa else 0 for key in np.arange(48)])
plot_values(V_sarsa)
```
### Part 2: TD Control: Q-learning
In this section, you will write your own implementation of the Q-learning control algorithm.
Your algorithm has four arguments:
- `env`: This is an instance of an OpenAI Gym environment.
- `num_episodes`: This is the number of episodes that are generated through agent-environment interaction.
- `alpha`: This is the step-size parameter for the update step.
- `gamma`: This is the discount rate. It must be a value between 0 and 1, inclusive (default value: `1`).
The algorithm returns as output:
- `Q`: This is a dictionary (of one-dimensional arrays) where `Q[s][a]` is the estimated action value corresponding to state `s` and action `a`.
Please complete the function in the code cell below.
(_Feel free to define additional functions to help you to organize your code._)
```
def epsilon_greedy_from_Q(env, Q, epsilon, state):
    """Select an action for `state` with an epsilon-greedy policy over Q.

    With probability epsilon a uniformly random action is sampled from the
    environment's action space; otherwise the greedy action
    (argmax of Q[state]) is returned.
    """
    explore = np.random.uniform() < epsilon
    if explore:
        return env.action_space.sample()
    return np.argmax(Q[state])
def q_learning(env, num_episodes, alpha, gamma=1.0):
    """TD control with Q-learning (sarsamax).

    Parameters
    ----------
    env : OpenAI Gym environment instance.
    num_episodes : int
        Number of episodes of agent-environment interaction.
    alpha : float
        Step-size parameter for the update rule.
    gamma : float, optional
        Discount rate in [0, 1] (default 1.0).

    Returns
    -------
    Q : dict of 1-d numpy arrays
        Q[s][a] is the estimated action value for state s, action a.
    """
    Q = defaultdict(lambda: np.zeros(env.nA))
    for episode in range(1, num_episodes + 1):
        # monitor progress every 100 episodes
        if episode % 100 == 0:
            print("\rEpisode {}/{}".format(episode, num_episodes), end="")
            sys.stdout.flush()
        # GLIE schedule: exploration decays as 1/episode
        eps = 1.0 / episode
        state = env.reset()
        done = False
        while not done:
            action = epsilon_greedy_from_Q(env, Q, eps, state)
            next_state, reward, done, _ = env.step(action)
            # Bootstrap from the greedy value of the successor state;
            # a terminal transition contributes no future value.
            target = reward if done else reward + gamma * np.max(Q[next_state])
            Q[state][action] += alpha * (target - Q[state][action])
            state = next_state
    return Q
```
Use the next code cell to visualize the **_estimated_** optimal policy and the corresponding state-value function.
If the code cell returns **PASSED**, then you have implemented the function correctly! Feel free to change the `num_episodes` and `alpha` parameters that are supplied to the function. However, if you'd like to ensure the accuracy of the unit test, please do not change the value of `gamma` from the default.
```
# obtain the estimated optimal policy and corresponding action-value function
Q_sarsamax = q_learning(env, 5000, .01)
# print the estimated optimal policy
policy_sarsamax = np.array([np.argmax(Q_sarsamax[key]) if key in Q_sarsamax else -1 for key in np.arange(48)]).reshape((4,12))
check_test.run_check('td_control_check', policy_sarsamax)
print("\nEstimated Optimal Policy (UP = 0, RIGHT = 1, DOWN = 2, LEFT = 3, N/A = -1):")
print(policy_sarsamax)
# plot the estimated optimal state-value function
plot_values([np.max(Q_sarsamax[key]) if key in Q_sarsamax else 0 for key in np.arange(48)])
```
### Part 3: TD Control: Expected Sarsa
In this section, you will write your own implementation of the Expected Sarsa control algorithm.
Your algorithm has four arguments:
- `env`: This is an instance of an OpenAI Gym environment.
- `num_episodes`: This is the number of episodes that are generated through agent-environment interaction.
- `alpha`: This is the step-size parameter for the update step.
- `gamma`: This is the discount rate. It must be a value between 0 and 1, inclusive (default value: `1`).
The algorithm returns as output:
- `Q`: This is a dictionary (of one-dimensional arrays) where `Q[s][a]` is the estimated action value corresponding to state `s` and action `a`.
Please complete the function in the code cell below.
(_Feel free to define additional functions to help you to organize your code._)
```
def update_Qs(env, Q, epsilon, gamma, alpha, nA, state, action, reward, next_state=None):
    """Return the Expected-Sarsa update for Q[state][action].

    The TD target is reward + gamma * E[Q(next_state, .)], where the
    expectation is taken under the epsilon-greedy policy derived from Q.
    When `next_state` is None the transition is treated as terminal and the
    target reduces to the immediate reward. (Previously the body always
    indexed Q[next_state], so calling with the advertised default of None
    silently added a spurious `None` key to the defaultdict.)

    Parameters
    ----------
    env : unused; kept for interface compatibility.
    Q : dict of 1-d numpy arrays
        Q[s][a] is the current action-value estimate.
    epsilon : float
        Exploration rate of the epsilon-greedy policy.
    gamma : float
        Discount rate.
    alpha : float
        Step-size parameter.
    nA : int
        Number of actions.
    state, action : the transition being updated.
    reward : float
        Observed reward.
    next_state : successor state, or None for a terminal transition.

    Returns
    -------
    float
        The new value for Q[state][action] (the caller assigns it).
    """
    if next_state is None:
        # Terminal transition: no future value to bootstrap from.
        return Q[state][action] + alpha*(reward - Q[state][action])
    # Epsilon-greedy probabilities over the next state's actions.
    policy = np.ones(nA)*epsilon/nA
    policy[np.argmax(Q[next_state])] = 1 - epsilon + epsilon/nA
    expected_value = np.dot(Q[next_state], policy)
    return Q[state][action] + alpha*(reward + gamma*expected_value - Q[state][action])
def expected_sarsa(env, num_episodes, alpha, gamma=1.0):
    """TD control with Expected Sarsa.

    Parameters
    ----------
    env : OpenAI Gym environment instance.
    num_episodes : int
        Number of episodes of agent-environment interaction.
    alpha : float
        Step-size parameter for the update rule.
    gamma : float, optional
        Discount rate in [0, 1] (default 1.0).

    Returns
    -------
    Q : dict of 1-d numpy arrays
        Q[s][a] is the estimated action value for state s, action a.
    """
    nA = env.action_space.n
    # Use the nA computed above instead of env.nA so the function works on any
    # env with a discrete action space (the two agree on CliffWalking, where
    # the original mixed both spellings).
    Q = defaultdict(lambda: np.zeros(nA))
    # loop over episodes
    for i_episode in range(1, num_episodes+1):
        # monitor progress
        if i_episode % 100 == 0:
            print("\rEpisode {}/{}".format(i_episode, num_episodes), end="")
            sys.stdout.flush()
        # GLIE schedule: exploration decays as 1/episode
        epsilon = 1.0/i_episode
        state = env.reset()
        while True:
            action = epsilon_greedy_from_Q(env, Q, epsilon, state)
            next_state, reward, done, info = env.step(action)
            Q[state][action] = update_Qs(env, Q, epsilon, gamma, alpha, nA,
                                         state, action, reward, next_state)
            state = next_state
            if done:
                break
    return Q
```
Use the next code cell to visualize the **_estimated_** optimal policy and the corresponding state-value function.
If the code cell returns **PASSED**, then you have implemented the function correctly! Feel free to change the `num_episodes` and `alpha` parameters that are supplied to the function. However, if you'd like to ensure the accuracy of the unit test, please do not change the value of `gamma` from the default.
```
# obtain the estimated optimal policy and corresponding action-value function
Q_expsarsa = expected_sarsa(env, 50000, 1)
# print the estimated optimal policy
policy_expsarsa = np.array([np.argmax(Q_expsarsa[key]) if key in Q_expsarsa else -1 for key in np.arange(48)]).reshape(4,12)
check_test.run_check('td_control_check', policy_expsarsa)
print("\nEstimated Optimal Policy (UP = 0, RIGHT = 1, DOWN = 2, LEFT = 3, N/A = -1):")
print(policy_expsarsa)
# plot the estimated optimal state-value function
plot_values([np.max(Q_expsarsa[key]) if key in Q_expsarsa else 0 for key in np.arange(48)])
```
| github_jupyter |
# Using `pyoscode` in cosmology
`pyoscode` is a fast numerical routine suitable for equations of the form
$$ \ddot{x} + 2\gamma(t)\dot{x} + \omega^2(t)x = 0, $$
with
- $x(t)$: a scalar variable (e.g. curvature perturbation),
- $\omega(t)$: frequency,
- $\gamma(t)$: friction or first-derivative term.
In general $\gamma$, $\omega$ may not be explicit functions of time, and `pyoscode` can deal with them given as
- _in Python_: `numpy.array`s
- _in C++_: `array`s, `list`s, `std::vector`s, `Eigen::Vector`s, or functions.
Below we'll look at examples using the _Python_ interface, but first, let's look at the short summary of the relevant cosmology.
## Cosmology
We wish to calculate the primordial power spectrum of scalar perturbations in a universe with some spatial curvature. This involves
1. computing the isotropic, expanding "background" evolution,
2. then solving the equation of motion of the perturbations of varying lengthscales.
### Background evolution
The relevant equations are the Friedmann equations and the continuity equation. They can be cast into the following form:
$$ \frac{d\ln{\Omega_k}}{dN} = 4 + \Omega_k\big(4K - 2a^2V(\phi)\big), $$
$$ \Big(\frac{d\phi}{dN}\Big)^2 = 6 + \Omega_k\big(6K - 2a^2V(\phi)\big). $$
with
- $a$: scale factor of the universe
- $H$: Hubble parameter
- $N = \ln{a}$: number of e-folds, **the independent variable**
- $ \Omega_k = \frac{1}{(aH)^2}$, curvature density
- $K$: spatial curvature, $0, \pm1$ for flat, closed, and open universes
- $\phi$: inflaton field
- $ V$: inflationary potential
### Evolution of the perturbations
The equation of motion of the perturbations is given by the Mukhanov--Sasaki equation. It takes the form of a generalised oscillator, with frequency and damping terms given by (when written in terms of $N$):
$$ \omega^2 = \Omega_k\Bigg( (k_2 - K) - \frac{2Kk_2}{EK +k_2}\frac{\dot{E}}{E}\Bigg), $$
$$ 2\gamma = K\Omega_k + 3 - E + \frac{k_2}{EK + k_2}\frac{\dot{E}}{E}, $$
with
- $E = \frac{1}{2}\dot{\phi}^2$ (overdot is differentiation wrt $N$)
- $k_2 = k(k+2) - 3K$ if $K > 0$, and $k_2 = k^2 - 3K$ otherwise.
# Code
## A flat universe
```
import pyoscode
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import root_scalar
from scipy.integrate import solve_ivp
```
cosmological parameters:
- $m$: inflaton mass
- $mp$: Planck mass
- $nv$: exponent in inflationary potential
- $K$: curvature, $\pm1$, 0
```
m = 1
mp = 1
nv = 2
K = 0
```
Define the inflationary potential, its derivative, and the background equations. Also define initial conditions for the perturbations such that they start from the _Bunch-Davies_ vacuum.
```
def V(phi):
    """Inflationary potential V(phi) = 0.5 * m**2 * phi**nv (uses module globals m, nv)."""
    return 0.5*m**2*phi**nv
def dV(phi):
    """Derivative dV/dphi of the inflationary potential (uses module globals m, nv)."""
    return 0.5*nv*m**2*phi**(nv-1)
def bgeqs(t, y):
    """ System of equations describing the evolution of the cosmological
    background.

    y[0] = ln(Omega_k), y[1] = phi; the independent variable t is the number
    of e-folds N (so a = e^t). Uses module globals K and V.
    """
    dy = np.zeros(y.shape)
    # d ln(Omega_k)/dN = 4 + Omega_k*(4K - 2 a^2 V(phi))
    dy[0] = 4.0 + np.exp(y[0])*(4.0*K - 2.0*np.exp(2.0*t)*V(y[1]))
    # dphi/dN from the Friedmann constraint; negative root (field rolls down)
    dy[1] = - np.sqrt(6.0 + np.exp(y[0])*(6.0*K -
    2.0*np.exp(2.0*t)*V(y[1])))
    return dy
def endinfl(t, y):
    """ Crosses zero when inflation ends.

    Slow-roll parameter epsilon = 0.5*(dphi/dN)^2; inflation ends when
    epsilon reaches 1. Used as a terminal event for solve_ivp.
    """
    dphi = bgeqs(t,y)[1]
    epsilon = 0.5*dphi**2
    return epsilon - 1.
def bdic(k, phi, dphi, ddphi, N):
    """ Defines the Bunch-Davies vacuum solution
    for a given perturbation mode.

    Returns (R, dR): the mode function and its derivative wrt N.
    NOTE(review): reads the module-level ok_i (Omega_k at the start of
    inflation) rather than taking it as a parameter — confirm this is the
    intended normalisation before reusing the function elsewhere.
    """
    a0 = np.exp(N)
    dz_z = ddphi/dphi + 1.
    z = a0*dphi
    R = 1./(np.sqrt(2.*k)*z) + 1j*0
    dR = - R*dz_z - np.sqrt(k/2.*ok_i)/z*1j
    return R,dR
def pps(k, rk1, rk2, x01, dx01, x02, dx02, x0, dx0):
    """Primordial power spectrum value for mode k.

    Enforces (x0, dx0) as initial conditions by forming the linear
    combination a*rk1 + b*rk2 of two solutions rk1, rk2 that started from
    (x01, dx01) and (x02, dx02) respectively, then returns
    |a*rk1 + b*rk2|^2 * k^3 / (2*pi^2).
    """
    coeff_a = (x0*dx02 - dx0*x02)/(x01*dx02 - dx01*x02)
    coeff_b = (x0*dx01 - dx0*x01)/(x02*dx01 - dx02*x01)
    combined = coeff_a*rk1 + coeff_b*rk2
    return np.abs(combined)**2 * k**3 / (2*np.pi**2)
```
Now solve the background with the help of `scipy.integrate`
```
# \Omega_k and N at the start of inflation fully
# parametrise the background.
ok_i = 2.1e-3
N_i = 1.
# Nominal end point of integration (we'll stop at the end of inflation)
N_f = 80.
# Points at which we'll obtain the background solution
Nbg = 10000 # This determines grid fineness, see note below.
N = np.linspace(N_i,N_f,Nbg)
# Initial conditions
phi_i = np.sqrt(4.*(1./ok_i + K)*np.exp(-2.0*N_i)/m**2)
logok_i = np.log(ok_i)
y_i = np.array([logok_i, phi_i])
# Solve for the background until the end of inflation
endinfl.terminal = True
endinfl.direction = 1
bgsol = solve_ivp(bgeqs, (N_i,N_f), y_i, events=endinfl, t_eval=N, rtol=1e-8, atol=1e-10)
```
**Note:** the most important parameter from a numerical perspective is $N_{\mathrm{bg}}$. This determines the fineness of the grid on which $\omega$ and $\gamma$ are defined. The speed of the method depends on how precisely numerical derivatives and integrals of $\omega$, $\gamma$ can be computed. If you experience slow-down, it is very likely that this grid was not fine enough.
The number of e-folds of inflation we got from this setup is
```
bgsol.t_events[0][0]-N_i
```
We're now ready to define the equation of motion of the perturbations. `pyoscode` takes the frequency and the damping term of the oscillator as `numpy.array`s.
```
logok = bgsol.y[0]
phi = bgsol.y[1]
N = bgsol.t
# dphi/dN and d ln(Omega_k)/dN re-evaluated on the output grid
# (same expressions as in bgeqs)
dphi = np.array([-np.sqrt(6.0 + np.exp(Logok)*(6.0*K -
2.0*np.exp(2.0*t)*V(Phi))) for Logok,Phi,t in zip(logok,phi,N) ])
dlogok = np.array([4.0 + np.exp(Logok)*(4.0*K - 2.0*np.exp(2.0*t)*V(Phi)) for Logok,Phi,t in zip(logok,phi,N) ])
# (dE/dN)/E with E = 0.5*dphi^2 (overdot = d/dN)
dE_E = dlogok - 4. -2.*dV(phi)*np.exp(logok)*np.exp(2.*N)/dphi
E = 0.5*dphi**2
# Damping term (flat-universe K=0 form of 2*gamma/2)
g = 0.5*(3 - E + dE_E)
# frequency: log(omega) without the mode-dependent log(k), added per mode later
logw = 0.5*logok
```
Now we wish to solve the Mukhanov--Sasaki equation in a loop, iterating over increasing values of $k$. We need to determine the range of integration for each: we'll start at a fixed $N$, and integrate until the mode is "well outside the Hubble horizon", $k < (aH)/100$.
```
# range of wavevectors
ks = np.logspace(0,4,1000)
# end[i] = index into the background grid where mode ks[i] is "well outside
# the horizon", i.e. aH/k > 100; integration of that mode stops there.
end = np.zeros_like(ks,dtype=int)
endindex = 0
for i in range(len(ks)):
    # ks is increasing, so each search can resume where the previous ended
    # (exp(-0.5*logok) = aH).
    for j in range(endindex,Nbg):
        if np.exp(-0.5*logok[j])/ks[i] > 100:
            end[i] = j
            endindex = j
            break
```
We're now ready to solve the Mukhanov-Sasaki equation in a loop and generate a primordial power spectrum.
```
spectrum = np.zeros_like(ks,dtype=complex)
for i,k in enumerate(ks):
    # Bunch-Davies initial conditions evaluated at the start of inflation
    phi_0 = phi[0]
    dphi_0 = dphi[0]
    ddphi_0 = 0.5*dE_E[0]*dphi_0
    N_0 = N_i
    x0, dx0 = bdic(k, phi_0, dphi_0, ddphi_0, N_0)
    # two linearly independent sets of initial conditions; pps() later forms
    # the combination matching (x0, dx0)
    x01 = 1.0
    dx01 = 0.0
    x02 = 0.0
    dx02 = 1.0
    # Linearly indep. solutions; logw=True means the frequency array is log(omega),
    # so the mode's k-dependence enters as an additive log(k)
    sol1 = pyoscode.solve(N,logw+np.log(k),g,N_i,N[end[i]],x01,dx01,logw=True)
    sol2 = pyoscode.solve(N,logw+np.log(k),g,N_i,N[end[i]],x02,dx02,logw=True)
    rk1 = sol1["sol"][-1]
    rk2 = sol2["sol"][-1]
    spectrum[i] = pps(k, rk1, rk2, x01, dx01, x02, dx02, x0, dx0)
```
Plot the resulting spectrum:
```
plt.loglog(ks, spectrum)
plt.xlabel('comoving $k$')
plt.ylabel('$m^2 \\times P_{\mathcal{R}}(k)$')
plt.show()
plt.loglog(ks, spectrum)
plt.xlabel('comoving $k$')
plt.ylabel('$m^2 \\times P_{\mathcal{R}}(k)$')
plt.xlim((3e1,1e4))
plt.ylim((40,80))
plt.show()
```
## A closed universe
All we have to do differently is:
1. solve the background equations again with $K=1$,
```
K = 1
N_i = -1.74
ok_i = 1.0
N = np.linspace(N_i,N_f,Nbg)
# Initial conditions
phi_i = np.sqrt(4.*(1./ok_i + K)*np.exp(-2.0*N_i)/m**2)
logok_i = np.log(ok_i)
y_i = np.array([logok_i, phi_i])
# Solve for the background until the end of inflation
endinfl.terminal = True
endinfl.direction = 1
bgsol = solve_ivp(bgeqs, (N_i,N_f), y_i, events=endinfl, t_eval=N, rtol=1e-8, atol=1e-10)
```
Number of e-folds of inflation now is
```
bgsol.t_events[0][0]-N_i
```
2. Update the arrays storing the cosmological background:
```
logok = bgsol.y[0]
phi = bgsol.y[1]
N = bgsol.t
dphi = np.array([-np.sqrt(6.0 + np.exp(Logok)*(6.0*K -
2.0*np.exp(2.0*t)*V(Phi))) for Logok,Phi,t in zip(logok,phi,N) ])
dlogok = np.array([4.0 + np.exp(Logok)*(4.0*K - 2.0*np.exp(2.0*t)*V(Phi)) for Logok,Phi,t in zip(logok,phi,N) ])
dE_E = dlogok - 4. -2.*dV(phi)*np.exp(logok)*np.exp(2.*N)/dphi
E = 0.5*dphi**2
```
3. Update also the endpoint of integration for each mode:
```
# range of wavevectors
ks = np.concatenate((np.linspace(3,100,98), np.logspace(2,4,500)))
end = np.zeros_like(ks,dtype=int)
endindex = 0
for i in range(len(ks)):
for j in range(endindex,Nbg):
if np.exp(-0.5*logok[j])/ks[i] > 100:
end[i] = j
endindex = j
break
```
4. Solve the MS equation for each $k$. The frequency and the damping term now have non-trivial wavevector-dependence, so we'll compute them on the fly for each mode.
```
closed_spectrum = np.zeros_like(ks,dtype=complex)
for i,k in enumerate(ks):
# Bunch-Davies i.c.
phi_0 = phi[0]
dphi_0 = dphi[0]
ddphi_0 = 0.5*dE_E[0]*dphi_0
N_0 = N_i
x0, dx0 = bdic(k, phi_0, dphi_0, ddphi_0, N_0)
x01 = 1.0
dx01 = 0.0
x02 = 0.0
dx02 = 1.0
# wavenumber "squared"
k2 = complex(k*(k+2.)-3*K)
# Damping term
g = 0.5*(K*np.exp(logok) + 3 - E + dE_E*k2/(E*K+k2))
# frequency
logw = 0.5*(logok + np.log(k2 - K - 2.*K*k2*dE_E/(E*K + k2)))
# Linearly indep. solutions
sol1 = pyoscode.solve(N,logw,g,N_i,N[end[i]],x01,dx01,logw=True)
sol2 = pyoscode.solve(N,logw,g,N_i,N[end[i]],x02,dx02,logw=True)
rk1 = sol1["sol"][-1]
rk2 = sol2["sol"][-1]
closed_spectrum[i] = pps(k, rk1, rk2, x01, dx01, x02, dx02, x0, dx0)
```
Plot the resulting spectrum:
```
plt.loglog(ks, closed_spectrum)
plt.xlabel('comoving $k$')
plt.ylabel('$m^2 \\times P_{\mathcal{R}}(k)$')
plt.show()
```
Note that in the above spectrum, the jaggedness is due to the fact that the values $k$ takes are quantised (integers only).
| github_jupyter |
# Project 5: NLP on Financial Statements
## Instructions
Each problem consists of a function to implement and instructions on how to implement the function. The parts of the function that need to be implemented are marked with a `# TODO` comment. After implementing the function, run the cell to test it against the unit tests we've provided. For each problem, we provide one or more unit tests from our `project_tests` package. These unit tests won't tell you if your answer is correct, but will warn you of any major errors. Your code will be checked for the correct solution when you submit it to Udacity.
## Packages
When you implement the functions, you'll only need to you use the packages you've used in the classroom, like [Pandas](https://pandas.pydata.org/) and [Numpy](http://www.numpy.org/). These packages will be imported for you. We recommend you don't add any import statements, otherwise the grader might not be able to run your code.
The other packages that we're importing are `project_helper` and `project_tests`. These are custom packages built to help you solve the problems. The `project_helper` module contains utility functions and graph functions. The `project_tests` contains the unit tests for all the problems.
### Install Packages
```
import sys
!{sys.executable} -m pip install -r requirements.txt
```
### Load Packages
```
import nltk
import numpy as np
import pandas as pd
import pickle
import pprint
import project_helper
import project_tests
from tqdm import tqdm
```
### Download NLP Corpora
You'll need two corpora to run this project: the stopwords corpus for removing stopwords and wordnet for lemmatizing.
```
nltk.download('stopwords')
nltk.download('wordnet')
```
## Get 10ks
We'll be running NLP analysis on 10-k documents. To do that, we first need to download the documents. For this project, we'll download 10-ks for a few companies. To look up documents for these companies, we'll use their CIK. If you would like to run this against other stocks, we've provided the dict `additional_cik` for more stocks. However, the more stocks you try, the longer it will take to run.
```
cik_lookup = {
'AMZN': '0001018724',
'BMY': '0000014272',
'CNP': '0001130310',
'CVX': '0000093410',
'FL': '0000850209',
'FRT': '0000034903',
'HON': '0000773840'}
additional_cik = {
'AEP': '0000004904',
'AXP': '0000004962',
'BA': '0000012927',
'BK': '0001390777',
'CAT': '0000018230',
'DE': '0000315189',
'DIS': '0001001039',
'DTE': '0000936340',
'ED': '0001047862',
'EMR': '0000032604',
'ETN': '0001551182',
'GE': '0000040545',
'IBM': '0000051143',
'IP': '0000051434',
'JNJ': '0000200406',
'KO': '0000021344',
'LLY': '0000059478',
'MCD': '0000063908',
'MO': '0000764180',
'MRK': '0000310158',
'MRO': '0000101778',
'PCG': '0001004980',
'PEP': '0000077476',
'PFE': '0000078003',
'PG': '0000080424',
'PNR': '0000077360',
'SYY': '0000096021',
'TXN': '0000097476',
'UTX': '0000101829',
'WFC': '0000072971',
'WMT': '0000104169',
'WY': '0000106535',
'XOM': '0000034088'}
```
### Get list of 10-ks
The SEC has a limit on the number of calls you can make to the website per second. In order to avoid hitting that limit, we've created the `SecAPI` class. This will cache data from the SEC and prevent you from going over the limit.
```
sec_api = project_helper.SecAPI()
```
With the class constructed, let's pull a list of filed 10-Ks from the SEC for each company.
```
from bs4 import BeautifulSoup
def get_sec_data(cik, doc_type, start=0, count=60):
    """Fetch filing metadata for a company from the SEC EDGAR atom feed.

    Parameters
    ----------
    cik : str
        SEC Central Index Key of the company.
    doc_type : str
        Filing type to query, e.g. '10-K'.
    start, count : int
        Paging parameters of the EDGAR query.

    Returns
    -------
    list of (filing_href, filing_type, filing_date) str tuples, restricted to
    filings dated on or before 2018-01-01 (the newest pricing data we have).
    """
    newest_pricing_data = pd.to_datetime('2018-01-01')
    rss_url = 'https://www.sec.gov/cgi-bin/browse-edgar?action=getcompany' \
        '&CIK={}&type={}&start={}&count={}&owner=exclude&output=atom' \
        .format(cik, doc_type, start, count)
    # sec_api is the module-level SecAPI instance: it caches responses and
    # rate-limits requests to the SEC.
    sec_data = sec_api.get(rss_url)
    # NOTE(review): encode('ascii') will raise on non-ascii content —
    # presumably the feed is ascii-only; confirm if this ever fails.
    feed = BeautifulSoup(sec_data.encode('ascii'), 'xml').feed
    entries = [
        (
            entry.content.find('filing-href').getText(),
            entry.content.find('filing-type').getText(),
            entry.content.find('filing-date').getText())
        for entry in feed.find_all('entry', recursive=False)
        if pd.to_datetime(entry.content.find('filing-date').getText()) <= newest_pricing_data]
    return entries
```
Let's pull the list using the `get_sec_data` function, then display some of the results. For displaying some of the data, we'll use Amazon as an example.
```
example_ticker = 'AMZN'
sec_data = {}
for ticker, cik in cik_lookup.items():
sec_data[ticker] = get_sec_data(cik, '10-K')
pprint.pprint(sec_data[example_ticker][:5])
```
### Download 10-ks
As you see, this is a list of urls. These urls point to a file that contains metadata related to each filling. Since we don't care about the metadata, we'll pull the filling by replacing the url with the filling url.
```
raw_fillings_by_ticker = {}
for ticker, data in sec_data.items():
raw_fillings_by_ticker[ticker] = {}
for index_url, file_type, file_date in tqdm(data, desc='Downloading {} Fillings'.format(ticker), unit='filling'):
if (file_type == '10-K'):
file_url = index_url.replace('-index.htm', '.txt').replace('.txtl', '.txt')
raw_fillings_by_ticker[ticker][file_date] = sec_api.get(file_url)
print('Example Document:\n\n{}...'.format(next(iter(raw_fillings_by_ticker[example_ticker].values()))[:1000]))
```
### Get Documents
With these fillings downloaded, we want to break them into their associated documents. These documents are sectioned off in the fillings with the tags `<DOCUMENT>` for the start of each document and `</DOCUMENT>` for the end of each document. There's no overlap with these documents, so each `</DOCUMENT>` tag should come after the `<DOCUMENT>` with no `<DOCUMENT>` tag in between.
Implement `get_documents` to return a list of these documents from a filling. Make sure not to include the tag in the returned document text.
```
import re
def get_documents(text):
    """
    Extract the documents from the text

    Parameters
    ----------
    text : str
        The text with the document strings inside

    Returns
    -------
    extracted_docs : list of str
        The document strings found in `text`
    """
    # Pair the position just after each <DOCUMENT> tag with the position
    # just before the matching </DOCUMENT> tag, then slice the text between
    # them. The tags never nest or overlap, so pairing in order is safe.
    starts = (match.end() for match in re.finditer(r'<DOCUMENT>', text))
    stops = (match.start() for match in re.finditer(r'</DOCUMENT>', text))
    return [text[begin:finish] for begin, finish in zip(starts, stops)]
project_tests.test_get_documents(get_documents)
```
With the `get_documents` function implemented, let's extract all the documents.
```
filling_documents_by_ticker = {}
for ticker, raw_fillings in raw_fillings_by_ticker.items():
filling_documents_by_ticker[ticker] = {}
for file_date, filling in tqdm(raw_fillings.items(), desc='Getting Documents from {} Fillings'.format(ticker), unit='filling'):
filling_documents_by_ticker[ticker][file_date] = get_documents(filling)
print('\n\n'.join([
'Document {} Filed on {}:\n{}...'.format(doc_i, file_date, doc[:200])
for file_date, docs in filling_documents_by_ticker[example_ticker].items()
for doc_i, doc in enumerate(docs)][:3]))
```
### Get Document Types
Now that we have all the documents, we want to find the 10-k form in this 10-k filing. Implement the `get_document_type` function to return the type of document given. The document type is located on a line with the `<TYPE>` tag. For example, a form of type "TEST" would have the line `<TYPE>TEST`. Make sure to return the type as lowercase, so this example would be returned as "test".
```
def get_document_type(doc):
    """
    Return the document type lowercased

    Parameters
    ----------
    doc : str
        The document string

    Returns
    -------
    doc_type : str
        The document type lowercased
    """
    # The type sits on a line of the form '<TYPE>10-K'; capture the rest of
    # that line up to its last word boundary and lowercase it.
    matches = re.findall(r'<TYPE>([^\n]+)\b', doc)
    return matches[0].lower()
project_tests.test_get_document_type(get_document_type)
```
With the `get_document_type` function, we'll filter out all non 10-k documents.
```
ten_ks_by_ticker = {}
for ticker, filling_documents in filling_documents_by_ticker.items():
ten_ks_by_ticker[ticker] = []
for file_date, documents in filling_documents.items():
for document in documents:
if get_document_type(document) == '10-k':
ten_ks_by_ticker[ticker].append({
'cik': cik_lookup[ticker],
'file': document,
'file_date': file_date})
project_helper.print_ten_k_data(ten_ks_by_ticker[example_ticker][:5], ['cik', 'file', 'file_date'])
```
## Preprocess the Data
### Clean Up
As you can see, the text for the documents are very messy. To clean this up, we'll remove the html and lowercase all the text.
```
def remove_html_tags(text):
    """Strip all HTML markup from `text`, keeping only the visible text."""
    return BeautifulSoup(text, 'html.parser').get_text()


def clean_text(text):
    """Lowercase `text` and strip its HTML tags."""
    return remove_html_tags(text.lower())
```
Using the `clean_text` function, we'll clean up all the documents.
```
for ticker, ten_ks in ten_ks_by_ticker.items():
for ten_k in tqdm(ten_ks, desc='Cleaning {} 10-Ks'.format(ticker), unit='10-K'):
ten_k['file_clean'] = clean_text(ten_k['file'])
project_helper.print_ten_k_data(ten_ks_by_ticker[example_ticker][:5], ['file_clean'])
```
### Lemmatize
With the text cleaned up, it's time to distill the verbs down. Implement the `lemmatize_words` function to lemmatize verbs in the list of words provided.
```
from nltk.stem import WordNetLemmatizer as WNL
from nltk.corpus import wordnet
def lemmatize_words(words):
    """
    Lemmatize words

    Parameters
    ----------
    words : list of str
        List of words

    Returns
    -------
    lemmatized_words : list of str
        List of lemmatized words
    """
    # Build the lemmatizer once instead of once per word: the original
    # instantiated WordNetLemmatizer inside the comprehension, repeating its
    # setup cost for every token of every 10-K. Lemmatization is done with
    # pos='v' (verbs), as before.
    lemmatizer = WNL()
    return [lemmatizer.lemmatize(word, pos='v') for word in words]
project_tests.test_lemmatize_words(lemmatize_words)
```
With the `lemmatize_words` function implemented, let's lemmatize all the data.
```
# Raw string avoids the invalid '\w' escape sequence in a plain string
# literal (a DeprecationWarning that newer Python versions escalate).
word_pattern = re.compile(r'\w+')
for ticker, ten_ks in ten_ks_by_ticker.items():
    for ten_k in tqdm(ten_ks, desc='Lemmatize {} 10-Ks'.format(ticker), unit='10-K'):
        # tokenize the cleaned filing into word characters, then lemmatize
        ten_k['file_lemma'] = lemmatize_words(word_pattern.findall(ten_k['file_clean']))

project_helper.print_ten_k_data(ten_ks_by_ticker[example_ticker][:5], ['file_lemma'])
```
### Remove Stopwords
```
from nltk.corpus import stopwords
lemma_english_stopwords = lemmatize_words(stopwords.words('english'))
for ticker, ten_ks in ten_ks_by_ticker.items():
for ten_k in tqdm(ten_ks, desc='Remove Stop Words for {} 10-Ks'.format(ticker), unit='10-K'):
ten_k['file_lemma'] = [word for word in ten_k['file_lemma'] if word not in lemma_english_stopwords]
print('Stop Words Removed')
```
## Analysis on 10ks
### Loughran McDonald Sentiment Word Lists
We'll be using the Loughran and McDonald sentiment word lists. These word lists cover the following sentiment:
- Negative
- Positive
- Uncertainty
- Litigious
- Constraining
- Superfluous
- Modal
This will allow us to do the sentiment analysis on the 10-ks. Let's first load these word lists. We'll be looking into a few of these sentiments.
```
import os
sentiments = ['negative', 'positive', 'uncertainty', 'litigious', 'constraining', 'interesting']
sentiment_df = pd.read_csv(os.path.join('..', '..', 'data', 'project_5_loughran_mcdonald', 'loughran_mcdonald_master_dic_2016.csv'))
sentiment_df.columns = [column.lower() for column in sentiment_df.columns] # Lowercase the columns for ease of use
# Remove unused information
sentiment_df = sentiment_df[sentiments + ['word']]
sentiment_df[sentiments] = sentiment_df[sentiments].astype(bool)
sentiment_df = sentiment_df[(sentiment_df[sentiments]).any(1)]
# Apply the same preprocessing to these words as the 10-k words
sentiment_df['word'] = lemmatize_words(sentiment_df['word'].str.lower())
sentiment_df = sentiment_df.drop_duplicates('word')
sentiment_df.head()
```
### Bag of Words
using the sentiment word lists, let's generate sentiment bag of words from the 10-k documents. Implement `get_bag_of_words` to generate a bag of words that counts the number of sentiment words in each doc. You can ignore words that are not in `sentiment_words`.
```
from collections import defaultdict, Counter
from sklearn.feature_extraction.text import CountVectorizer as CV
import numpy as np
def get_bag_of_words(sentiment_words, docs):
    """
    Generate a bag of words from documents for a certain sentiment

    Parameters
    ----------
    sentiment_words: Pandas Series
        Words that signify a certain sentiment
    docs : list of str
        List of documents used to generate bag of words

    Returns
    -------
    bag_of_words : 2-d Numpy Ndarray of int
        Bag of words sentiment for each document
        The first dimension is the document.
        The second dimension is the word.
    """
    # TODO: Implement
    # A fixed vocabulary restricts counting to the sentiment words; any other
    # token in the documents is ignored.
    cv = CV(vocabulary=sentiment_words.values)
    # NOTE(review): transform() is called without a prior fit(); this relies
    # on CountVectorizer validating the fixed vocabulary lazily — confirm the
    # installed scikit-learn version still allows it.
    bag_of_words = cv.transform(docs).toarray()
    return bag_of_words
project_tests.test_get_bag_of_words(get_bag_of_words)
```
Using the `get_bag_of_words` function, we'll generate a bag of words for all the documents.
```
sentiment_bow_ten_ks = {}
for ticker, ten_ks in ten_ks_by_ticker.items():
lemma_docs = [' '.join(ten_k['file_lemma']) for ten_k in ten_ks]
sentiment_bow_ten_ks[ticker] = {
sentiment: get_bag_of_words(sentiment_df[sentiment_df[sentiment]]['word'], lemma_docs)
for sentiment in sentiments}
project_helper.print_ten_k_data([sentiment_bow_ten_ks[example_ticker]], sentiments)
```
### Jaccard Similarity
Using the bag of words, let's calculate the jaccard similarity on the bag of words and plot it over time. Implement `get_jaccard_similarity` to return the jaccard similarities between each tick in time. Since the input, `bag_of_words_matrix`, is a bag of words for each time period in order, you just need to compute the jaccard similarities for each neighboring bag of words. Make sure to turn the bag of words into a boolean array when calculating the jaccard similarity.
```
from sklearn.metrics import jaccard_similarity_score
def get_jaccard_similarity(bag_of_words_matrix):
    """
    Get jaccard similarities for neighboring documents

    Parameters
    ----------
    bag_of_words : 2-d Numpy Ndarray of int
        Bag of words sentiment for each document
        The first dimension is the document.
        The second dimension is the word.

    Returns
    -------
    jaccard_similarities : list of float
        Jaccard similarities for neighboring documents
    """
    # TODO: Implement
    # turn the bag of words into a boolean array (presence/absence of each word)
    bag = bag_of_words_matrix.astype(bool)
    # Compare each document to the next one (row i vs row i+1).
    # NOTE(review): sklearn's jaccard_similarity_score was deprecated and
    # later removed; confirm the installed version still provides it before
    # rerunning this notebook.
    jaccard_similarities = [jaccard_similarity_score(u,v) for u, v in zip(bag, bag[1:])]
    return jaccard_similarities
project_tests.test_get_jaccard_similarity(get_jaccard_similarity)
```
Using the `get_jaccard_similarity` function, let's plot the similarities over time.
```
# Get dates for the universe
file_dates = {
ticker: [ten_k['file_date'] for ten_k in ten_ks]
for ticker, ten_ks in ten_ks_by_ticker.items()}
jaccard_similarities = {
ticker: {
sentiment_name: get_jaccard_similarity(sentiment_values)
for sentiment_name, sentiment_values in ten_k_sentiments.items()}
for ticker, ten_k_sentiments in sentiment_bow_ten_ks.items()}
project_helper.plot_similarities(
[jaccard_similarities[example_ticker][sentiment] for sentiment in sentiments],
file_dates[example_ticker][1:],
'Jaccard Similarities for {} Sentiment'.format(example_ticker),
sentiments)
```
### TFIDF
using the sentiment word lists, let's generate sentiment TFIDF from the 10-k documents. Implement `get_tfidf` to generate TFIDF from each document, using sentiment words as the terms. You can ignore words that are not in `sentiment_words`.
```
from sklearn.feature_extraction.text import TfidfVectorizer as TV
def get_tfidf(sentiment_words, docs):
    """
    Generate TFIDF values from documents for a certain sentiment

    Parameters
    ----------
    sentiment_words: Pandas Series
        Words that signify a certain sentiment
    docs : list of str
        List of documents used to generate bag of words

    Returns
    -------
    tfidf : 2-d Numpy Ndarray of float
        TFIDF sentiment for each document
        The first dimension is the document.
        The second dimension is the word.
    """
    # TODO: Implement
    # A fixed vocabulary restricts the TFIDF terms to the sentiment words.
    # NOTE(review): a pandas Series is passed as the vocabulary here while
    # get_bag_of_words passes sentiment_words.values — confirm scikit-learn
    # accepts both forms identically.
    tf_vector = TV(vocabulary=sentiment_words)
    tfidf = tf_vector.fit_transform(docs).toarray()
    return tfidf
project_tests.test_get_tfidf(get_tfidf)
```
Using the `get_tfidf` function, let's generate the TFIDF values for all the documents.
```
sentiment_tfidf_ten_ks = {}
for ticker, ten_ks in ten_ks_by_ticker.items():
lemma_docs = [' '.join(ten_k['file_lemma']) for ten_k in ten_ks]
sentiment_tfidf_ten_ks[ticker] = {
sentiment: get_tfidf(sentiment_df[sentiment_df[sentiment]]['word'], lemma_docs)
for sentiment in sentiments}
project_helper.print_ten_k_data([sentiment_tfidf_ten_ks[example_ticker]], sentiments)
```
### Cosine Similarity
Using the TFIDF values, we'll calculate the cosine similarity and plot it over time. Implement `get_cosine_similarity` to return the cosine similarities between each tick in time. Since the input, `tfidf_matrix`, is a TFIDF vector for each time period in order, you just need to compute the cosine similarities for each neighboring vector.
```
from sklearn.metrics.pairwise import cosine_similarity
def get_cosine_similarity(tfidf_matrix):
    """
    Get cosine similarities for each neighboring TFIDF vector/document

    Parameters
    ----------
    tfidf_matrix : 2-d Numpy Ndarray of float
        TFIDF sentiment for each document
        The first dimension is the document.
        The second dimension is the word.

    Returns
    -------
    cosine_similarities : list of float
        Cosine similarities for neighboring documents; entry i compares
        document i with document i+1.
    """
    import numpy as np

    # Fix: the original compared document 0 against every other document
    # (cosine_similarity(m[0:1], m[1:])), but the contract asks for the
    # similarity of each *neighboring* pair (i vs i+1).
    m = np.asarray(tfidf_matrix, dtype=float)
    a, b = m[:-1], m[1:]
    dots = np.einsum('ij,ij->i', a, b)
    norms = np.linalg.norm(a, axis=1) * np.linalg.norm(b, axis=1)
    # A zero vector gets similarity 0, matching sklearn's cosine_similarity.
    sims = np.divide(dots, norms, out=np.zeros_like(dots), where=norms > 0)
    return list(sims)
project_tests.test_get_cosine_similarity(get_cosine_similarity)
```
Let's plot the cosine similarities over time.
```
# Cosine similarity per ticker and sentiment across consecutive filings
cosine_similarities = {
    ticker: {
        sentiment_name: get_cosine_similarity(sentiment_values)
        for sentiment_name, sentiment_values in ten_k_sentiments.items()}
    for ticker, ten_k_sentiments in sentiment_tfidf_ten_ks.items()}

# Dates are offset by one ([1:]) because each similarity compares a filing
# with its predecessor.
project_helper.plot_similarities(
    [cosine_similarities[example_ticker][sentiment] for sentiment in sentiments],
    file_dates[example_ticker][1:],
    'Cosine Similarities for {} Sentiment'.format(example_ticker),
    sentiments)
```
## Evaluate Alpha Factors
Just like we did in project 4, let's evaluate the alpha factors. For this section, we'll just be looking at the cosine similarities, but it can be applied to the jaccard similarities as well.
### Price Data
Let's get yearly pricing to run the factor against, since 10-Ks are produced annually.
```
# Yearly adjusted-close prices (10-Ks are annual), pivoted to one column per ticker
pricing = pd.read_csv('../../data/project_5_yr/yr-quotemedia.csv', parse_dates=['date'])
pricing = pricing.pivot(index='date', columns='ticker', values='adj_close')
pricing
```
### Dict to DataFrame
The alphalens library uses dataframes, so we'll need to turn our dictionary into a dataframe.
```
# Flatten the nested {ticker: {sentiment: [values]}} dict into a long-format
# DataFrame with one row per (ticker, sentiment, filing date).
cosine_similarities_df_dict = {'date': [], 'ticker': [], 'sentiment': [], 'value': []}
for ticker, ten_k_sentiments in cosine_similarities.items():
    for sentiment_name, sentiment_values in ten_k_sentiments.items():
        # Fix: the original loop reused `sentiment_values` as the enumerate
        # index, clobbering the list it was iterating over; use a distinct
        # index name instead.
        for time_idx, sentiment_value in enumerate(sentiment_values):
            cosine_similarities_df_dict['ticker'].append(ticker)
            cosine_similarities_df_dict['sentiment'].append(sentiment_name)
            cosine_similarities_df_dict['value'].append(sentiment_value)
            # Similarities compare filing i with i+1, hence the [1:] offset.
            cosine_similarities_df_dict['date'].append(file_dates[ticker][1:][time_idx])
cosine_similarities_df = pd.DataFrame(cosine_similarities_df_dict)
# Normalise the dates to the filing year only.
cosine_similarities_df['date'] = pd.DatetimeIndex(cosine_similarities_df['date']).year
cosine_similarities_df['date'] = pd.to_datetime(cosine_similarities_df['date'], format='%Y')
cosine_similarities_df.head()
```
### Alphalens Format
In order to use a lot of the alphalens functions, we need to align the indices and convert the time to unix timestamps. In this next cell, we'll do just that.
```
import alphalens as al

# Pivot each sentiment's similarities into a date x ticker frame and let
# alphalens align it with forward returns; sentiments that can't be aligned
# (e.g. insufficient data) are collected and reported instead of crashing.
factor_data = {}
skipped_sentiments = []

for sentiment in sentiments:
    cs_df = cosine_similarities_df[(cosine_similarities_df['sentiment'] == sentiment)]
    cs_df = cs_df.pivot(index='date', columns='ticker', values='value')

    try:
        data = al.utils.get_clean_factor_and_forward_returns(cs_df.stack(), pricing, quantiles=5, bins=None, periods=[1])
        factor_data[sentiment] = data
    except Exception:
        # Fix: narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; everything else marks the sentiment as skipped.
        skipped_sentiments.append(sentiment)

if skipped_sentiments:
    print('\nSkipped the following sentiments:\n{}'.format('\n'.join(skipped_sentiments)))
factor_data[sentiments[0]].head()
```
### Alphalens Format with Unix Time
Alphalen's `factor_rank_autocorrelation` and `mean_return_by_quantile` functions require unix timestamps to work, so we'll also create factor dataframes with unix time.
```
# Re-index the factor frames with unix timestamps: alphalens'
# factor_rank_autocorrelation and mean_return_by_quantile require them.
unixt_factor_data = {
    factor: data.set_index(pd.MultiIndex.from_tuples(
        [(x.timestamp(), y) for x, y in data.index.values],
        names=['date', 'asset']))
    for factor, data in factor_data.items()}
```
### Factor Returns
Let's view the factor returns over time. We should be seeing it generally move up and to the right.
```
# Factor-weighted returns per sentiment, compounded and plotted over time
ls_factor_returns = pd.DataFrame()
for factor_name, data in factor_data.items():
    ls_factor_returns[factor_name] = al.performance.factor_returns(data).iloc[:, 0]
(1 + ls_factor_returns).cumprod().plot()
```
### Basis Points Per Day per Quantile
It is not enough to look just at the factor weighted return. A good alpha is also monotonic in quantiles. Let's look at the basis points for the factor returns.
```
# Mean return per factor quantile, scaled to basis points (1 bp = 1e-4)
qr_factor_returns = pd.DataFrame()
for factor_name, data in unixt_factor_data.items():
    qr_factor_returns[factor_name] = al.performance.mean_return_by_quantile(data)[0].iloc[:, 0]
(10000 * qr_factor_returns).plot.bar(
    subplots=True,
    sharey=True,
    layout=(5, 3),
    figsize=(14, 14),
    legend=False)
```
### Turnover Analysis
Without doing a full and formal backtest, we can analyze how stable the alphas are over time. Stability in this sense means that from period to period, the alpha ranks do not change much. Since trading is costly, we always prefer, all other things being equal, that the ranks do not change significantly per period. We can measure this with the **Factor Rank Autocorrelation (FRA)**.
```
# Factor Rank Autocorrelation: stability of factor ranks period over period
# (high FRA = low implied turnover / trading cost)
ls_FRA = pd.DataFrame()
for factor, data in unixt_factor_data.items():
    ls_FRA[factor] = al.performance.factor_rank_autocorrelation(data)
ls_FRA.plot(title="Factor Rank Autocorrelation")
```
### Sharpe Ratio of the Alphas
The last analysis we'll do on the factors will be sharpe ratio. Let's see what the sharpe ratio for the factors are. Generally, a Sharpe Ratio of near 1.0 or higher is an acceptable single alpha for this universe.
```
# Annualised Sharpe ratio of each factor's return series.
# NOTE(review): sqrt(252) is the *daily*-returns annualisation factor, but
# these factors come from annual 10-K filings — confirm this is intended.
daily_annualization_factor = np.sqrt(252)
(daily_annualization_factor * ls_factor_returns.mean() / ls_factor_returns.std()).round(2)
```
That's it! You've successfully done sentiment analysis on 10-ks!
## Submission
Now that you're done with the project, it's time to submit it. Click the submit button in the bottom right. One of our reviewers will give you feedback on your project with a pass or not passed grade. You can continue to the next section while you wait for feedback.
| github_jupyter |
```
import datetime
import os
import yaml
import optuna
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

# Read the environment file describing data locations
ENV_FILE = '../env.yaml'
with open(ENV_FILE) as f:
    # Fix: yaml.load() without a Loader is deprecated and raises a TypeError
    # on PyYAML >= 6; safe_load is equivalent for plain config files and
    # avoids arbitrary object construction.
    params = yaml.safe_load(f)

# Build absolute paths to the processed data file
ROOT_DIR = os.path.dirname(os.path.abspath(ENV_FILE))
DATA_FILE = os.path.join(ROOT_DIR,
                         params['directories']['processed'],
                         params['files']['all_data'])

# Load the epidemic data: one row per (country, province, day),
# truncated to observations up to 2020-03-24
epidemie_df = (pd.read_csv(DATA_FILE, parse_dates=['Last Update'])
               .assign(day=lambda _df: _df['Last Update'].dt.date)
               .drop_duplicates(subset=['Country/Region', 'Province/State', 'day'])
               [lambda df: df['day'] <= datetime.date(2020, 3, 24)]
               )
def get_country(country):
    """Return one row per day of summed case counts for *country*,
    aggregated from the global epidemie_df."""
    mask = epidemie_df['Country/Region'] == country
    per_day = (epidemie_df[mask]
               .groupby(['Country/Region', 'day'])
               .agg({'Confirmed': 'sum', 'Deaths': 'sum', 'Recovered': 'sum', 'Active': 'sum'}))
    return per_day.reset_index()
# Monkey Patch pd.DataFrame
pd.DataFrame.get_country = get_country
get_country("South Korea").head()
import matplotlib.pyplot as plt
%matplotlib inline
fig = plt.figure(figsize=(12, 5))
plt.plot(korea_df['day'], korea_df['Confirmed'], label='S.Korea confirmed')
plt.plot(korea_df['day'], korea_df['Active'], label='S.Korea infected')
plt.plot(italy_df['day'], italy_df['Confirmed'], label='Italy confirmed')
plt.plot(italy_df['day'], italy_df['Active'], label='Italy infected')
plt.grid(True)
plt.legend()
plt.show()
import scipy.integrate as spi
import numpy as np
import pylab as plt
%matplotlib inline

# SIR parameters: transmission rate beta and recovery rate gamma
beta = 2
gamma = 0.6
TS = 1.0   # integration output step (days)
ND = 63.0  # simulation horizon (days)
# NOTE(review): S0 looks like an absolute population while I0 is a tiny
# fraction — units appear mixed; confirm whether the model is meant to run
# on proportions or on counts.
S0 = 51_470_000 - 0.01
I0 = 0.0000001
INPUT = (S0, I0, 0)
def diff_eqs(INP, t):
    """SIR right-hand side for odeint: returns [dS/dt, dI/dt, dR/dt].

    beta and gamma are read from the enclosing (module) scope.
    """
    S, I, R = INP
    infections = beta * S * I
    recoveries = gamma * I
    return np.array([-infections, infections - recoveries, recoveries])
# Integrate the SIR system from day 8 up to ND (inclusive) with step TS
t_start = 8; t_end = ND; t_inc = TS
t_range = np.arange(t_start, t_end + t_inc, t_inc)
RES = spi.odeint(diff_eqs, INPUT, t_range)

# Plot the three compartments over time
plt.subplot(111)
plt.plot(RES[:, 0], '-g', label='Susceptibles')
plt.plot(RES[:, 2], '-k', label='Recovereds')
plt.plot(RES[:, 1], '-r', label='Infectious')
plt.legend(loc=0)
plt.xlabel('Time')
plt.ylabel('Susceptibles,Recovereds and Infectious')
# Candidate (beta, gamma) estimates from different fitting attempts.
# NOTE: each pair overwrites the previous one — only the "Excel
# approximation" values below are actually used.
# Djiby
beta_optimal = 5.67e-3
gamma_optimal = 24.7
# Faculty PC
beta_optimal = 0.06321101
gamma_optimal = 33.06340503
# Excel approximation
beta_optimal = 1.5485e-9
gamma_optimal = 0.1839
beta = beta_optimal
gamma = gamma_optimal
def SIR(t, y):
    """SIR right-hand side [dS/dt, dI/dt, dR/dt]; beta and gamma are read
    from the enclosing (module) scope."""
    S, I, R = y
    infection = beta * S * I
    recovery = gamma * I
    return [-infection, infection - recovery, recovery]
# NOTE(review): solve_ivp is only imported in a later cell; this cell raises
# NameError unless that cell has already been run — confirm execution order.
solution_korea_optimal = solve_ivp(SIR, [0, 40], [51_470_000*0.1, 1, 0], t_eval=np.arange(0, 40, 1))

# Observed South Korea series via the monkey-patched accessor
korea_df = epidemie_df.get_country("South Korea")
korea_df.head()
korea_df.iloc[:, 1:3].head()
import numpy as np
import pandas as pd
from scipy.integrate import ode, solve_ivp

# Excel approximation
beta_optimal = 1.5485e-9
gamma_optimal = 0.1839
print(beta_optimal)


def sir_ode(t, y, parms):
    """SIR right-hand side with (beta, gamma) passed explicitly via parms.

    Fix: the original cell called sir_ode() without defining it anywhere
    (NameError); this definition mirrors the SIR functions used elsewhere
    in the notebook.
    """
    beta, gamma = parms
    S, I, R = y
    return [-beta * S * I, beta * S * I - gamma * I, gamma * I]


parms = [1.5485e-08, 0.05]     # [beta, gamma] used for this simulation
init = [51_470_000 - 1, 1, 0]  # one initial infection in the full population
times = np.linspace(0, 55)     # 50 evaluation points over 55 days
sir_sol = solve_ivp(fun=lambda t, y: sir_ode(t, y, parms), t_span=[min(times), max(times)], y0=init, t_eval=times)
sir_out = pd.DataFrame({"t": sir_sol["t"], "S": sir_sol["y"][0], "I": sir_sol["y"][1], "R": sir_sol["y"][2]})
import matplotlib.pyplot as plt

# Plot the three simulated compartments against time
plt.style.use("ggplot")
sline = plt.plot("t", "S", "", data=sir_out, color="blue", linewidth=2)
iline = plt.plot("t", "I", "", data=sir_out, color="red", linewidth=2)
rline = plt.plot("t", "R", "", data=sir_out, color="green", linewidth=2)
plt.xlabel("Time", fontweight="bold")
plt.ylabel("Number", fontweight="bold")
# Place the legend outside the axes, to the right
legend = plt.legend(title="Population", loc=5, bbox_to_anchor=(1.25, 0.5))
frame = legend.get_frame()
frame.set_facecolor("white")
frame.set_linewidth(0)
```
# Modeling with the South Korea case
```
import numpy as np
import pandas as pd
from scipy.integrate import ode, solve_ivp
import matplotlib.pyplot as plt

# NOTE(review): os is used below but not imported in this cell — it relies
# on an earlier cell having imported it.
PROCESSED_DIR = '../data/processed/'
pop_df = pd.read_csv(os.path.join(PROCESSED_DIR, 'world_population.csv'))
# Keep only the country name and its population
pop_df.columns = ['Country Name', 'Country Code', 'Pop']
pop_df = pop_df.drop(columns=['Country Code'])
def get_country(country):
    """Aggregate the global epidemie_df to one row per day for *country*."""
    selected = epidemie_df.loc[epidemie_df['Country/Region'] == country]
    per_day = selected.groupby(['Country/Region', 'day']).agg(
        {'Confirmed': 'sum', 'Deaths': 'sum', 'Recovered': 'sum', 'Active': 'sum'})
    return per_day.reset_index()
# Monkey Patch pd.DataFrame so frames expose .get_country()
pd.DataFrame.get_country = get_country
korea_df = get_country('South Korea')
def get_pop(country):
    """Return the population of *country* from the global pop_df table.

    Raises IndexError if the country is not present in pop_df.
    """
    # Fix: int(Series) (the original int(... ['Pop'])) is deprecated in
    # pandas; extract the scalar explicitly with .iloc[0].
    return int(pop_df.loc[pop_df['Country Name'] == country, 'Pop'].iloc[0])
# Look up South Korea's population and the observed active-case series
get_pop("South Korea")
active_cases = korea_df['Active']
total_population = get_pop("South Korea")
nb_steps = len(active_cases)  # number of observed days to simulate
def SIR(t, y):
    """SIR derivatives [dS/dt, dI/dt, dR/dt]; beta and gamma come from the
    enclosing scope."""
    S, I, R = y
    new_infections = beta * S * I
    recoveries = gamma * I
    return [-new_infections, new_infections - recoveries, recoveries]
def sumsq(p):
    """Sum of squared residuals between the SIR-modelled infectious curve
    and the observed active cases, for parameters p = [beta, gamma].

    Reads nb_steps, total_population and active_cases from module scope.
    """
    beta, gamma = p

    def SIR(t, y):
        S, I, R = y
        return [-beta * S * I, beta * S * I - gamma * I, gamma * I]

    sol = solve_ivp(SIR, [0, nb_steps - 1], [total_population, 1, 0],
                    t_eval=np.arange(0, nb_steps, 1))
    return sum((sol.y[1] - active_cases) ** 2)
from scipy.optimize import minimize

# Fit (beta, gamma) by minimising the squared error; Nelder-Mead is
# derivative-free, which suits this ODE-based objective.
msol = minimize(sumsq, [0.001, 1], method='Nelder-Mead')
msol.x
msol.x[1]
# Simulate with hand-picked parameters and overlay the observed actives
beta, gamma = [1.1e-08, 0.05]
sol = solve_ivp(SIR, [0, nb_steps - 1], [total_population, 1, 0], t_eval=np.arange(0, nb_steps, 1))

fig, ax1 = plt.subplots(figsize=(13, 8))
#color = 'tab:red'
ax1.set_xlabel('time (day)')
ax1.set_ylabel('Population')
ax1.plot(sol.t, sol.y[0], "b-")
ax1.plot(sol.t, sol.y[1], "r-")
ax1.plot(sol.t, sol.y[2], "g-")
# Second y-axis so the observed series is readable at its own scale
ax2 = ax1.twinx()
ax2.set_ylabel('True active')  # we already handled the x-label with ax1
ax2.plot(np.arange(0, len(korea_df)), korea_df['Active'], "k*:")
plt.title('South Korea modeling')
fig.legend(["Susceptible", "Infected", "Recovered", "True active"], loc='upper center', bbox_to_anchor=(1.1, 0.8), shadow=True, fancybox=True, fontsize="xx-large")
fig.tight_layout()  # otherwise the right y-label is slightly clipped
plt.show()
```
| github_jupyter |
<h1> Classifying Iris Flower Dataset Using Naive Bayes Classifier </h1>
<h2> Naive Bayes Classifier </h2>
Naive Bayes classifiers are a collection of classification algorithms based on Bayes’ Theorem. It comprises of a collection of algorithms where all of them share a common principle, that is every pair of features being classified is independent of each other.
The fundamental Naive Bayes assumption is that each feature is:
* Independent: We assume that no pair of features are dependent.
* Equal: Each feature is given the same weight (or importance). None of the attributes is irrelevant and assumed to be contributing equally to the outcome.
<h2>Pros and Cons of Naive Bayes</h2>
<b>Pros:</b>
<ul>
<li>This algorithm works very fast and can easily predict the class of a test dataset.</li>
<li>You can use it to solve multi-class prediction problems as it’s quite useful with them.</li>
<li>Naive Bayes classifier performs better than other models with less training data if the assumption of independence of features holds.</li>
<li>If you have categorical input variables, the Naive Bayes algorithm performs exceptionally well in comparison to numerical variables.</li>
</ul>
<b>Cons:</b>
<ul>
<li>If your test data set has a categorical variable of a category that wasn’t present in the training data set, the Naive Bayes model will assign it zero probability and won’t be able to make any predictions in this regard. This phenomenon is called ‘Zero Frequency,’ and you’ll have to use a smoothing technique to solve this problem.</li>
<li>It assumes that all the features are independent. While it might sound great in theory, in real life, you’ll hardly find a set of independent features.</li>
</ul>
<h2>What is Naive Bayes Classifier used for?</h2>
Naive Bayes Classifier has a wide range of applications but is mostly used for cases that involve multi-class classification.
<ol>
<li><em>Real time Prediction:</em> Naive Bayes is an eager learning classifier and it is sure fast. Thus, it could be used for making predictions in real time. </li>
<li><em>Multi class Prediction:</em> This algorithm is also well known for multi class prediction feature. Here we can predict the probability of multiple classes of target variable.</li>
<li><em>Text classification/ Spam Filtering/ Sentiment Analysis:</em> Naive Bayes classifiers mostly used in text classification, have higher success rate as compared to other algorithms. As a result, it is widely used in Spam filtering and Sentiment Analysis.</li>
<li><em>Recommendation System:</em> Naive Bayes Classifier and Collaborative Filtering together builds a Recommendation System that uses machine learning and data mining techniques to filter unseen information and predict whether a user would like a given resource or not.</li>
</ol>
<h2>Steps to Build a Naive Bayes Classifier</h2>
<ol>
<li>Import the Dataset</li>
<li>Split the Dataset into Training and Testing Values</li>
<li>Create Naive Bayes Model</li>
<li>Predict the Output Values</li>
<li>Check the Error Rate</li>
</ol>
<h3>1. Import the Dataset</h3>
```
# Importing the IRIS dataset from scikit-learn
from sklearn.datasets import load_iris

# Load the IRIS dataset into a variable called iris
iris = load_iris()

# X: feature matrix (sepal length/width, petal length/width)
X = iris.data

# y: class labels encoded 0/1/2 (Setosa, Versicolor, Virginica)
y = iris.target

# Display the features
X
# Display the target labels
y
```
<h3>2. Split the Dataset into Training and Testing Values</h3>
```
# Hold out 20% of the samples for testing; the fixed random_state makes the
# split reproducible across runs
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1)
```
<h3>3. Create Naive Bayes Model</h3>
```
# Import the Gaussian Naive Bayes Classifier (models each feature as a
# per-class Gaussian)
from sklearn.naive_bayes import GaussianNB

# Create and fit the model on the training split
gnb = GaussianNB()
gnb.fit(X_train, y_train)
```
<h3>4. Predict the Output Values</h3>
```
# Predict the class for test features
y_pred = gnb.predict(X_test)

# Side-by-side comparison of actual vs predicted labels
import pandas as pd
df = pd.DataFrame({'Actual': y_test, 'Predicted': y_pred})
df
```
<h3>5. Check the Error Rate</h3>
```
# Import metrics to check the accuracy and error of the model
from sklearn import metrics

# Accuracy as the percentage of correctly classified test samples
acc = metrics.accuracy_score(y_test, y_pred) * 100
print("Accuracy of the Gaussian Naive Bayes Model is:", acc)

# NOTE(review): squared/absolute/max error treat the integer class codes as
# numeric quantities — interpret with care for a classification task.
print('Mean Squared Error:', metrics.mean_squared_error(y_test, y_pred))
print('Mean Absolute Error:', metrics.mean_absolute_error(y_test, y_pred))
print('Max Error:', metrics.max_error(y_test, y_pred))
```
<h3>Final Outcome</h3>
From the above results, we can see that we have successfully built a Naive Bayes Classifier with 96.67% accuracy, which means that the classifier is able to correctly classify the output class of a new unknown data sample with 96.67% accuracy.
The dataframe above show the actual class and the predicted class values of the IRIS Dataset.
| github_jupyter |
*Analytical Information Systems*
# Tutorial 1 - Introduction
Matthias Griebel<br>
Lehrstuhl für Wirtschaftsinformatik und Informationsmanagement
SS 2019
<h1>Agenda<span class="tocSkip"></span></h1>
<div class="toc"><ul class="toc-item"><li><span><a href="#1-Course-Overview" data-toc-modified-id="1-Course-Overview-1">1 Course Overview</a></span><ul class="toc-item"><li><span><a href="#1.1-Course-Organization" data-toc-modified-id="1.1-Course-Organization-1.1">1.1 Course Organization</a></span></li><li><span><a href="#1.2-Learning-Objective" data-toc-modified-id="1.2-Learning-Objective-1.2">1.2 Learning Objective</a></span></li><li><span><a href="#1.3-Programming-Language" data-toc-modified-id="1.3-Programming-Language-1.3">1.3 Programming Language</a></span></li><li><span><a href="#1.4-Teaching-Environment" data-toc-modified-id="1.4-Teaching-Environment-1.4">1.4 Teaching Environment</a></span></li><li><span><a href="#1.5-Getting-Started" data-toc-modified-id="1.5-Getting-Started-1.5">1.5 Getting Started</a></span></li></ul></li><li><span><a href="#2-Introduction-to-Jupyter-and-R-Tidyverse" data-toc-modified-id="2-Introduction-to-Jupyter-and-R-Tidyverse-2">2 Introduction to Jupyter and R-Tidyverse</a></span><ul class="toc-item"><li><span><a href="#2.1-Operators-in-R" data-toc-modified-id="2.1-Operators-in-R-2.1">2.1 Operators in R</a></span></li><li><span><a href="#2.2-Relational-Data-and-Data-Frames" data-toc-modified-id="2.2-Relational-Data-and-Data-Frames-2.2">2.2 Relational Data and Data Frames</a></span></li><li><span><a href="#2.3-Help-and-Documentation" data-toc-modified-id="2.3-Help-and-Documentation-2.3">2.3 Help and Documentation</a></span></li><li><span><a href="#2.4-Data-Transformation-with-dplyr" data-toc-modified-id="2.4-Data-Transformation-with-dplyr-2.4">2.4 Data Transformation with <em>dplyr</em></a></span></li></ul></li><li><span><a href="#3-Exam-Questions" data-toc-modified-id="3-Exam-Questions-3">3 Exam Questions</a></span><ul class="toc-item"><li><span><a href="#3.1-Exam-AIS-WS-2018/19" data-toc-modified-id="3.1-Exam-AIS-WS-2018/19-3.1">3.1 Exam AIS WS 2018/19</a></span></li></ul></li></ul></div>
## 1 Course Overview
### 1.1 Course Organization
__Course times__
- Lecture: Mon 12:15 - 13:45 HS Alte IHK
- Tutorial: Wed 14:15 - 15:45 CIP II
__WueCampus (Access Code: AIS19)__
- Lecture slides
- Exercises
- Forum
__Assessment__
- Exam
- Self-assessment through practice questions in Wuecampus
### 1.2 Learning Objective
- __Computational thinking__: Apply information technology, computational thinking, and utilize programming languages and software for data analysis
- __Data Management__: Data entry and annotation, data preparation and manipulation
- __Statistical techniques__: General statistical analysis techniques and their use for data inspection, exploration, and analysis
- __Machine Learning__: Effectively use approaches to perform Predictive Analytics
### 1.3 Programming Language
#### We will be using R
R is not the only language that can be used for data analysis. Why R rather than another? Here is a list:
- interactive language
- data structures & functions
- graphics
- packages & community!
<img src="https://upload.wikimedia.org/wikipedia/commons/thumb/1/1b/R_logo.svg/724px-R_logo.svg.png" style="width:20%; float:right">
http://www.burns-stat.com/documents/tutorials/why-use-the-r-language/
#### The tidyverse
The [tidyverse](https://www.tidyverse.org) is a collection of R packages that share common philosophies and are designed to work together.
- Reuse existing data structures
- Compose simple functions with the pipe
- Embrace functional programming
- Design for humans
<img src="https://github.com/matjesg/AIS_2019/raw/master/notebooks/images/01/ecosystem.png" style="width:40%; float:right">
### 1.4 Teaching Environment
#### Jupyter Ecosystem
<img src="https://github.com/matjesg/AIS_2019/raw/master/notebooks/images/01/jupyter_nature.png" style="width:40%; float:right">
Project Jupyter exists to develop open-source software, open-standards, and services for interactive computing across dozens of programming languages (https://jupyter.org/).
Perkel, J. M. (2018). __Why Jupyter is data scientists' computational notebook of choice__. Nature, 563(7729), 145.
[Link](https://www.nature.com/articles/d41586-018-07196-1)
#### The Jupyter Notebook
<img src="https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/250px-Jupyter_logo.svg.png" style="width:20%; float:right">
- open-source web application
- create and share documents that contain
- live code and narrative text
- data cleaning and transformation
- numerical simulation
- statistical modeling
- data visualization
- machine learning
- and much more
#### Alternatively: RStudio
<img src="https://www.rstudio.com/wp-content/uploads/2018/10/RStudio-Logo-Flat.png" style="width:20%; float:right">
RStudio is an integrated development environment for R
It includes a
- console
- syntax-highlighting editor that supports direct code execution
- tools for plotting
- history, debugging and workspace management
RStudio is available in open source with additional commercial editions available
#### Binder (computing environments)
<img src="https://mybinder.readthedocs.io/en/latest/_static/logo.png" style="width:20%; float:right">
- allows to create custom computing environments
- that can be shared and used by many remote users
It is powered by BinderHub, which is an open-source tool that deploys the [Binder service in the cloud](https://mybinder.org/).
### 1.5 Getting Started
<img src="https://github.com/matjesg/AIS_2019/raw/master/notebooks/images/01/wuecampus.png" style="width:40%; float:right">
On wuecampus, you will find access to
- Jupyter environment
- RStudio environment
- Interactive Jupyter Notebooks for each Tutorial
__Notes__
- Sessions are limited to 12 hours
- Save your files on your local machine (down-/upload as required)
- Only start one session at the same time (course limit 100)
## 2 Introduction to Jupyter and R-Tidyverse
#### Jupyter Notebooks Cells
- A Markdown cell (this cell) contains text formatted using Markdown and displays its output in-place when it is run
- A code cell contains code to be executed in the kernel and displays its output below
To run the code, in the menu bar, click __Cell__ then select __Run Cells__, or use the keyboard shortcut Ctrl-Enter.<br>
You can add a cell using the '__+__' button.
Now, write and run your first code in the next cell
```R
string <- "your text"
print(string)
```
```
# Bind a string to a name, then print it
string <- "hello world"
print(string)
```
#### R Packages
Packages are the fundamental units of reproducible R code. They include reusable R functions, the documentation that describes how to use them, and sample data.
Example: Install and load the *tidyverse*
```R
# install
install.packages('tidyverse')
# load
library(tidyverse)
```
The *tidyverse* and all necessary packages for the tutorial are already pre-installed, so we just need to load them
```
library(tidyverse)
```
### 2.1 Operators in R
#### Assignment operators
These operators are used to assign values to variables
<table style="font-size: 100%;">
<tbody>
<tr>
<th>Operator</th>
<th>Description</th>
</tr>
<tr>
<td><-, =</td>
<td>Leftwards assignment</td>
</tr>
<tr>
<td>-></td>
<td>Rightwards assignment</td>
</tr>
</tbody>
</table>
```
# Leftwards assignment; evaluating a bare name prints its value
x <- 5
x
```
#### Arithmetic operators
These operators are used to carry out mathematical operations like addition and multiplication.
<table style="font-size: 100%;">
<tbody>
<tr>
<th>Operator</th>
<th>Description</th>
</tr>
<tr>
<td>+</td>
<td>Addition</td>
</tr>
<tr>
<td>–</td>
<td>Subtraction</td>
</tr>
<tr>
<td>*</td>
<td>Multiplication</td>
</tr>
<tr>
<td>/</td>
<td>Division</td>
</tr>
<tr>
<td>^</td>
<td>Exponent</td>
</tr>
<tr>
<td>%%</td>
<td>Modulus (Remainder from division)</td>
</tr>
<tr>
<td>%/%</td>
<td>Integer Division</td>
</tr>
</tbody>
</table>
```
x^10
```
#### Relational operators
Relational operators test or define some kind of relation between two entities/values
<table style="font-size: 100%;">
<tbody>
<tr>
<th>Operator</th>
<th>Description</th>
</tr>
<tr>
<td><</td>
<td>Less than</td>
</tr>
<tr>
<td>></td>
<td>Greater than</td>
</tr>
<tr>
<td><=</td>
<td>Less than or equal to</td>
</tr>
<tr>
<td>>=</td>
<td>Greater than or equal to</td>
</tr>
<tr>
<td>==</td>
<td>Equal to</td>
</tr>
<tr>
<td>!=</td>
<td>Not equal to</td>
</tr>
</tbody>
</table>
#### The pipe operator
<img src="https://github.com/matjesg/AIS_2019/raw/master/notebooks/images/01/pipes.png" style="width:20%; float:right">
Pipes are a powerful tool for clearly expressing a sequence of multiple operations.<br>
In a pipe, we can rewrite the code as follows
```R
string %>%
print()
```
The pipe operator %>% comes from the *magrittr* package by Stefan Milton Bache.
```
# Pipe the value into print(); equivalent to print(string)
string %>%
  print()
```
### 2.2 Relational Data and Data Frames
#### ARIS Data View
<img src="https://github.com/matjesg/AIS_2019/raw/master/notebooks/images/01/aris.png" style="width:30%; float:right">
We will now explore possible implementations in this Tutorial.
The relational model represents the database as a collection of relations (= tables, in R: *data frames* or *tibbles*).
- Each row of a table represents a list of related data values (= data record). Such a line is referred to as a "tuple”
- A column corresponds to an attribute
- Attributes are assigned a data type, format, or value range
- Each attribute value is atomic and cannot be further broken down into components
#### Working on the Student Performance Data Set
The [data set](https://rstudio-pubs-static.s3.amazonaws.com/108835_65a73467d96f4c79a5f808f5b8833922.html) contains information on students in secondary education in Portugal.
Important attributes/columns:
- G1 - first period grade (from 0 to 20)
- G2 - second period grade (from 0 to 20)
- G3 - final grade (from 0 to 20)
Let's download the data and save it to the data frame "student_data"
```
# Fetch the semicolon-separated student dataset; the first row is the header
url = "https://raw.githubusercontent.com/arunk13/MSDA-Assignments/master/IS607Fall2015/Assignment3/student-mat.csv"
student_data <- read.table(file= url, header = TRUE, sep = ";")
```
#### Have a look at the data
To view your data frame, write the name in a code cell and run it
```
student_data
```
#### View first or last part
*head()* and *tail()* return first or last part of the data frame
```
tail(student_data)
```
#### Get a glimpse of your data
*glimpse()* outputs a transposed version of the standard view: columns run down the page, and data runs across. This makes it possible to see every column in a data frame
```
glimpse(student_data)
```
#### Data summaries
You can use the *summary()* command to get a better feel for how your data are distributed
```
# Distribution summary (min/quartiles/mean/max or counts) for every column
student_data %>%
  summary()
```
### 2.3 Help and Documentation
#### Accessing the documentation with '?'
The question mark is a simple shortcut to get help
```R
?tidyverse
```
```
?tidyverse
```
#### Some Literature
R for Data Science (https://r4ds.had.co.nz/)
<img src="https://d33wubrfki0l68.cloudfront.net/b88ef926a004b0fce72b2526b0b5c4413666a4cb/24a30/cover.png" style="width:30%; float:left">
An Introduction to Statistical Learning (https://www.springer.com/de/book/9781461471370)
<img src="https://images.springer.com/sgw/books/medium/9781461471370.jpg" style="width:30%; float:left">
#### Cheat Sheets
- Cheat sheets make it easy to learn about your favorite packages
- [Here](https://www.rstudio.com/resources/cheatsheets/), you will find some cheat sheets
<img src="https://www.rstudio.com/wp-content/uploads/2018/08/data-transformation-600x464.png" style="width:30%; float:right">
### 2.4 Data Transformation with *dplyr*
The *dplyr* packages provides a grammar for manipulating tables in R. It can be conceptualized as an alternative to a traditional query language like SQL.
Main functions are
- *select()* extracts variables/columns as a table
- *filter()* extracts rows that meet logical criteria
- *group_by()* creates a "grouped" copy of a table. *dplyr* functions will manipulate each "group" separately and then combine the results
- *summarise()* applies summary functions to columns to create a new table of summary statistics based on grouping.
- *arrange()* orders rows by values of a column or columns
- *mutate()* computes new columns/variables
Multiple operations can be executed in sequence using the pipe operator:
```R
df %>%
filter() %>%
mutate() %>%
arrange()
```
We will now apply these functions to our student dataset. You can use the [Cheat Cheat](https://content.cdntwrk.com/files/aT05NjI5Mjgmdj0xJmlzc3VlTmFtZT1kYXRhLXRyYW5zZm9ybWF0aW9uLWNoZWF0LXNoZWV0JmNtZD1kJnNpZz01ZjdlZGUxZDJiM2QwMmYxNDUzODIwYzA0NzE5NTA2YQ%253D%253D) to work on the following tasks.
#### Select variables
Select the attributes *sex* and *age* from the data
```
# Keep only the sex and age columns
student_data %>%
  select(sex, age)
```
#### Make new variables
Calculate the average grade from the first period grade (G1) and the second period grade (G2) in a new columns 'MeanGrade'
```
# Add MeanGrade: average of the first- and second-period grades
student_data %>%
  mutate("MeanGrade" = (G1+G2)/2)
```
#### Extract data
Filter only male students
```
# Keep only the male students
student_data %>%
  filter(sex=='M')
```
#### Sorting the data
Select only the female students and sort them by age.
```
# Female students sorted by age; the minus sign sorts in descending order
student_data %>%
  filter(sex=='F') %>%
  arrange(-age)
```
#### Summarize the data
What is the average absences of the students?
```
# Overall mean number of absences
student_data %>%
  summarise(Mean_absences = mean(absences))
```
#### Grouping and summarizing
Calculate the average absences of both male and female students
```
# NOTE(review): the task asks for averages per sex only, but this also
# groups by age, giving one row per (age, sex) pair — confirm intent.
student_data %>%
  group_by(age, sex) %>%
  summarise(Mean_absences = mean(absences))
```
## 3 Exam Questions
### 3.1 Exam AIS WS 2018/19
##### Data Engineering & Integration (5 points)
(a) __Analytic Pipelines__: Consider the following diamonds data set:
```
# This code was not included in the exam, values may differ
library(tidyverse)
set.seed(5)  # make the random sample reproducible
ggplot2::diamonds %>%
  sample_n(10) %>%
  arrange(cut) -> diamonds  # rightwards assignment into `diamonds`
diamonds
```
i. (1 points) You are executing the code below. How many rows does the resulting data frame contain? Briefly explain your answer.
```R
diamonds %>%
group_by(cut) %>%
summarize(median(depth))
```
__Solution__:
```
# One output row per distinct cut in the 10-row sample
diamonds %>%
  group_by(cut) %>%
  summarize(median(depth))
# 4 rows (distinct 'cuts')
```
ii. (2 points) You are executing the code below. What are the column names of the resulting data frame?
```R
diamonds %>%
group_by(clarity , color) %>%
filter(price > 1000) %>%
mutate(volume = x * y * z) %>%
summarise(x = mean(carat),y = mean(price)) %>%
mutate(z = x * y)
```
__Solution__:
```
# Grouped summary plus a derived column; colnames() lists the result's columns
diamonds %>%
  group_by(clarity , color) %>%
  summarise(x = mean(carat),y = mean(price)) %>%
  mutate(z = x * y) %>%
  colnames()
# 'clarity' 'color' 'x' 'y' 'z'
```
iii. (2 points) Explain in pseudo code (e.g., dplyr pipelines) how to obtain the following transformed table from the given data set.
<table style="font-size: 100%;">
<thead>
<tr><th scope=col>color</th><th scope=col>max_price</th><th scope=col>min_price</th></tr>
</thead>
<tbody>
<tr><td>F</td><td>1630</td><td> 786</td></tr>
<tr><td>G</td><td>2593</td><td>2593</td></tr>
<tr><td>H</td><td>7604</td><td>1723</td></tr>
<tr><td>I</td><td>4195</td><td>1840</td></tr>
<tr><td>J</td><td>5463</td><td>5463</td></tr>
</tbody>
</table>
```
# Max and min price per colour, matching the table shown above
diamonds %>%
  group_by(color) %>%
  summarise(max_price = max(price),
            min_price = min(price))
# optional: arrange(color)
```
| github_jupyter |
Taller práctico - SQL
===
**Juan David Velásquez Henao**
jdvelasq@unal.edu.co
Universidad Nacional de Colombia, Sede Medellín
Facultad de Minas
Medellín, Colombia
---
Haga click [aquí](https://github.com/jdvelasq/R-for-data-science/blob/master/01-uso-interactivo.ipynb) para acceder a la última versión online
Haga click [aquí](http://nbviewer.jupyter.org/github/jdvelasq/R-for-data-science/blob/master/01-uso-interactivo.ipynb) para ver la última versión online en `nbviewer`.
---
## Descripción del problema
Usted ha sido elegido como gerente de talento humano de una compañía manufacturera que se dedica a la producción y comercialización de un producto orgánico a base de caña de azucar, en Colombia. Su primera tarea consiste en incluir la información sobre los empleados, que reposan hoy en día en archivos de Excel, en una base de datos estructurada y a continuación evaluar la equidad de salarios entre los cargos.
Los archivos contienen de la siguiente información:
> **salarios.csv**
> empleado_id = Identificación del empleado
> salario = Salario mensual en USD
> desde = Fecha de inicio del contrato actual
> hasta = Fecha de finalización del contrato actual
> **empleados.csv**
> empleado_id
> nombre
> f_nacimiento = Fecha de nacimiento
> departamento_id = Identificación del departamento al que pertenece
> SesionLog = identificación de la persona que recolectó la información.
> **contacto.csv**
> empleado_id
> celular = Teléfono celular del empleado
> dirección
> email
> teléfono
> **departamento.csv**
> empleado_id
> departamento_id
> departamento
> cargo
A partir del planteamiento anterior, responda ante los siguientes requerimientos de información:
```
from traitlets.config import Bool, Unicode
%load_ext sql
%sql mysql+pymysql://root:sistemas@localhost
```
### Requerimiento 1.
Cree una base de datos llamada `talento_humano`.
```
%sql CREATE DATABASE talento_humano;
%sql use talento_humano;
```
### Requerimiento 2.
De acuerdo con los archivos existentes, cree tablas en la base de datos que permitan almacenar toda la información.
```
%%sql
CREATE TABLE IF NOT EXISTS
contacto(empleado_ID CHAR(15),celular VARCHAR(15),direccion VARCHAR(50),email CHAR(50),telefono VARCHAR(25));
%sql DESCRIBE contacto;
%%sql
CREATE TABLE IF NOT EXISTS
departamento(empleado_ID CHAR(15),departamento_ID CHAR(5),departamento VARCHAR(20),cargo VARCHAR(20));
%sql DESCRIBE departamento;
%%sql
CREATE TABLE IF NOT EXISTS
empleados(empleado_ID CHAR(15),nombre VARCHAR(25),f_nacimiento DATE,departamento_ID CHAR(5), SesionLog CHAR(10));
%sql DESCRIBE empleados;
%%sql
CREATE TABLE IF NOT EXISTS
salarios(empleado_ID CHAR(15),salario INT,desde DATE,hasta DATE);
%sql DESCRIBE salarios;
```
### Requerimiento 3.
Establezca el ID de empleado como el registro de identificación en la base de datos, es decir, como la clave primaria de las tablas creadas.
```
%%sql
ALTER TABLE empleados MODIFY COLUMN empleado_ID INT PRIMARY KEY;
%sql DESCRIBE empleados;
%%sql
ALTER TABLE salarios MODIFY COLUMN empleado_ID INT PRIMARY KEY;
%%sql
ALTER TABLE departamento MODIFY COLUMN empleado_ID INT PRIMARY KEY;
%%sql
ALTER TABLE contacto MODIFY COLUMN empleado_ID INT PRIMARY KEY;
```
### Requerimiento 4.
Cargue la información de los archivos a la base de datos.
```
%%sql
LOAD DATA INFILE 'contacto.csv' INTO TABLE contacto FIELDS TERMINATED BY ';' IGNORE 1 LINES (empleado_ID,celular,direccion,email,telefono);
%%sql
LOAD DATA INFILE 'departamento.csv' INTO TABLE departamento FIELDS TERMINATED BY ';' IGNORE 1 LINES (empleado_ID,departamento_ID,departamento,cargo);
%%sql
LOAD DATA INFILE 'empleados.csv' INTO TABLE empleados FIELDS TERMINATED BY ';' IGNORE 1 LINES (empleado_ID,nombre,f_nacimiento,departamento_ID, SesionLog);
%%sql
LOAD DATA INFILE 'salarios.csv' INTO TABLE salarios FIELDS TERMINATED BY ';' IGNORE 1 LINES (empleado_ID,salario,desde,hasta);
```
### Requerimiento 5.
Obtenga salarios de todos los empleados de la compañía.
```
%%sql
select * from contacto limit 3;
%sql SELECT salario from salarios;
```
### Requerimiento 6.
Actualice la tabla que contiene la información de los empleados ya que la columna `SesionLog` no es pertinente para la base de datos.
```
%%sql
ALTER TABLE empleados DROP SesionLog;
```
### Requerimiento 7.
Actualice el número de teléfono del empleado con ID = 70632172 con el dato (984) 331-3366.
```
%%sql
UPDATE contacto SET telefono = "(984) 331-3366" WHERE empleado_ID = '70632172';
```
### Requerimiento 8.
Obtenga los cargos únicos por departamento.
```
%%sql
SELECT DISTINCT departamento,cargo from departamento order by departamento;
```
### Requerimiento 9.
Obtenga el número total de empleados y el salario promedio.
```
%%sql
select count(*), avg(salario) from salarios;
```
### Requerimiento 10.
Obtenga una tabla que contenga el ID de los empleados con cargo `Asistente` y aquellos que tenga un salario mayor a `6000 USD`.
```
%%sql
-- IDs con salario estrictamente mayor a 6000 USD ("mayor a" => >, no >=),
-- unidos con los IDs cuyo cargo es 'Asistente'. UNION elimina duplicados.
select empleado_ID
from salarios where (salario > 6000)
UNION
select empleado_ID
from departamento where (cargo = 'Asistente');
```
### Requerimiento 11.
Obtenga una tabla que muestre la identificación de los empleados y su edad en años.
```
%%sql
SELECT empleado_ID, TIMESTAMPDIFF(YEAR,f_nacimiento,CURDATE()) AS EDAD_AÑOS FROM empleados;
```
### Requerimiento 12.
Obtenga una tabla que contenga el empleado_ID y nombre de los empleados que tienen menos de `30 años`.
```
%%sql
-- Empleados con "menos de 30 años" (estrictamente < 30, no <= 30),
-- devolviendo solo las dos columnas pedidas: empleado_ID y nombre.
select empleado_ID, nombre
from (SELECT empleado_ID, nombre,
             TIMESTAMPDIFF(YEAR, f_nacimiento, CURDATE()) AS EDAD
      FROM empleados) AS table_1
where (EDAD < 30);
```
### Requerimiento 13.
Obtenga una tabla que muestre la identificación de los empleados, la duración de sus respectivos contratos en meses y cuanto tiempo resta del mismo.
```
%%sql
-- Duración total del contrato en meses y el tiempo RESTANTE (de hoy a
-- `hasta`). El original calculaba TIMESTAMPDIFF(MONTH, desde, CURDATE()),
-- que es el tiempo TRANSCURRIDO, no el restante.
SELECT empleado_ID,
       TIMESTAMPDIFF(MONTH, desde, hasta) AS PERIODO_CONTRATACION_MESES,
       TIMESTAMPDIFF(MONTH, CURDATE(), hasta) AS TIEMPO_RESTANTE_MESES
FROM salarios;
```
### Requerimiento 14.
Obtenga el salario promedio por departamento
```
%%sql
SELECT departamento.departamento, avg(salario) FROM salarios JOIN departamento
ON departamento.empleado_ID = salarios.empleado_ID group by departamento.departamento order by avg(salario);
```
### Requerimiento 15.
Obtenga el salario promedio por cargo
```
%%sql
SELECT departamento.cargo, avg(salario) FROM salarios JOIN departamento
ON departamento.empleado_ID = salarios.empleado_ID group by departamento.cargo order by avg(salario);
```
### Requerimiento 16.
Obtenga una tabla que contenga los datos personales del empleado, departamento, cargo y salario.
```
%%sql
SELECT nombre, f_nacimiento, departamento.departamento, departamento.cargo, salarios.salario FROM empleados
INNER JOIN departamento ON departamento.empleado_ID = empleados.empleado_ID
INNER JOIN salarios ON empleados.empleado_ID = salarios.empleado_ID;
```
### Requerimiento 17.
Exporte la tabla anterior a un archivo `.csv`.
```
%%sql
SELECT nombre, f_nacimiento, departamento.departamento, departamento.cargo, salarios.salario FROM empleados
INNER JOIN departamento ON departamento.empleado_ID = empleados.empleado_ID
INNER JOIN salarios ON empleados.empleado_ID = salarios.empleado_ID INTO OUTFILE 'tabla_resumen.csv';
```
### Requerimiento 18.
Obtenga una tabla que muestre el nombre, email y teléfono de los empleados que tienen entre `25 - 28 años`.
```
%%sql
select table_3.nombre, contacto.email, contacto.telefono, table_3.EDAD
from (SELECT empleado_ID,nombre,TIMESTAMPDIFF(YEAR,f_nacimiento,CURDATE()) AS EDAD FROM empleados)
AS table_3 JOIN contacto ON table_3.empleado_ID = contacto.empleado_ID where (EDAD BETWEEN 25 AND 28);
```
Taller práctico - SQL
===
**Juan David Velásquez Henao**
jdvelasq@unal.edu.co
Universidad Nacional de Colombia, Sede Medellín
Facultad de Minas
Medellín, Colombia
---
Haga click [aquí](https://github.com/jdvelasq/R-for-data-science/blob/master/01-uso-interactivo.ipynb) para acceder a la última versión online
Haga click [aquí](http://nbviewer.jupyter.org/github/jdvelasq/R-for-data-science/blob/master/01-uso-interactivo.ipynb) para ver la última versión online en `nbviewer`.
---
| github_jupyter |
```
%load_ext watermark
%watermark -v -p numpy,sklearn,scipy,matplotlib,tensorflow
```
**14장 – 순환 신경망**
_이 노트북은 14장에 있는 모든 샘플 코드와 연습문제 해답을 가지고 있습니다._
# 설정
파이썬 2와 3을 모두 지원합니다. 공통 모듈을 임포트하고 맷플롯립 그림이 노트북 안에 포함되도록 설정하고 생성한 그림을 저장하기 위한 함수를 준비합니다:
```
# 파이썬 2와 파이썬 3 지원
from __future__ import division, print_function, unicode_literals
# 공통
import numpy as np
import os
# 일관된 출력을 위해 유사난수 초기화
def reset_graph(seed=42):
    """Clear the default TF graph and reseed TF and NumPy RNGs.

    Call before building a new graph so repeated notebook runs produce
    identical results (TF 1.x graph-mode API).
    """
    tf.reset_default_graph()
    tf.set_random_seed(seed)
    np.random.seed(seed)
# 맷플롯립 설정
%matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
plt.rcParams['axes.labelsize'] = 14
plt.rcParams['xtick.labelsize'] = 12
plt.rcParams['ytick.labelsize'] = 12
# 한글출력
plt.rcParams['font.family'] = 'NanumBarunGothic'
plt.rcParams['axes.unicode_minus'] = False
# 그림을 저장할 폴더
PROJECT_ROOT_DIR = "."
CHAPTER_ID = "rnn"
def save_fig(fig_id, tight_layout=True):
    """Write the current matplotlib figure to images/<CHAPTER_ID>/<fig_id>.png.

    The figure is saved at 300 dpi; `tight_layout` trims excess margins
    before saving.
    """
    filename = fig_id + ".png"
    path = os.path.join(PROJECT_ROOT_DIR, "images", CHAPTER_ID, filename)
    if tight_layout:
        plt.tight_layout()
    plt.savefig(path, format='png', dpi=300)
```
텐서플로를 임포트합니다:
```
import tensorflow as tf
```
# 기본 RNN
## 수동으로 RNN 만들기
```
reset_graph()
n_inputs = 3
n_neurons = 5
X0 = tf.placeholder(tf.float32, [None, n_inputs])
X1 = tf.placeholder(tf.float32, [None, n_inputs])
Wx = tf.Variable(tf.random_normal(shape=[n_inputs, n_neurons],dtype=tf.float32))
Wy = tf.Variable(tf.random_normal(shape=[n_neurons,n_neurons],dtype=tf.float32))
b = tf.Variable(tf.zeros([1, n_neurons], dtype=tf.float32))
Y0 = tf.tanh(tf.matmul(X0, Wx) + b)
Y1 = tf.tanh(tf.matmul(Y0, Wy) + tf.matmul(X1, Wx) + b)
init = tf.global_variables_initializer()
import numpy as np
X0_batch = np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 0, 1]]) # t = 0
X1_batch = np.array([[9, 8, 7], [0, 0, 0], [6, 5, 4], [3, 2, 1]]) # t = 1
with tf.Session() as sess:
init.run()
Y0_val, Y1_val = sess.run([Y0, Y1], feed_dict={X0: X0_batch, X1: X1_batch})
print(Y0_val)
print(Y1_val)
```
## `static_rnn()`을 사용하여 만들기
```
n_inputs = 3
n_neurons = 5
reset_graph()
X0 = tf.placeholder(tf.float32, [None, n_inputs])
X1 = tf.placeholder(tf.float32, [None, n_inputs])
basic_cell = tf.contrib.rnn.BasicRNNCell(num_units=n_neurons)
output_seqs, states = tf.contrib.rnn.static_rnn(basic_cell, [X0, X1],
dtype=tf.float32)
Y0, Y1 = output_seqs
init = tf.global_variables_initializer()
X0_batch = np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 0, 1]])
X1_batch = np.array([[9, 8, 7], [0, 0, 0], [6, 5, 4], [3, 2, 1]])
with tf.Session() as sess:
init.run()
Y0_val, Y1_val = sess.run([Y0, Y1], feed_dict={X0: X0_batch, X1: X1_batch})
Y0_val
Y1_val
from tensorflow_graph_in_jupyter import show_graph
show_graph(tf.get_default_graph())
```
## 시퀀스 패딩
```
n_steps = 2
n_inputs = 3
n_neurons = 5
reset_graph()
X = tf.placeholder(tf.float32, [None, n_steps, n_inputs])
X_seqs = tf.unstack(tf.transpose(X, perm=[1, 0, 2]))
basic_cell = tf.contrib.rnn.BasicRNNCell(num_units=n_neurons)
output_seqs, states = tf.contrib.rnn.static_rnn(basic_cell, X_seqs,
dtype=tf.float32)
outputs = tf.transpose(tf.stack(output_seqs), perm=[1, 0, 2])
init = tf.global_variables_initializer()
X_batch = np.array([
# t = 0 t = 1
[[0, 1, 2], [9, 8, 7]], # 샘플 1
[[3, 4, 5], [0, 0, 0]], # 샘플 2
[[6, 7, 8], [6, 5, 4]], # 샘플 3
[[9, 0, 1], [3, 2, 1]], # 샘플 4
])
with tf.Session() as sess:
init.run()
outputs_val = outputs.eval(feed_dict={X: X_batch})
print(outputs_val)
print(np.transpose(outputs_val, axes=[1, 0, 2])[1])
```
## Using `dynamic_rnn()`
```
n_steps = 2
n_inputs = 3
n_neurons = 5
reset_graph()
X = tf.placeholder(tf.float32, [None, n_steps, n_inputs])
basic_cell = tf.contrib.rnn.BasicRNNCell(num_units=n_neurons)
outputs, states = tf.nn.dynamic_rnn(basic_cell, X, dtype=tf.float32)
init = tf.global_variables_initializer()
X_batch = np.array([
[[0, 1, 2], [9, 8, 7]], # instance 1
[[3, 4, 5], [0, 0, 0]], # instance 2
[[6, 7, 8], [6, 5, 4]], # instance 3
[[9, 0, 1], [3, 2, 1]], # instance 4
])
with tf.Session() as sess:
init.run()
outputs_val = outputs.eval(feed_dict={X: X_batch})
print(outputs_val)
show_graph(tf.get_default_graph())
```
## 시퀀스 길이 지정
```
n_steps = 2
n_inputs = 3
n_neurons = 5
reset_graph()
X = tf.placeholder(tf.float32, [None, n_steps, n_inputs])
basic_cell = tf.contrib.rnn.BasicRNNCell(num_units=n_neurons)
seq_length = tf.placeholder(tf.int32, [None])
outputs, states = tf.nn.dynamic_rnn(basic_cell, X, dtype=tf.float32,
sequence_length=seq_length)
init = tf.global_variables_initializer()
X_batch = np.array([
# 스텝 0 스텝 1
[[0, 1, 2], [9, 8, 7]], # 샘플 1
[[3, 4, 5], [0, 0, 0]], # 샘플 2 (0 벡터로 패딩)
[[6, 7, 8], [6, 5, 4]], # 샘플 3
[[9, 0, 1], [3, 2, 1]], # 샘플 4
])
seq_length_batch = np.array([2, 1, 2, 2])
with tf.Session() as sess:
init.run()
outputs_val, states_val = sess.run(
[outputs, states], feed_dict={X: X_batch, seq_length: seq_length_batch})
print(outputs_val)
print(states_val)
```
## 시퀀스 분류기 훈련하기
```
reset_graph()
n_steps = 28
n_inputs = 28
n_neurons = 150
n_outputs = 10
learning_rate = 0.001
X = tf.placeholder(tf.float32, [None, n_steps, n_inputs])
y = tf.placeholder(tf.int32, [None])
basic_cell = tf.contrib.rnn.BasicRNNCell(num_units=n_neurons)
outputs, states = tf.nn.dynamic_rnn(basic_cell, X, dtype=tf.float32)
logits = tf.layers.dense(states, n_outputs)
xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y,
logits=logits)
loss = tf.reduce_mean(xentropy)
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
training_op = optimizer.minimize(loss)
correct = tf.nn.in_top_k(logits, y, 1)
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
init = tf.global_variables_initializer()
```
주의: `tf.examples.tutorials.mnist`은 삭제될 예정이므로 대신 `tf.keras.datasets.mnist`를 사용하겠습니다.
```
(X_train, y_train), (X_test, y_test) = tf.keras.datasets.mnist.load_data()
X_train = X_train.astype(np.float32).reshape(-1, 28*28) / 255.0
X_test = X_test.astype(np.float32).reshape(-1, 28*28) / 255.0
y_train = y_train.astype(np.int32)
y_test = y_test.astype(np.int32)
X_valid, X_train = X_train[:5000], X_train[5000:]
y_valid, y_train = y_train[:5000], y_train[5000:]
X_test = X_test.reshape((-1, n_steps, n_inputs))
X_valid = X_valid.reshape((-1, n_steps, n_inputs))
def shuffle_batch(X, y, batch_size):
    """Yield (X_batch, y_batch) minibatches covering X/y once, in random order.

    X and y are indexed with the same permutation, so pairs stay aligned.
    """
    order = np.random.permutation(len(X))
    num_batches = len(X) // batch_size
    for chunk in np.array_split(order, num_batches):
        yield X[chunk], y[chunk]
# from tensorflow.examples.tutorials.mnist import input_data
# mnist = input_data.read_data_sets("/tmp/data/")
# X_test = mnist.test.images.reshape((-1, n_steps, n_inputs))
# y_test = mnist.test.labels
n_epochs = 100
batch_size = 150
with tf.Session() as sess:
init.run()
for epoch in range(n_epochs):
for X_batch, y_batch in shuffle_batch(X_train, y_train, batch_size):
X_batch = X_batch.reshape((-1, n_steps, n_inputs))
sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
acc_batch = accuracy.eval(feed_dict={X: X_batch, y: y_batch})
acc_valid = accuracy.eval(feed_dict={X: X_valid, y: y_valid})
print(epoch, "배치 데이터 정확도:", acc_batch, "검증 세트 정확도:", acc_valid)
```
# 다층 RNN
```
reset_graph()
n_steps = 28
n_inputs = 28
n_outputs = 10
learning_rate = 0.001
X = tf.placeholder(tf.float32, [None, n_steps, n_inputs])
y = tf.placeholder(tf.int32, [None])
n_neurons = 100
n_layers = 3
layers = [tf.contrib.rnn.BasicRNNCell(num_units=n_neurons,
activation=tf.nn.relu)
for layer in range(n_layers)]
multi_layer_cell = tf.contrib.rnn.MultiRNNCell(layers)
outputs, states = tf.nn.dynamic_rnn(multi_layer_cell, X, dtype=tf.float32)
states_concat = tf.concat(axis=1, values=states)
logits = tf.layers.dense(states_concat, n_outputs)
xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
loss = tf.reduce_mean(xentropy)
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
training_op = optimizer.minimize(loss)
correct = tf.nn.in_top_k(logits, y, 1)
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
init = tf.global_variables_initializer()
n_epochs = 10
batch_size = 150
with tf.Session() as sess:
init.run()
for epoch in range(n_epochs):
for X_batch, y_batch in shuffle_batch(X_train, y_train, batch_size):
X_batch = X_batch.reshape((-1, n_steps, n_inputs))
sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
acc_batch = accuracy.eval(feed_dict={X: X_batch, y: y_batch})
acc_valid = accuracy.eval(feed_dict={X: X_valid, y: y_valid})
print(epoch, "배치 데이터 정확도:", acc_batch, "검증 세트 정확도:", acc_valid)
```
# 시계열
```
t_min, t_max = 0, 30
resolution = 0.1
def time_series(t):
    """Synthetic signal: a slow t*sin(t)/3 trend plus a fast 2*sin(5t) wave."""
    slow_component = t * np.sin(t) / 3
    fast_component = 2 * np.sin(t * 5)
    return slow_component + fast_component
def next_batch(batch_size, n_steps):
    """Sample `batch_size` random windows of the synthetic series.

    Returns (X, y) with shape (batch_size, n_steps, 1) each, where y is X
    shifted one time step into the future (next-value prediction targets).
    """
    starts = np.random.rand(batch_size, 1) * (t_max - t_min - n_steps * resolution)
    sample_times = starts + np.arange(0., n_steps + 1) * resolution
    values = time_series(sample_times)
    inputs = values[:, :-1].reshape(-1, n_steps, 1)
    targets = values[:, 1:].reshape(-1, n_steps, 1)
    return inputs, targets
t = np.linspace(t_min, t_max, int((t_max - t_min) / resolution))
n_steps = 20
t_instance = np.linspace(12.2, 12.2 + resolution * (n_steps + 1), n_steps + 1)
plt.figure(figsize=(11,4))
plt.subplot(121)
plt.title("시계열 데이터 (인공 생성)", fontsize=14)
plt.plot(t, time_series(t), label=r"$t . \sin(t) / 3 + 2 . \sin(5t)$")
plt.plot(t_instance[:-1], time_series(t_instance[:-1]), "b-", linewidth=3, label="훈련 샘플")
plt.legend(loc="lower left", fontsize=14)
plt.axis([0, 30, -17, 13])
plt.xlabel("시간")
plt.ylabel("값", rotation=0)
plt.subplot(122)
plt.title("훈련 샘플", fontsize=14)
plt.plot(t_instance[:-1], time_series(t_instance[:-1]), "bo", markersize=12, label="샘플")
plt.plot(t_instance[1:], time_series(t_instance[1:]), "w*", markeredgewidth=0.5, markeredgecolor="b", markersize=14, label="타깃")
plt.legend(loc="upper left")
plt.xlabel("시간")
save_fig("time_series_plot")
plt.show()
X_batch, y_batch = next_batch(1, n_steps)
np.c_[X_batch[0], y_batch[0]]
```
## `OutputProjectionWrapper` 사용하기
RNN 하나를 만들어 보겠습니다. 이 신경망은 100개의 순환 뉴런을 가지고 있고 각 훈련 샘플은 20개의 입력 길이로 구성되므로 20개의 타임 스텝에 펼칠 것입니다. 각 입력은 하나의 특성을 가집니다(각 시간에서의 값 하나). 타깃도 20개의 입력 시퀀스이고 하나의 값을 가집니다:
```
reset_graph()
n_steps = 20
n_inputs = 1
n_neurons = 100
n_outputs = 1
X = tf.placeholder(tf.float32, [None, n_steps, n_inputs])
y = tf.placeholder(tf.float32, [None, n_steps, n_outputs])
cell = tf.contrib.rnn.BasicRNNCell(num_units=n_neurons, activation=tf.nn.relu)
outputs, states = tf.nn.dynamic_rnn(cell, X, dtype=tf.float32)
```
각 타임 스텝에서 크기가 100인 출력 벡터가 만들어 집니다. 하지만 각 타임 스텝에서 하나의 출력 값을 원합니다. 간단한 방법은 `OutputProjectionWrapper`로 셀을 감싸는 것입니다.
```
reset_graph()
n_steps = 20
n_inputs = 1
n_neurons = 100
n_outputs = 1
X = tf.placeholder(tf.float32, [None, n_steps, n_inputs])
y = tf.placeholder(tf.float32, [None, n_steps, n_outputs])
cell = tf.contrib.rnn.OutputProjectionWrapper(
tf.contrib.rnn.BasicRNNCell(num_units=n_neurons, activation=tf.nn.relu),
output_size=n_outputs)
outputs, states = tf.nn.dynamic_rnn(cell, X, dtype=tf.float32)
learning_rate = 0.001
loss = tf.reduce_mean(tf.square(outputs - y)) # MSE
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
training_op = optimizer.minimize(loss)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
n_iterations = 1500
batch_size = 50
with tf.Session() as sess:
init.run()
for iteration in range(n_iterations):
X_batch, y_batch = next_batch(batch_size, n_steps)
sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
if iteration % 100 == 0:
mse = loss.eval(feed_dict={X: X_batch, y: y_batch})
print(iteration, "\tMSE:", mse)
saver.save(sess, "./my_time_series_model") # not shown in the book
with tf.Session() as sess: # 책에는 없음
saver.restore(sess, "./my_time_series_model") # 책에는 없음
X_new = time_series(np.array(t_instance[:-1].reshape(-1, n_steps, n_inputs)))
y_pred = sess.run(outputs, feed_dict={X: X_new})
y_pred
plt.title("모델 테스트", fontsize=14)
plt.plot(t_instance[:-1], time_series(t_instance[:-1]), "bo", markersize=12, label="샘플")
plt.plot(t_instance[1:], time_series(t_instance[1:]), "w*", markeredgewidth=0.5, markeredgecolor="b", markersize=14, label="타깃")
plt.plot(t_instance[1:], y_pred[0,:,0], "r.", markersize=10, label="예측")
plt.legend(loc="upper left")
plt.xlabel("시간")
save_fig("time_series_pred_plot")
plt.show()
```
## `OutputProjectionWrapper` 사용하지 않기
```
reset_graph()
n_steps = 20
n_inputs = 1
n_neurons = 100
X = tf.placeholder(tf.float32, [None, n_steps, n_inputs])
y = tf.placeholder(tf.float32, [None, n_steps, n_outputs])
cell = tf.contrib.rnn.BasicRNNCell(num_units=n_neurons, activation=tf.nn.relu)
rnn_outputs, states = tf.nn.dynamic_rnn(cell, X, dtype=tf.float32)
n_outputs = 1
learning_rate = 0.001
stacked_rnn_outputs = tf.reshape(rnn_outputs, [-1, n_neurons])
stacked_outputs = tf.layers.dense(stacked_rnn_outputs, n_outputs)
outputs = tf.reshape(stacked_outputs, [-1, n_steps, n_outputs])
loss = tf.reduce_mean(tf.square(outputs - y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
training_op = optimizer.minimize(loss)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
n_iterations = 1500
batch_size = 50
with tf.Session() as sess:
init.run()
for iteration in range(n_iterations):
X_batch, y_batch = next_batch(batch_size, n_steps)
sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
if iteration % 100 == 0:
mse = loss.eval(feed_dict={X: X_batch, y: y_batch})
print(iteration, "\tMSE:", mse)
X_new = time_series(np.array(t_instance[:-1].reshape(-1, n_steps, n_inputs)))
y_pred = sess.run(outputs, feed_dict={X: X_new})
saver.save(sess, "./my_time_series_model")
y_pred
plt.title("모델 테스트", fontsize=14)
plt.plot(t_instance[:-1], time_series(t_instance[:-1]), "bo", markersize=10, label="instance")
plt.plot(t_instance[1:], time_series(t_instance[1:]), "w*", markersize=10, label="target")
plt.plot(t_instance[1:], y_pred[0,:,0], "r.", markersize=10, label="prediction")
plt.legend(loc="upper left")
plt.xlabel("시간")
plt.show()
```
## 새로운 시퀀스 생성하기
```
with tf.Session() as sess: # 책에는 없음
saver.restore(sess, "./my_time_series_model") # 책에는 없음
sequence = [0.] * n_steps
for iteration in range(300):
X_batch = np.array(sequence[-n_steps:]).reshape(1, n_steps, 1)
y_pred = sess.run(outputs, feed_dict={X: X_batch})
sequence.append(y_pred[0, -1, 0])
plt.figure(figsize=(8,4))
plt.plot(np.arange(len(sequence)), sequence, "b-")
plt.plot(t[:n_steps], sequence[:n_steps], "b-", linewidth=3)
plt.xlabel("시간")
plt.ylabel("값")
plt.show()
with tf.Session() as sess:
saver.restore(sess, "./my_time_series_model")
sequence1 = [0. for i in range(n_steps)]
for iteration in range(len(t) - n_steps):
X_batch = np.array(sequence1[-n_steps:]).reshape(1, n_steps, 1)
y_pred = sess.run(outputs, feed_dict={X: X_batch})
sequence1.append(y_pred[0, -1, 0])
sequence2 = [time_series(i * resolution + t_min + (t_max-t_min/3)) for i in range(n_steps)]
for iteration in range(len(t) - n_steps):
X_batch = np.array(sequence2[-n_steps:]).reshape(1, n_steps, 1)
y_pred = sess.run(outputs, feed_dict={X: X_batch})
sequence2.append(y_pred[0, -1, 0])
plt.figure(figsize=(11,4))
plt.subplot(121)
plt.plot(t, sequence1, "b-")
plt.plot(t[:n_steps], sequence1[:n_steps], "b-", linewidth=3)
plt.xlabel("시간")
plt.ylabel("값", rotation=0)
plt.subplot(122)
plt.plot(t, sequence2, "b-")
plt.plot(t[:n_steps], sequence2[:n_steps], "b-", linewidth=3)
plt.xlabel("시간")
save_fig("creative_sequence_plot")
plt.show()
```
# 심층 RNN
## MultiRNNCell
```
reset_graph()
n_inputs = 2
n_steps = 5
X = tf.placeholder(tf.float32, [None, n_steps, n_inputs])
n_neurons = 100
n_layers = 3
layers = [tf.contrib.rnn.BasicRNNCell(num_units=n_neurons)
for layer in range(n_layers)]
multi_layer_cell = tf.contrib.rnn.MultiRNNCell(layers)
outputs, states = tf.nn.dynamic_rnn(multi_layer_cell, X, dtype=tf.float32)
init = tf.global_variables_initializer()
X_batch = np.random.rand(2, n_steps, n_inputs)
with tf.Session() as sess:
init.run()
outputs_val, states_val = sess.run([outputs, states], feed_dict={X: X_batch})
outputs_val.shape
```
## 여러 GPU에 심층 RNN 분산하기
이렇게 사용해서는 **안됩니다**:
```
with tf.device("/gpu:0"): # 이 할당은 무시됩니다
layer1 = tf.contrib.rnn.BasicRNNCell(num_units=n_neurons)
with tf.device("/gpu:1"): # 이 할당은 무시됩니다
layer2 = tf.contrib.rnn.BasicRNNCell(num_units=n_neurons)
```
대신 `DeviceCellWrapper`를 사용합니다:
```
import tensorflow as tf
class DeviceCellWrapper(tf.contrib.rnn.RNNCell):
    """RNN-cell wrapper that pins all ops of the wrapped cell to one device."""
    def __init__(self, device, cell):
        self._cell = cell      # the wrapped RNN cell
        self._device = device  # device string, e.g. "/gpu:0" or "/cpu:0"
    @property
    def state_size(self):
        # Delegate the state size to the wrapped cell.
        return self._cell.state_size
    @property
    def output_size(self):
        # Delegate the output size to the wrapped cell.
        return self._cell.output_size
    def __call__(self, inputs, state, scope=None):
        # Every op the inner cell creates is placed on the chosen device.
        with tf.device(self._device):
            return self._cell(inputs, state, scope)
reset_graph()
n_inputs = 5
n_steps = 20
n_neurons = 100
X = tf.placeholder(tf.float32, shape=[None, n_steps, n_inputs])
devices = ["/cpu:0", "/cpu:0", "/cpu:0"] # 만약 GPU가 세 개 있다면 ["/gpu:0", "/gpu:1", "/gpu:2"]로 바꿉니다
cells = [DeviceCellWrapper(dev,tf.contrib.rnn.BasicRNNCell(num_units=n_neurons))
for dev in devices]
multi_layer_cell = tf.contrib.rnn.MultiRNNCell(cells)
outputs, states = tf.nn.dynamic_rnn(multi_layer_cell, X, dtype=tf.float32)
```
또 다른 방법으로 텐서플로 1.1부터 `tf.contrib.rnn.DeviceWrapper` 클래스를 사용할 수 있습니다(텐서플로 1.2부터는 `tf.nn.rnn_cell.DeviceWrapper`가 되었습니다).
```
init = tf.global_variables_initializer()
with tf.Session() as sess:
init.run()
print(sess.run(outputs, feed_dict={X: np.random.rand(2, n_steps, n_inputs)}))
```
## 드롭아웃
```
reset_graph()
n_inputs = 1
n_neurons = 100
n_layers = 3
n_steps = 20
n_outputs = 1
X = tf.placeholder(tf.float32, [None, n_steps, n_inputs])
y = tf.placeholder(tf.float32, [None, n_steps, n_outputs])
```
노트: `input_keep_prob` 매개변수는 플레이스홀더로 훈련하는 동안에는 어느 값이나 가능하고 테스트할 때는 1.0으로 지정합니다(드롭아웃을 끕니다).
```
keep_prob = tf.placeholder_with_default(1.0, shape=())
cells = [tf.contrib.rnn.BasicRNNCell(num_units=n_neurons)
for layer in range(n_layers)]
cells_drop = [tf.contrib.rnn.DropoutWrapper(cell, input_keep_prob=keep_prob)
for cell in cells]
multi_layer_cell = tf.contrib.rnn.MultiRNNCell(cells_drop)
rnn_outputs, states = tf.nn.dynamic_rnn(multi_layer_cell, X, dtype=tf.float32)
learning_rate = 0.01
stacked_rnn_outputs = tf.reshape(rnn_outputs, [-1, n_neurons])
stacked_outputs = tf.layers.dense(stacked_rnn_outputs, n_outputs)
outputs = tf.reshape(stacked_outputs, [-1, n_steps, n_outputs])
loss = tf.reduce_mean(tf.square(outputs - y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
training_op = optimizer.minimize(loss)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
n_iterations = 1500
batch_size = 50
train_keep_prob = 0.5
with tf.Session() as sess:
init.run()
for iteration in range(n_iterations):
X_batch, y_batch = next_batch(batch_size, n_steps)
_, mse = sess.run([training_op, loss],
feed_dict={X: X_batch, y: y_batch,
keep_prob: train_keep_prob})
if iteration % 100 == 0: # not shown in the book
print(iteration, "훈련 MSE:", mse) # not shown
saver.save(sess, "./my_dropout_time_series_model")
with tf.Session() as sess:
saver.restore(sess, "./my_dropout_time_series_model")
X_new = time_series(np.array(t_instance[:-1].reshape(-1, n_steps, n_inputs)))
y_pred = sess.run(outputs, feed_dict={X: X_new})
plt.title("모델 테스트", fontsize=14)
plt.plot(t_instance[:-1], time_series(t_instance[:-1]), "bo", markersize=10, label="instance")
plt.plot(t_instance[1:], time_series(t_instance[1:]), "w*", markersize=10, label="target")
plt.plot(t_instance[1:], y_pred[0,:,0], "r.", markersize=10, label="prediction")
plt.legend(loc="upper left")
plt.xlabel("시간")
plt.show()
```
이런 드롭아웃이 이 경우엔 크게 도움이 안되네요. :/
# LSTM
```
reset_graph()
lstm_cell = tf.contrib.rnn.BasicLSTMCell(num_units=n_neurons)
n_steps = 28
n_inputs = 28
n_neurons = 150
n_outputs = 10
n_layers = 3
learning_rate = 0.001
X = tf.placeholder(tf.float32, [None, n_steps, n_inputs])
y = tf.placeholder(tf.int32, [None])
lstm_cells = [tf.contrib.rnn.BasicLSTMCell(num_units=n_neurons)
for layer in range(n_layers)]
multi_cell = tf.contrib.rnn.MultiRNNCell(lstm_cells)
outputs, states = tf.nn.dynamic_rnn(multi_cell, X, dtype=tf.float32)
top_layer_h_state = states[-1][1]
logits = tf.layers.dense(top_layer_h_state, n_outputs, name="softmax")
xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
loss = tf.reduce_mean(xentropy, name="loss")
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
training_op = optimizer.minimize(loss)
correct = tf.nn.in_top_k(logits, y, 1)
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
init = tf.global_variables_initializer()
states
top_layer_h_state
n_epochs = 10
batch_size = 150
with tf.Session() as sess:
init.run()
for epoch in range(n_epochs):
for X_batch, y_batch in shuffle_batch(X_train, y_train, batch_size):
X_batch = X_batch.reshape((-1, n_steps, n_inputs))
sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
acc_batch = accuracy.eval(feed_dict={X: X_batch, y: y_batch})
acc_valid = accuracy.eval(feed_dict={X: X_valid, y: y_valid})
print("에포크", epoch, "배치 데이터 정확도 =", acc_batch, "검증 세트 정확도 =", acc_valid)
acc_test = accuracy.eval(feed_dict={X: X_test, y: y_test})
print("테스트 세트 정확도 =", acc_test)
lstm_cell = tf.contrib.rnn.LSTMCell(num_units=n_neurons, use_peepholes=True)
gru_cell = tf.contrib.rnn.GRUCell(num_units=n_neurons)
```
# 임베딩
이 섹션은 텐서플로의 [Word2Vec 튜토리얼](https://www.tensorflow.org/versions/r0.11/tutorials/word2vec/index.html)을 기반으로 합니다.
## 데이터 추출
```
from six.moves import urllib
import errno
import os
import zipfile
WORDS_PATH = "datasets/words"
WORDS_URL = 'http://mattmahoney.net/dc/text8.zip'
def mkdir_p(path):
    """Create `path` (including parents); succeed silently if it exists.

    Python-2-compatible stand-in for ``os.makedirs(path, exist_ok=True)``
    (available on Python 3.2+).
    """
    try:
        os.makedirs(path)
    except OSError as exc:
        # Re-raise anything that is not "already exists as a directory".
        if exc.errno != errno.EEXIST or not os.path.isdir(path):
            raise
def fetch_words_data(words_url=WORDS_URL, words_path=WORDS_PATH):
    """Download the text8 corpus zip (once) and return it as a list of words.

    The zip is cached under `words_path`; subsequent calls skip the download.
    """
    os.makedirs(words_path, exist_ok=True)
    zip_path = os.path.join(words_path, "words.zip")
    if not os.path.exists(zip_path):
        urllib.request.urlretrieve(words_url, zip_path)
    with zipfile.ZipFile(zip_path) as archive:
        first_member = archive.namelist()[0]
        raw_bytes = archive.read(first_member)
    return raw_bytes.decode("ascii").split()
words = fetch_words_data()
words[:5]
```
## 사전 구축
```
from collections import Counter
vocabulary_size = 50000
vocabulary = [("UNK", None)] + Counter(words).most_common(vocabulary_size - 1)
vocabulary = np.array([word for word, _ in vocabulary])
dictionary = {word: code for code, word in enumerate(vocabulary)}
data = np.array([dictionary.get(word, 0) for word in words])
" ".join(words[:9]), data[:9]
" ".join([vocabulary[word_index] for word_index in [5241, 3081, 12, 6, 195, 2, 3134, 46, 59]])
words[24], data[24]
```
## 배치 생성
```
import random
from collections import deque
def generate_batch(batch_size, num_skips, skip_window):
    """Build one skip-gram training batch from the global `data` word-id array.

    Advances the global cursor `data_index` so successive calls walk through
    the corpus. Each center word is reused `num_skips` times, paired with
    distinct context words drawn from a window of `skip_window` on each side.
    Returns (batch, labels): int32 arrays of shape (batch_size,) and
    (batch_size, 1).
    """
    global data_index
    assert batch_size % num_skips == 0
    assert num_skips <= 2 * skip_window
    batch = np.ndarray(shape=(batch_size), dtype=np.int32)
    labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
    span = 2 * skip_window + 1  # [ skip_window target skip_window ]
    # Sliding window over the corpus; maxlen makes old entries fall off.
    buffer = deque(maxlen=span)
    for _ in range(span):
        buffer.append(data[data_index])
        data_index = (data_index + 1) % len(data)  # wrap around at corpus end
    for i in range(batch_size // num_skips):
        target = skip_window  # the target label sits at the center of buffer
        targets_to_avoid = [ skip_window ]
        for j in range(num_skips):
            # Resample until we pick a context position not used yet.
            while target in targets_to_avoid:
                target = random.randint(0, span - 1)
            targets_to_avoid.append(target)
            batch[i * num_skips + j] = buffer[skip_window]
            labels[i * num_skips + j, 0] = buffer[target]
        buffer.append(data[data_index])
        data_index = (data_index + 1) % len(data)
    return batch, labels
data_index=0
batch, labels = generate_batch(8, 2, 1)
batch, [vocabulary[word] for word in batch]
labels, [vocabulary[word] for word in labels[:, 0]]
```
## 모델 구성
```
batch_size = 128
embedding_size = 128 # 임베딩 벡터 차원
skip_window = 1 # 고려할 왼쪽과 오른쪽 단어의 개수
num_skips = 2 # 레이블을 생성하기 위한 입력의 재사용 횟수
# 가까운 이웃을 샘플링하기 위해 랜덤한 검증 세트를 만듭니다.
# 검증 샘플은 가장 흔한 단어인 낮은 ID 번호를 가진 것으로 제한합니다.
valid_size = 16 # 유사도를 평가하기 위해 랜덤하게 구성할 단어 세트 크기
valid_window = 100 # 검증 샘플을 전체 샘플의 앞 부분에서만 선택합니다
valid_examples = np.random.choice(valid_window, valid_size, replace=False)
num_sampled = 64 # 부정 샘플링(negative sampling)의 수
learning_rate = 0.01
reset_graph()
# 입력 데이터
train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
vocabulary_size = 50000
embedding_size = 150
# 입력을 위해 임베딩을 조회합니다
init_embeds = tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0)
embeddings = tf.Variable(init_embeds)
train_inputs = tf.placeholder(tf.int32, shape=[None])
embed = tf.nn.embedding_lookup(embeddings, train_inputs)
# NCE 손실을 위한 변수를 만듭니다
nce_weights = tf.Variable(
tf.truncated_normal([vocabulary_size, embedding_size],
stddev=1.0 / np.sqrt(embedding_size)))
nce_biases = tf.Variable(tf.zeros([vocabulary_size]))
# 배치에서 NCE 손실의 평균을 계산합니다 (compute the average NCE loss for the batch).
# tf.nce_loss는 자동으로 손실을 평가할 때마다 음성 레이블에서 새로운 샘플을 뽑습니다.
loss = tf.reduce_mean(
tf.nn.nce_loss(nce_weights, nce_biases, train_labels, embed,
num_sampled, vocabulary_size))
# Adam 옵티마이저
optimizer = tf.train.AdamOptimizer(learning_rate)
training_op = optimizer.minimize(loss)
# 미니배치 샘플과 모든 임베딩 사이의 코사인 유사도를 계산합니다
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), axis=1, keepdims=True))
normalized_embeddings = embeddings / norm
valid_embeddings = tf.nn.embedding_lookup(normalized_embeddings, valid_dataset)
similarity = tf.matmul(valid_embeddings, normalized_embeddings, transpose_b=True)
# 초기화를 위한 연산
init = tf.global_variables_initializer()
```
## 모델 훈련
```
num_steps = 10001
with tf.Session() as session:
init.run()
average_loss = 0
for step in range(num_steps):
print("\r반복: {}".format(step), end="\t")
batch_inputs, batch_labels = generate_batch(batch_size, num_skips, skip_window)
feed_dict = {train_inputs : batch_inputs, train_labels : batch_labels}
# 훈련 연산을 평가하여 스텝을 한 단계를 업데이트합니다(session.run()에서 반환된 값을 사용합니다)
_, loss_val = session.run([training_op, loss], feed_dict=feed_dict)
average_loss += loss_val
if step % 2000 == 0:
if step > 0:
average_loss /= 2000
# 평균 손실은 2000개 배치에 대한 손실의 추정입니다.
print("스텝 ", step, "에서의 평균 손실: ", average_loss)
average_loss = 0
# 이 코드는 비용이 많이 듭니다 (500 스텝마다 ~20%씩 느려집니다)
if step % 10000 == 0:
sim = similarity.eval()
for i in range(valid_size):
valid_word = vocabulary[valid_examples[i]]
top_k = 8 # 가장 가까운 단어의 개수
nearest = (-sim[i, :]).argsort()[1:top_k+1]
log_str = "%s에 가장 가까운 단어:" % valid_word
for k in range(top_k):
close_word = vocabulary[nearest[k]]
log_str = "%s %s," % (log_str, close_word)
print(log_str)
final_embeddings = normalized_embeddings.eval()
```
마지막 임베딩을 저장합니다(물론 텐서플로의 `Saver`를 사용해도 됩니다):
```
np.save("./my_final_embeddings.npy", final_embeddings)
```
## 임베딩 그래프
```
def plot_with_labels(low_dim_embs, labels):
    """Scatter-plot 2-D embedding points and annotate each one with its label."""
    assert low_dim_embs.shape[0] >= len(labels), "임베딩보다 레이블이 많습니다."
    plt.figure(figsize=(18, 18))  # size in inches
    for idx, label in enumerate(labels):
        x, y = low_dim_embs[idx, :]
        plt.scatter(x, y)
        # Offset the text slightly so it does not sit on top of the marker.
        plt.annotate(label,
                     xy=(x, y),
                     xytext=(5, 2),
                     textcoords='offset points',
                     ha='right',
                     va='bottom')
from sklearn.manifold import TSNE
tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000)
plot_only = 500
low_dim_embs = tsne.fit_transform(final_embeddings[:plot_only,:])
labels = [vocabulary[i] for i in range(plot_only)]
plot_with_labels(low_dim_embs, labels)
```
# 기계 번역
`basic_rnn_seq2seq()` 함수는 간단한 인코더/디코더 모델을 만듭니다. 먼저 `encoder_inputs`를 상태 벡터로 인코딩하는 RNN을 실행하고 그다음 `decoder_inputs`을 마지막 인코더 상태로 초기화시킨 디코더를 실행합니다. 인코더와 디코더는 같은 RNN 셀 타입을 사용하지만 파라미터를 공유하지는 않습니다.
```
import tensorflow as tf

reset_graph()

# Model hyperparameters.
n_steps = 50
n_neurons = 200
n_layers = 3
num_encoder_symbols = 20000
num_decoder_symbols = 20000
embedding_size = 150
learning_rate = 0.01

X = tf.placeholder(tf.int32, [None, n_steps])  # English sentences
Y = tf.placeholder(tf.int32, [None, n_steps])  # French translations
W = tf.placeholder(tf.float32, [None, n_steps - 1, 1])  # per-timestep loss weights
# Teacher forcing: feed Y shifted by one step and predict the next token.
Y_input = Y[:, :-1]
Y_target = Y[:, 1:]
encoder_inputs = tf.unstack(tf.transpose(X))  # list of 1D tensors
decoder_inputs = tf.unstack(tf.transpose(Y_input))  # list of 1D tensors
lstm_cells = [tf.contrib.rnn.BasicLSTMCell(num_units=n_neurons)
              for layer in range(n_layers)]
cell = tf.contrib.rnn.MultiRNNCell(lstm_cells)
output_seqs, states = tf.contrib.legacy_seq2seq.embedding_rnn_seq2seq(
    encoder_inputs,
    decoder_inputs,
    cell,
    num_encoder_symbols,
    num_decoder_symbols,
    embedding_size)
# Back to batch-major layout: (batch, time, vocab).
logits = tf.transpose(tf.unstack(output_seqs), perm=[1, 0, 2])
logits_flat = tf.reshape(logits, [-1, num_decoder_symbols])
Y_target_flat = tf.reshape(Y_target, [-1])
W_flat = tf.reshape(W, [-1])
# Weighted sequence cross-entropy loss.
xentropy = W_flat * tf.nn.sparse_softmax_cross_entropy_with_logits(labels=Y_target_flat, logits=logits_flat)
loss = tf.reduce_mean(xentropy)
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
training_op = optimizer.minimize(loss)
init = tf.global_variables_initializer()
```
# 연습문제 해답
## 1. to 6.
부록 A 참조.
## 7. 임베딩된 레버(Reber) 문법
먼저 문법에 맞는 문자열을 생성하는 함수가 필요합니다. 이 문법은 각 상태에서 가능한 전이 상태의 리스트입니다. 하나의 전이는 출력할 문자열(또는 생성할 문법)과 다음 상태를 지정합니다.
```
from random import choice, seed
# 일관된 출력을 위한 유사난수 초기화
seed(42)
np.random.seed(42)
default_reber_grammar = [
[("B", 1)], # (상태 0) =B=>(상태 1)
[("T", 2), ("P", 3)], # (상태 1) =T=>(상태 2) or =P=>(상태 3)
[("S", 2), ("X", 4)], # (상태 2) =S=>(상태 2) or =X=>(상태 4)
[("T", 3), ("V", 5)], # 등등..
[("X", 3), ("S", 6)],
[("P", 4), ("V", 6)],
[("E", None)]] # (상태 6) =E=>(종료 상태)
embedded_reber_grammar = [
[("B", 1)],
[("T", 2), ("P", 3)],
[(default_reber_grammar, 4)],
[(default_reber_grammar, 5)],
[("T", 6)],
[("P", 6)],
[("E", None)]]
def generate_string(grammar):
    """Walk *grammar*'s transition table from state 0 until a terminal state,
    concatenating the emitted symbols (nested grammars are expanded recursively)."""
    pieces = []
    state = 0
    while state is not None:
        symbol, state = choice(grammar[state])
        if isinstance(symbol, list):
            # The production is itself a grammar: generate a sub-string from it.
            symbol = generate_string(grammar=symbol)
        pieces.append(symbol)
    return "".join(pieces)
```
기본 레버 문법에 맞는 문자열을 몇 개 만들어 보겠습니다:
```
for _ in range(25):
print(generate_string(default_reber_grammar), end=" ")
```
좋습니다. 이제 임베딩된 레버 문법에 맞는 문자열을 몇 개 만들어 보겠습니다:
```
for _ in range(25):
print(generate_string(embedded_reber_grammar), end=" ")
```
좋네요, 이제 이 문법을 따르지 않는 문자열을 생성할 함수를 만듭니다. 무작위하게 문자열을 만들 수 있지만 그렇게 하면 너무 문제가 쉬워지므로 대신 문법을 따르는 문자열을 만든 후 하나의 문자만 바꾸어 놓도록 하겠습니다:
```
def generate_corrupted_string(grammar, chars="BEPSTVX"):
    """Return a string that violates *grammar*: a valid string with exactly one
    character mutated to a different symbol from *chars*."""
    valid = generate_string(grammar)
    pos = np.random.randint(len(valid))
    original_char = valid[pos]
    wrong_char = choice(list(set(chars) - set(original_char)))
    return valid[:pos] + wrong_char + valid[pos + 1:]
```
잘못된 문자열 몇 개를 만들어 보죠:
```
for _ in range(25):
print(generate_corrupted_string(embedded_reber_grammar), end=" ")
```
문자열을 바로 RNN에 주입할 수는 없습니다. 먼저 벡터의 연속으로 바꾸어야 합니다. 각 벡터는 원-핫 인코딩을 사용하여 하나의 문자를 나타냅니다. 예를 들어, 벡터 `[1, 0, 0, 0, 0, 0, 0]`는 문자 "B"를 나타내고 벡터 `[0, 1, 0, 0, 0, 0, 0]`는 문자 "E"를 나타내는 식입니다. 이런 원-핫 벡터의 연속으로 문자열을 바꾸는 함수를 작성해 보겠습니다. 문자열이 `n_steps`보다 짧으면 0 벡터로 패딩됩니다(나중에, 텐서플로에게 각 문자열의 실제 길이를 `sequence_length` 매개변수로 전달할 것입니다).
```
def string_to_one_hot_vectors(string, n_steps, chars="BEPSTVX"):
    """Encode *string* as an (n_steps, len(chars)) one-hot array.

    Positions past the end of the string stay all-zero (padding)."""
    lookup = {c: i for i, c in enumerate(chars)}
    vectors = np.zeros((n_steps, len(chars)), dtype=np.int32)
    for pos, c in enumerate(string):
        vectors[pos, lookup[c]] = 1.
    return vectors
string_to_one_hot_vectors("BTBTXSETE", 12)
```
이제 50%는 올바른 문자열 50%는 잘못된 문자열로 이루어진 데이터셋을 만듭니다:
```
def generate_dataset(size):
    """Build a shuffled dataset: half valid embedded-Reber strings, half
    corrupted ones, as one-hot sequences plus lengths and binary labels."""
    valid = [generate_string(embedded_reber_grammar)
             for _ in range(size // 2)]
    corrupted = [generate_corrupted_string(embedded_reber_grammar)
                 for _ in range(size - size // 2)]
    strings = valid + corrupted
    # Pad every sequence to the longest string in this dataset.
    n_steps = max(len(s) for s in strings)
    X = np.array([string_to_one_hot_vectors(s, n_steps)
                  for s in strings])
    seq_length = np.array([len(s) for s in strings])
    y = np.array([[1] for _ in range(len(valid))] +
                 [[0] for _ in range(len(corrupted))])
    shuffle_idx = np.random.permutation(size)
    return X[shuffle_idx], seq_length[shuffle_idx], y[shuffle_idx]
X_train, l_train, y_train = generate_dataset(10000)
```
첫 번째 훈련 샘플을 확인해 보겠습니다:
```
X_train[0]
```
데이터셋에서 가장 긴 문자열 때문에 패딩된 0 벡터가 많습니다. 문자열 길이가 얼마나 될까요?
```
l_train[0]
```
타깃 클래스는?
```
y_train[0]
```
아주 좋습니다! 올바른 문자열을 구분할 RNN을 만들 준비가 되었습니다. 앞서 MNIST 이미지를 분류하기 위해 만든 것과 매우 비슷한 시퀀스 분류기를 만듭니다. 차이점은 다음 두 가지입니다:
* 첫째, 입력 문자열이 가변 길이이므로 `dynamic_rnn()` 함수를 호출할 때 `sequence_length`를 지정해야 합니다.
* 둘째, 이진 분류기이므로 출력 뉴런은 하나만 필요합니다. 이 뉴런은 각 문자열에 대해 올바른 문자열일 추정 로그 확률을 출력할 것입니다. 다중 클래스 분류에서는 `sparse_softmax_cross_entropy_with_logits()`를 사용했지만 이진 분류에서는 `sigmoid_cross_entropy_with_logits()`를 사용합니다.
```
reset_graph()

possible_chars = "BEPSTVX"
n_inputs = len(possible_chars)  # one-hot vector size
n_neurons = 30
n_outputs = 1  # binary classifier: a single logit
learning_rate = 0.02
momentum = 0.95

# Variable-length one-hot sequences plus their true lengths.
X = tf.placeholder(tf.float32, [None, None, n_inputs], name="X")
seq_length = tf.placeholder(tf.int32, [None], name="seq_length")
y = tf.placeholder(tf.float32, [None, 1], name="y")

gru_cell = tf.contrib.rnn.GRUCell(num_units=n_neurons)
outputs, states = tf.nn.dynamic_rnn(gru_cell, X, dtype=tf.float32,
                                    sequence_length=seq_length)
# Classify from the final GRU state.
logits = tf.layers.dense(states, n_outputs, name="logits")
y_pred = tf.cast(tf.greater(logits, 0.), tf.float32, name="y_pred")
y_proba = tf.nn.sigmoid(logits, name="y_proba")

xentropy = tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=logits)
loss = tf.reduce_mean(xentropy, name="loss")
optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate,
                                       momentum=momentum,
                                       use_nesterov=True)
training_op = optimizer.minimize(loss)
correct = tf.equal(y_pred, y, name="correct")
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32), name="accuracy")
init = tf.global_variables_initializer()
saver = tf.train.Saver()
```
훈련하는 동안 진척 상황을 확인할 수 있도록 검증 세트를 만듭니다:
```
X_val, l_val, y_val = generate_dataset(5000)
n_epochs = 50
batch_size = 50
with tf.Session() as sess:
init.run()
for epoch in range(n_epochs):
X_batches = np.array_split(X_train, len(X_train) // batch_size)
l_batches = np.array_split(l_train, len(l_train) // batch_size)
y_batches = np.array_split(y_train, len(y_train) // batch_size)
for X_batch, l_batch, y_batch in zip(X_batches, l_batches, y_batches):
loss_val, _ = sess.run(
[loss, training_op],
feed_dict={X: X_batch, seq_length: l_batch, y: y_batch})
acc_train = accuracy.eval(feed_dict={X: X_batch, seq_length: l_batch, y: y_batch})
acc_val = accuracy.eval(feed_dict={X: X_val, seq_length: l_val, y: y_val})
print("{:4d} 훈련 손실: {:.4f}, 정확도: {:.2f}% 검증 세트 정확도: {:.2f}%".format(
epoch, loss_val, 100 * acc_train, 100 * acc_val))
saver.save(sess, "./my_reber_classifier")
```
이제 두 개의 문자열에 이 RNN을 테스트해 보죠. 첫 번째는 잘못된 것이고 두 번째는 올바른 것입니다. 이 문자열은 마지막에서 두 번째 글자만 다릅니다. RNN이 이를 맞춘다면 두 번째 문자가 항상 끝에서 두 번째 문자와 같아야 한다는 패턴을 알게 됐다는 것을 의미합니다. 이렇게 하려면 꽤 긴 단기 기억(long short-term memory)이 필요합니다(그래서 GRU 셀을 사용했습니다).
```
test_strings = [
"BPBTSSSSSSSSSSSSXXTTTTTVPXTTVPXTTTTTTTVPXVPXVPXTTTVVETE",
"BPBTSSSSSSSSSSSSXXTTTTTVPXTTVPXTTTTTTTVPXVPXVPXTTTVVEPE"]
l_test = np.array([len(s) for s in test_strings])
max_length = l_test.max()
X_test = [string_to_one_hot_vectors(s, n_steps=max_length)
for s in test_strings]
with tf.Session() as sess:
saver.restore(sess, "./my_reber_classifier")
y_proba_val = y_proba.eval(feed_dict={X: X_test, seq_length: l_test})
print()
print("레버 문자열일 추정 확률:")
for index, string in enumerate(test_strings):
print("{}: {:.2f}%".format(string, 100 * y_proba_val[index][0]))
```
쨘! 잘 작동하네요. 이 RNN이 완벽한 신뢰도로 정확한 답을 냈습니다. :)
## 8. 과 9.
Coming soon...
| github_jupyter |
# EventVestor: Credit Facility
In this notebook, we'll take a look at EventVestor's *Credit Facility* dataset, available on the [Quantopian Store](https://www.quantopian.com/store). This dataset spans January 01, 2007 through the current day, and documents financial events covering new or extended credit facilities.
### Blaze
Before we dig into the data, we want to tell you about how you generally access Quantopian Store data sets. These datasets are available through an API service known as [Blaze](http://blaze.pydata.org). Blaze provides the Quantopian user with a convenient interface to access very large datasets.
Blaze provides an important function for accessing these datasets. Some of these sets are many millions of records. Bringing that data directly into Quantopian Research directly just is not viable. So Blaze allows us to provide a simple querying interface and shift the burden over to the server side.
It is common to use Blaze to reduce your dataset in size, convert it over to Pandas and then to use Pandas for further computation, manipulation and visualization.
Helpful links:
* [Query building for Blaze](http://blaze.pydata.org/en/latest/queries.html)
* [Pandas-to-Blaze dictionary](http://blaze.pydata.org/en/latest/rosetta-pandas.html)
* [SQL-to-Blaze dictionary](http://blaze.pydata.org/en/latest/rosetta-sql.html).
Once you've limited the size of your Blaze object, you can convert it to a Pandas DataFrames using:
> `from odo import odo`
> `odo(expr, pandas.DataFrame)`
### Free samples and limits
One other key caveat: we limit the number of results returned from any given expression to 10,000 to protect against runaway memory usage. To be clear, you have access to all the data server side. We are limiting the size of the responses back from Blaze.
There is a *free* version of this dataset as well as a paid one. The free one includes about three years of historical data, though not up to the current day.
With preamble in place, let's get started:
```
# import the dataset
from quantopian.interactive.data.eventvestor import credit_facility
# or if you want to import the free dataset, use:
# from quantopian.data.eventvestor import credit_facility_free
# import data operations
from odo import odo
# import other libraries we will use
import pandas as pd
# Let's use blaze to understand the data a bit using Blaze dshape()
credit_facility.dshape
# And how many rows are there?
# N.B. we're using a Blaze function to do this, not len()
credit_facility.count()
# Let's see what the data looks like. We'll grab the first three rows.
credit_facility[:3]
```
Let's go over the columns:
- **event_id**: the unique identifier for this event.
- **asof_date**: EventVestor's timestamp of event capture.
- **trade_date**: for event announcements made before trading ends, trade_date is the same as event_date. For announcements issued after market close, trade_date is next market open day.
- **symbol**: stock ticker symbol of the affected company.
- **event_type**: this should always be *Credit Facility/Credit facility*.
- **event_headline**: a brief description of the event
- **credit_amount**: the amount of credit_units being availed
- **credit_units**: the units for credit_amount: currency or other value. Most commonly in millions of USD.
- **event_rating**: this is always 1. The meaning of this is uncertain.
- **timestamp**: this is our timestamp on when we registered the data.
- **sid**: the equity's unique identifier. Use this instead of the symbol.
We've done much of the data processing for you. Fields like `timestamp` and `sid` are standardized across all our Store Datasets, so the datasets are easy to combine. We have standardized the `sid` across all our equity databases.
We can select columns and rows with ease. Below, we'll fetch all events in which \$ 200M of credit was availed.
```
twohundreds = credit_facility[(credit_facility.credit_amount==200) & (credit_facility.credit_units=="$M")]
# When displaying a Blaze Data Object, the printout is automatically truncated to ten rows.
twohundreds.sort('timestamp')
```
Finally, suppose we want a DataFrame of that data, but we only want the symbol, timestamp, and event headline:
```
twohundred_df = odo(twohundreds, pd.DataFrame)
reduced = twohundred_df[['symbol','event_headline','timestamp']]
# When printed: pandas DataFrames display the head(30) and tail(30) rows, and truncate the middle.
reduced
```
| github_jupyter |
#### Package Import
```
import numpy as np
from numpy import math
from scipy.stats import norm
from scipy import stats
import matplotlib.pyplot as plt
import progressbar
```
#### Model Specification: OU Process
1. $dX_{t} = \theta_{1}(\theta_{2} - X_{t})dt + \sigma dW_{t}$, $Y_{t}|X_{t} \sim \mathcal{N}(X_{t}, \theta_{3}^2)$
2. $\mathbb{E}[X_{t}] = x_{0} e^{-\theta_1t} + \theta_{2} (1-e^{-\theta_{1}t})$, $Var[X_{t}] = \frac{\sigma^{2}}{2\theta_{1}}(1-e^{-2t\theta_1})$
3. $Y_{1},Y_{2},...$ mutually independent, $Y_{t} \sim_{i.i.d.} \mathcal{N}(\mathbb{E}[X_{t}], \theta_{3}^2 + Var[X_{t}])$, for $t \in \mathbb{N}_{0}$
```
# OU model globals: X_0, diffusion coefficient, and theta = (theta1, theta2, theta3).
initial_val = 1
sigma = 0.5
theta = np.array([1,0,np.sqrt(0.2)])

def diff_coef(x, dt, dw):
    """Euler diffusion increment sigma*sqrt(dt)*dw for the OU SDE."""
    return sigma*np.math.sqrt(dt)*dw

def drift_coef(x, dt):
    """Euler drift increment theta1*(theta2 - x)*dt for the OU SDE."""
    return theta[0]*(theta[1]-x)*dt

# Log-scaled unnormalized likelihood function p(y|x)
def likelihood_logscale(y, x):
    """Gaussian log-likelihood of observation y given state x, up to an
    additive constant; observation std is theta[2]."""
    d = (y-x)
    gn = -1/2*(d**2/(theta[2]**2))
    return gn
def likelihood_update(y, un, unormal_weight):
    """Accumulate log-weights: add the Gaussian log-likelihood of observation
    *y* given particles *un* (up to a constant) to the running unnormalized
    log-weight *unormal_weight* and return the result.

    Note: the original body computed an unused local ``gamma = math.sqrt(0.2)``;
    the observation std actually used is theta[2], so the dead line was removed.
    """
    d = (y - un)
    gn1 = -1/2*(d**2/(theta[2]**2)) + unormal_weight
    return gn1
def sig_mean(t,theta):
    """Mean of X_t: x0*exp(-theta1*t) + theta2*(1 - exp(-theta1*t))."""
    return initial_val*np.exp(-theta[0]*t) + theta[1]*(1-np.exp(-theta[0]*t))

## Used only when theta[0] != 0
def sig_var(t,theta):
    """Variance of X_t: sigma^2/(2*theta1) * (1 - exp(-2*theta1*t))."""
    return (sigma**2 / (2*theta[0])) * (1-np.exp(-2*theta[0]*t))
def gen_data(T):
    """Simulate observations Y_0..Y_T of the OU model at integer times,
    drawing each Y_t from N(E[X_t], Var[X_t] + theta3^2)."""
    obs = np.zeros(T + 1)
    for t in range(T + 1):
        scale = np.sqrt(sig_var(t, theta) + theta[2]**2)
        obs[t] = sig_mean(t, theta) + scale * np.random.randn(1)
    return obs
def Kalmanfilter(T,Y):
    """Exact Kalman filter for the discretely observed OU model.

    Runs T predict/update steps on observations Y[1..T] and returns the
    final posterior mean m[T] (used as the ground-truth filter value).
    """
    m = np.zeros((T+1))     # posterior means
    mhat = np.zeros((T+1))  # one-step-ahead predictive means
    c = np.zeros((T+1))     # posterior variances
    a = theta[0]
    s = sigma
    # observational noise variance is gam^2*I
    gam = theta[2]
    # dynamics noise variance is sig^2*I (exact OU transition over unit time)
    sig = np.sqrt(s**2/2/a*(1-np.exp(-2*a)))
    # dynamics determined by A
    A = np.exp(-a)
    # initial mean&covariance
    m[0] = initial_val
    c[0] = 0
    H = 1
    # solution & assimilate!
    for t in range(T):
        # predict
        mhat[t] = A*m[t] + theta[1]*(1-A)
        chat = A*c[t]*A + sig**2
        ########################
        # innovation
        d = Y[t+1] - H*mhat[t]
        # Kalman gain
        K = (chat*H) / (H*chat*H + gam**2)
        # Mean Update
        m[t+1] = mhat[t] + K*d
        # Covariance update
        c[t+1] = (1-K*H)*chat
    tv = m[T]
    return tv
def Kalmanfilter_path(T,Y):
    """Exact Kalman filter for the OU model, returning the full path of
    posterior means m[0..T] (same recursion as Kalmanfilter)."""
    m = np.zeros((T+1))     # posterior means
    mhat = np.zeros((T+1))  # one-step-ahead predictive means
    c = np.zeros((T+1))     # posterior variances
    a = theta[0]
    s = sigma
    # observational noise variance is gam^2*I
    gam = theta[2]
    # dynamics noise variance is sig^2*I (exact OU transition over unit time)
    sig = np.sqrt(s**2/2/a*(1-np.exp(-2*a)))
    # dynamics determined by A
    A = np.exp(-a)
    # initial mean&covariance
    m[0] = initial_val
    c[0] = 0
    H = 1
    # solution & assimilate!
    for t in range(T):
        # predict
        mhat[t] = A*m[t] + theta[1]*(1-A)
        chat = A*c[t]*A + sig**2
        ########################
        # innovation
        d = Y[t+1] - H*mhat[t]
        # Kalman gain
        K = (chat*H) / (H*chat*H + gam**2)
        # Mean Update
        m[t+1] = mhat[t] + K*d
        # Covariance update
        c[t+1] = (1-K*H)*chat
    return m
```
#### Main Function
```
# Resampling - input one-dimensional particle x
def resampling(weight, gn, x, N):
    """Adaptive multinomial resampling.

    When the effective sample size 1/sum(w^2) drops to N/2 or below, draw N
    particles from *x* by inverse-CDF sampling and reset the accumulated
    log-weights to zero; otherwise return the inputs unchanged.
    """
    ess = 1/((weight**2).sum())
    if ess > (N/2):
        # Healthy ESS: keep particles and accumulated weights as they are.
        return x, gn
    # Inverse-CDF (multinomial) sampling with uniform draws.
    dice = np.random.random_sample(N)
    cdf = np.cumsum(weight)
    survivors = x[np.digitize(dice, cdf)]
    # After resampling the accumulated log-weights are reset.
    return survivors, np.zeros(N)
# Coupled Wasserstein Resampling
def coupled_wasserstein(fine_weight, coarse_weight, gn, gc, fine_par, coarse_par, N):
    """Wasserstein-coupled resampling of the fine and coarse particle systems.

    Triggered when the fine ESS drops to N/2 or below: both systems are sorted
    and resampled with the SAME uniform draws (a common inverse-CDF), which
    couples the two empirical distributions; the accumulated log-weights are
    then reset. Otherwise everything is returned unchanged.
    """
    ess = 1/((fine_weight**2).sum())
    if ess > (N/2):
        return fine_par, gn, coarse_par, gc
    # Sort both systems in ascending particle order (weights follow).
    order_f = np.argsort(fine_par[:])
    order_c = np.argsort(coarse_par[:])
    sorted_f = fine_par[order_f]
    wf = fine_weight[order_f]
    sorted_c = coarse_par[order_c]
    wc = coarse_weight[order_c]
    # One set of uniforms drives both inverse CDFs.
    dice = np.random.random_sample(N)
    new_f = sorted_f[np.digitize(dice, np.cumsum(wf))]
    new_c = sorted_c[np.digitize(dice, np.cumsum(wc))]
    # Reset accumulated log-weights after resampling.
    return new_f, np.zeros(N), new_c, np.zeros(N)
# Maximally Coupled Resampling
def coupled_maximal(fine_weight, coarse_weight, gn, gc, fine_par, coarse_par, N):
    """Adaptive maximally coupled resampling of the two particle systems.

    When the fine ESS drops to N/2 or below, resample both systems via the
    maximal coupling (see maximal_resample) and reset the accumulated
    log-weights; otherwise return everything unchanged.
    """
    ess = 1/((fine_weight**2).sum())
    if ess > (N/2):
        return fine_par, gn, coarse_par, gc
    fine_hat, coarse_hat = maximal_resample(fine_weight, coarse_weight, fine_par, coarse_par, N)
    # Reset accumulated log-weights after resampling.
    return fine_hat, np.zeros(N), coarse_hat, np.zeros(N)
def maximal_resample(weight1, weight2, x1, x2, N):
    """Maximal-coupling resampling of two weighted particle systems.

    Draws N index pairs such that each marginal is a multinomial resample
    under its own (normalized) weights while the probability that the pair
    picks the same index is maximised (equal to the overlap of the two
    weight vectors).

    Returns the pair of resampled particle arrays (x1_hat, x2_hat).
    """
    x1_hat = np.zeros(N)
    x2_hat = np.zeros(N)
    # Overlap (coupled) component and residual (independent) components.
    unormal_min_weight = np.minimum(weight1, weight2)
    min_weight_sum = np.sum(unormal_min_weight)
    unormal_reduce_weight1 = weight1 - unormal_min_weight
    unormal_reduce_weight2 = weight2 - unormal_min_weight
    ## Each uniform draw decides whether the pair comes from the coupled
    ## component (dice <= overlap mass) or from the independent residuals.
    dice = np.random.random_sample(N)
    ## [0] takes out the numpy array which is suitable afterwards
    coupled = np.where(dice <= min_weight_sum)[0]
    independ = np.where(dice > min_weight_sum)[0]
    ncoupled = np.sum(dice <= min_weight_sum)
    nindepend = np.sum(dice > min_weight_sum)
    # Fixed: was `ncoupled >= 0` (always true), which normalized the overlap
    # weights even when min_weight_sum == 0 and produced NaN bins.
    if ncoupled > 0:
        min_weight = unormal_min_weight / min_weight_sum
        dice1 = np.random.random_sample(ncoupled)
        bins = np.cumsum(min_weight)
        x1_hat[coupled] = x1[np.digitize(dice1, bins)]
        x2_hat[coupled] = x2[np.digitize(dice1, bins)]
    ## nindepend>0 implies min_weight_sum<1, so both residual masses are
    ## positive and the normalizations below cannot divide by zero.
    if nindepend > 0:
        reduce_weight1 = unormal_reduce_weight1 / np.sum(unormal_reduce_weight1)
        reduce_weight2 = unormal_reduce_weight2 / np.sum(unormal_reduce_weight2)
        dice2 = np.random.random_sample(nindepend)
        bins1 = np.cumsum(reduce_weight1)
        bins2 = np.cumsum(reduce_weight2)
        x1_hat[independ] = x1[np.digitize(dice2, bins1)]
        x2_hat[independ] = x2[np.digitize(dice2, bins2)]
    return x1_hat, x2_hat
def Particle_filter(l, T, N, Y):
    """Bootstrap particle filter at discretization level l (Euler step 2^-l).

    Propagates N particles through T observation times with adaptive
    multinomial resampling and returns the self-normalized estimate of the
    filter mean at time T.

    BUG FIX: the original assigned the resampled particles to ``un_hat`` but
    then started the next iteration with ``un_hat = un``, so resampling never
    replaced the particles (only the log-weights were reset). Particles now
    continue from the resampled state, matching the coupled filters below.
    """
    hl = 2**(-l)
    un_hat = np.zeros(N) + initial_val  # (possibly resampled) current particles
    gn = np.zeros(N)                    # accumulated log-weights
    for t in range(T):
        # Continue from the resampled particles of the previous step.
        un = un_hat
        for dt in range(2**l):
            dw = np.random.randn(N)
            un = un + drift_coef(un, hl) + diff_coef(un, hl, dw)
        # Accumulate the log-likelihood of the new observation.
        gn = likelihood_logscale(Y[t+1], un) + gn
        what = np.exp(gn - np.max(gn))  # stabilized unnormalized weights
        wn = what / np.sum(what)
        # Adaptive multinomial resampling (fires when ESS <= N/2).
        un_hat, gn = resampling(wn, gn, un, N)
    return(np.sum(un*wn))
def Coupled_particle_filter_wasserstein(l,T,N,Y):
    """Coupled particle filter for levels l and l-1 with Wasserstein resampling.

    The fine chain takes two Euler steps of size 2^-l for every coarse step of
    size 2^-(l-1), driven by the same Brownian increments; returns the coupled
    estimate of the difference of filter means at time T.
    """
    hl = 2**(-l)
    ## Initial value
    un1 = np.zeros(N) + initial_val
    cn1 = np.zeros(N) + initial_val
    # Log-weights start at 1; the constant offset cancels after normalization.
    gn = np.ones(N)
    gc = np.ones(N)
    for t in range(T):
        un = un1
        cn = cn1
        for dt in range(2**(l-1)):
            dw = np.random.randn(2,N)
            for s in range(2):
                # two fine Euler steps...
                un = un + drift_coef(un, hl) + diff_coef(un, hl, dw[s,:])
            # ...matched by one coarse step; diff_coef scales by sqrt(hl), so
            # the summed increment dw[0]+dw[1] has the coarse-step variance.
            cn = cn + drift_coef(cn, hl*2) + diff_coef(cn, hl, (dw[0,:] + dw[1,:]))
        ## Accumulating Weight Function
        gn = likelihood_update(Y[t+1], un, gn)
        what = np.exp(gn-np.max(gn))
        wn = what/np.sum(what)
        gc = likelihood_update(Y[t+1], cn, gc)
        wchat = np.exp(gc-np.max(gc))
        wc = wchat/np.sum(wchat)
        ## Wasserstein resampling (common uniforms on sorted particles)
        un1, gn, cn1, gc = coupled_wasserstein(wn,wc,gn,gc,un,cn,N)
    return(np.sum(un*wn-cn*wc))
def Coupled_particle_filter_maximal(l,T,N,Y):
    """Coupled particle filter for levels l and l-1 with maximal-coupling
    resampling (otherwise identical to the Wasserstein variant above)."""
    hl = 2**(-l)
    ## Initial value
    un1 = np.zeros(N) + initial_val
    cn1 = np.zeros(N) + initial_val
    # Log-weights start at 1; the constant offset cancels after normalization.
    gn = np.ones(N)
    gc = np.ones(N)
    for t in range(T):
        un = un1
        cn = cn1
        for dt in range(2**(l-1)):
            dw = np.random.randn(2,N)
            for s in range(2):
                # two fine Euler steps...
                un = un + drift_coef(un, hl) + diff_coef(un, hl, dw[s,:])
            # ...matched by one coarse step driven by the summed increment.
            cn = cn + drift_coef(cn, hl*2) + diff_coef(cn, hl, (dw[0,:] + dw[1,:]))
        ## Accumulating Weight Function
        gn = likelihood_update(Y[t+1], un, gn)
        what = np.exp(gn-np.max(gn))
        wn = what/np.sum(what)
        gc = likelihood_update(Y[t+1], cn, gc)
        wchat = np.exp(gc-np.max(gc))
        wc = wchat/np.sum(wchat)
        ## Maximal-coupling resampling (not Wasserstein, despite the old comment)
        un1, gn, cn1, gc = coupled_maximal(wn,wc,gn,gc,un,cn,N)
    return(np.sum(un*wn-cn*wc))
def coef(x, y):
    """Ordinary least-squares fit y ~ b0 + b1*x; returns (intercept, slope)."""
    n = np.size(x)
    x_mean = np.mean(x)
    y_mean = np.mean(y)
    # Cross-deviation and deviation about x (computational formulas).
    cross_dev = np.sum(y*x) - n*y_mean*x_mean
    x_dev = np.sum(x*x) - n*x_mean*x_mean
    # Regression coefficients.
    slope = cross_dev / x_dev
    intercept = y_mean - slope*x_mean
    return(intercept, slope)
```
#### Based on one Model and one Dataset, we need to fit:
1. $\mathbb{E}[(\eta_{t}^{l,N}(\varphi) - \eta_{t}^{l}(\varphi))^2] = C_{2} \frac{1}{N}$
2. $\mathbb{E}[\big((\eta_{t}^{l}-\eta_{t}^{l-1})^{N}(\varphi) - (\eta_{t}^{l}-\eta_{t}^{l-1})(\varphi)\big)^2] = C_{3} \frac{\Delta_{l}^{\beta}}{N}$
```
# Function to tune values of C_2
def fit_c2(data_path):
    """Estimate C_2 in Var ~ C_2/N by log-log regression.

    Runs the level-0 particle filter rep_num times at doubling particle
    counts and regresses log10-variance on log10(N); returns 10**intercept.
    """
    rep_num = 100
    num_seq = np.zeros(6)
    var_pf = np.zeros(6)
    T = data_path.shape[0]-1
    for i in range(6):
        num_seq[i] = 100 * 2**i
        rep_val = np.zeros(rep_num)
        #pr = progressbar.ProgressBar(max_value=rep_num).start()
        for j in range(rep_num):
            rep_val[j] = Particle_filter(0,T,int(num_seq[i]),data_path)
            #pr.update(j+1)
        #pr.finish()
        print(i,'in 6 finished')
        var_pf[i] = np.var(rep_val)
    x = np.log10(num_seq)
    y = np.log10(var_pf)
    b=coef(x,y)
    print('slope is:',b[1])
    print('c2 value:',10**(b[0]))
    return 10**(b[0])
# Function to tune values of C_3, as well as values of beta
def fit_c3_beta(data_path):
    """Estimate C_3 and beta in Var ~ C_3*Delta_l^beta/N by log-log regression.

    Runs the maximally coupled particle filter at levels 1..6 with fixed
    N = 200 and regresses log10-variance on log10(Delta_l); returns
    (C_3, rounded beta).
    """
    rep_num = 100
    N = 200
    l_seq = np.zeros(6)
    delt_seq = np.zeros(6)
    var_cpf = np.zeros(6)
    T = data_path.shape[0]-1
    for i in range(6):
        l_seq[i] = i+1
        delt_seq[i] = 2**(-(i+1))
        rep_val = np.zeros(rep_num)
        #pr = progressbar.ProgressBar(max_value=rep_num).start()
        for j in range(rep_num):
            rep_val[j] = Coupled_particle_filter_maximal(int(l_seq[i]),T,N,data_path)
            #pr.update(j+1)
        #pr.finish()
        print(i,'in 6 finished')
        var_cpf[i] = np.var(rep_val)
    x = np.log10(delt_seq)
    y = np.log10(var_cpf)
    b=coef(x,y)
    beta = b[1]
    print('beta decimal value is:',b[1])
    print('c3 value:',10**(b[0])*N)
    return 10**(b[0])*N, round(b[1])
```
#### Parallel Particle Filter: Untuned
1. Choice of truncated distribution $\mathbb{P}_{P}(p) = 2^{-p}$ for $p \in \{0,1,...,P_{max}\}$, $\mathbb{P}_{L}(l) = 2^{-\beta l}$ for $l \in \{1,2,...,L_{max}\}$, $L_{max} = P_{max}$.
2. $N_{p} = 2^{p}N_{0}$, $N_{0} = C P_{max}^{2}2^{2P_{max}}$ $\Delta_{l}=2^{-l\beta}$
3. For MSE target of $\mathcal{O}(\epsilon^{2})$, we need cost of $\mathcal{O}(\epsilon^{-2} (\log(\epsilon))^{3})$ when $\beta=2$, $\mathcal{O}(\epsilon^{-2} (\log(\epsilon))^{4})$ when $\beta=1$ and $\mathcal{O}(\epsilon^{-2.5} (\log(\epsilon))^{3})$ when $\beta=\frac{1}{2}$.
#### Parallel Particle Filter:
1. if $l=0$, $N_{p}=C C_{2} P_{max}^{2}2^{2P_{max}}$
2. if $l>0$, $N_{p}=C C_{3} P_{max}^{2}2^{2P_{max}}$
3. The constant $C$ is tuned so that the MSE of the PPF estimator is of the same order (roughly twice) as its variance
```
def num_coupled_par(p, p_max, const):
    """Particle count N_p = C*C_3*p_max^2*2^(2*p_max)*2^p for coupled levels (l>0)."""
    return int(2**(p+2*p_max) * (p_max**2) * const * c3)

def num_par(p, p_max, const):
    """Particle count N_p = C*C_2*p_max^2*2^(2*p_max)*2^p for the base level (l=0)."""
    return int(2**(p+2*p_max) * (p_max**2) * const * c2)

def prob_l_func(max_val):
    """Normalized truncated distribution P_L(l) proportional to 2^(-beta*l),
    for l = 0..max_val-1 (beta is a module-level constant)."""
    prob = np.zeros(max_val)
    for l in range(max_val):
        prob[l] = 2**(-l*beta)
    prob = prob / np.sum(prob)
    return prob

def prob_p_func(max_val):
    """Normalized truncated distribution P_P(p) proportional to 2^(-p),
    for p = 0..max_val-1."""
    prob = np.zeros(max_val)
    for p in range(max_val):
        prob[p] = 2**(-p)
    prob = prob / np.sum(prob)
    return prob
def Xi_zero(T,p_prob,p_max,const,Y):
    """Single-term estimator at level l = 0: importance-weighted difference of
    level-0 particle filters run with N_p and N_{p-1} particles."""
    # sample the variable P
    p = int(np.random.choice(p_max, 1, p=p_prob)[0])
    #print('p_val is',p)
    # construct the estimator (difference of estimates at consecutive particle
    # counts, importance-weighted by the probability of drawing p)
    Xi_zero = (Particle_filter(0,T,num_par(p, p_max, const),Y) - Particle_filter(0,T,num_par(p-1, p_max, const),Y)) / p_prob[p]
    return Xi_zero

def Xi_nonzero(l,T,p_prob,p_max,const,Y):
    """Single-term estimator at level l > 0: importance-weighted difference of
    maximally coupled particle filters with N_p and N_{p-1} particles."""
    # sample the variable P
    p = int(np.random.choice(p_max, 1, p=p_prob)[0])
    #print('p_val is',p)
    # construct the estimator
    Xi = (Coupled_particle_filter_maximal(l,T,num_coupled_par(p,p_max,const),Y) - Coupled_particle_filter_maximal(l,T,num_coupled_par(p-1,p_max,const),Y)) / p_prob[p]
    return Xi

def Xi(T,l_prob,l_max,p_prob,p_max,const,Y):
    """Unbiased single-term estimator: draw l from P_L, build the level-l
    increment, and importance-weight it by P_L(l)."""
    l = int(np.random.choice(l_max, 1, p=l_prob)[0])
    #print('value of l is',l)
    if l==0:
        Xi = Xi_zero(T,p_prob,p_max,const,Y)
    if l!=0:
        Xi = Xi_nonzero(l,T,p_prob,p_max,const,Y)
    est = Xi / l_prob[l]
    return est
def parallel_particle_filter(M,T,max_val,const,Y):
    """Parallel (debiased) particle filter: average of M i.i.d. single-term
    estimators Xi, each of which can be computed independently."""
    l_max = max_val
    p_max = max_val
    l_prob = prob_l_func(l_max)
    p_prob = prob_p_func(p_max)
    est_summand = np.zeros(M)
    for m in range(M):
        est_summand[m] = Xi(T,l_prob,l_max,p_prob,p_max,const,Y)
    return (np.mean(est_summand))

def parallel_particle_filter_record(M,T,max_val,const,Y):
    """As parallel_particle_filter, but returns all M single-term draws
    (for variance/bias diagnostics) and displays a progress bar."""
    l_max = max_val
    p_max = max_val
    l_prob = prob_l_func(l_max)
    p_prob = prob_p_func(p_max)
    est_summand = np.zeros(M)
    pr = progressbar.ProgressBar(max_value=M).start()
    for m in range(M):
        est_summand[m] = Xi(T,l_prob,l_max,p_prob,p_max,const,Y)
        pr.update(m+1)
    pr.finish()
    return est_summand
def Xi_zero_with_p(T,p_prob,p_max,const,Y):
    """Same as Xi_zero, but also returns the sampled p (for cost accounting)."""
    # sample the variable P
    p = int(np.random.choice(p_max, 1, p=p_prob)[0])
    #print('p_val is',p)
    # construct the estimator
    Xi_zero = (Particle_filter(0,T,num_par(p, p_max, const),Y) - Particle_filter(0,T,num_par(p-1, p_max, const),Y)) / p_prob[p]
    return Xi_zero, p

def Xi_nonzero_with_p(l,T,p_prob,p_max,const,Y):
    """Same as Xi_nonzero, but also returns the sampled p."""
    # sample the variable P
    p = int(np.random.choice(p_max, 1, p=p_prob)[0])
    #print('p_val is',p)
    # construct the estimator
    Xi = (Coupled_particle_filter_maximal(l,T,num_coupled_par(p,p_max,const),Y) - Coupled_particle_filter_maximal(l,T,num_coupled_par(p-1,p_max,const),Y)) / p_prob[p]
    return Xi, p

def Xi_with_pl(T,l_prob,l_max,p_prob,p_max,const,Y):
    """Draw l, build the single-term estimator, and return (estimate, l, p)."""
    l = int(np.random.choice(l_max, 1, p=l_prob)[0])
    #print('value of l is',l)
    if l==0:
        Xi, p_val = Xi_zero_with_p(T,p_prob,p_max,const,Y)
    if l!=0:
        Xi, p_val = Xi_nonzero_with_p(l,T,p_prob,p_max,const,Y)
    est = Xi / l_prob[l]
    return est, l, p_val

def cost_proxy_ppf(p_collect,l_collect, max_val, const):
    """Proxy cost of M recorded PPF draws: particle counts, times the number
    of Euler steps 2^l for coupled levels. Expects p_collect to hold the
    sampled p's and l_collect the sampled l's."""
    M = p_collect.shape[0]
    cost_proxy_val = 0
    for i in range(M):
        if l_collect[i] == 0:
            cost_proxy_val += num_par(p_collect[i], max_val, const)
        if l_collect[i] != 0:
            cost_proxy_val += num_coupled_par(p_collect[i], max_val, const) * 2**(l_collect[i])
    return cost_proxy_val
def parallel_particle_filter_record_with_cost(M,T,max_val,const,Y):
    """Run M single-term PPF draws, returning all estimates plus a proxy cost.

    Returns:
        est_summand: array of the M single-term estimates.
        cost_proxy_val: T times the summed particle-count/Euler-step proxy.

    BUG FIX: Xi_with_pl returns (estimate, l, p); the original unpacked it as
    (estimate, p, l), swapping the two indices, so cost_proxy_ppf computed the
    proxy cost with l and p exchanged.
    """
    l_max = max_val
    p_max = max_val
    l_prob = prob_l_func(l_max)
    p_prob = prob_p_func(p_max)
    est_summand = np.zeros(M)
    p_collect = np.zeros(M)
    l_collect = np.zeros(M)
    pr = progressbar.ProgressBar(max_value=M).start()
    for m in range(M):
        # Unpack in the order Xi_with_pl actually returns: (est, l, p).
        est_summand[m], l_collect[m], p_collect[m] = Xi_with_pl(T,l_prob,l_max,p_prob,p_max,const,Y)
        pr.update(m+1)
    pr.finish()
    cost_proxy_val = T * cost_proxy_ppf(p_collect,l_collect, max_val, const)
    return est_summand, cost_proxy_val
```
#### Multilevel Particle Filter: Untuned
1. When $\beta=2$, $N_{l} = 2^{2L-1.5l}$, to target a MSE of $\epsilon^{2}$, cost required is $\mathcal{O}(\epsilon^{-2})$
2. When $\beta=1$, $N_{l} = 2^{2L-l}L$, to target a MSE of $\epsilon^{2}$, cost required is $\mathcal{O}(\epsilon^{-2}(\log(\epsilon))^{2})$
3. When $\beta=\frac{1}{2}$, $N_{l} = 2^{2.25L - 0.75l}$, to target a MSE of $\epsilon^{2}$, cost required is $\mathcal{O}(\epsilon^{-2.5})$
#### Multilevel Particle Filter:
1. When $\beta=2$, $N_{0} = C_{1}C_{2}2^{2L}$, $N_{l}=C_{1}C_{3}2^{2L-1.5l}$
2. When $\beta=1$, $N_{0}=C_{1}C_{2}2^{2L}L$, $N_{l}=C_{1}C_{3}2^{2L-l}L$
3. When $\beta=\frac{1}{2}$, $N_{0}=C_{1}C_{2}2^{2.25L}$, $N_{l}=C_{1}C_{3}2^{2.25L-0.75l}$
4. The constant $C_{1}$ is choosen such that the MSE of MLPF estimator $\eta_{t}^{L,N_{0:L}}$ is of same order (roughly twice) as its variance.
```
# For OU process, beta=2
def num_ml_coupled(l,lmax,const):
    """MLPF particle count N_l = C1*C3*2^(2L-1.5l) for coupled levels."""
    return 2**(2*lmax-1.5*l) * const * c3

def num_ml_single(l,lmax,const):
    """MLPF particle count N_0 = C1*C2*2^(2L-1.5l) for the base level."""
    return 2**(2*lmax-1.5*l) * const * c2

def mlpf(T,max_val,const,Y):
    """Multilevel particle filter estimate: a level-0 particle filter plus
    coupled correction terms at levels 1..L-1."""
    L = max_val
    level_est = np.zeros(L)
    level_est[0] = Particle_filter(0,T,int(num_ml_single(0,L,const)),Y)
    for l in range(1,L):
        level_est[l] = Coupled_particle_filter_maximal(l,T,int(num_ml_coupled(l,L,const)),Y)
    return np.sum(level_est)

def proxy_cost_mlpf(T,max_val,const):
    """Proxy cost (particles x Euler steps x T) of one MLPF estimate.

    NOTE(review): the loop starts at l = 0 while mlpf runs coupled filters
    from l = 1 — possibly an intentional upper bound, but verify against mlpf.
    """
    cost_proxy_val = 0
    cost_proxy_val += T*num_ml_single(0,max_val,const)
    for l in range(max_val):
        cost_proxy_val += T*num_ml_coupled(l,max_val,const) * 2**(l)
    return cost_proxy_val
```
#### Data set for the specific OU model used in the HPC implementation:
```
T = 100
data_path = np.load('ou_model_data_path.npy')
c2, c3, beta = np.load('ou_fit_values.npy')
```
#### Experiment Example: Comparing MLPF with PPF on a single processor
1. For a given $L_{max}$ values, for instance $L_{max}=2$, we first tune the constant $C$ for the PPF (parallel particle filter). We denote the PPF estimator as $\frac{1}{M}\sum_{i=1}^{M}\Xi^{i}$, on single processor, we assume $M=1$, we check the value of $Var(\Xi)=\mathcal{O}(C^{-1})$ for any initial guess on $C$ value, and then obtain the true $C$ by ensuring the variance of the PPF estimator is roughly equal to its squared bias. In this case, we should set $C=1000000$.
2. Computing time of the PPF estimator: this can be extremely costly to run, so a proxy cost, represented by the number of Euler discretizations, is used instead.
3. We include also cell to count the actual computing time for PPF, as for the HPC implementation, we will compare instead the actual computation time.
```
# Rep_num here is different from M, we record all the xi values and take variance
const = 1000
true_val = Kalmanfilter(T,data_path)
mse_seq = np.zeros(6)
var_seq = np.zeros(6)
square_bias_seq = np.zeros(6)
l_seq = np.arange(2,3)
rep_num = 1000
for i,lmax in enumerate(l_seq):
est_val = parallel_particle_filter_record(rep_num, T, lmax, const, data_path)
mse_seq[i] = np.mean((est_val-true_val)**2)
var_seq[i] = np.var(est_val)
square_bias_seq[i] = mse_seq[i] - var_seq[i]
print('At level',lmax,'Mse val:',mse_seq[i], 'Var val:', var_seq[i], 'Square Bias val:', square_bias_seq[i])
# Actual Time for a single replication
lmax = 1
const = 1000000
%timeit parallel_particle_filter(1, T, lmax, const, data_path)
# Proxy Time Cost for a single replication
lmax = 1
const = 1000000
cost_proxy_val = parallel_particle_filter_record_with_cost(1, T, lmax, const, data_path)[1]
print('Estimated Cost for PPF estimator:', cost_proxy_val)
```
4. In order to achieve similar MSE levels, we test and see that $L=6$ is required for the MLPF estimator. To keep its variance roughly the same as its squared bias, we tune $C_{1}$ in a similar way. We can conclude that $C_{1}=600$ here.
5. Again the cost is evaluated through a proxy, the number of Euler discretizations involved to construct the MLPF estimator.
6. We include also cell to count the actual computing time for MLPF, as for the HPC implementation, we will compare instead the actual computation time.
```
# Same bias/variance study, this time for the multilevel particle filter (MLPF).
const = 10
true_val = Kalmanfilter(T,data_path)
mse_seq = np.zeros(6)
var_seq = np.zeros(6)
square_bias_seq = np.zeros(6)
l_seq = np.arange(6,7)   # only the target level L=6
rep_num = 1000
for i,lmax in enumerate(l_seq):
    # independent replications of the mlpf estimator
    est_val = np.zeros(rep_num)
    pr = progressbar.ProgressBar(max_value=rep_num).start()
    for j in range(rep_num):
        est_val[j] = mlpf(T,lmax,const,data_path)
        pr.update(j+1)
    pr.finish()
    mse_seq[i] = np.mean((est_val-true_val)**2)
    var_seq[i] = np.var(est_val)
    # MSE = variance + squared bias
    square_bias_seq[i] = mse_seq[i] - var_seq[i]
    print('At level',lmax,'Mse val:',mse_seq[i], 'Var val:', var_seq[i], 'Square Bias val:', square_bias_seq[i])
# Actual Computing Time
const = 600
lmax = 6
%timeit mlpf(T,lmax,const,data_path)
# Proxy Computation Time (number of Euler discretization steps involved)
const = 100
lmax = 6
proxy_cost_mlpf(T,lmax,const)
```
| github_jupyter |
```
# Colab setup: install PyDrive and authenticate against Google Drive.
!pip install -U -q PyDrive
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from google.colab import auth
from oauth2client.client import GoogleCredentials
# OAuth handshake via the Colab helper; credentials come from the signed-in user.
auth.authenticate_user()
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
drive = GoogleDrive(gauth)
# List every non-trashed file inside the shared Drive folder (the id is the folder id).
file_list = drive.ListFile({'q': "'1DOnBtDpU_q9xr6b20Vpmfd0_IIvNkrr8' in parents and trashed=false"}).GetList()
for file1 in file_list:
    print('title: %s\t\t\t id: %s' % (file1['title'], file1['id']))
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from collections import Counter
from sklearn.utils import shuffle
# CLEANED DATASET (stopwords removed etc.) -- kept for reference, currently unused:
#nace_download = drive.CreateFile({'id':file_list[2]['id']})
#nace_download.GetContentFile(file_list[2]['title'])
#inp = pd.read_csv('Cleaned_besk.csv', dtype=object, encoding='iso-8859-1', names=['besk'], skiprows=[0])
#nace=inp["besk"].str.split(";", n=1, expand=True)
#nace=nace[nace[0].notnull()]
#nace=shuffle(nace)
#from sklearn.model_selection import train_test_split
#besk=nace[0].values
#sn07=nace[1].values
###############################
# RAW (uncleaned) DATASET: free-text activity descriptions with SN07 industry codes.
nace_download = drive.CreateFile({'id':file_list[4]['id']})
nace_download.GetContentFile(file_list[4]['title'])
nace = pd.read_csv('BESK_ALLE.csv', sep=';', encoding='iso-8859-1')
# Drop rows without a description, then shuffle to break any ordering in the file.
nace=nace[nace["besk"].notnull()]
nace=shuffle(nace)
"""Split into training and test set"""
from sklearn.model_selection import train_test_split
besk=nace["besk"].values
sn07=nace["SN07_1"].values
###############################
# Encode the SN07 codes as integer labels, then one-hot encode (841 classes).
from sklearn import preprocessing
from sklearn import utils
lab_enc = preprocessing.LabelEncoder()
y_encode = lab_enc.fit_transform(sn07)
import keras
y = keras.utils.to_categorical(y_encode, 841)
# Tokenize the free-text descriptions into integer word-index sequences.
from keras.preprocessing.text import Tokenizer
t = Tokenizer()
t.fit_on_texts(besk)
X=t.texts_to_sequences(besk)
vocab_size=len(t.word_index)+1
# Pad every sequence to a fixed length; 162 presumably matches the longest
# description (see the commented-out length checks below) -- confirm.
from keras.preprocessing.sequence import pad_sequences
maxlen=162
X_pad=pad_sequences(X, padding='post', maxlen=maxlen)
X_train, X_test, y_train, y_test = train_test_split(X_pad, y, test_size=0.5, random_state=1000)
#normalizer = preprocessing.Normalizer().fit(X_train)
#X_train_norm=normalizer.transform(X_train)
#X_test_norm=normalizer.transform(X_test)
#x_train= tf.keras.utils.normalize(X_train, axis=1)
#x_test= tf.keras.utils.normalize(X_test, axis=1)
#longest_string_sn07=max(sn07, key=len)
#len(longest_string_sn07)
#longest_string_besk=max(besk, key=len)
#len(longest_string_besk)
#max(y_encode)
from keras.models import Sequential
from keras import layers
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten, SeparableConv1D, MaxPooling1D
from keras.optimizers import SGD, Adam
from tensorflow.keras.utils import normalize
import tensorflow as tf
import keras
import time
from tensorflow.keras.callbacks import TensorBoard
from keras.callbacks import LearningRateScheduler
import math
# learning rate schedule (kept for reference; the grid search below uses
# Adam's built-in linear `decay` instead of this step schedule)
#def step_decay(epoch):
# initial_lrate = 0.001
# drop = 0.1
# epochs_drop = 2
# lrate = initial_lrate * math.pow(drop, math.floor((1+epoch)/epochs_drop))
# return lrate
#input_dim = X_train.shape[1:]
# Hyperparameter grid explored by the architecture search below.
embedding_dim=50
dense_layers=[0,1,2]
conv_layers=[1,2,3]
layer_sizes=[64,128,256]
# Grid search over network depth and width; every run gets its own
# TensorBoard log directory so the 27 configurations can be compared.
for dense_layer in dense_layers:
    for layer_size in layer_sizes:
        for conv_layer in conv_layers:
            NAME="{}-dense-{}-conv-{}-layer-{}".format(dense_layer, conv_layer, layer_size, int(time.time()))
            tensorboard=TensorBoard(log_dir='logs/6/{}'.format(NAME))
            print(NAME)
            model=Sequential()
            model.add(layers.Embedding(input_dim=vocab_size,
                                       output_dim=embedding_dim,
                                       input_length=maxlen))
            # One conv block is always present; `conv_layer` controls how many more follow.
            model.add(layers.SeparableConv1D(layer_size ,5, activation='relu'))
            model.add(layers.MaxPooling1D(pool_size=2))
            for l in range(conv_layer-1):
                model.add(layers.SeparableConv1D(layer_size ,5, activation='relu'))
                model.add(layers.MaxPooling1D(pool_size=2))
            model.add(layers.Flatten())
            for l in range(dense_layer):
                model.add(layers.Dense(layer_size, activation='relu'))
                model.add(layers.Dropout(0.2))
            # 841 output classes, one per SN07 industry code.
            model.add(layers.Dense(841, activation='softmax'))
            epochs = 10
            learning_rate = 0.001
            decay_rate = learning_rate / epochs
            # NOTE(review): `lr=`/`decay=` are legacy Keras optimizer arguments;
            # newer Keras expects `learning_rate=` and a schedule object.
            adam = Adam(lr=learning_rate, beta_1=0.9, beta_2=0.999, epsilon=None, decay=decay_rate, amsgrad=False)
            model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy'])
            #lrate = LearningRateScheduler(step_decay)
            callbacks_list = [tensorboard]
            model.fit(X_train, y_train,
                      epochs=10,
                      validation_data=(X_test, y_test), callbacks=callbacks_list)
# open tensorboard with: tensorboard --logdir=ml/logs/
```
| github_jupyter |
# pyscal `Trajectory`
`Trajectory` is a `pyscal` module intended for working with molecular dynamics trajectories which contain more than one time slice. Currently, the module only supports [LAMMPS dump](https://lammps.sandia.gov/doc/dump.html) text file formats. It can be used to get a single or slices from a trajectory, trim the trajectory or even combine multiple trajectories. The example below illustrates various uses of the module.
Start with importing the module
```
from pyscal.trajectory import Trajectory
```
Read in a trajectory.
```
traj = Trajectory("traj.light")
```
When using the above statement, the trajectory is not yet read in to memory. Just the basic information is available now.
```
traj
```
You can also access some basic properties of the trajectories
```
traj.nblocks
```
`traj.nblocks` gives the number of time slices in the trajectory. `natoms` gives the number of atoms.
```
traj.natoms
```
`Trajectory` only works with fixed number of atoms.
Now, one can get a single slice or multiple slices just as is done with a python list. Getting the 2nd slice (counting starts from 0!).
```
sl = traj[2]
sl
```
This slice can now be converted to a number of usable formats:
Convert to a pyscal `System` object.
```
sys = sl.to_system()
sys
```
`System` objects contain all the information. The atomic positions, simulation box and so on are easily accessible.
```
sys[0].box
sys[0].atoms[0].pos
```
If information other than positions are required, the `customkeys` keyword can be used. For example, for velocity in the x direction,
```
sys = sl.to_system(customkeys=["vx"])
sys
sys[0].atoms[0].custom["vx"]
```
Convert to an ASE atoms object,
```
aseobj = sl.to_ase(species=["Au"])
aseobj
```
It can also be converted to a python dictionary with easily accessible quantities,
```
adict = sl.to_dict()
adict[0].keys()
```
The atom properties can also be accessed.
```
adict[0]["atoms"]["x"][0]
```
Instead of creating a System object, the slice can also be written to a file directly.
```
sl.to_file("test.dump")
```
Or it can also be saved in HDF format
```
sl.to_hdf("test.h5")
```
If one needs to call any of the above methods on the complete trajectory, you can use the `:` index. For example,
```
complete_dict = traj[:].to_dict()
```
Like normal python lists, multiple slices can also be accessed directly
```
sl1 = traj[0:4]
sl1
```
`to_system` and `to_file` methods can be used on this object too.
Multiple slices can be added together
```
sl2 = traj[5:7]
sl2
slnew = sl1+sl2
slnew
```
Once again, one could write the combined trajectory slice to a file, or create a System object out of it.
| github_jupyter |
# Adding and Removing Data
We will be working with the `data/earthquakes.csv` file again, so we need to handle our imports and read it in.
## About the Data
In this notebook, we will be working with Earthquake data from September 18, 2018 - October 13, 2018 (obtained from the US Geological Survey (USGS) using the [USGS API](https://earthquake.usgs.gov/fdsnws/event/1/))
## Setup
```
import numpy as np
import pandas as pd
# Load only the columns this notebook actually uses.
df = pd.read_csv(
    'data/earthquakes.csv',
    usecols=['time', 'title', 'place', 'magType', 'mag', 'alert', 'tsunami']
)
```
## Creating new data
### Adding new columns
New columns get added to the right of the dataframe and can be a single value:
```
df['ones'] = 1
df.head()
```
...or a Boolean mask:
```
df['mag_negative'] = df.mag < 0
df.head()
```
#### Adding the `parsed_place` column
We have an entity recognition problem on our hands with the `place` column. There are several entities that have multiple names in the data (e.g., CA and California, NV and Nevada).
```
df.place.str.extract(r', (.*$)')[0].sort_values().unique()
```
Replace parts of the `place` names to fit our needs:
```
# Normalize the free-text place names with a chain of regex replacements.
# NOTE(review): these patterns rely on str.replace treating the argument as a
# regex; newer pandas requires an explicit regex=True -- confirm the version.
df['parsed_place'] = df.place.str.replace(
    r'.* of ', '' # remove anything saying <something> of <something>
).str.replace(
    r'the ', '' # remove things starting with "the"
).str.replace(
    r'CA$', 'California' # fix California
).str.replace(
    r'NV$', 'Nevada' # fix Nevada
).str.replace(
    r'MX$', 'Mexico' # fix Mexico
).str.replace(
    r' region$', '' # chop off endings with "region"
).str.replace(
    r'northern ', '' # remove "northern"
).str.replace(
    r'Fiji Islands', 'Fiji' # line up the Fiji places
).str.replace(
    r'^.*, ', '' # remove anything else extraneous from the beginning
).str.strip() # remove any extra spaces
```
Now we can use a single name to get all earthquakes for that place:
```
df.parsed_place.sort_values().unique()
```
#### Using the `assign()` method to create columns
To create many columns at once or update existing columns, we can use `assign()`:
```
df.assign(
in_ca=df.parsed_place.str.endswith('California'),
in_alaska=df.parsed_place.str.endswith('Alaska')
).head()
```
#### Concatenation
Say we were working with two separate dataframes, one with earthquakes accompanied by tsunamis and the other with earthquakes without tsunamis. If we wanted to look at earthquakes as a whole, we would want to concatenate the dataframes into a single one:
```
tsunami = df[df.tsunami == 1]
no_tsunami = df[df.tsunami == 0]
tsunami.shape, no_tsunami.shape
```
Concatenating along the row axis (`axis=0`) is equivalent to appending to the bottom. By concatenating our earthquakes with tsunamis and those without tsunamis, we get the full earthquake data set back:
```
pd.concat([tsunami, no_tsunami]).shape
```
Note that the previous result is equivalent to running the `append()` method of the dataframe:
```
tsunami.append(no_tsunami).shape
```
We have been working with a subset of the columns from the CSV file, but now we want to get some of the columns we ignored when we read in the data. Since we have added new columns in this notebook, we won't want to read in the file again and perform those operations again. Instead, we will concatenate along the columns (`axis=1`) to add back what we are missing:
```
additional_columns = pd.read_csv(
'data/earthquakes.csv', usecols=['tz', 'felt', 'ids']
)
pd.concat(
[df.head(2), additional_columns.head(2)], axis=1
)
```
Notice what happens if the index doesn't align though:
```
additional_columns = pd.read_csv(
'data/earthquakes.csv', usecols=['tz', 'felt', 'ids', 'time'], index_col='time'
)
pd.concat(
[df.head(2), additional_columns.head(2)], axis=1
)
```
If the index doesn't align, we can align it before attempting the concatenation, which we will discuss in [chapter 3](https://github.com/stefmolin/Hands-On-Data-Analysis-with-Pandas/tree/master/ch_03):
Say we want to join the `tsunami` and `no_tsunami` dataframes, but the `no_tsunami` dataframe has an additional column. The `join` parameter specifies how to handle any overlap in column names (when appending to the bottom) or in row names (when concatenating to the left/right). By default, this is `outer`, so we keep everything; however, if we use `inner`, we will only keep what is in common:
```
pd.concat(
[tsunami.head(2), no_tsunami.head(2).assign(type='earthquake')], join='inner'
)
```
In addition, we use `ignore_index`, since the index doesn't mean anything for us here. This gives us sequential values instead of what we had in the previous result:
```
pd.concat(
[tsunami.head(2), no_tsunami.head(2).assign(type='earthquake')], join='inner', ignore_index=True
)
```
## Deleting Unwanted Data
Columns can be deleted using dictionary syntax with `del`:
```
del df['ones']
df.columns
```
If we don't know if the column exists, we can use a `try`/`except` block:
```
# Deleting a missing column raises KeyError; catching it makes the cleanup idempotent.
try:
    del df['ones']
except KeyError:
    # handle the error here
    print('not there anymore')
```
We can also use `pop()`. This will allow us to use the series we remove later. Note there will be an error if the key doesn't exist, so we can also use a `try`/`except` here:
```
mag_negative = df.pop('mag_negative')
df.columns
```
Notice we have a mask in `mag_negative` now:
```
mag_negative.value_counts()
```
Now we can use `mag_negative` to select:
```
df[mag_negative].head()
```
### Using the `drop()` method
We can drop rows by passing a list of indices to the `drop()` method. Notice in the following example that when asking for the first 2 rows with `head()` we get the 3rd and 4th rows because we dropped the original first 2 with `drop([0, 1])`:
```
df.drop([0, 1]).head(2)
```
The `drop()` method drops along the row axis by default. If we pass in `axis=1` for the column axis, we can delete columns:
```
# Keep only the listed columns by dropping every other column (axis inferred from `columns=`).
df.drop(
    columns=[col for col in df.columns \
             if col not in \
             ['alert', 'mag', 'title', 'time', 'tsunami']]
).head()
```
We also have the option of passing the column names to the `columns` parameter instead of using `axis=1`:
```
# Demonstrate that `columns=...` is equivalent to passing the same list with axis=1.
df.drop(
    columns=[col for col in df.columns \
             if col not in \
             ['alert', 'mag', 'title', 'time', 'tsunami']]
).equals(
    df.drop(
        [col for col in df.columns \
         if col not in ['alert', 'mag', 'title', 'time', 'tsunami']],
        axis=1
    )
)
```
By default, `drop()`, along with the majority of `DataFrame` methods, will return a new `DataFrame` object. If we just want to change the one we are working with, we can pass `inplace=True`. This should be used with care:
```
# Drop in place this time (mutates df), keeping parsed_place instead of title.
df.drop(
    columns=[col for col in df.columns \
             if col not in \
             ['alert', 'mag', 'parsed_place', 'time', 'tsunami']],
    inplace=True
)
df.head()
```
| github_jupyter |
# Triplet Loss for Implicit Feedback Neural Recommender Systems
The goal of this notebook is first to demonstrate how it is possible to build a bi-linear recommender system only using positive feedback data.
In a later section we show that it is possible to train deeper architectures following the same design principles.
This notebook is inspired by Maciej Kula's [Recommendations in Keras using triplet loss](
https://github.com/maciejkula/triplet_recommendations_keras). Contrary to Maciej we won't use the BPR loss but instead will introduce the more common margin-based comparator.
## Loading the movielens-100k dataset
For the sake of computation time, we will only use the smallest variant of the movielens reviews dataset. Beware that the architectural choices and hyperparameters that work well on such a toy dataset will not necessarily be representative of the behavior when run on a more realistic dataset such as [Movielens 10M](https://grouplens.org/datasets/movielens/10m/) or the [Yahoo Songs dataset with 700M rating](https://webscope.sandbox.yahoo.com/catalog.php?datatype=r).
```
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import os.path as op
from zipfile import ZipFile
try:
    from urllib.request import urlretrieve
except ImportError:  # Python 2 compat
    from urllib import urlretrieve
ML_100K_URL = "http://files.grouplens.org/datasets/movielens/ml-100k.zip"
ML_100K_FILENAME = ML_100K_URL.rsplit('/', 1)[1]
ML_100K_FOLDER = 'ml-100k'
# Download and extract the dataset only when it is not already cached locally.
if not op.exists(ML_100K_FILENAME):
    print('Downloading %s to %s...' % (ML_100K_URL, ML_100K_FILENAME))
    urlretrieve(ML_100K_URL, ML_100K_FILENAME)
if not op.exists(ML_100K_FOLDER):
    print('Extracting %s to %s...' % (ML_100K_FILENAME, ML_100K_FOLDER))
    ZipFile(ML_100K_FILENAME).extractall('.')
# ua.base / ua.test is a predefined train/test split shipped with ML-100K.
data_train = pd.read_csv(op.join(ML_100K_FOLDER, 'ua.base'), sep='\t',
                         names=["user_id", "item_id", "rating", "timestamp"])
data_test = pd.read_csv(op.join(ML_100K_FOLDER, 'ua.test'), sep='\t',
                        names=["user_id", "item_id", "rating", "timestamp"])
data_train.describe()
data_train.head()
# data_test.describe()
# Ids are 1-based, so embedding table sizes are max id + 1 for direct lookup.
max_user_id = max(data_train['user_id'].max(), data_test['user_id'].max())
max_item_id = max(data_train['item_id'].max(), data_test['item_id'].max())
n_users = max_user_id + 1
n_items = max_item_id + 1
print('n_users=%d, n_items=%d' % (n_users, n_items))
```
## Implicit feedback data
Consider ratings >= 4 as positive feedback and ignore the rest:
```
# Keep only ratings >= 4 as implicit positive feedback.
pos_data_train = data_train.query("rating >= 4")
pos_data_test = data_test.query("rating >= 4")
```
Because the median rating is around 3.5, this cut will remove approximately half of the ratings from the datasets:
```
pos_data_train['rating'].count()
pos_data_test['rating'].count()
```
## The Triplet Loss
The following section demonstrates how to build a low-rank quadratic interaction model between users and items. The similarity score between a user and an item is defined by the unnormalized dot product of their respective embeddings.
The matching scores can be used to rank items to recommend to a specific user.
Training of the model parameters is achieved by randomly sampling negative items not seen by a pre-selected anchor user. We want the model embedding matrices to be such that the similarity between the user vector and the negative vector is smaller than the similarity between the user vector and the positive item vector. Furthermore we use a margin to further move apart the negative from the anchor user.
Here is the architecture of such a triplet architecture. The triplet name comes from the fact that the loss to optimize is defined for triple `(anchor_user, positive_item, negative_item)`:
<img src="images/rec_archi_implicit_2.svg" style="width: 600px;" />
We call this model a triplet model with bi-linear interactions because the similarity between a user and an item is captured by a dot product of the first level embedding vectors. This is therefore not a deep architecture.
```
import tensorflow as tf
def identity_loss(y_true, y_pred):
    """Return the mean of y_pred while ignoring y_true.

    Keras requires a loss to consume both targets and predictions, so
    y_true is folded in with a zero coefficient to keep it in the graph.
    This lets us train a network whose output already *is* the loss.
    """
    anchored = y_pred + 0 * y_true
    return tf.reduce_mean(anchored)
def margin_comparator_loss(inputs, margin=1.):
    """Hinge loss for a (positive, negative) pair of precomputed similarities.

    With cosine similarities each score lies in (-1, 1), so their
    difference lies in (-2, 2) and a margin of 1. is a sensible default.
    With unnormalized similarities a larger margin may work better.
    """
    pos_sim, neg_sim = inputs
    hinge = neg_sim - pos_sim + margin
    return tf.maximum(hinge, 0)
```
Here is the actual code that builds the model(s) with shared weights. Note that here we use the cosine similarity instead of unnormalized dot products (both seem to yield comparable results).
```
from keras.models import Model
from keras.layers import Embedding, Flatten, Input, Dense, merge
from keras.regularizers import l2
from keras_fixes import dot_mode, cos_mode
def build_models(n_users, n_items, latent_dim=64, l2_reg=0):
    """Build a triplet model and its companion similarity model.

    The triplet model is used to train the weights of the companion
    similarity model. The triplet model takes 1 user, 1 positive item
    (relative to the selected user) and one negative item and is
    trained with comparator loss.

    The similarity model takes one user and one item as input and returns
    the compatibility score (aka the match score).
    """
    # Common architectural components for the two models:
    # - symbolic input placeholders
    user_input = Input((1,), name='user_input')
    positive_item_input = Input((1,), name='positive_item_input')
    negative_item_input = Input((1,), name='negative_item_input')
    # - embeddings
    # NOTE(review): `W_regularizer` and the functional `merge(...)` below are
    # legacy Keras 1.x API; this cell needs porting to run on Keras 2+.
    l2_reg = None if l2_reg == 0 else l2(l2_reg)
    user_layer = Embedding(n_users, latent_dim, input_length=1,
                           name='user_embedding', W_regularizer=l2_reg)
    # The following embedding parameters will be shared to encode both
    # the positive and negative items.
    item_layer = Embedding(n_items, latent_dim, input_length=1,
                           name="item_embedding", W_regularizer=l2_reg)
    user_embedding = Flatten()(user_layer(user_input))
    positive_item_embedding = Flatten()(item_layer(positive_item_input))
    negative_item_embedding = Flatten()(item_layer(negative_item_input))
    # - similarity computation between embeddings
    positive_similarity = merge([user_embedding, positive_item_embedding],
                                mode=cos_mode, output_shape=(1,),
                                name="positive_similarity")
    negative_similarity = merge([user_embedding, negative_item_embedding],
                                mode=cos_mode, output_shape=(1,),
                                name="negative_similarity")
    # The triplet network model, only used for training
    triplet_loss = merge([positive_similarity, negative_similarity],
                         mode=margin_comparator_loss, output_shape=(1,),
                         name='comparator_loss')
    triplet_model = Model(input=[user_input,
                                 positive_item_input,
                                 negative_item_input],
                          output=triplet_loss)
    # The match-score model, only used at inference to rank items for a given
    # user: the model weights are shared with the triplet_model therefore
    # we do not need to train it and therefore we do not need to plug a loss
    # and an optimizer.
    match_model = Model(input=[user_input, positive_item_input],
                        output=positive_similarity)
    return triplet_model, match_model

triplet_model, match_model = build_models(n_users, n_items, latent_dim=64,
                                          l2_reg=1e-6)
```
### Exercise:
How many trainable parameters does each model. Count the shared parameters only once per model.
```
# print(match_model.summary())
# print(triplet_model.summary())
# %load solutions/triplet_parameter_count.py
```
## Quality of Ranked Recommendations
Now that we have a randomly initialized model we can start computing random recommendations. To assess their quality we do the following for each user:
- compute matching scores for items (except the movies that the user has already seen in the training set),
- compare to the positive feedback actually collected on the test set using the ROC AUC ranking metric,
- average ROC AUC scores across users to get the average performance of the recommender model on the test set.
```
from sklearn.metrics import roc_auc_score
def average_roc_auc(match_model, data_train, data_test):
    """Compute the ROC AUC for each user and average over users."""
    highest_user = max(data_train['user_id'].max(), data_test['user_id'].max())
    highest_item = max(data_train['item_id'].max(), data_test['item_id'].max())
    candidate_items = np.arange(1, highest_item + 1)
    auc_per_user = []
    for uid in range(1, highest_user + 1):
        train_positives = data_train[data_train['user_id'] == uid]
        test_positives = data_test[data_test['user_id'] == uid]
        # Rank every item except those already seen in the training set.
        items_to_rank = np.setdiff1d(candidate_items,
                                     train_positives['item_id'].values)
        # Ground truth: 1 for items positively present in the test set, else 0.
        expected = np.in1d(items_to_rank, test_positives['item_id'].values)
        if np.sum(expected) >= 1:
            # At least one positive test value to rank for this user.
            user_column = np.full_like(items_to_rank, uid)
            predicted = match_model.predict([user_column, items_to_rank],
                                            batch_size=4096)
            auc_per_user.append(roc_auc_score(expected, predicted))
    return sum(auc_per_user) / len(auc_per_user)
```
By default the model should make predictions that rank the items in random order. The **ROC AUC score** is a ranking score that represents the **expected value of correctly ordering uniformly sampled pairs of recommendations**.
A random (untrained) model should yield 0.50 ROC AUC on average.
```
average_roc_auc(match_model, pos_data_train, pos_data_test)
```
## Training the Triplet Model
Let's now fit the parameters of the model by sampling triplets: for each user, select a movie in the positive feedback set of that user and randomly sample another movie to serve as negative item.
Note that this sampling scheme could be improved by removing items that are marked as positive in the data to remove some label noise. In practice this does not seem to be a problem though.
```
def sample_triplets(pos_data, max_item_id, random_seed=0):
    """Build (user, positive item, negative item) triplets.

    Negatives are drawn uniformly at random in [1, max_item_id]; a
    seeded RandomState makes the sampling reproducible.
    """
    rng = np.random.RandomState(random_seed)
    anchor_users = pos_data['user_id'].values
    positive_items = pos_data['item_id'].values
    negative_items = rng.randint(low=1, high=max_item_id + 1,
                                 size=len(anchor_users))
    return [anchor_users, positive_items, negative_items]
```
Let's train the triplet model:
```
# We plug the identity loss and a fake target variable (ignored by the
# model) so the stock Keras fit() API can train the triplet model.
triplet_model.compile(loss=identity_loss, optimizer="adam")
fake_y = np.ones_like(pos_data_train['user_id'])
n_epochs = 15
for i in range(n_epochs):
    # Sample new negatives to build different triplets at each epoch
    triplet_inputs = sample_triplets(pos_data_train, max_item_id,
                                     random_seed=i)
    # Fit the model incrementally by doing a single pass over the
    # sampled triplets.
    triplet_model.fit(triplet_inputs, fake_y, shuffle=True,
                      batch_size=64, nb_epoch=1)
    # Monitor the convergence of the model on held-out positives.
    test_auc = average_roc_auc(match_model, pos_data_train, pos_data_test)
    print("Epoch %d/%d: test ROC AUC: %0.4f"
          % (i + 1, n_epochs, test_auc))
```
## Training a Deep Matching Model on Implicit Feedback
Instead of using hard-coded cosine similarities to predict the match of a `(user_id, item_id)` pair, we can instead specify a deep neural network based parametrisation of the similarity. The parameters of that matching model are also trained with the margin comparator loss:
<img src="images/rec_archi_implicit_1.svg" style="width: 600px;" />
### Exercise to complete as a home assignment:
- Implement a `deep_match_model`, `deep_triplet_model` pair of models
for the architecture described in the schema. The last layer of
the embedded Multi Layer Perceptron outputs a single scalar that
encodes the similarity between a user and a candidate item.
- Evaluate the resulting model by computing the per-user average
ROC AUC score on the test feedback data.
- Check that the AUC ROC score is close to 0.50 for a randomly
initialized model.
- Check that you can reach at least 0.91 ROC AUC with this deep
model (you might need to adjust the hyperparameters).
Hints:
- it is possible to reuse the code to create embeddings from the previous model
definition;
- the concatenation between user and the positive item embedding can be
obtained with:
```py
positive_embeddings_pair = merge([user_embedding, positive_item_embedding],
mode='concat',
name="positive_embeddings_pair")
negative_embeddings_pair = merge([user_embedding, negative_item_embedding],
mode='concat',
name="negative_embeddings_pair")
```
- those embedding pairs should be fed to a shared MLP instance to compute the similarity scores.
```
from keras.models import Sequential
def make_interaction_mlp(input_dim, n_hidden=1, hidden_size=64,
                         dropout=0, l2_reg=None):
    # Exercise stub: build an MLP that maps a concatenated (user, item)
    # embedding pair of size `input_dim` to a single match score.
    mlp = Sequential()
    # TODO:
    return mlp
def build_models(n_users, n_items, user_dim=32, item_dim=64,
                 n_hidden=1, hidden_size=64, dropout=0, l2_reg=0):
    # Exercise stub: mirror the bi-linear triplet architecture above, but
    # replace the cosine similarity with the shared MLP built below.
    # TODO:
    # Inputs and the shared embeddings can be defined as previously.
    # Use a single instance of the MLP created by make_interaction_mlp()
    # and use it twice: once on the positive pair, once on the negative
    # pair
    interaction_layers = make_interaction_mlp(
        user_dim + item_dim, n_hidden=n_hidden, hidden_size=hidden_size,
        dropout=dropout, l2_reg=l2_reg)
    # Build the models: one for inference, one for triplet training
    deep_match_model = None
    deep_triplet_model = None
    return deep_match_model, deep_triplet_model
# %load solutions/deep_implicit_feedback_recsys.py
```
### Exercise:
Count the number of parameters in `deep_match_model` and `deep_triplet_model`. Which model has the largest number of parameters?
```
# print(deep_match_model.summary())
# print(deep_triplet_model.summary())
# %load solutions/deep_triplet_parameter_count.py
```
## Possible Extensions
You can implement any of the following ideas if you want to get a deeper understanding of recommender systems.
### Leverage User and Item metadata
As we did for the Explicit Feedback model, it's also possible to extend our models to take additional user and item metadata as side information when computing the match score.
### Better Ranking Metrics
In this notebook we evaluated the quality of the ranked recommendations using the ROC AUC metric. This score reflect the ability of the model to correctly rank any pair of items (sampled uniformly at random among all possible items).
In practice recommender systems will only display a few recommendations to the user (typically 1 to 10). It is typically more informative to use an evaluation metric that characterizes the quality of the top ranked items and attributes less or no importance to items that are not good recommendations for a specific user. Popular ranking metrics therefore include the **Precision at k** and the **Mean Average Precision**.
You can read up online about those metrics and try to implement them here.
### Hard Negatives Sampling
In this experiment we sampled negative items uniformly at random. However, after training the model for a while, it is possible that the vast majority of sampled negatives have a similarity already much lower than the positive pair and that the margin comparator loss sets the majority of the gradients to zero effectively wasting a lot of computation.
Given the current state of the recsys model we could sample harder negatives with a larger likelihood to train the model better closer to its decision boundary. This strategy is implemented in the WARP loss [1].
The main drawback of hard negative sampling is an increased risk of severe overfitting if a significant fraction of the labels are noisy.
### Factorization Machines
A very popular recommender systems model is called Factorization Machines [2][3]. They also use low-rank vector representations of the inputs, but they do not use a cosine similarity or a neural network to model user/item compatibility.
It is possible to adapt our previous code written with Keras to replace the cosine sims / MLP with the low rank FM quadratic interactions by reading through [this gentle introduction](http://tech.adroll.com/blog/data-science/2015/08/25/factorization-machines.html).
If you choose to do so, you can compare the quality of the predictions with those obtained by the [pywFM project](https://github.com/jfloff/pywFM) which provides a Python wrapper for the [official libFM C++ implementation](http://www.libfm.org/). Maciej Kula also maintains [lightfm](https://github.com/lyst/lightfm), which implements an efficient and well documented variant in Cython and Python.
## References:
[1] Wsabie: Scaling Up To Large Vocabulary Image Annotation
Jason Weston, Samy Bengio, Nicolas Usunier, 2011
https://research.google.com/pubs/pub37180.html
[2] Factorization Machines, Steffen Rendle, 2010
https://www.ismll.uni-hildesheim.de/pub/pdfs/Rendle2010FM.pdf
[3] Factorization Machines with libFM, Steffen Rendle, 2012
in ACM Trans. Intell. Syst. Technol., 3(3), May.
http://doi.acm.org/10.1145/2168752.2168771
| github_jupyter |
# Discrete Bayes Animations
```
from __future__ import division, print_function
import matplotlib.pyplot as plt
import sys
sys.path.insert(0,'..') # allow us to format the book
sys.path.insert(0,'../code')
import book_format
book_format.load_style(directory='..')
```
This notebook creates the animations for the Discrete Bayesian filters chapter. It is not really intended to be a readable part of the book, but of course you are free to look at the source code, and even modify it. However, if you are interested in running your own animations, I'll point you to the examples subdirectory of the book, which contains a number of python scripts that you can run and modify from an IDE or the command line. This module saves the animations to GIF files, which is quite slow and not very interactive.
```
from matplotlib import animation
import matplotlib.pyplot as plt
import numpy as np
from book_plots import bar_plot
%matplotlib inline
# the predict algorithm of the discrete bayesian filter
def predict(pos, move, p_correct, p_under, p_over):
    """Discrete-Bayes prediction step: shift the belief `pos` by `move`
    cells (circular world), spreading probability for movement noise.

    p_correct: probability the move landed exactly where intended
    p_over:    probability of overshooting by one cell
    p_under:   probability of undershooting by one cell
    Returns a new float array; `pos` is not modified.
    """
    size = len(pos)
    belief = np.zeros(size)
    for idx in range(size):
        exact = pos[(idx - move) % size] * p_correct
        overshoot = pos[(idx - move - 1) % size] * p_over
        undershoot = pos[(idx - move + 1) % size] * p_under
        belief[idx] = exact + overshoot + undershoot
    return belief
def normalize(p):
    """Scale the sequence `p` in place so its elements sum to 1.

    Returns None (pure in-place mutation).
    """
    total = sum(p)
    for idx, value in enumerate(p):
        p[idx] = value / total
# the update algorithm of the discrete bayesian filter
def update(pos, measure, p_hit, p_miss):
    """Discrete-Bayes measurement update against the global `hallway` map.

    Cells whose map value equals `measure` are scaled by p_hit, the rest
    by p_miss, and the result is renormalized to a distribution.
    NOTE(review): iterates len(hallway) — assumes len(pos) == len(hallway).
    """
    posterior = np.array(pos, dtype=float)
    for idx, cell in enumerate(hallway):
        scale = p_hit if cell == measure else p_miss
        posterior[idx] = pos[idx] * scale
    normalize(posterior)
    return posterior
import matplotlib
# make sure our matplotlibrc has been edited to use imagemagick
matplotlib.matplotlib_fname()
matplotlib.rcParams['animation.writer']
from gif_animate import animate
pos = [1.0,0,0,0,0,0,0,0,0,0]
def bar_animate(nframe):
    # Animation callback: draw the current belief, then advance it one
    # predict step (move=1, 80% exact, 10% overshoot, 10% undershoot).
    # Mutates the module-level `pos` so each frame builds on the last.
    global pos
    bar_plot(pos)
    plt.title('Step {}'.format(nframe + 1))
    pos = predict(pos, 1, .8, .1, .1)
# Warm up the belief by running ten predict-only frames, then render the
# full animation to a GIF (predict-only, so the belief just diffuses).
for i in range(10):
    bar_animate(i)
fig = plt.figure(figsize=(6.5, 2.5))
animate('02_no_info.gif', bar_animate, fig=fig, frames=75, interval=75);
```
<img src="02_no_info.gif">
```
pos = np.array([.1]*10)
hallway = np.array([1, 1, 0, 0, 0, 0, 0, 0, 1, 0])
def bar_animate(nframe):
    # Animation callback: alternate predict (even frames) and update
    # (odd frames) steps of the discrete Bayes filter, mutating the
    # module-level `pos` belief in place.
    global pos
    #if nframe == 0:
    #    return
    bar_plot(pos, ylim=(0,1.0))
    plt.title('Step {}'.format(nframe + 1))
    if nframe % 2 == 0:
        # Even frame: motion prediction (move one cell, 90% exact).
        pos = predict(pos, 1, .9, .05, .05)
    else:
        # Odd frame: read the simulated sensor at the robot's position
        # and fold the measurement into the belief.
        x = int((nframe/2) % len(hallway))
        z = hallway[x]
        pos = update(pos, z, .9, .2)
fig = plt.figure(figsize=(6.5, 2.5))
animate('02_simulate.gif', bar_animate, fig=fig, frames=40, interval=85);
```
<img src="02_simulate.gif">
| github_jupyter |
# nn explain
nn has two main parts : data and model components
containers are responsible for model components and parameters/buffers are responsible for model data
containers : Module, Sequential, ModuleList, ModuleDict, ParameterList, ParameterDict for module construction
parameters : parameter(...) for model training
buffers : parameter(...) for model aux
```
import torch
import torch.nn as nn
```
## 0.parameters and buffers
**a parameter is just a tensor with requires_grad=True and has its own place in model.parameters() and the model's ordered dict**
**a buffer is just a tensor with requires_grad=True/False and has its own place in model.buffers() and the model's ordered dict**
In one model,
parameter needs to backward and be updated by optimizer.step
buffer needs to be used in backward but not be updated by optimizer.step
both of these data are responsible for the whole module, thus they would be saved by model.state_dict() in form of OrderDict. Moreover, they would be loaded by model.load_state_dict(...)
nn.Parameter(...) should be used in the __init__ function in order to have init para at the first place.
```
class test(nn.Module):
    """Demo module: a plain tensor attribute, unlike an nn.Parameter or a
    registered submodule, is NOT tracked by model.parameters()."""

    def __init__(self):
        super(test, self).__init__()
        # Registered automatically: an explicit Parameter and a submodule.
        self.a = nn.Parameter(torch.randn(4, 4))
        self.linear = nn.Linear(4, 5)
        # A bare tensor: requires grad, but invisible to .parameters().
        self.tensor_test = torch.rand((1, 1), requires_grad=True)
        print("Not added in nn.Module parameters : {}".format(self.tensor_test))
# Instantiate the demo: repr() shows only the registered Linear submodule,
# and parameters() yields `a` plus the Linear weight/bias — never the
# bare tensor attribute.
model = test()
print(model)
for para in model.parameters():
    print(para)
class MyModule(nn.Module):
    """Shows the ways data gets registered on a Module: attribute
    assignment of an nn.Parameter, register_parameter(), and
    register_buffer() for non-trainable state."""

    def __init__(self):
        super(MyModule, self).__init__()
        # Way 1: assigning an nn.Parameter attribute registers it directly.
        self.param1 = nn.Parameter(torch.tensor(1.))
        # Way 2: register_parameter() adds a Parameter under a given name.
        second = nn.Parameter(torch.tensor(2.))
        self.register_parameter('param2', second)
        # Buffers: saved in state_dict() but excluded from parameters().
        self.register_buffer('buffer', torch.tensor(3.))

    def forward(self, x):
        # NOTE(review): param1/param2 are 0-d tensors, so torch.mm would
        # reject them at runtime; forward is never exercised in this demo.
        left = self.param1
        right = self.param2
        return torch.mm(left, right)
# Inspect what the module tracks: parameters (trainable), buffers
# (non-trainable state), and the combined OrderedDict from state_dict().
model = MyModule()
print("=====para=====")
for para in model.parameters():
    print(para)
print("=====buff=====")
for buff in model.buffers():
    print(buff)
print("=====orderlist=====")
print(model.state_dict())
print("=====save&load=====")
# save model and load
# Round-trip: persist the state_dict to disk, then restore it into a
# freshly constructed instance — both params and buffers survive.
PATH = './MyModule_dict'
torch.save(model.state_dict(), PATH)
model2 = MyModule()
model2.load_state_dict(torch.load(PATH))
print(model2.state_dict())
```
## 1. containers include Module, Sequential, ModuleList, ModuleDict, ParameterList, ParameterDict
Among them, nn.Module is the father class and the five following classes should be put under nn.Module class.
These containers can be used for adding module components.
**It is quite important to notice that nn supports nesting. Once there is one class derived from nn.Module, any nn.Linear or other nn.Module defined inside it would be automatically added to the whole nn.Module.**
```
class MyModule(nn.Module):
    """ParameterList demo: ten random 10x10 parameters, used both by
    iteration and by integer indexing inside forward()."""

    def __init__(self):
        super(MyModule, self).__init__()
        weights = [nn.Parameter(torch.randn(10, 10)) for _ in range(10)]
        self.params = nn.ParameterList(weights)

    def forward(self, x):
        # Mix indexed access (params[idx // 2]) with iteration.
        for idx, weight in enumerate(self.params):
            x = self.params[idx // 2].mm(x) + weight.mm(x)
        return x
model = MyModule()
for para in model.parameters():
print(para)
class MyModule(nn.Module):
    """ParameterDict demo: named parameter banks selected by key at
    call time."""

    def __init__(self):
        super(MyModule, self).__init__()
        self.params = nn.ParameterDict({
            'left': nn.Parameter(torch.randn(5, 10)),
            'right': nn.Parameter(torch.randn(5, 10))
        })

    def forward(self, x, choice):
        # torch.mm() is matrix multiply (a @ b); torch.mul() would be
        # elementwise (a * b).
        selected = self.params[choice]
        return selected.mm(x)
model = MyModule()
model(torch.ones((10,10)), 'left')
class MyModule(nn.Module):
    """ModuleDict demo: a layer and an activation are both picked by
    string key in forward()."""

    def __init__(self):
        super(MyModule, self).__init__()
        # ModuleDict built from a plain dict ...
        self.choices = nn.ModuleDict({
            'conv': nn.Conv2d(10, 10, 3),
            'pool': nn.MaxPool2d(3)
        })
        # ... or from a list of [key, module] pairs.
        self.activations = nn.ModuleDict([
            ['lrelu', nn.LeakyReLU()],
            ['prelu', nn.PReLU()]
        ])

    def forward(self, x, choice, act):
        out = self.choices[choice](x)
        out = self.activations[act](out)
        return out
model = MyModule()
model(torch.ones((10,10,3,3)), 'conv', 'prelu')
```
## 2.difference between nn.Sequential and nn.Modulelist
both of them are subclasses of containers in torch.nn
The sequential class stores sequential list.
```
class seq_net(nn.Module):
    """Two conv+ReLU stages wrapped in nn.Sequential, which supplies the
    forward chaining automatically."""

    def __init__(self):
        super(seq_net, self).__init__()
        stages = [
            nn.Conv2d(1, 20, 5),
            nn.ReLU(),
            nn.Conv2d(20, 64, 5),
            nn.ReLU(),
        ]
        self.seq = nn.Sequential(*stages)

    def forward(self, x):
        # Sequential already chains the stages in order.
        return self.seq(x)
model = seq_net()
print(model)
```
The ModuleList can be used as list, all elements can be used as elements in the list, but the modules in the list are registered automatically to the whole net and the parameters are automatically put on the whole nn.Module model.
```
class modlist_net(nn.Module):
    """Same stack as seq_net, but held in an nn.ModuleList so forward()
    must chain the modules itself (ModuleList has no default forward)."""

    def __init__(self):
        super(modlist_net, self).__init__()
        layers = [
            nn.Conv2d(1, 20, 5),
            nn.ReLU(),
            nn.Conv2d(20, 64, 5),
            nn.ReLU(),
        ]
        self.modlist = nn.ModuleList(layers)

    def forward(self, x):
        # Apply the registered modules one after another by hand.
        for layer in self.modlist:
            x = layer(x)
        return x
model = modlist_net()
print(model)
```
Diff 1 : nn.ModuleList has no forward functions but nn.Sequential has default forward functions
Diff 2 : nn.Sequential can be named using OrderedDict but nn.ModuleList cannot.
```
from collections import OrderedDict
class seq_net(nn.Module):
    """nn.Sequential built from an OrderedDict so every stage carries a
    readable name (conv1, relu1, ...) in repr() and state_dict()."""

    def __init__(self):
        super(seq_net, self).__init__()
        named_stages = OrderedDict([
            ('conv1', nn.Conv2d(1,20,5)),
            ('relu1', nn.ReLU()),
            ('conv2', nn.Conv2d(20,64,5)),
            ('relu2', nn.ReLU())
        ])
        self.seq = nn.Sequential(named_stages)

    def forward(self, x):
        return self.seq(x)
model = seq_net()
print(model)
```
Diff 3 : module in nn.ModuleList has no order, we can put modules in casual order.
Diff 4 : we can use "for" for duplicate modules in nn.ModuleList.
```
class modlist_net(nn.Module):
    """Ten identical Linear(10, 10) layers built with a comprehension
    and stored in an nn.ModuleList (Diff 4: easy duplication)."""

    def __init__(self):
        super(modlist_net, self).__init__()
        blocks = [nn.Linear(10, 10) for _ in range(10)]
        self.modlist = nn.ModuleList(blocks)

    def forward(self, x):
        out = x
        for block in self.modlist:
            out = block(out)
        return out
model = modlist_net()
print(model)
```
## 3. Other APIs for nn.Module base class
collect other APIs not mentioned in the above.
train : affects Dropout & BatchNorm layers
eval : affects Dropout & BatchNorm layers ---> equivalent to self.train(False)
requires_grad_ : change if autograd should record operations on parameters
register_forward_pre_hook : be called every time before forward() is invoked
register_forward_hook : be called every time when forward() is invoked
named_parameters / named_buffers / named_modules / named_children
parameters / buffers / modules / children
add_module
apply
```
# when it comes to tensor we use requires_grad_() or requires_grad = False
# Both forms below disable gradient tracking on a plain tensor.
x = torch.rand((4,4))
x.requires_grad_(False)
x.requires_grad = False
print(x)
# when it comes to nn.Module we use requires_grad_() or requires_grad = False
# this can be used for freezing parameters when fine tuning
# because the grad would not be changed when passing requires_grad_(False) layers
# ========= QUITE IMPORTANT ============
# since the grad in y = None, we just skip the whole step altogether
# Module.requires_grad_() applies the flag to every parameter of the layer.
y = nn.Linear(2,2)
y.requires_grad_(False)
# or
y.requires_grad = False
print(y)
class MyModule(nn.Module):
    """ParameterList demo (repeat): nn.Parameter wraps a tensor so it is
    tracked with requires_grad=True and registered on the Module's
    ordered dict of parameters."""

    def __init__(self):
        super(MyModule, self).__init__()
        banks = [nn.Parameter(torch.randn(10, 10)) for _ in range(10)]
        self.params = nn.ParameterList(banks)

    def forward(self, x):
        # ParameterList supports both iteration and integer indexing.
        for position, bank in enumerate(self.params):
            x = self.params[position // 2].mm(x) + bank.mm(x)
        return x
# Run a forward pass, then freeze all parameters before calling backward.
# NOTE(review): requires_grad_(False) is applied AFTER the forward pass,
# so the autograd graph was already recorded — confirm whether gradients
# are still populated for the frozen parameters in this case.
model = MyModule()
x = model(torch.ones((10,10)))
model.requires_grad_(False)
loss = torch.sum(x)
loss.backward()
```
| github_jupyter |
## First day: list comprehensions and generators
> List comprehensions and generators are in my top 5 favorite Python features leading to clean, robust and Pythonic code.
```
from collections import Counter
import calendar
import itertools
import random
import re
import string
import requests
```
### List comprehensions
Let's dive straight into a practical example. We all know how to use the classic for loop in Python, say I want to loop through a bunch of names title-casing each one:
```
names = 'pybites mike bob julian tim sara guido'.split()
names
for name in names:
print(name.title())
```
Then I want to only keep the names that start with A-M, the `string` module makes it easier (we love Python's standard library!):
```
first_half_alphabet = list(string.ascii_lowercase)[:13]
first_half_alphabet
new_names = []
for name in names:
if name[0] in first_half_alphabet:
new_names.append(name.title())
new_names
```
Feels verbose, not?
If you don't know about list comprehensions you might start using them everywhere after seeing the next refactoring:
```
new_names2 = [name.title() for name in names if name[0] in first_half_alphabet]
new_names2
assert new_names == new_names2
```
From 4 to 1 lines of code, and it reads pretty well too. That's why we love and stick with Python!
Here is another example I used recently to do a most common word count on Harry Potter. I used some list comprehensions to clean up the words before counting them:
```
resp = requests.get('http://projects.bobbelderbos.com/pcc/harry.txt')
words = resp.text.lower().split()
words[:5]
cnt = Counter(words)
cnt.most_common(5)
```
Hmm should not count stopwords, also:
```
'-' in words
```
Let's first clean up any non-alphabetic characters:
```
words = [re.sub(r'\W+', r'', word) for word in words]
'-' in words
'the' in words
```
Ok let's filter those stopwords out plus the empty strings caused by the previous list comprehension:
```
resp = requests.get('http://projects.bobbelderbos.com/pcc/stopwords.txt')
stopwords = resp.text.lower().split()
stopwords[:5]
words = [word for word in words if word.strip() and word not in stopwords]
words[:5]
'the' in words
```
Now it looks way better:
```
cnt = Counter(words)
cnt.most_common(5)
```
What's interesting here is that the first bit of the list comprehension can be an expression like `re.sub`. The final bit can be a compound statement: here we checked for a non-empty word (' ' -> `strip()` -> '' = `False` in Python) `and` we checked `word not in stopwords`.
Again, a lot is going on in one line of code, but the beauty of it is that it is totally fine, because it reads like plain English :)
### Generators
A generator is a function that returns an iterator. It generates values using the `yield` keyword, when called with next() (a for loop does this implicitly), and it raises a `StopIteration` exception when there are no more values to generate. Let's see what this means with a very simple example:
```
def num_gen():
    """Generator yielding the integers 0 through 4, one per next() call;
    raises StopIteration once exhausted."""
    yield from range(5)
gen = num_gen()
next(gen)
# note it takes off where we left it last statement
for i in gen:
print(i)
# no more values to generate
next(gen)
# for catches the exception for us
for i in gen:
print(i)
```
> The `StopIteration` error appears because there are no more yield statements in the function. Calling next on the generator after this does not cause it to loop over and start again. - [Generators are Awesome, Learning by Example
](https://pybit.es/generators.html)
Since learning about generators, a common pattern I use is to build up my sequences:
```
options = 'red yellow blue white black green purple'.split()
options
```
My older code:
```
def create_select_options(options=options):
    """Return a list of HTML <option> tags, one per entry in `options`
    (defaults to the module-level colour list)."""
    return [f'<option value={option}>{option.title()}</option>'
            for option in options]
select_list_new = [f'<option value={option}>{option.title()}</option>' for option in options]
assert create_select_options(options) == select_list_new
from pprint import pprint as pp
pp(create_select_options())
```
Using a generator you can write this in 2 lines of code - my newer code:
```
def create_select_options_gen(options=options):
    """Lazily yield one HTML <option> tag per entry in `options`
    (generator version of create_select_options)."""
    for option in options:
        tag = f'<option value={option}>{option.title()}</option>'
        yield tag
print(create_select_options_gen())
```
Note that generators are _lazy_ so you need to explicitly consume them by iterating over them, for example by looping over them. Another way is to pass them into the `list()` constructor:
```
list(create_select_options_gen())
```
Especially when working with large data sets you definitely want to use generators. Lists can only get as big as available memory allows. Generators are lazily evaluated meaning that they only hold a certain amount of data in memory at once. Just for the sake of giving Python something to do, let's calculate leap years for a million years, and compare performance of list vs generator:
```
# list
def leap_years_lst(n=1000000):
    """Return every leap year in 1..n as a fully materialized list
    (memory grows with the number of leap years)."""
    return [year for year in range(1, n + 1) if calendar.isleap(year)]
# generator
def leap_years_gen(n=1000000):
    """Lazily yield each leap year in 1..n — constant memory, values are
    produced only as the caller consumes them."""
    for candidate in range(1, n + 1):
        if calendar.isleap(candidate):
            yield candidate
```
PRO tip: [since Python 3.3](https://docs.python.org/3/whatsnew/3.3.html) you can use the `yield from` syntax.
```
# this had me waiting for a few seconds
%timeit -n1 leap_years_lst()
# this was instant
%timeit -n1 leap_years_gen()
```
That is pretty impressive. This is an important concept to know about because Big Data is here to stay!
## Second day: practice
Look at your code and see if you can refactor it to use list comprehensions. Same for generators. Are you building up a list somewhere where you could potentially use a generator?
And/or exercise here, take this list of names:
```
NAMES = ['arnold schwarzenegger', 'alec baldwin', 'bob belderbos',
'julian sequeira', 'sandra bullock', 'keanu reeves',
'julbob pybites', 'bob belderbos', 'julian sequeira',
'al pacino', 'brad pitt', 'matt damon', 'brad pitt']
```
Can you write a simple list comprehension to convert these names to title case (brad pitt -> Brad Pitt). Or reverse the first and last name?
Then use this same list and make a little generator, for example to randomly return a pair of names, try to make this work:
pairs = gen_pairs()
for _ in range(10):
next(pairs)
Should print (values might change as random):
Arnold teams up with Brad
Alec teams up with Julian
Have fun!
## Third day: solution / simulate unix pipelines
I hope yesterday's exercise was reasonably doable for you. Here are the answers in case you got stuck:
```
# list comprehension to title case names
[name.title() for name in NAMES]
# list comprehension to reverse first and last names
# using a helper here to show you that list comprehensions can be passed in functions!
def reverse_first_last_names(name):
    """Swap a two-word name: 'brad pitt' -> 'pitt brad'.

    Raises ValueError if `name` does not split into exactly two words.
    """
    first_name, last_name = name.split()
    return ' '.join((last_name, first_name))
[reverse_first_last_names(name) for name in NAMES]
def gen_pairs():
    """Endlessly yield 'X teams up with Y' strings for two distinct
    random first names drawn from the module-level NAMES list."""
    first_names = [full_name.split()[0].title() for full_name in NAMES]
    while True:
        # random.sample picks without replacement, but the same first
        # name appears twice in NAMES, so re-draw until the strings
        # actually differ (no "Julian teams up with Julian").
        first, second = None, None
        while first == second:
            first, second = random.sample(first_names, 2)
        yield f'{first} teams up with {second}'
pairs = gen_pairs()
for _ in range(10):
print(next(pairs))
```
Another way to get a slice of a generator is using `itertools.islice`:
```
first_ten = itertools.islice(pairs, 10)
first_ten
list(first_ten)
```
### Further practice
Read up on set and dict comprehensions, then try these two Bites:
- [Bite 5. Parse a list of names](https://codechalleng.es/bites/5/) (use a set comprehension in first function)
- [Bite 26. Dictionary comprehensions are awesome](https://codechalleng.es/bites/promo/awesome-dict-comprehensions)
Here is a more advanced generators exercise you can try: [Code Challenge 11 - Generators for Fun and Profit](https://codechalleng.es/challenges/11/)
### Time to share what you've accomplished!
Be sure to share your last couple of days work on Twitter or Facebook. Use the hashtag **#100DaysOfCode**.
Here are [some examples](https://twitter.com/search?q=%23100DaysOfCode) to inspire you. Consider including [@talkpython](https://twitter.com/talkpython) and [@pybites](https://twitter.com/pybites) in your tweets.
*See a mistake in these instructions? Please [submit a new issue](https://github.com/talkpython/100daysofcode-with-python-course/issues) or fix it and [submit a PR](https://github.com/talkpython/100daysofcode-with-python-course/pulls).*
| github_jupyter |
# 7. Alfven operator
```
from numpy import linspace, meshgrid, pi, zeros, asarray
from scipy.linalg import eig
import matplotlib.pyplot as plt
%matplotlib inline
from IPython.display import Math
from sympy.core.containers import Tuple
from sympy import symbols
from sympy import Symbol
from sympy import Lambda
from sympy import IndexedBase
from gelato.glt import glt_symbol
from gelato.glt import glt_lambdify
from gelato.glt import glt_approximate_eigenvalues
from gelato.printing.latex import glt_latex
from gelato.calculus import (Dot, Cross, Grad, Curl, Rot, Div)
from gelato.calculus import Constant
from gelato.fem.assembly import assemble_matrix
from gelato.fem.utils import compile_kernel
from gelato.fem.utils import compile_symbol
from spl.fem.splines import SplineSpace
from spl.fem.tensor import TensorFemSpace
from spl.fem.vector import VectorFemSpace
# Symbolic ingredients of the Alfven operator's weak form:
# coordinates, trial/test functions, and the (constant) magnetic field.
x,y,z = symbols('x y z')
u = IndexedBase('u')
v = IndexedBase('v')
bx = Constant('bx')
by = Constant('by')
bz = Constant('bz')
b = Tuple(bx, by, bz)
# Scalar coefficients of the three terms of the bilinear form.
c0,c1,c2 = symbols('c0 c1 c2')
a = Lambda((x,y,z,v,u), ( c0 * Dot(u, v)
+ c1 * Div(u) * Div(v)
+ c2 * Dot(Curl(Cross(b,u)), Curl(Cross(b,v)))))
# ... create a finite element space
# Cubic-free setup: degree-2 splines on 8 elements per direction.
p1  = 2 ; p2  = 2 ; p3  = 2
ne1 = 8 ; ne2 = 8 ; ne3 = 8
grid_1 = linspace(0., 1., ne1+1)
grid_2 = linspace(0., 1., ne2+1)
grid_3 = linspace(0., 1., ne3+1)
V1 = SplineSpace(p1, grid=grid_1)
V2 = SplineSpace(p2, grid=grid_2)
V3 = SplineSpace(p3, grid=grid_3)
# Tensor-product scalar space, then a 3-component vector space on it.
W = TensorFemSpace(V1, V2, V3)
# ...
V = VectorFemSpace(W, W, W)
from IPython.display import Math
from gelato.printing.latex import glt_latex
from sympy import latex
symbol = glt_symbol(a, space=V, evaluate=False)
Math(latex(symbol))
#Math(glt_latex(symbol))
d_types = {'c0': 'double', 'c1': 'double', 'c2': 'double',
'bx': 'double', 'by': 'double', 'bz': 'double'}
kernel = compile_kernel('kernel_alfven', a, V,
d_args=d_types,
backend='fortran')
symbol_f90 = compile_symbol('symbol_alfven', a, V,
d_args=d_types,
backend='fortran')
d_args = {'c0': 1.e-12, 'c1': 1.e-4, 'c2': 1.e-2,
'bx': 0., 'by': 0., 'bz': 1.}
M = assemble_matrix(V, kernel, args=d_args)
from utils import coo_from_blocks
# Convert each of the 3x3 vector-component blocks of the assembled
# matrix to sparse COO form, dropping explicit zeros.
n_block_rows = 3 ; n_block_cols = 3
matrices = []
for ir in range(0, n_block_rows):
    line = []
    for ic in range(0, n_block_cols):
        coo = M[ir][ic].tocoo()
        coo.eliminate_zeros()
        line.append(coo)
    matrices.append(line)
_M = M
M = coo_from_blocks(matrices,
n_block_rows=n_block_rows,
n_block_cols=n_block_cols)
# import eigenvalue solver
from scipy.linalg import eig
# Dense eigen-decomposition of the full assembled operator; keep the
# sorted real parts for comparison against the GLT symbol below.
M = M.todense()
w, v = eig(M)
wr = w.real
wr.sort()
# ... example of symbol evaluation
# Sample the GLT symbol on a (theta, x) grid sized by the number of
# basis functions in each direction: theta in [-pi, pi], x in [0, 1].
n1 = V1.nbasis
n2 = V2.nbasis
n3 = V3.nbasis
t1 = linspace(-pi,pi, n1)
t2 = linspace(-pi,pi, n2)
t3 = linspace(-pi,pi, n3)
x1 = linspace(0.,1., n1)
x2 = linspace(0.,1., n2)
x3 = linspace(0.,1., n3)
# 3x3 symbol matrix at every grid point (Fortran order for the f90 kernel).
e = zeros((3, 3, n1, n2, n3), order='F')
symbol_f90(x1,x2,x3,t1,t2,t3, e,
d_args['bx'], d_args['by'], d_args['bz'],
d_args['c0'], d_args['c1'], d_args['c2'])
# ...
# Eigenvalues of the 3x3 symbol matrix at every grid point approximate
# the spectrum of the assembled operator; collect and sort them.
W = []
for i1 in range(0, n1):
    for i2 in range(0, n2):
        for i3 in range(0, n3):
            mat = e[:,:,i1,i2,i3]
            w,v = eig(mat)
            W += list(w.real)
t = asarray(W)
t.sort()
# Overlay the symbol-based approximation on the true eigenvalues.
plt.plot(t, "+b", label="glt symbol")
plt.plot(wr, "xr", label="eigenvalues")
plt.legend(loc=2);
from IPython.core.display import HTML
def css_styling():
    """Load the notebook's custom CSS and return it wrapped in an HTML
    object so IPython applies it when this is the cell's last expression.
    """
    # Use a context manager so the file handle is closed promptly;
    # the original open(...).read() leaked the handle until GC.
    with open("../../styles/custom.css", "r") as css_file:
        styles = css_file.read()
    return HTML(styles)
css_styling()
```
| github_jupyter |
# Contextual Bandit Content Personalization
In the Contextual Bandit(CB) introduction tutorial, we learnt about CB and different CB algorithms. In this tutorial we will simulate the scenario of personalizing news content on a site, using CB, to users. The goal is to maximize user engagement quantified by measuring click through rate (CTR).
Let's recall that in a CB setting, a data point has four components,
- Context
- Action
- Probability of choosing action
- Reward/cost for chosen action
In our simulator, we will need to generate a context, get an action/decision for the given context and also simulate generating a reward.
In our simulator, our goal is to maximize reward (click through rate/CTR) or minimize loss (-CTR)
- We have two website visitors: 'Tom' and 'Anna'
- Each of them may visit the website either in the morning or in the afternoon
The **context** is therefore (user, time_of_day)
We have the option of recommending a variety of articles to Tom and Anna. Therefore, **actions** are the different choices of articles: "politics", "sports", "music", "food", "finance", "health", "cheese"
The **reward** is whether they click on the article or not: 'click' or 'no click'
Let's first start with importing the necessary packages:
```
import vowpalwabbit
import random
import matplotlib.pyplot as plt
import pandas as pd
import itertools
```
## Simulate reward
In the real world, we will have to learn Tom and Anna's preferences for articles as we observe their interactions. Since this is a simulation, we will have to define Tom and Anna's preference profile. The reward that we provide to the learner will follow this preference profile. Our hope is to see if the learner can take better and better decisions as we see more samples which in turn means we are maximizing the reward.
We will also modify the reward function in a few different ways and see if the CB learner picks up the changes. We will compare the CTR with and without learning.
VW optimizes to minimize **cost which is negative of reward**. Therefore, we will always pass negative of reward as cost to VW.
```
# VW tries to minimize loss/cost, therefore we will pass cost as -reward
USER_LIKED_ARTICLE = -1.0
USER_DISLIKED_ARTICLE = 0.0
```
The reward function below specifies that Tom likes politics in the morning and music in the afternoon whereas Anna likes sports in the morning and politics in the afternoon. It looks dense but we are just simulating our hypothetical world in the format of the feedback the learner understands: cost. If the learner recommends an article that aligns with the reward function, we give a positive reward. In our simulated world this is a click.
```
def get_cost(context, action):
    """Simulated world: return the cost (negative reward) of showing
    `action` in `context`.

    Tom clicks politics in the morning and music in the afternoon;
    Anna clicks sports in the morning and politics in the afternoon.
    NOTE: falls through (returns None) for any other user, as before.
    """
    user = context['user']
    slot = context['time_of_day']
    if user == "Tom":
        liked = ((slot == "morning" and action == 'politics')
                 or (slot == "afternoon" and action == 'music'))
        return USER_LIKED_ARTICLE if liked else USER_DISLIKED_ARTICLE
    elif user == "Anna":
        liked = ((slot == "morning" and action == 'sports')
                 or (slot == "afternoon" and action == 'politics'))
        return USER_LIKED_ARTICLE if liked else USER_DISLIKED_ARTICLE
```
## Understanding VW format
There are some things we need to do to get our input into a format VW understands. This function handles converting from our context as a dictionary, list of articles and the cost if there is one into the text format VW understands.
```
# This function modifies (context, action, cost, probability) to VW friendly format
def to_vw_example_format(context, actions, cb_label = None):
    """Render (context, actions[, label]) in VW's ADF text format.

    Emits one 'shared' line carrying the context, then one line per
    candidate action. When cb_label = (chosen_action, cost, prob) is
    given, the chosen action's line is prefixed with '0:cost:prob'.
    """
    lines = ["shared |User user={} time_of_day={}".format(
        context["user"], context["time_of_day"])]
    for action in actions:
        prefix = ""
        if cb_label is not None and action == cb_label[0]:
            chosen_action, cost, prob = cb_label
            prefix = "0:{}:{} ".format(cost, prob)
        lines.append(prefix + "|Action article={} ".format(action))
    # Joining without a trailing newline mirrors the stripped original.
    return "\n".join(lines)
```
To understand what's going on here let's go through an example. Here, it's the morning and the user is Tom. There are four possible articles. So in the VW format there is one line that starts with shared, this is the shared context, followed by four lines each corresponding to an article.
```
context = {"user":"Tom","time_of_day":"morning"}
actions = ["politics", "sports", "music", "food"]
print(to_vw_example_format(context,actions))
```
## Getting a decision
When we call VW we get a _pmf_, [probability mass function](https://en.wikipedia.org/wiki/Probability_mass_function), as the output. Since we are incorporating exploration into our strategy, VW will give us a list of probabilities over the set of actions. This means that the probability at a given index in the list corresponds to the likelihood of picking that specific action. In order to arrive at a decision/action, we will have to sample from this list.
So, given a list `[0.7, 0.1, 0.1, 0.1]`, we would choose the first item with a 70% chance. `sample_custom_pmf` takes such a list and gives us the index it chose and what the probability of choosing that index was.
```
def sample_custom_pmf(pmf):
    """Draw one index from a (possibly unnormalized) pmf.

    Normalizes `pmf`, draws a uniform threshold, and walks the
    cumulative distribution. Returns (index, normalized_probability)
    of the sampled entry.
    """
    scale = 1 / sum(pmf)
    probs = [weight * scale for weight in pmf]
    threshold = random.random()
    cumulative = 0.0
    for index, prob in enumerate(probs):
        cumulative += prob
        if cumulative > threshold:
            return index, prob
```
We have all of the information we need to choose an action for a specific user and context. To use VW to achieve this, we will do the following:
1. We convert our context and actions into the text format we need
2. We pass this example to vw and get the pmf out
3. Now, we sample this pmf to get what article we will end up showing
4. Finally we return the article chosen, and the probability of choosing it (we are going to need the probability when we learn form this example)
```
def get_action(vw, context, actions):
    """Ask the VW workspace for a pmf over `actions` and sample one.

    Returns (chosen_article, probability_it_was_chosen); the probability
    is needed later for the importance-weighted learning update.
    """
    example = to_vw_example_format(context, actions)
    pmf = vw.predict(example)
    index, prob = sample_custom_pmf(pmf)
    return actions[index], prob
```
## Simulation set up
Now that we have done all of the setup work and know how to interface with VW, let's simulate the world of Tom and Anna. The scenario is they go to a website and are shown an article. Remember that the reward function allows us to define the worlds reaction to what VW recommends.
We will choose between Tom and Anna uniformly at random and also choose their time of visit uniformly at random. You can think of this as us tossing a coin to choose between Tom and Anna (Anna if heads and Tom if tails) and another coin toss for choosing time of day.
```
users = ['Tom', 'Anna']
times_of_day = ['morning', 'afternoon']
actions = ["politics", "sports", "music", "food", "finance", "health", "camping"]
def choose_user(users):
    """Simulate a site visit: pick one user uniformly at random."""
    return random.choice(users)
def choose_time_of_day(times_of_day):
    """Pick the visit's time of day uniformly at random."""
    return random.choice(times_of_day)
# display preference matrix
def get_preference_matrix(cost_fun):
    """Tabulate `cost_fun` over every (user, time_of_day, action)
    combination and pivot into a (user, time) x action cost matrix."""
    def expand_grid(data_dict):
        # Cartesian product of the value lists -> one row per combo.
        rows = itertools.product(*data_dict.values())
        return pd.DataFrame.from_records(rows, columns=data_dict.keys())
    df = expand_grid({'users':users, 'times_of_day': times_of_day, 'actions': actions})
    # NOTE(review): positional r[0]/r[1]/r[2] relies on the column order
    # produced by expand_grid (users, times_of_day, actions) — confirm
    # if the dict ordering ever changes.
    df['cost'] = df.apply(lambda r: cost_fun({'user': r[0], 'time_of_day': r[1]}, r[2]), axis=1)
    return df.pivot_table(index=['users', 'times_of_day'],
                columns='actions',
                values='cost')
get_preference_matrix(get_cost)
```
We will instantiate a CB learner in VW and then simulate Tom and Anna's website visits `num_iterations` number of times. In each visit, we:
1. Decide between Tom and Anna
2. Decide time of day
3. Pass context i.e. (user, time of day) to learner to get action i.e. article recommendation and probability of choosing action
4. Receive reward i.e. see if user clicked or not. Remember that cost is just negative reward.
5. Format context, action, probability, reward in VW format
6. Learn from the example
- VW _reduces_ a CB problem to a cost sensitive multiclass classification problem.
This is the same for every one of our simulations, so we define the process in the `run_simulation` function. The cost function must be supplied as this is essentially us simulating how the world works.
```
def run_simulation(vw, num_iterations, users, times_of_day, actions, cost_function, do_learn = True):
    """Simulate `num_iterations` site visits against the VW workspace.

    Each iteration draws a random (user, time_of_day) context, asks VW
    for an article, scores it with `cost_function`, and — when
    `do_learn` is True — feeds the labelled example back to VW.
    Returns the running CTR after each iteration (reward = -cost, so
    higher is better).
    """
    cost_sum = 0.
    ctr = []
    for i in range(1, num_iterations+1):
        # 1. In each simulation choose a user
        user = choose_user(users)
        # 2. Choose time of day for a given user
        time_of_day = choose_time_of_day(times_of_day)
        # 3. Pass context to vw to get an action
        context = {'user': user, 'time_of_day': time_of_day}
        action, prob = get_action(vw, context, actions)
        # 4. Get cost of the action we chose
        cost = cost_function(context, action)
        cost_sum += cost
        if do_learn:
            # 5. Inform VW of what happened so we can learn from it
            vw_format = vw.parse(to_vw_example_format(context, actions, (action, cost, prob)), vowpalwabbit.LabelType.CONTEXTUAL_BANDIT)
            # 6. Learn
            vw.learn(vw_format)
        # We negate this so that on the plot instead of minimizing cost, we are maximizing reward
        ctr.append(-1*cost_sum/i)
    return ctr
```
We want to be able to visualize what is occurring, so we are going to plot the click through rate over each iteration of the simulation. If VW is showing actions that get rewards, the ctr will be higher. Below is a little utility function to make showing the plot easier.
```
def plot_ctr(num_iterations, ctr):
    """Plot the running click-through rate across all simulation iterations."""
    xs = range(1, num_iterations + 1)
    plt.plot(xs, ctr)
    plt.xlabel('num_iterations', fontsize=14)
    plt.ylabel('ctr', fontsize=14)
    # CTR is a rate, so pin the y-axis to [0, 1] for comparable plots.
    plt.ylim([0, 1])
```
## Scenario 1
We will use the first reward function `get_cost` and assume that Tom and Anna do not change their preferences over time and see what happens to user engagement as we learn. We will also see what happens when there is no learning. We will use the "no learning" case as our baseline to compare to.
### With learning
```
# Instantiate learner in VW
# --cb_explore_adf : contextual bandit with action-dependent features
# -q UA            : quadratic interactions between the (U)ser and (A)ction namespaces
# --epsilon 0.2    : explore a uniformly-random action 20% of the time
vw = vowpalwabbit.Workspace("--cb_explore_adf -q UA --quiet --epsilon 0.2")
num_iterations = 5000
ctr = run_simulation(vw, num_iterations, users, times_of_day, actions, get_cost)
plot_ctr(num_iterations, ctr)
```
#### Aside: interactions
You'll notice in the arguments we supply to VW, **we include `-q UA`**. This is telling VW to create additional features which are the features in the (U)ser namespace and (A)ction namespaces multiplied together. This allows us to learn the interaction between when certain actions are good in certain times of days and for particular users. If we didn't do that, the learning wouldn't really work. We can see that in action below.
```
# Instantiate learner in VW but without -q
# Without the User x Action interaction features the learner cannot model
# "this action is good for this user at this time", so CTR stays low.
vw = vowpalwabbit.Workspace("--cb_explore_adf --quiet --epsilon 0.2")
num_iterations = 5000
ctr = run_simulation(vw, num_iterations, users, times_of_day, actions, get_cost)
plot_ctr(num_iterations, ctr)
```
### Without learning
Let's do the same thing again (with `-q`) but with do_learn set to False to show the effect if we don't learn from the rewards. The ctr never improves, as we just hover around 0.2.
```
# Instantiate learner in VW
# Baseline: do_learn=False means examples are never fed back, so the policy
# never improves beyond random exploration.
vw = vowpalwabbit.Workspace("--cb_explore_adf -q UA --quiet --epsilon 0.2")
num_iterations = 5000
ctr = run_simulation(vw, num_iterations, users, times_of_day, actions, get_cost, do_learn=False)
plot_ctr(num_iterations, ctr)
```
## Scenario 2
In the real world people's preferences change over time. So now in the simulation we are going to incorporate two different cost functions, and swap over to the second one halfway through. Below is a table of the new reward function we are going to use, `get_cost_new1`:
### Tom
| | `get_cost` | `get_cost_new1` |
|:---|:---:|:---:|
| **Morning** | Politics | Politics |
| **Afternoon** | Music | Sports |
### Anna
| | `get_cost` | `get_cost_new1` |
|:---|:---:|:---:|
| **Morning** | Sports | Sports |
| **Afternoon** | Politics | Sports |
This reward function is still working with actions that the learner has seen previously.
```
def get_cost_new1(context, action):
    """Second-phase cost function: Tom now prefers sports in the afternoon and
    Anna prefers sports at both times of day.

    Returns USER_LIKED_ARTICLE when `action` matches the user's preference for
    the given time of day, USER_DISLIKED_ARTICLE otherwise (for known users).
    """
    liked = {
        ('Tom', 'morning'): 'politics',
        ('Tom', 'afternoon'): 'sports',
        ('Anna', 'morning'): 'sports',
        ('Anna', 'afternoon'): 'sports',
    }
    if context['user'] in ('Tom', 'Anna'):
        key = (context['user'], context['time_of_day'])
        if liked.get(key) == action:
            return USER_LIKED_ARTICLE
        return USER_DISLIKED_ARTICLE
get_preference_matrix(get_cost_new1)
```
To make it easy to show the effect of the cost function changing we are going to modify the `run_simulation` function. It is a little less readable now, but it supports accepting a list of cost functions and it will operate over each cost function in turn. This is perfect for what we need.
```
def run_simulation_multiple_cost_functions(vw, num_iterations, users, times_of_day, actions, cost_functions, do_learn = True):
    """Like `run_simulation`, but runs `num_iterations` visits under each cost
    function in `cost_functions` in turn, so we can watch the learner adapt
    when the world's preferences change mid-stream.

    Returns the running CTR (negated average cost) after every visit, across
    all phases.

    NOTE: this cell previously defined the function twice; the first copy was
    missing its `return ctr` and was immediately shadowed by the second.  The
    duplicate has been removed and the single definition kept.
    """
    cost_sum = 0.
    ctr = []
    start_counter = 1
    end_counter = start_counter + num_iterations
    for cost_function in cost_functions:
        for i in range(start_counter, end_counter):
            # 1. in each simulation choose a user
            user = choose_user(users)
            # 2. choose time of day for a given user
            time_of_day = choose_time_of_day(times_of_day)
            # Construct context based on chosen user and time of day
            context = {'user': user, 'time_of_day': time_of_day}
            # 3. Use the get_action function we defined earlier
            action, prob = get_action(vw, context, actions)
            # 4. Get cost of the action we chose
            cost = cost_function(context, action)
            cost_sum += cost
            if do_learn:
                # 5. Inform VW of what happened so we can learn from it
                vw_format = vw.parse(to_vw_example_format(context, actions, (action, cost, prob)),
                                     vowpalwabbit.LabelType.CONTEXTUAL_BANDIT)
                # 6. Learn
                vw.learn(vw_format)
            # Negate so the plot shows reward (CTR) being maximised, not cost minimised
            ctr.append(-1*cost_sum/i)
        # Advance the visit-counter window for the next cost function's phase
        start_counter = end_counter
        end_counter = start_counter + num_iterations
    return ctr
```
### With learning
Let us now switch to the second reward function after a few samples (running the first reward function). Recall that this reward function changes the preferences of the web users but it is still working with the same action space as before. We should see the learner pick up these changes and optimize towards the new preferences.
```
# use first reward function initially and then switch to second reward function
# Instantiate learner in VW
vw = vowpalwabbit.Workspace("--cb_explore_adf -q UA --quiet --epsilon 0.2")
num_iterations_per_cost_func = 5000
cost_functions = [get_cost, get_cost_new1]
total_iterations = num_iterations_per_cost_func * len(cost_functions)
# 5000 visits under get_cost, then 5000 under get_cost_new1
ctr = run_simulation_multiple_cost_functions(vw, num_iterations_per_cost_func, users, times_of_day, actions, cost_functions)
plot_ctr(total_iterations, ctr)
```
**Note:** The initial spike in CTR depends on the rewards received for the first few examples. When you run on your own, you may see something different initially because our simulator is designed to have randomness.
### Without learning
```
# Do not learn
# use first reward function initially and then switch to second reward function
# Instantiate learner in VW
vw = vowpalwabbit.Workspace("--cb_explore_adf -q UA --quiet --epsilon 0.2")
num_iterations_per_cost_func = 5000
cost_functions = [get_cost, get_cost_new1]
total_iterations = num_iterations_per_cost_func * len(cost_functions)
# Baseline: without learning, the CTR hovers near random regardless of phase
ctr = run_simulation_multiple_cost_functions(vw, num_iterations_per_cost_func, users, times_of_day, actions, cost_functions, do_learn=False)
plot_ctr(total_iterations, ctr)
```
## Scenario 3
In this scenario we are going to start rewarding actions that have never seen a reward previously when we change the cost function.
### Tom
| | `get_cost` | `get_cost_new2` |
|:---|:---:|:---:|
| **Morning** | Politics | Politics|
| **Afternoon** | Music | Food |
### Anna
| | `get_cost` | `get_cost_new2` |
|:---|:---:|:---:|
| **Morning** | Sports | Food|
| **Afternoon** | Politics | Food |
```
def get_cost_new2(context, action):
    """Third-phase cost function: Tom keeps politics in the morning but now
    likes food in the afternoon; Anna likes food at both times of day.

    'food' has never been rewarded before this phase, so the learner must
    explore into a previously unrewarded action.
    """
    liked = {
        ('Tom', 'morning'): 'politics',
        ('Tom', 'afternoon'): 'food',
        ('Anna', 'morning'): 'food',
        ('Anna', 'afternoon'): 'food',
    }
    if context['user'] in ('Tom', 'Anna'):
        key = (context['user'], context['time_of_day'])
        if liked.get(key) == action:
            return USER_LIKED_ARTICLE
        return USER_DISLIKED_ARTICLE
```
### With learning
Let us now switch to the third reward function after a few samples (running the first reward function). Recall that this reward function changes the preferences of the users and is working with a **different** action space than before. We should see the learner pick up these changes and optimize towards the new preferences
```
# use first reward function initially and then switch to third reward function
# Instantiate learner in VW
vw = vowpalwabbit.Workspace("--cb_explore_adf -q UA --quiet --epsilon 0.2")
num_iterations_per_cost_func = 5000
cost_functions = [get_cost, get_cost_new2]
total_iterations = num_iterations_per_cost_func * len(cost_functions)
# 5000 visits under get_cost, then 5000 under get_cost_new2 (new 'food' action rewarded)
ctr = run_simulation_multiple_cost_functions(vw, num_iterations_per_cost_func, users, times_of_day, actions, cost_functions)
plot_ctr(total_iterations, ctr)
```
### Without Learning
```
# Do not learn
# use first reward function initially and then switch to third reward function
# Instantiate learner in VW
vw = vowpalwabbit.Workspace("--cb_explore_adf -q UA --quiet --epsilon 0.2")
num_iterations_per_cost_func = 5000
cost_functions = [get_cost, get_cost_new2]
total_iterations = num_iterations_per_cost_func * len(cost_functions)
# Baseline without learning, for comparison with the learning run above
ctr = run_simulation_multiple_cost_functions(vw, num_iterations_per_cost_func, users, times_of_day, actions, cost_functions, do_learn=False)
plot_ctr(total_iterations, ctr)
```
## Summary
This tutorial aimed at showcasing a real world scenario where contextual bandit algorithms can be used. We were able to take a context and set of actions and learn what actions worked best for a given context. We saw that the learner was able to respond rapidly to changes in the world. We showed that allowing the learner to interact with the world resulted in higher rewards than the no learning baseline.
This tutorial worked with simplistic features. VW supports high dimensional sparse features, different exploration algorithms and policy evaluation approaches.
| github_jupyter |
# 01 Intro
- Introduction to Data Visualization
- Introduction to Matplotlib
- Basic Plotting with Matplotlib
- Dataset on Immigration to Canada
- Line Plots
# Introduction to Data Visualization
## Data visualization
> a way to show complex data in a form that is graphical and easy to understand.
>Transforming a visual into one which is more effective, attractive and impactive
## Why Build visuals
- For exploratory data analysis
- Communicate data clearly
- Share unbiased representation of data
- Support recommendations to different stakeholders
## Best Practices
### 3 Key points when creating a visual
1. Less is more effective.
2. Less is more attractive.
3. Less is more impactive.
Any feature or design you incorporate in your plot to make it more attractive or
pleasing should support the message that the plot is meant to get across and not
distract from it.
Simple, cleaner, less distracting, and much
easier to read graphs.
Bar graphs and charts are argued to be far
superior ways to quickly get a message across.
Reinforce the concept of less is more effective, attractive, and impactive.
# Introduction to Matplotlib
## Architecture of Matplotlib
Matplotlib's architecture is composed of
three main layers:
1. Back-end layer
2. Artist layer
3. Scripting layer
### 1. Back-end layer
Back-end layer has three built-in abstract interface classes:
1. FigureCanvas
2. Renderer
3. Event
### 2. Artist layer
Artist layer is composed of one main object,which is the Artist.
The Artist is the object that knows how to take the Renderer and use it to put ink on the canvas.
Everything you see on a Matplotlib figure is an Artist instance.
>The title, the lines, the tick labels, the
images, and so on, all correspond to an individual Artist.
There are two types of Artist objects.
1. Primitive type
> a line, a rectangle, a circle, or text.
2. Composite type
> figure or axes.

**Each composite artist may contain other composite
artists as well as primitive artists.**
### Use artist layer to generate histogram
>Use the artist layer to generate a histogram of 10,000 random numbers
```
import matplotlib.pyplot as plt
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas # import FigureCanvas
from matplotlib.figure import Figure # import Figure artist
fig = plt.figure() # an empty figure with no axes
canvas = FigureCanvas(fig) # attach the figure artist to figure canvas
# Create 10000 random numbers using numpy
import numpy as np
x = np.random.randn(10000)
# 111 means a 1x1 grid of subplots, using the first (and only) cell
ax = fig.add_subplot(111) # create an axes artist with one row and one column and uses the first cell in a grid
ax.hist(x, 100) # generate a histogram of the 10000 numbers with 100 bins
# add a title to the figure and save it
ax.set_title(r'Normal distribution with $\mu=0, \sigma=1$')
fig.savefig('../figs/01_Intro/matplotlib_histogram_artist.png')
plt.show()
```
### 3. Scripting layer
Developed for scientists who are not professional programmers to perform quick exploratory analysis of some data.
Matplotlib's scripting layer is essentially the Matplotlib.pyplot interface, which automates the process of defining a canvas and defining a figure artist instance and connecting them.
### Use scripting layer to generate histogram
>Use the scripting layer to generate a histogram of 10,000 random numbers
```
import matplotlib.pyplot as plt
import numpy as np
# Same histogram as above, but pyplot manages the figure/canvas for us
x = np.random.randn(10000)
plt.hist(x, 100)
plt.title(r'Normal distribution with $\mu=0, \sigma=1$')
plt.savefig('../figs/01_Intro/matplotlib_histogram_scripting.png')
plt.show()
```
# Basic Plotting with Matplotlib
```
# %matplotlib inline
import matplotlib.pyplot as plt
# A single point at (5, 5) drawn with the 'o' (circle) marker
plt.plot(5, 5, 'o')
plt.show()
# Same point again, now with axis labels and a title
plt.plot(5, 5, 'o')
plt.ylabel("Y")
plt.xlabel("X")
plt.title("Plotting Example")
plt.show()
# Using pandas with matplotlib
import pandas as pd
# Immigration counts per year as [India, China] pairs
india_china = {'1980': [8880, 5123],
               '1981': [8670, 6682],
               '1982': [8147, 3308],
               '1983': [7338, 1863],
               '1984': [5704, 1527]}
india_china
# orient='index' makes the years the row index; columns name the two countries
india_china_df = pd.DataFrame.from_dict(
    india_china,
    orient='index',
    columns=['India', 'China'])
india_china_df
# Line plot
india_china_df.plot(kind="line");
# Histogram plot
india_china_df["India"].plot(kind="hist");
```
# Dataset on Immigration to Canada
## Immigration to Canada from 1980 to 2013 Dataset
>Dataset Source: International migration flows to and from selected countries - The 2015 revision.
>The dataset contains annual data on the flows of international immigrants as recorded by the countries of destination. The data presents both inflows and outflows according to the place of birth, citizenship or place of previous / next residence both for foreigners and nationals. The current version presents data pertaining to 45 countries.
>Get [the raw immigration data from United Nations Population Division Department of Economic and Social Affairs website](https://www.un.org/en/development/desa/population/migration/data/empirical2/data/UN_MigFlow_All_CountryFiles.zip)

> OR fetch Canada's immigration data that has been extracted and uploaded to one of IBM servers from [here](https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DV0101EN/labs/Data_Files/Canada.xlsx)
## Read Data into pandas Dataframe
```
import numpy as np
import pandas as pd
# Download the Canada immigration workbook and read the relevant sheet,
# skipping the 20 header rows and 2 footer rows of UN metadata.
df_can = pd.read_excel('https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DV0101EN/labs/Data_Files/Canada.xlsx',
                       sheet_name='Canada by Citizenship',
                       skiprows=range(20),
                       skipfooter=2)
print ('Data read into a pandas dataframe!')
```
## Display Dataframe
```
# peek at the first 5 rows
df_can.head()
```
### Read Data into pandas Dataframe from local file
```
# Same workbook as above, this time read from a local copy
df_can = pd.read_excel('../data/Canada.xlsx',
                       sheet_name='Canada by Citizenship',
                       skiprows=range(20),
                       skipfooter=2)
print ('Data read into a pandas dataframe!')
df_can.head()
df_can.info()
# list of column headers
df_can.columns.values
# list of indices
df_can.index.values
# type of index and columns
print(type(df_can.columns))
print(type(df_can.index))
# get the index and columns as lists
df_can.columns.tolist()
df_can.index.tolist()
print (type(df_can.columns.tolist()))
print (type(df_can.index.tolist()))
# size of dataframe (rows, columns)
df_can.shape
```
### Clean the data set
>Remove a few unnecessary columns
```
# in pandas axis=0 represents rows (default) and axis=1 represents columns.
# Drop the UN metadata columns we don't need for the analysis (in place)
df_can.drop(['AREA','REG','DEV','Type','Coverage'], axis=1, inplace=True)
df_can.head(2)
```
### Rename the columns
>Rename the columns by passing in a dictionary of old and new names
```
# Rename the UN column names to friendlier ones (in place)
df_can.rename(columns={'OdName':'Country', 'AreaName':'Continent', 'RegName':'Region'}, inplace=True)
df_can.columns
```
### Sums up the total immigrants
>sums up the total immigrants by country over the entire period 1980 - 2013
```
# add a 'Total' column
# NOTE(review): row-wise sum over all columns — relies on pandas skipping the
# non-numeric columns (Country/Continent/Region). Newer pandas versions may
# require numeric_only=True here; confirm against the pandas version in use.
df_can['Total'] = df_can.sum(axis=1)
df_can['Total']
```
### Check null objects
```
# check to see how many null objects we have in the dataset
df_can.isnull().sum()
# view a quick summary of each column in our dataframe
# (describe() covers the numeric columns by default)
df_can.describe()
```
### Filtering list of countries
```
# filtering on the list of countries ('Country').
df_can.Country # returns a series
# filtering on the list of countries ('OdName') and the data for years: 1980 - 1985.
# (the year columns are still ints at this point, hence the bare 1980, 1981, ...)
df_can[['Country', 1980, 1981, 1982, 1983, 1984, 1985]] # returns a dataframe
```
> notice that 'Country' is string, and the years are integers.
**Convert all column names to string later on.**
```
# setting the 'Country' column as the index
# (lets us look rows up by name, e.g. df_can.loc['Japan'])
df_can.set_index('Country', inplace=True)
df_can.head(3)
```
## **loc** vs **iloc**
>df.loc[**label**]
- filters by the **labels** of the index/column
>df.iloc[**index**]
- filters by the **positions** of the index/column
```
# view the number of immigrants from Japan (row 87) for the following scenarios:
# 1. the full row data (all columns)
print(df_can.loc['Japan'])
# 2. for year 2013
print(df_can.loc['Japan', 2013])
# 3. for years 1980 to 1985
# fixed: the list previously ended with a duplicated 1984 instead of 1985,
# so it did not actually cover "1980 to 1985" as stated
print(df_can.loc['Japan', [1980, 1981, 1982, 1983, 1984, 1985]])
# same selection by position: row 87, year columns 3-8
print(df_can.iloc[87, [3, 4, 5, 6, 7, 8]])
```
### Convert the column names into strings: '1980' to '2013'.
```
# make all column labels strings so year-based slicing works uniformly
df_can.columns = list(map(str, df_can.columns))
# converted years to string to easily call upon the full range of years for plotting
years = list(map(str, range(1980, 2014)))
years[:5]
```
### Filtering based on a criteria
> filter the dataframe based on a condition
> pass the condition as a boolean vector.
```
# filter the dataframe to show the data on Asian countries (AreaName = Asia).
# 1. create the condition boolean series
condition = df_can['Continent'] == 'Asia'
print(condition)
# 2. pass this condition into the dataFrame
df_can[condition]
# filter for AreaNAme = Asia and RegName = Southern Asia
# (multiple conditions need parentheses and the element-wise & operator, not `and`)
df_can[(df_can['Continent']=='Asia') & (df_can['Region']=='Southern Asia')]
# review the changes we have made to our dataframe.
print('data dimensions:', df_can.shape)
print(df_can.columns)
df_can.head(2)
```
# Line Plots
## Plot a line graph of immigration from Haiti
```
# use the matplotlib inline backend
%matplotlib inline
# importing matplotlib.pyplot
import matplotlib.pyplot as plt
# converted the years to string to call upon the full range of years
years = list(map(str, range(1980, 2014)))
years[:5]
```
### Extract the data series for Haiti
```
# passing in years 1980 - 2013 to exclude the 'total' column
haiti = df_can.loc['Haiti',years]
haiti.head()
# plot a line plot by appending .plot() to the haiti dataframe
haiti.plot();
# change the index values of Haiti to type integer for plotting
haiti.index = haiti.index.map(int)
haiti.plot(kind='line')
plt.title('Immigration from Haiti')
plt.ylabel('Number of immigrants')
plt.xlabel('Years')
plt.show() # need this line to show the updates made to the figure
haiti.plot(kind='line')
plt.title('Immigration from Haiti')
plt.ylabel('Number of Immigrants')
plt.xlabel('Years')
# annotate the 2010 Earthquake.
# syntax: plt.text(x, y, label)
# NOTE(review): the label is anchored at x=2000 (the year 2000), placing the
# text to the left of the 2010 spike rather than on top of it — confirm intent.
plt.text(2000, 6000, '2010 Earthquake') # see note below
plt.savefig('../figs/01_Intro/immigration_from_haiti.png')
plt.show()
```
### Add more countries to line plot
>Add more countries to the line plot to make meaningful comparisons of immigration from different countries.
### Compare the number of immigrants from India and China from 1980 to 2013.
### Step 1: Get the data set for China and India, and display dataframe.
```
# Rows for China and India across the selected years
china = df_can.loc['China',years]
china.head()
india = df_can.loc['India',years]
india.head()
# Both countries at once (countries as rows, years as columns)
df_china_india = df_can.loc[["China", "India"],years]
df_china_india.head()
```
### Step 2: Plot graph
```
# pandas plots the index on the x-axis — here that's still the countries (fixed below)
df_china_india.plot(kind='line');
```
>Recall that pandas plots the indices on the x-axis and the columns as individual lines on the y-axis.
>As the dataframe with the country as the index and years as the columns, we must first transpose the dataframe using transpose() method to swap the row and columns.
```
# transpose so years become the index (x-axis) and countries the columns (lines)
df_china_india = df_china_india.transpose()
df_china_india.head()
# change the index values of df_china_india to type integer for plotting
df_china_india.index = df_china_india.index.map(int)
df_china_india.plot(kind='line')
plt.title('Immigration from China and India')
plt.ylabel('Number of Immigrants')
plt.xlabel('Years')
plt.savefig('../figs/01_Intro/immigration_from_china_india.png')
plt.show()
```
>From the above plot, we can observe that the China and India have very similar immigration trends through the years.
### Compare the trend of top 5 countries
>Compare the trend of top 5 countries that contributed the most to immigration to Canada.
### Step 1: Get the dataset.
>Recall that we created a Total column that calculates the cumulative immigration by country.
>We will sort on this column to get our top 5 countries using pandas sort_values() method.
>The inplace = True parameter saves the changes to the original df_can dataframe
```
# sort countries by cumulative immigration, largest first (in place)
df_can.sort_values(by='Total', ascending=False, axis=0, inplace=True)
# get the top 5 entries
df_top5 = df_can.head(5)
df_top5
# transpose the dataframe
df_top5 = df_top5[years].transpose()
print(df_top5)
```
### Step 2: Plot the dataframe.
>Change the size using the `figsize` parameter to make the plot more readable
```
# let's change the index values of df_top5 to type integer for plotting
df_top5.index = df_top5.index.map(int)
# pass a tuple (x, y) size
# (a larger figure keeps 5 overlapping series readable)
df_top5.plot(kind='line', figsize=(14, 8))
plt.title('Immigration Trend of Top 5 Countries')
plt.ylabel('Number of Immigrants')
plt.xlabel('Years')
plt.savefig('../figs/01_Intro/immigration_trend_top5_countries.png')
plt.show()
```
| github_jupyter |
Notebook written by [Zhedong Zheng](https://github.com/zhedongzheng)

```
import tensorflow as tf
import numpy as np
import sklearn
VOCAB_SIZE = 20000   # keep only the 20k most frequent IMDB words
EMBED_DIM = 100      # word-embedding dimensionality
RNN_SIZE = 70        # GRU hidden-state size (per direction)
CLIP_NORM = 5.0      # global gradient-norm clipping threshold
BATCH_SIZE = 32
LR = {'start': 5e-3, 'end': 5e-4, 'steps': 1500}  # exponential LR decay schedule
N_EPOCH = 2
N_CLASS = 2          # binary sentiment classification
def sort_by_len(x, y):
    """Reorder the dataset so shorter sequences come first.

    Grouping similar-length sequences means each padded batch wastes little
    space on padding.  `x` and `y` must support fancy indexing by a list of
    positions (e.g. numpy arrays).
    """
    order = sorted(range(len(x)), key=lambda i: len(x[i]))
    return x[order], y[order]
def pad_sentence_batch(sent_batch, thres=400):
    """Right-pad every sentence in the batch with 0s to a common length.

    The target length is the longest sentence in the batch, capped at `thres`.
    Sentences longer than the cap keep only their last `thres` tokens.
    """
    target = min(max(len(s) for s in sent_batch), thres)
    trimmed = [s[-target:] if len(s) > target else s for s in sent_batch]
    return [s + [0] * (target - len(s)) for s in trimmed]
def next_train_batch(X_train, y_train):
    """Yield (padded_sentences, labels) minibatches of size BATCH_SIZE."""
    for start in range(0, len(X_train), BATCH_SIZE):
        stop = start + BATCH_SIZE
        yield pad_sentence_batch(X_train[start:stop]), y_train[start:stop]
def next_test_batch(X_test):
    """Yield padded sentence minibatches of size BATCH_SIZE (no labels)."""
    for start in range(0, len(X_test), BATCH_SIZE):
        yield pad_sentence_batch(X_test[start:start + BATCH_SIZE])
def train_input_fn(X_train, y_train):
    """Build a one-shot tf.data iterator yielding (padded sequences, labels) batches."""
    dataset = tf.data.Dataset.from_generator(
        lambda: next_train_batch(X_train, y_train),
        (tf.int32, tf.int64),
        # [batch, seq_len] sequences, [batch] labels — both dims dynamic
        (tf.TensorShape([None,None]), tf.TensorShape([None])))
    iterator = dataset.make_one_shot_iterator()  # TF1-style iterator API
    return iterator.get_next()
def predict_input_fn(X_test):
    """Build a one-shot tf.data iterator yielding padded sequence batches (no labels)."""
    dataset = tf.data.Dataset.from_generator(
        lambda: next_test_batch(X_test),
        tf.int32,
        tf.TensorShape([None,None]))
    iterator = dataset.make_one_shot_iterator()  # TF1-style iterator API
    return iterator.get_next()
def rnn_cell():
    """Create a GRU cell with orthogonal kernel initialisation (one per direction)."""
    return tf.nn.rnn_cell.GRUCell(RNN_SIZE, kernel_initializer=tf.orthogonal_initializer())
def forward(inputs, mode):
    """Bi-directional GRU classifier: embed -> dropout -> bi-GRU -> dense logits."""
    is_training = (mode == tf.estimator.ModeKeys.TRAIN)
    x = tf.contrib.layers.embed_sequence(inputs, VOCAB_SIZE, EMBED_DIM)
    x = tf.layers.dropout(x, 0.2, training=is_training)  # dropout active only in TRAIN mode
    # sequence lengths = number of non-zero (non-padding) token ids per row
    bi_outs, bi_states = tf.nn.bidirectional_dynamic_rnn(
        rnn_cell(), rnn_cell(), x, tf.count_nonzero(inputs, 1), dtype=tf.float32)
    # concatenate final forward/backward states as the sentence representation
    x = tf.concat(bi_states, -1)
    logits = tf.layers.dense(x, N_CLASS)
    return logits
def clip_grads(loss):
    """Return (clipped_gradient, variable) pairs with the global norm capped at CLIP_NORM."""
    params = tf.trainable_variables()
    grads = tf.gradients(loss, params)
    clipped_grads, _ = tf.clip_by_global_norm(grads, CLIP_NORM)
    return zip(clipped_grads, params)
def model_fn(features, labels, mode):
    """Estimator model_fn: predictions in PREDICT mode, loss + train op in TRAIN mode.

    NOTE(review): no EVAL branch is defined, so estimator.evaluate() would fail —
    confirm that only train/predict are ever called (as in the driver cell below).
    """
    logits = forward(features, mode)
    if mode == tf.estimator.ModeKeys.PREDICT:
        preds = tf.argmax(logits, -1)
        return tf.estimator.EstimatorSpec(mode, predictions=preds)
    if mode == tf.estimator.ModeKeys.TRAIN:
        global_step = tf.train.get_global_step()
        # decay LR from LR['start'] toward LR['end'] over LR['steps'] steps
        lr_op = tf.train.exponential_decay(
            LR['start'], global_step, LR['steps'], LR['end']/LR['start'])
        loss_op = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=logits, labels=labels))
        train_op = tf.train.AdamOptimizer(lr_op).apply_gradients(
            clip_grads(loss_op), global_step=global_step)
        # log the decaying learning rate every 100 steps
        lth = tf.train.LoggingTensorHook({'lr': lr_op}, every_n_iter=100)
        return tf.estimator.EstimatorSpec(
            mode=mode, loss=loss_op, train_op=train_op, training_hooks=[lth])
# Load IMDB reviews as word-id sequences, keeping the top VOCAB_SIZE words
(X_train, y_train), (X_test, y_test) = tf.keras.datasets.imdb.load_data(num_words=VOCAB_SIZE)
# Sort by length so padded batches waste as little space as possible
X_train, y_train = sort_by_len(X_train, y_train)
X_test, y_test = sort_by_len(X_test, y_test)
estimator = tf.estimator.Estimator(model_fn)
# Train for one pass, then evaluate on the test set, N_EPOCH times
for _ in range(N_EPOCH):
    estimator.train(lambda: train_input_fn(X_train, y_train))
    y_pred = np.fromiter(estimator.predict(lambda: predict_input_fn(X_test)), np.int32)
    print("\nValidation Accuracy: %.4f\n" % (y_pred==y_test).mean())
```
| github_jupyter |
# NLP Learners
This module contains the main class to quickly define a `Learner` (and automatically generates an appropriate model) from your NLP data.
```
# fastai documentation helpers plus the text application and sample data
from fastai.gen_doc.nbdoc import *
from fastai.text import *
from fastai.docs import *
```
## Class RNNLearner
This is the class that handles the whole creation of a `Learner`, be it for a language model or an RNN classifier. It handles the conversion of weights from a pretrained model as well as saving or loading the encoder.
```
# render the RNNLearner signature (the docstring text is supplied in the cell below)
show_doc(RNNLearner, doc_string=False)
```
Creates an `RNNLearner` from `data` and a `model` with a text data using a certain `bptt`. The `split_func` is used to properly split the model in different groups for gradual unfreezing and differential learning rates. Gradient clipping of `clip` is optionally applied. `adjust`, `alpha` and `beta` are all passed to create an instance of `RNNTrainer`.
### Factory methods
```
# render the RNNLearner.classifier factory-method signature
show_doc(RNNLearner.classifier, doc_string=False)
```
Create an RNNLearner with a classifier model from `data`. The model used is the encoder of an [AWD-LSTM](https://arxiv.org/abs/1708.02182) that is built with embeddings of size `emb_sz`, a hidden size of `nh`, and `nl` layers (the `vocab_size` is inferred from the `data`). All the dropouts are put to values that we found worked pretty well and you can control their strength by adjusting `drop_mult`. If `qrnn` is True, the model uses [QRNN cells](https://arxiv.org/abs/1611.01576) instead of LSTMs.
The input texts are fed into that model by bunch of `bptt` and only the last `max_len` activations are considerated. This gives us the backbone of our model. The head then consists of:
- a layer that concatenates the final outputs of the RNN with the maximum and average of all the intermediate outputs (on the sequence length dimension),
- blocks of [nn.BatchNorm1d, nn.Dropout, nn.Linear, nn.ReLU] layers.
The blocks are defined by the `lin_ftrs` and `drops` arguments. Specifically, the first block will have a number of inputs inferred from the backbone arch and the last one will have a number of outputs equal to data.c (which contains the number of classes of the data) and the intermediate blocks have a number of inputs/outputs determined by `lin_ftrs` (of course a block has a number of inputs equal to the number of outputs of the previous block). The dropouts all have a the same value ps if you pass a float, or the corresponding values if you pass a list. Default is to have an intermediate hidden size of 50 (which makes two blocks model_activation -> 50 -> n_classes) with a dropout of 0.1.
```
# build an IMDB classification DataBunch and a classifier learner on top of it
data = get_imdb(classifier=True)
learn = RNNLearner.classifier(data, drop_mult=0.5)
show_doc(RNNLearner.language_model, doc_string=False)
```
Create an RNNLearner with a language model from `data` of a certain `bptt`. The model used is an [AWD-LSTM](https://arxiv.org/abs/1708.02182) that is built with embeddings of size `emb_sz`, a hidden size of `nh`, and `nl` layers (the `vocab_size` is inferred from the `data`). All the dropouts are put to values that we found worked pretty well and you can control their strength by adjusting `drop_mult`. If `qrnn` is True, the model uses [QRNN cells](https://arxiv.org/abs/1611.01576) instead of LSTMs. The flag `tied_weights` control if we should use the same weights for the encoder and the decoder, the flag `bias` controls if the last linear layer (the decoder) has bias or not.
You can specify `pretrained_fnames` if you want to use the weights of a pretrained model. This should be a list of the name of the weight file and the name of the corresponding dictionary. The dictionary is needed because the function will internally convert the embeddings of the pretrained models to match the dictionary of the `data` passed (a word may have a different id for the pretrained model).
```
# language-model learner initialised from WikiText-103 pretrained weights + vocab
data = get_imdb()
learn = RNNLearner.language_model(data, pretrained_fnames=['lstm_wt103', 'itos_wt103'], drop_mult=0.5)
```
### Loading and saving
```
# encoder save/load and pretrained-weight conversion helpers
show_doc(RNNLearner.load_encoder)
show_doc(RNNLearner.save_encoder)
show_doc(RNNLearner.load_pretrained, doc_string=False)
```
Opens the weights in the `wgts_fname` of model diretory of the `RNNLearner` and the dictionary in `itos_fname` then adapts the pretrained weights to the vocabulary of the `data`.
## Utility functions
```
# layer-group split functions and the weight-conversion utility
show_doc(lm_split)
show_doc(rnn_classifier_split)
show_doc(convert_weights, doc_string=False)
```
Convert the `wgts` from an old dictionary `stoi_wgts` (correspondence word to id) to a new dictionary `itos_new` (correspondence id to word).
| github_jupyter |
# The Scientific Python Ecosystem
The Scientific Python Ecosystem is made up of a robust collection of packages that provide functionality for everything from simple numeric arrays to sophisticated machine learning algorithms. In this notebook, we'll introduce the core scientific python packages and some important terminology.

### Outline
- Python
- Numpy
- Scipy
- Pandas
### Tutorial Duration
10 minutes
### Going Further
This notebook is just meant to make sure we all have the same base terminology before moving on to the fun `xarray` and `dask`. If you are new to Python or just want to brush up, you may be interested in the following online resources:
- Scientific Python Lectures: http://scipy-lectures.org/
- Numpy Tutorial: https://docs.scipy.org/doc/numpy-dev/user/quickstart.html
- Scipy Tutorial: https://docs.scipy.org/doc/scipy/reference/tutorial/index.html
- Pandas Tutorials: https://pandas.pydata.org/pandas-docs/stable/tutorials.html
## Numpy
NumPy is the fundamental package for scientific computing with Python. It contains among other things:
- a powerful N-dimensional array object
- sophisticated (broadcasting) functions
- tools for integrating C/C++ and Fortran code
- useful linear algebra, Fourier transform, and random number capabilities
Numpy Documentation: https://docs.scipy.org/doc/numpy/
```
# NumPy quick tour: arrays, broadcasting, aggregation, slicing, dtypes, ufuncs
import numpy as np
x = np.zeros(shape=(4, 5))
x
y = x + 4   # scalar broadcast: adds 4 to every element
y
# random numbers
z = np.random.random(x.shape)
z
# aggregations
z_sum = z.sum(axis=1)   # one sum per row -> shape (4,)
z_sum
# broadcasting
y.transpose() * z_sum
# slicing
z[2:4, ::2] # 2-4 on the first axis, stride of 2 on the second
# data types
# np.int / np.float were deprecated aliases of the Python builtins and were
# removed in NumPy 1.24; use explicit sized dtypes instead
xi = np.array([1, 2, 3], dtype=np.int64) # integer
xi.dtype
xf = np.array([1, 2, 3], dtype=np.float64) # float
xf.dtype
# universal functions (ufuncs, e.g. sin, cos, exp, etc)
np.sin(z_sum)
```
### SciPy
SciPy is a collection of mathematical algorithms and convenience functions built on the Numpy extension of Python. It adds significant power to the interactive Python session by providing the user with high-level commands and classes for manipulating and visualizing data. SciPy includes a number of subpackages covering different scientific computing domains:
| Subpackage | Description|
| ------| ------|
| cluster | Clustering algorithms|
| constants | Physical and mathematical constants|
| fftpack | Fast Fourier Transform routines|
| integrate | Integration and ordinary differential equation solvers|
| interpolate | Interpolation and smoothing splines|
| io | Input and Output|
| linalg | Linear algebra|
| ndimage | N-dimensional image processing|
| odr | Orthogonal distance regression|
| optimize | Optimization and root-finding routines|
| signal | Signal processing|
| sparse | Sparse matrices and associated routines|
| spatial | Spatial data structures and algorithms|
| special | Special functions|
| stats | Statistical distributions and functions
Because SciPy is built directly on Numpy, we'll skip any examples for now. The SciPy API is well documented, with examples of how to use specific subpackages.
SciPy Documentation: https://docs.scipy.org/doc/scipy/
### Pandas
pandas is a Python package providing fast, flexible, and expressive data structures designed to make working with “relational” or “labeled” data both easy and intuitive. It aims to be the fundamental high-level building block for doing practical, real world data analysis in Python.
Pandas Documentation: http://pandas.pydata.org/pandas-docs/stable/
```
import pandas as pd
# This data can also be loaded from the statsmodels package
# import statsmodels as sm
# co2 = sm.datasets.co2.load_pandas().data
# read the CO2 time series; column 0 holds timestamps and becomes a DatetimeIndex
co2 = pd.read_csv('./data/co2.csv', index_col=0, parse_dates=True)
# co2 is a pandas.DataFrame
co2.head() # head just prints out the first few rows
# The pandas DataFrame is made up of an index
co2.index
# and 0 or more columns (in this case just 1 - co2)
# Each column is a pandas.Series
co2['co2'].head()
# label based slicing (both endpoints are inclusive with label slices)
co2['1990-01-01': '1990-02-14']
# aggregations just like in numpy
co2.mean(axis=0)
# advanced grouping/resampling
# here we'll calculate the annual average timeseries of co2 concentrations
# NOTE(review): the 'AS' (year-start) alias is deprecated in pandas >= 2.2 in favour of 'YS' -- confirm target pandas version
co2_as = co2.resample('AS').mean() # AS is for the start of each year
co2_as.head()
# we can also quickly calculate the monthly climatology
# (group all Januaries together, all Februaries together, ...)
co2_climatology = co2.groupby(co2.index.month).mean()
co2_climatology
%matplotlib inline
# and even plot that using pandas and matplotlib
co2_climatology.plot()
```
| github_jupyter |
# AdaBoost
在做重要决定的时候,我们会考虑吸收多个专家的意见而不是一个人的意见,机器学习处理问题的时候也可以采用这种方法.这就是元算法(meta-algorithm)背后的思路.元算法是对其他算法进行组合的一种方式,我们会先建立一个**单层决策树(decision stump)**分类器,实际上它是一个单节点的决策树.AdaBoost算法将应用在上述单层决策树之上,然后将在一个难数据集上应用AdaBoost分类器,以了解该算法是如何迅速超越其他分类器的.
强可学习(strongly learnable)和弱可学习(weakly learnable)
- 强可学习:如果存在一个多项式学习算法,并且它的学习率很高,那么我们就称这个概念为**强可学习**.
- 弱可学习:如果存在一个多项式学习算法,它的学习率只比随机猜测略好,那么就称这个概念为**弱可学习**
AdaBoost algorithm 是将弱可学习变为强可学习算法中最具有代表性的算法.
对着提升方法来说,两个问题需要回答:一是在每一轮如何改变训练数据的权值或概率分布;二是如何将弱分类器组合成一个强分类器.关于第一个问题Adaboost的做法是,提高那些被前一轮弱分类器分类错误的样本权值,而降低那些被正确分类样本权值.这样一来,那些没有得到正确分类的数据,由于其权值的增大而获得更多的"关注".至于第2个问题,adaboost才用加权多数表决法的方法.具体的而言就是,加大分类错误率小的弱分类器权值,使其在表中起更大的作用,减小分类误差率大的弱分类器的权值,在表决中起较小的作用.
可以取看看[boosting和bagging](https://blog.csdn.net/u013709270/article/details/72553282)的不同之处
### AdaBoost 算法(boosting)
假定一个二分类分类的训练集
$T={(x_1,y_1),...,(x_n,y_n)}$
其中每一个点由实例和样本标签组成.实例$x_i \in X\subseteqq R^{n}$,标记$y_i \in Y={-1,+1}$,X是实例空间,Y是标记集合.Adaboost利用以下算法,从训练数据样本中学习一系列弱分类器或基本分类器,并将这些弱分类器线性组合成一个强分类器.
#### AdaBoost
输入:训练样本数据集$T={(x_1,y_1),...,(x_n,y_n)}$,其中$x_i \in X\subseteqq R^{n}$,$y_i \in Y={-1,+1}$;弱学习算法;
输出:最终分类器$G(x)$
(1) 初始化训练数据的权重值分布(在初始化的时候将所有样本的权重赋予相同值)
$D_1=(w_{11},w_{12},...w_{1i},w_{1N}),w_{1i} = \frac{1}{N},i=1,2,...N$
(2) 对$m=1,2,...,M$(需要执行人为定义的M轮)
(a) 使用具有权值分布的$D_m$的训练数据集学习,得到基本分类器
$G_m(x):X\rightarrow {+1,-1}$
(b) 计算$G_m(x)$在训练数据集上的分类错误率
$e_m = P(G_m(x_i) \neq y_i)=\sum_{i=1}^{N}w_{mi}I(G_m(x_i)\neq y_i)$
(c) 计算$G_m(x)$的系数
$\alpha_m=\frac{1}{2}log(\frac{1-e_m}{e_m})$,这里的log是自然对数
(d) 更新训练数据集的权值分布
$D_{m+1}=(w_{m+1,1},w_{m+1,2},...w_{m+1,i},w_{m+1,N})$
$w_{m+1,i}=\frac{w_{m,i}}{Z_{m}}exp(-\alpha_m \cdot y_i \cdot G(x_i)),i=1,2,...N$
这里,$Z_{m}$是规范化因子
$Z_{m}=\sum_{i=1}^{N}w_{mi}exp(-\alpha_m \cdot y_i \cdot G(x_i))$
它使$D_{m+1}$成为一个概率分布
(3) 构建基本分类器的线性组合
$f(x)=\sum_{m=1}^{M}\alpha_m G_{m}(x)$
最终得到分类器
$G(x)=sign(f(x))=sign\begin{pmatrix}
\sum_{m=1}^{M}\alpha_mG_m(x)
\end{pmatrix}$
<p style="color:blue">这里的sign是符号的意思,也就是说如果求出来为负,那么是一类,如果为正数,那么是另一类</p>
**对Adaboost的算法说明:**
步骤(1) 假设训练数据集具有均匀的权值分布,即每个训练样本在基本分类器的学习中作用是相同,这一假设保证第一步能够在原始数据集上学习基本分类器$G_1(x)$.
步骤(2) adaboost反复学习基本分类器,在每一轮$m=1,2,..,M$顺次的执行如下操作:
- (a) 使用当前分布$D_m$加权的训练数据,学习基本分类器$G_m(x)$
- (b) 计算基本分类器$G_m(x)$在加权训练数据集上的分类误差率:
- $e_m = P(G_m(x_i) \neq y_i)=\sum_{i=1}^{N}w_{mi}I(G_m(x_i)\neq y_i)$
- 这里,$w_{mi}$表示第m轮中第i个实例的权值,$\sum_{i=1}^{N}w_{mi}=1$
- (c) 计算基本分类器$G(x)$的系数$\alpha_m$,$\alpha_m$表示$G_m(x)$在最终分类器的权重性质.
- 当$e_m\leqslant \frac{1}{2}$,$\alpha \geqslant 0,$并且$a_m$随着$e_m$的减少而增大,所以分类误差率越小的基本分类器在最终的分类作用越大
- (d) 更新训练数据的权值分布为下一轮做准备.
- $\left\{\begin{matrix}
\frac{w_{mi}}{Z_m}e^{-\alpha}, &G_m(x_i)=y_i \\
\frac{w_{mi}}{Z_m}e^{\alpha}& G_m(x_i)\neq y_i
\end{matrix}\right.$
步骤(3) 线性组合$f(x)$实现M个基本分类器的加权表决.系数$\alpha_m$表示了$G_m(x)$的重要性,但是这里所有的$\alpha_m$之和并不为1.$f(x)$的符号决定了实例x的类,$f(x)$的绝对值表示了分类的确信度.
### AdaBoost的例子
给出如下表,假设多分类器是由$x<v$或者$x>v$产生,其中阈值为v使该分类器在训练数据集上分类错误率最低.

我们自定义选取的阈值为2.5,5.5,8.5
初始化数据权值分布
$D_1=(w_{11},w_{12},...,w_{110})$
$w_{1i}=\frac{1}{10} = 0.1,i=1,2,...,10$
**对m=1,**
(a) 在权值分布为$D_1$的训练数据集上,阈值v依次取值2.5,5.5,8.5时,且分别尝试**符号选择1,-1**时,错误率2.5或者8.5最低,那么我们先选取2.5,故基本分类器为:
$G_1(x)=\left\{\begin{matrix}
1, &x<2.5\\
-1,&x>2.5
\end{matrix}\right.$
**Note:**这里的符号选择是指,当我们选取某个阈值的时候,将其预测的分类分别使用1,-1尝试计算错误率,比如当我们选取阈值2.5时候,我们有两种选择方法:
- $\left\{\begin{matrix}
1, &x<2.5\\
-1,&x>2.5
\end{matrix}\right.$
- $\left\{\begin{matrix}
1, &x>2.5\\
-1,&x<2.5
\end{matrix}\right.$
$G_1(x)$的分类结果为:$[1,1,1,-1,-1,-1,-1,-1,-1,-1]$
$True\;labels:[1,1,1,-1,-1,-1,1,1,1,-1]$
(b) 计算$G_1(x)$在训练集上的错误率为$e_m = P(G_m(x_i) \neq y_i)=\sum_{i=1}^{N}w_{mi}I(G_m(x_i)\neq y_i)=0.1+0.1+0.1$这里分类错误的分别是x=6,7,8
(c) 计算$G_1(x)$的系数:$\alpha_1 = \frac{1}{2}log\frac{1-e_1}{e_1}=0.4236.$
(d) 更新训练样本的权值分布:
$D_2=(w_{21},...w_{210})$
$w_{wi}=\frac{w_{1i}}{Z_m}exp(-\alpha_1 y_i G_1(x_i)), i=1,2,...10$
$D_2=(0.0715,0.0715,0.0715,0.0715,0.0715,0.0715,0.1666,0.1666,0.1666,0.0715)$
$f_1(x)=0.4236G_1(x)$
再使用分类器$sign[f_1(x)]$在训练数据集上进行分类,发现错误的点有3个.
**对m=2,**
(a) 在权值分布为 $D_2$ 的训练数据集上,阈值v依次取值2.5,5.5,8.5时,且分别尝试符号选择1,-1时,8.5最低,故基本分类器为:
$G_2(x)=\left\{\begin{matrix}
1, &x<8.5\\
-1,&x>8.5
\end{matrix}\right.$
$G_2(x)$的分类结果为:$[1,1,1,1,1,1,1,1,1,-1]$
$True\;labels:[1,1,1,-1,-1,-1,1,1,1,-1]$
(b) 计算$G_2(x)$在训练集上的错误率为$e_m = P(G_m(x_i) \neq y_i)=\sum_{i=1}^{N}w_{mi}I(G_m(x_i)\neq y_i)=0.0715+0.0715+0.0715=0.2145$这里分类错误的分别是x=3,4,5
(c) 计算$G_2(x)$的系数:$\alpha_2 = \frac{1}{2}log\frac{1-e_2}{e_2}=0.6496.$
(d) 更新训练样本的权值分布:
$D_3=(0.0455,0.0455,0.0455,0.1667,0.1667,0.1667,0.1060,0.1060,0.1060,0.0455)$
$f_2(x)=0.4236G_1(x)+0.6496G_2(x)$
再使用分类器$sign[f_2(x)]$在训练数据集上进行分类,发现错误的点有3个.
**对m=3,**
(a) 在权值分布为 $D_3$ 的训练数据集上,阈值v依次取值2.5,5.5,8.5时,且分别尝试符号选择1,-1时,5.5最低,故基本分类器为:
$G_3(x)=\left\{\begin{matrix}
-1, &x<5.5\\
1,&x>5.5
\end{matrix}\right.$
$G_3(x)$的分类结果为:$[-1,-1,-1,-1,-1,-1,1,1,1,1]$
$True\;labels:[1,1,1,-1,-1,-1,1,1,1,-1]$
(b) 计算$G_3(x)$在训练集上的错误率为$e_3=0.0455+0.0455+0.0455+0.0455=0.182$这里分类错误的分别是x=9,0,1,2
(c) 计算$G_3(x)$的系数:$\alpha_3 = \frac{1}{2}log\frac{1-e_3}{e_3}=0.7514.$
(d) 更新训练样本的权值分布:
$D_3=(0.125,0.125,0.125,0.102,0.102,0.102,0.065,0.065,0.065,0.125)$
$f_3(x)=0.4236G_1(x)+0.6496G_2(x)+0.7514G_3(x)$
再使用分类器$sign[f_3(x)]$在训练数据集上进行分类,发现错误的点有0个,分类完毕
所以最终的分类器为:
$G(x)=sign[f_3(x)=0.4236G_1(x)+0.6496G_2(x)+0.7514G_3(x)]$
## 下面将用Python代码实现
```
import numpy as np
```
1.首先我们创建一个模拟加载数据集,下面的为此例的数据集
```
def loadData():
    """Return the toy training set and the initial uniform sample weights.

    Returns:
        features: the ten integer sample points 0..9.
        labels: class labels in {-1, +1}.
        weights: initial weight vector, 1/N for every sample.
    """
    features = np.arange(10)
    labels = np.array([1, 1, 1, -1, -1, -1, 1, 1, 1, -1])
    # |label| is 1 everywhere, so this is simply the uniform distribution 1/N
    weights = np.abs(labels) / len(labels)
    return features, labels, weights
```
2.计算错误率,这里计算的错误率一定要走遍所有的阈值和方向的选取,最后返回出该轮最优阈值下的错误率最小的"基本分类器",这里的基本分类器是单层节点的决策分类器
$G_1(x)=\left\{\begin{matrix}
1, &x<thresh\\
-1,&x>thresh
\end{matrix}\right.$
-------------
$G_2(x)=\left\{\begin{matrix}
-1, &x<thresh\\
1,&x>thresh
\end{matrix}\right.$
```
def compute_error(threshs,x,y,W):
    """Scan every candidate threshold and both split directions, returning
    the decision stump with the smallest weighted error.

    Note:
        1. The inputs are never mutated: working copies are used.
        2. The running best error starts at +inf so the first candidate wins.
    Returns:
        bestThresh_error: smallest weighted error found.
        bestGx: predictions of the winning stump.
        bestThresh_list: [threshold, direction tag ("G_x1" or "G_x2")].
    """
    samples = x.copy()
    stump_pos = y.copy()   # direction "G_x1": predicts +1 below the threshold
    stump_neg = y.copy()   # direction "G_x2": predicts +1 above the threshold
    bestThresh_error = np.inf
    bestGx = None
    bestThresh_list = None
    for thresh in threshs:
        above = np.where(samples > thresh)[0]
        below = np.where(samples <= thresh)[0]
        stump_pos[above] = -1.
        stump_pos[below] = 1.
        stump_neg[above] = 1.
        stump_neg[below] = -1.
        # weighted error = sum of weights of the misclassified samples
        err_pos = np.sum(W[np.where(stump_pos != y)])
        err_neg = np.sum(W[np.where(stump_neg != y)])
        # keep the candidate only if it beats the best stump so far;
        # a tie between the two directions resolves to "G_x2"
        if min(err_pos, err_neg) < bestThresh_error:
            if err_pos < err_neg:
                bestThresh_error = err_pos.copy()
                bestGx = stump_pos.copy()
                bestThresh_list = [thresh, "G_x1"]
            else:
                bestThresh_error = err_neg.copy()
                bestGx = stump_neg.copy()
                bestThresh_list = [thresh, "G_x2"]
    return bestThresh_error, bestGx, bestThresh_list
```
3.计算G(x)的参数$\alpha$
```
def compute_alpha(error):
    """Return the classifier vote weight alpha = 0.5 * ln((1 - e) / e).

    The denominator is clipped below at 1e-16 so a perfect stump
    (error == 0) does not divide by zero.

    Returns:
        alpha: voting weight of the weak classifier G(x).
    """
    safe_error = np.maximum(error, 1e-16)
    return 0.5 * np.log((1. - error) / safe_error)
```
4.计算需要求出下一轮权重向量w的分母Z
```
def compute_Z(W,alpha,G_x,y):
    """Return the normalisation factor Z_m (and W, unchanged, for chaining).

    Z_m = sum_i W_i * exp(-alpha * y_i * G(x_i)); dividing the updated
    weights by it keeps them a probability distribution.

    Returns:
        Z: normalisation constant used when computing the next W (a.k.a. D).
        W: the unmodified weight vector, passed through for convenience.
    """
    exponent = np.exp(-alpha * y * G_x)
    return (W * exponent).sum(), W
```
5.计算权重居中$D_{m+1}=(w_{m+1,1},w_{m+1,2},...w_{m+1,i},w_{m+1,N})$
```
def compute_W(W,Z,alpha,y,G_x):
    """Return the re-normalised weight vector for the next boosting round.

    Correctly classified samples (y * G(x) = +1) are scaled down by
    exp(-alpha), misclassified ones (y * G(x) = -1) scaled up by
    exp(+alpha); dividing by Z keeps the weights summing to one.

    Returns:
        W: updated weight vector.
    """
    # same operation order as the textbook formula: (W / Z) * exp(...)
    return W / Z * np.exp(-alpha * y * G_x)
```
6.计算分类器$sign[f(x)]$
```
def compute_fx(alpha,G_x,y,thresh):
    """Evaluate the ensemble classifier sign[f(x)] and test for convergence.

    f(x) = sum_m alpha_m * G_m(x); the sign (with 0 mapped to +1) gives
    the ensemble prediction for every training sample.

    Returns:
        (alpha[1:], thresh, keep_going): keep_going is False once the
        ensemble labels every training sample correctly.  alpha[1:] drops
        the all-zero placeholder row used to seed the alpha cache.
    """
    ensemble = (alpha * G_x).sum(axis=0)
    negative = ensemble < 0
    ensemble[negative] = -1     # sign step: negatives -> -1,
    ensemble[~negative] = 1     # zeros and positives -> +1
    print("fx's result: ",ensemble)
    if np.array_equal(ensemble, y):
        print("划分结束")
        return alpha[1:], thresh, False
    print("继续划分")
    return alpha[1:], thresh, True
```
7.实现主函数
```
def main(epoch):
    """
    AdaBoost training driver for the toy data set.

    Runs up to `epoch` boosting rounds: each round fits the best decision
    stump under the current sample weights, records its vote weight alpha,
    re-weights the samples, and stops early once the ensemble classifies
    every training sample correctly.

    Returns:
        (ALPHA, THRESH): stump vote weights (placeholder row removed) and
        the [threshold, direction] list for each round.
    """
    x,y,W = loadData()
    cache_alpha = np.zeros((1,1)) # cache alpha, this code must write.because every loop,we need append alpha value.
    # The same reason as above
    cache_Gx = np.zeros((1,len(x)))
    cache_thresh = []
    # the three hand-picked candidate thresholds from the worked example
    threshs = [2.5,5.5,8.5]
    for i in range(epoch):
        # best stump under the current weight distribution
        res_error,res_G_x,bestThresh_list = compute_error(threshs,x,y,W)
        bestThresh = bestThresh_list[0]
        cache_thresh.append(bestThresh_list)
        print("error is: ",res_error,"G_x is: ",res_G_x,"best thresh: ",bestThresh)
        cache_Gx = np.vstack((cache_Gx,np.array([res_G_x])))
        alpha = compute_alpha(res_error)
        cache_alpha = np.vstack((cache_alpha,np.array([[alpha]])))
        # re-normalised sample weights for the next round
        Z,W = compute_Z(W=W,alpha=alpha,G_x=res_G_x,y=y)
        W = compute_W(W,Z,alpha,y,res_G_x)
        print("W is : ",W)
        # early exit when sign[f(x)] reproduces every label
        ALPHA,THRESH,result = compute_fx(alpha=cache_alpha,G_x=cache_Gx,y=y,thresh=cache_thresh)
        if not result:
            return ALPHA,THRESH
    else:
        # for/else: runs when all rounds finish without converging
        # NOTE(review): if epoch == 0, ALPHA/THRESH are unbound here -- confirm callers always pass epoch >= 1
        print('程序执行完成,没有找到error=0的alpha.')
        return ALPHA,THRESH
# run up to 20 boosting rounds on the toy data (converges after 3)
alphas,threshs = main(epoch=20)
print("alphas: ",alphas)
print("threshs: ",threshs)
```
由结果我们可以看出代码的结果是和例子中求出来的是一样的,阈值分别2.5采用第一种分割方式,8.5也是采用第一种分割方式,5.5采用第二种分割方式.
$\alpha_1=0.42364893,\alpha_2=0.64964149,\alpha_3=0.7520387$
实际上我们可以采取0.5步长的阈值进行测试.
```
def main_test(epoch):
    """
    Same AdaBoost driver as `main`, but scanning a dense grid of candidate
    thresholds (step 0.5 across the data range) instead of the hand-picked
    list.

    Returns:
        (ALPHA, THRESH): stump vote weights (placeholder row removed) and
        the [threshold, direction] list for each round.
    """
    x,y,W = loadData()
    cache_alpha = np.zeros((1,1)) # cache alpha, this code must write.because every loop,we need append alpha value.
    # The same reason as above
    cache_Gx = np.zeros((1,len(x)))
    cache_thresh = []
    # candidate thresholds: every 0.5 between min(x) and max(x)
    threshs = np.arange(np.min(x),np.max(x),0.5)
    for i in range(epoch):
        # best stump under the current weight distribution
        res_error,res_G_x,bestThresh_list = compute_error(threshs,x,y,W)
        bestThresh = bestThresh_list[0]
        cache_thresh.append(bestThresh_list)
        print("error is: ",res_error,"G_x is: ",res_G_x,"best thresh: ",bestThresh)
        cache_Gx = np.vstack((cache_Gx,np.array([res_G_x])))
        alpha = compute_alpha(res_error)
        cache_alpha = np.vstack((cache_alpha,np.array([[alpha]])))
        # re-normalised sample weights for the next round
        Z,W = compute_Z(W=W,alpha=alpha,G_x=res_G_x,y=y)
        W = compute_W(W,Z,alpha,y,res_G_x)
        print("W is : ",W)
        # early exit when sign[f(x)] reproduces every label
        ALPHA,THRESH,result = compute_fx(alpha=cache_alpha,G_x=cache_Gx,y=y,thresh=cache_thresh)
        if not result:
            return ALPHA,THRESH
    else:
        # for/else: runs when all rounds finish without converging
        # NOTE(review): if epoch == 0, ALPHA/THRESH are unbound here -- confirm callers always pass epoch >= 1
        print('程序执行完成,没有找到error=0的alpha.')
        return ALPHA,THRESH
alphas,threshs = main_test(epoch=10)
print("alphas: ",alphas)
print("threshs: ",threshs)
```
你会发现阈值结果和上面的会有些出入,但是也是正确的,因为化小数的阈值和化整数的阈值对于这个例子来说都是一样的
### 预测函数
$G(x)=sign[f(x)]=\alpha_1G_1(x) + \alpha_2G_2(x) + \cdots + \alpha_nG_n(x)$
```
def predict(test_x,alphas,threshs):
    """Predict labels for `test_x` with the trained AdaBoost ensemble.

    Args:
        test_x: 1-D array of feature values to classify.
        alphas: column vector of weak-classifier vote weights, shape (m, 1).
        threshs: list of [threshold, direction] pairs, one per weak
            classifier ("G_x1" predicts +1 below the threshold,
            "G_x2" predicts +1 above it).

    Returns:
        fx: array of predicted labels in {-1., 1.}.  (The original only
        printed the prediction and returned None; returning it makes the
        function usable programmatically, and callers that ignored the
        return value are unaffected.)
    """
    G_x = np.zeros(shape=(1,test_x.shape[0]))  # placeholder row, stripped below
    for thresh_ in threshs:
        G_x_single = np.ones(shape=(test_x.shape))
        index_gt = np.where(test_x>thresh_[0])[0]
        index_lt = np.where(test_x<=thresh_[0])[0]
        if thresh_[1] == "G_x1":
            # "G_x1": +1 below the threshold, -1 above
            G_x_single[index_gt] = -1.
            G_x_single[index_lt] = 1.
        else:
            # "G_x2": +1 above the threshold, -1 below
            G_x_single[index_gt] = 1.
            G_x_single[index_lt] = -1.
        G_x = np.vstack((G_x,G_x_single))  # stacking hoisted out of both branches
    # compute fx = sum_m alpha_m * G_m(x), then take its sign (0 -> -1 here)
    fx = np.multiply(alphas,G_x[1:]).sum(axis=0)
    fx[fx<=0] = -1.
    fx[fx>0] = 1
    print(fx)
    return fx
```
我们来看看测试的结果,我们分别尝试标签为1和-1的x的值

```
# x=0 has true label +1, x=5 has true label -1
test_x = np.array([0])
predict(test_x=test_x,alphas=alphas,threshs=threshs)
test_x = np.array([5])
predict(test_x=test_x,alphas=alphas,threshs=threshs)
```
再来尝试一些其他的值
```
# a value far outside the training range still gets a label
test_x = np.array([100])
predict(test_x=test_x,alphas=alphas,threshs=threshs)
```
最后我们来写出完整版的测试函数,返回训练样本的正确率
```
def predict_complete(test_x,test_y,alphas,threshs):
    """Predict labels for `test_x` and report accuracy against `test_y`.

    Args:
        test_x: 1-D array of feature values.
        test_y: true labels in {-1, +1}, same length as test_x.
        alphas: column vector of weak-classifier vote weights, shape (m, 1).
        threshs: list of [threshold, direction] pairs per weak classifier.

    Returns:
        accurate: fraction of correctly classified samples.  (The original
        only printed this value and returned None; returning it makes the
        function usable programmatically, and callers that ignored the
        return value are unaffected.)
    """
    G_x = np.zeros(shape=(1,test_x.shape[0]))  # placeholder row, stripped below
    for thresh_ in threshs:
        G_x_single = np.ones(shape=(test_x.shape))
        index_gt = np.where(test_x>thresh_[0])[0]
        index_lt = np.where(test_x<=thresh_[0])[0]
        if thresh_[1] == "G_x1":
            # "G_x1": +1 below the threshold, -1 above
            G_x_single[index_gt] = -1.
            G_x_single[index_lt] = 1.
        else:
            # "G_x2": +1 above the threshold, -1 below
            G_x_single[index_gt] = 1.
            G_x_single[index_lt] = -1.
        G_x = np.vstack((G_x,G_x_single))  # stacking hoisted out of both branches
    # compute fx = sum_m alpha_m * G_m(x), then take its sign (0 -> -1 here)
    fx = np.multiply(alphas,G_x[1:]).sum(axis=0)
    fx[fx<=0] = -1.
    fx[fx>0] = 1
    print("predict fx is : ",fx)
    accurate = np.sum(fx==test_y)/len(test_y)
    print("accurate is : ",accurate)
    return accurate
# evaluate the final ensemble on the full training set (accuracy should be 1.0)
test_x = np.array([0,1,2,3,4,5,6,7,8,9])
test_y = np.array([1,1,1,-1,-1,-1,1,1,1,-1])
predict_complete(test_x=test_x,test_y=test_y,alphas=alphas,threshs=threshs)
```
### 现在使用大样本多特征进行测试
这里使用的样本是猫与非猫,其中猫是1,非猫是0
```
import h5py
import matplotlib.pyplot as plt
def load_data():
    '''
    Load the cat / non-cat picture dataset from the bundled HDF5 files.
    make sure you have the .h5 files in your dataset directory.

    Returns:
        train_set_x_orig: raw training images.
        train_set_y_orig: training labels, reshaped to (1, m_train).
        test_set_x_orig: raw test images.
        test_set_y_orig: test labels, reshaped to (1, m_test).
        classes: array of class names.
    '''
    # use context managers so the HDF5 file handles are closed
    # (the original left both files open -- a resource leak)
    with h5py.File('data_set/train_catvnoncat.h5', "r") as train_dataset:
        train_set_x_orig = np.array(train_dataset["train_set_x"][:]) # your train set features
        train_set_y_orig = np.array(train_dataset["train_set_y"][:]) # your train set labels
    with h5py.File('data_set/test_catvnoncat.h5', "r") as test_dataset:
        test_set_x_orig = np.array(test_dataset["test_set_x"][:]) # your test set features
        test_set_y_orig = np.array(test_dataset["test_set_y"][:]) # your test set labels
        classes = np.array(test_dataset["list_classes"][:]) # the list of classes
    # labels come in as flat vectors; reshape to row vectors of shape (1, m)
    train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))
    test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))
    return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes
train_x_orig, train_y, test_x_orig, test_y, classes = load_data()
# flatten each image into a column vector and scale pixel values to [0, 1]
train_x = train_x_orig.reshape(train_x_orig.shape[0],-1).T / 255
test_x = test_x_orig.reshape(test_x_orig.shape[0],-1).T / 255
print('Train_x\'s shape:{}'.format(train_x.shape))
print('Test_x\'s shape:{}'.format(test_x.shape))
print("Train_y's shape:{}".format(train_y.shape))
print("Test_y's shape:{}".format(test_y.shape))
# sanity-check a couple of samples visually
index = 2
plt.imshow(train_x_orig[index])
print ("y = " + str(train_y[0,index]) + ". It's a " + classes[train_y[0,index]].decode("utf-8") + " picture.")
index = 3
plt.imshow(train_x_orig[index])
print ("y = " + str(train_y[0,index]) + ". It's a " + classes[train_y[0,index]].decode("utf-8") + " picture.")
```
根据AdaBoost的标签规则,更改y标签的值,从0,1改成-1,1.也就是说1为猫,-1为非猫。
```
# AdaBoost expects labels in {-1, +1}: remap the 0 ("non-cat") class to -1
train_y[train_y==0] = -1
print("New labels train_y is : ",train_y)
test_y[test_y==0] = -1
print("New labels test_y is : ",test_y)
def compute_error_big_data(threshs,x,y,W):
    """Find the best decision stump over every (threshold, feature, direction).

    Args (layout matching the callers in this notebook):
        threshs: iterable of candidate thresholds.
        x: feature matrix of shape (n_features, n_samples).
        y: label row vector of shape (1, n_samples), values in {-1, +1}.
        W: sample-weight row vector, same shape as y.

    Returns:
        dict with keys 'Thresh_error' (minimal weighted error), 'bestGx'
        (winning predictions), 'bestThresh', 'Feature_number' and
        'choose_split_method' ("G_x1": +1 below threshold, "G_x2": +1 above).
    """
    stump_pos = y.copy()   # reused scratch buffers, fully overwritten each trial
    stump_neg = y.copy()
    n_features = x.shape[0]
    bestParameters = {'Thresh_error': np.inf}
    for thresh in threshs:
        for feature in range(n_features):
            # try splitting on this single feature at this threshold
            above = np.where(x[feature, :] > thresh)[0]
            below = np.where(x[feature, :] <= thresh)[0]
            stump_pos[:, above] = -1.
            stump_pos[:, below] = 1.
            stump_neg[:, above] = 1.
            stump_neg[:, below] = -1.
            err_pos = np.sum(W[stump_pos != y])
            err_neg = np.sum(W[stump_neg != y])
            # a tie between the two directions resolves to "G_x2", as before
            if err_pos < err_neg:
                candidate_err, candidate_Gx, method = err_pos, stump_pos, "G_x1"
            else:
                candidate_err, candidate_Gx, method = err_neg, stump_neg, "G_x2"
            if candidate_err < bestParameters['Thresh_error']:
                bestParameters['Thresh_error'] = candidate_err.copy()
                bestParameters['bestGx'] = candidate_Gx.copy()
                bestParameters['bestThresh'] = thresh
                bestParameters['Feature_number'] = feature
                bestParameters['choose_split_method'] = method
    return bestParameters
def compute_fx_for_big(alpha,G_x,y):
    """Evaluate the ensemble sign[f(x)] and report whether to keep boosting.

    f(x) = sum_m alpha_m * G_m(x) over the cached stump predictions.

    Returns:
        (alpha[1:], keep_going): keep_going is False once every training
        label is reproduced; alpha[1:] drops the zero placeholder row
        used to seed the alpha cache.
    """
    ensemble = (alpha * G_x).sum(axis=0, keepdims=True)
    ensemble = np.sign(ensemble)
    if np.array_equal(ensemble, y):
        print("划分结束")
        return alpha[1:], False
    return alpha[1:], True
def main_big_data(X,Y,epoch_num):
    """
    AdaBoost training driver for the (features, samples)-shaped cat dataset.

    X: feature matrix of shape (n_features, n_samples); Y: label row vector
    of shape (1, n_samples) with values in {-1, +1}.

    Returns:
        (ALPHA, cache_Feature_number, cache_bestThresh,
        cache_choose_split_method): everything needed by `predict` to
        re-create each weak classifier.
    """
    # uniform initial weights: |label| is 1 everywhere, so this is 1/m each
    W = np.abs(Y)/Y.shape[1]
    # candidate thresholds: every 0.1 across the (normalised) pixel range
    threshs = np.arange(np.min(X),np.max(X),0.1)
    cache_alphas = np.zeros(shape=(1,1))
    cache_Gx = np.zeros(Y.shape)
    cache_Feature_number = []
    cache_bestThresh = []
    cache_choose_split_method = []
    for epoch in range(epoch_num):
        # select best thresh,error,Gx
        bestParameters = compute_error_big_data(threshs=threshs,x=X,y=Y,W=W)
        Thresh_error = bestParameters['Thresh_error']
        bestGx = bestParameters['bestGx']
        bestThresh = bestParameters['bestThresh']
        Feature_number = bestParameters['Feature_number']
        choose_split_method = bestParameters['choose_split_method']
        # cache parameters
        cache_Gx = np.vstack((cache_Gx,bestGx))
        cache_Feature_number.append(Feature_number)
        cache_bestThresh.append(bestThresh)
        cache_choose_split_method.append(choose_split_method)
        # compute alpha
        alpha = compute_alpha(error=Thresh_error)
        cache_alphas = np.vstack((cache_alphas,alpha))
        # update weigths
        Z,W = compute_Z(W=W,alpha=alpha,G_x=bestGx,y=Y)
        W = compute_W(W=W,Z=Z,alpha=alpha,y=Y,G_x=bestGx)
        # early exit once the ensemble reproduces every training label
        ALPHA,result = compute_fx_for_big(alpha=cache_alphas,G_x=cache_Gx,y=Y)
        if not result:
            return ALPHA,cache_Feature_number,cache_bestThresh,cache_choose_split_method
    else:
        # for/else: runs when all rounds finish without converging
        # NOTE(review): if epoch_num == 0, ALPHA is unbound here -- confirm callers always pass epoch_num >= 1
        print('程序执行完成,没有找到error=0的alpha.')
        return ALPHA,cache_Feature_number,cache_bestThresh,cache_choose_split_method
# train on the cat dataset for up to 10 boosting rounds
ALPHA,cache_Feature_number,cache_bestThresh,cache_choose_split_method = main_big_data(X=train_x,Y=train_y,epoch_num=10)
print("alphs is : ",ALPHA)
print("Feature_number is :",cache_Feature_number)
print("bestThresh is: ",cache_bestThresh)
print("choose split method is :",cache_choose_split_method)
def predict(test_X,test_Y,alphas,Feature_number,bestThresh,choose_split_method):
    """Compute the boosted ensemble's prediction accuracy on (test_X, test_Y).

    Args (layout matching main_big_data's outputs):
        test_X: feature matrix, shape (n_features, n_samples).
        test_Y: label row vector, shape (1, n_samples).
        alphas: weak-classifier vote weights, shape (m, 1).
        Feature_number / bestThresh / choose_split_method: per-classifier
            split feature index, threshold and direction tag.

    Returns:
        accurate: fraction of samples whose sign[f(x)] matches test_Y.
    """
    stacked = np.zeros(test_Y.shape)   # placeholder row, dropped before summing
    for m in range(alphas.shape[0]):
        votes = np.ones(shape=(test_Y.shape))
        # each weak classifier splits on exactly one feature
        above = np.where(test_X[Feature_number[m], :] > bestThresh[m])[0]
        below = np.where(test_X[Feature_number[m], :] <= bestThresh[m])[0]
        if choose_split_method[m] == "G_x1":
            votes[:, above] = -1.
            votes[:, below] = 1.
        else:
            votes[:, above] = 1.
            votes[:, below] = -1.
        stacked = np.vstack((stacked, votes))
    # f(x) = sum_m alpha_m * G_m(x); sign gives the predicted label
    fx = np.sign(np.multiply(alphas, stacked[1:]).sum(axis=0, keepdims=True))
    return np.sum(fx == test_Y) / test_Y.shape[1]
# training-set accuracy of the boosted ensemble
accurate = predict(test_X=train_x,test_Y=train_y,alphas=ALPHA,Feature_number=cache_Feature_number,
                   bestThresh=cache_bestThresh,choose_split_method=cache_choose_split_method)
print("The train accurate is : ",accurate)
# held-out test-set accuracy
fx = predict(test_X=test_x,test_Y=test_y,alphas=ALPHA,Feature_number=cache_Feature_number,
             bestThresh=cache_bestThresh,choose_split_method=cache_choose_split_method)
print("The f(x) is : ",fx)
def different_epoch_num():
    """Sweep the number of boosting rounds (1, 6, 11, ..., 46) and print
    train / test accuracy for each setting."""
    plot_accurate = []  # NOTE(review): collected list is never used -- accuracies are only printed
    for i in range(1,50,5):
        ALPHA,cache_Feature_number,cache_bestThresh,cache_choose_split_method = main_big_data(X=train_x,Y=train_y,epoch_num=i)
        accurate = predict(test_X=train_x,test_Y=train_y,alphas=ALPHA,Feature_number=cache_Feature_number,
                           bestThresh=cache_bestThresh,choose_split_method=cache_choose_split_method)
        print("After iter:{}, The Train set accurate is : ".format(i),accurate)
        accurate = predict(test_X=test_x,test_Y=test_y,alphas=ALPHA,Feature_number=cache_Feature_number,
                           bestThresh=cache_bestThresh,choose_split_method=cache_choose_split_method)
        print("After iter:{}, The Test set accurate is : ".format(i),accurate)
different_epoch_num()
```
| 迭代次数 | 训练样本正确率 | 测试样本正确率 |
| ------ | ------- | ------ |
| 1 | 0.68 | 0.58 |
| 6 | 0.77 | 0.58 |
| 11 | 0.88 | 0.64 |
| 16 | 0.89 | 0.58 |
| 21 | 0.96 | 0.66 |
| 26 | 0.97 | 0.58 |
| 31 | 0.99 | 0.58 |
| 36 | 0.99 | 0.6 |
| 41 | 0.99 | 0.66 |
| 46 | 1.0 | 0.58 |
可以看到随着迭代次数的增加,训练样本的正确率逐步升高,测试样本的正确率先升高然后降低(或者说在一定范围浮动),很明显在最大分类器46以后,再进行更多次数的迭代是毫无意义的,应该46已经是最大分类器的数量(实际上这里的最大分类器是44,也就是说训练样本误差为0),测试样本的正确率在分类器44以后都维持在0.58.所以按照上面的表格,我们应该选择分类器为41个左右的弱分类器所组成的强分类器是最好的
另外也可以看出随着分类器的个数的增加到一定限度,算法就开始呈现过拟合的状态.
实际在在运用过程中,不能总是想着训练样本的误差为0,因为这样模型就容易过拟合,再者,对于基本分类器$G_i(x)$,我们也可以选择其他的模型,比如:
Bagging + 决策树 = 随机森林
AdaBoost + 决策树 = 提升树
Gradient Boosting + 决策树 = GBDT
| github_jupyter |
# Experiments with the bivariate Gaussian
In this notebook, we'll get a feel for the two-dimensional Gaussian by varying the covariance matrix, drawing random samples from the resulting distribution, and plotting contour lines of the density.
We begin, as always, by loading in standard packages.
```
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import multivariate_normal
# installing packages for interactive graphs
import ipywidgets as widgets
from IPython.display import display
from ipywidgets import interact, interactive, fixed, interact_manual, IntSlider
```
The function **bivariate_plot** takes as input three parameters that uniquely specify a 2x2 covariance matrix:
* `var1`, the variance of the first feature, `x1`
* `var2`, the variance of the second feature, `x2`
* `corr`, the correlation between `x1` and `x2`
It then depicts a 2-d Gaussian whose mean is the origin and whose covariance matrix is given by these parameters. The display consists of 100 points randomly sampled from the Gaussian, as well as three representative contour lines of the density.
The first line below, **interact_manual**, sets up an interactive widget that allows you to specify the parameters to **bivariate_plot** using sliders, and provides a button to execute the function.
```
@interact_manual(var1 = (1,9), var2 = (1,9), corr=(-0.95,0.95,0.05))
def bivariate_plot(var1, var2, corr):
    """Draw 100 samples and three density contours of a 2-d Gaussian with
    mean at the origin, variances `var1`/`var2` and correlation `corr`."""
    #
    # Set parameters of Gaussian
    mu = [0,0]
    covariance = corr * np.sqrt(var1) * np.sqrt(var2)
    sigma = [[var1,covariance], [covariance,var2]]
    np.set_printoptions(precision=2)
    # (fixed: the original used Python 2 `print` statements, a SyntaxError in Python 3)
    print("Covariance matrix:")
    print(np.around(sigma, decimals=2))
    #
    # Draw samples from the distribution
    n = 100
    x = np.random.multivariate_normal(mu,sigma,size=n)
    #
    # Set up a plot for the samples and the density contours
    lim = 10.0
    plt.xlim(-lim, lim) # limit along x1-axis
    plt.ylim(-lim, lim) # limit along x2-axis
    # plt.axes() creates a brand-new Axes on recent Matplotlib; use the current one
    plt.gca().set_aspect('equal', 'datalim')
    #
    # Plot the sampled points as blue dots
    plt.plot(x[:,0], x[:,1], 'bo')
    #
    # To display contour lines, first define a fine grid
    res = 200
    xg = np.linspace(-lim, lim, res)
    yg = np.linspace(-lim, lim, res)
    # Compute the log-density on the whole grid in one vectorised call
    # (replaces the original O(res^2) Python double loop);
    # z[j, i] = logpdf at (xg[i], yg[j]), matching the loop's layout
    rv = multivariate_normal(mean=mu, cov=sigma)
    grid_x, grid_y = np.meshgrid(xg, yg)
    z = rv.logpdf(np.dstack((grid_x, grid_y)))
    sign, logdet = np.linalg.slogdet(sigma)
    # log of the density's peak: -0.5 * (d*log(2*pi) + log|Sigma|), d = 2 (6.28 approximates 2*pi)
    normalizer = -0.5 * (2 * np.log(6.28) + sign * logdet)
    # Now plot a few contour lines of the density
    for offset in range(1,4):
        plt.contour(xg,yg,z, levels=[normalizer - offset], colors='r', linewidths=2.0, linestyles='solid')
    # Finally, display
    plt.show()
```
## <font color="magenta">Quick exercise:</font>
Experiment with the widget above to get a sense for how the different parameters influence the shape of the Gaussian. In particular, figure out the answers to the following questions.
* Under what conditions does the Gaussian have contour lines that are perfect circles?
* Under what conditions is the Gaussian tilted upwards?
* Under what conditions is the Gaussian tilted downwards?
* Suppose the Gaussian has no tilt, and the contour lines are stretched vertically, so that the vertical stretch is twice the horizontal stretch. What can we conclude about the covariance matrix?
*Note down the answers to these questions: you will enter them later, as part of this week's assignment.*
| github_jupyter |
## Computer Vision Interpret
[`vision.interpret`](/vision.interpret.html#vision.interpret) is the module that implements custom [`Interpretation`](/train.html#Interpretation) classes for different vision tasks by inheriting from it.
```
from fastai.gen_doc.nbdoc import *
from fastai.vision import *
from fastai.vision.interpret import *
show_doc(SegmentationInterpretation)
show_doc(SegmentationInterpretation.top_losses)
show_doc(SegmentationInterpretation._interp_show)
show_doc(SegmentationInterpretation.show_xyz)
show_doc(SegmentationInterpretation._generate_confusion)
show_doc(SegmentationInterpretation._plot_intersect_cm)
```
Let's show how [`SegmentationInterpretation`](/vision.interpret.html#SegmentationInterpretation) can be used once we train a segmentation model.
### train
```
camvid = untar_data(URLs.CAMVID_TINY)
path_lbl = camvid/'labels'
path_img = camvid/'images'
# class names for the segmentation masks, one per integer code
codes = np.loadtxt(camvid/'codes.txt', dtype=str)
# image -> mask filename mapping: masks live under labels/ as "<stem>_P<ext>"
get_y_fn = lambda x: path_lbl/f'{x.stem}_P{x.suffix}'
data = (SegmentationItemList.from_folder(path_img)
        .split_by_rand_pct()
        .label_from_func(get_y_fn, classes=codes)
        .transform(get_transforms(), tfm_y=True, size=128)  # tfm_y: apply the same transforms to the masks
        .databunch(bs=16, path=camvid)
        .normalize(imagenet_stats))
data.show_batch(rows=2, figsize=(7,5))
learn = unet_learner(data, models.resnet18)
learn.fit_one_cycle(3,1e-2)
learn.save('mini_train')
jekyll_warn("Following results will not make much sense with this underperforming model but functionality will be explained with ease")
```
### interpret
```
# build the interpretation object from the fitted learner (runs inference on the validation set)
interp = SegmentationInterpretation.from_learner(learn)
```
Since `FlattenedLoss of CrossEntropyLoss()` is used we reshape and then take the mean of pixel losses per image. In order to do so we need to pass `sizes:tuple` to `top_losses()`
```
# per-image mean pixel loss; sizes must match the training resolution (128x128)
top_losses, top_idxs = interp.top_losses(sizes=(128,128))
top_losses, top_idxs
plt.hist(to_np(top_losses), bins=20);plt.title("Loss Distribution");
```
Next, we can generate a confusion matrix similar to what we usually have for classification. Two confusion matrices are generated: `mean_cm` which represents the global label performance and `single_img_cm` which represents the same thing but for each individual image in dataset.
Values in the matrix are calculated as:
\begin{align}
\ CM_{ij} & = IOU(Predicted , True | True) \\
\end{align}
Or in plain english: ratio of pixels of predicted label given the true pixels
```
learn.data.classes
# mean_cm: dataset-wide confusion matrix; single_img_cm: one matrix per validation image
mean_cm, single_img_cm = interp._generate_confusion()
mean_cm.shape, single_img_cm.shape
```
`_plot_intersect_cm` first displays a dataframe showing per class score using the IOU definition we made earlier. These are the diagonal values from the confusion matrix which is displayed after.
`NaN` indicate that these labels were not present in our dataset, in this case validation set. As you can imagine it also helps you to maybe construct a better representing validation set.
```
# per-class scores (diagonal of the IOU-style confusion matrix), then the full matrix
df = interp._plot_intersect_cm(mean_cm, "Mean of Ratio of Intersection given True Label")
```
Next let's look at the single worst prediction in our dataset. It looks like this dummy model just predicts everything as `Road` :)
```
# inspect the single worst-scoring validation image
i = top_idxs[0]
df = interp._plot_intersect_cm(single_img_cm[i], f"Ratio of Intersection given True Label, Image:{i}")
```
Finally we will visually inspect this single prediction
```
# show input / target / prediction side by side for image i
interp.show_xyz(i, sz=15)
jekyll_warn("""With matplotlib colormaps the max number of unique qualitative colors is 20.
So if len(classes) > 20 then close class indexes may be plotted with the same color.
Let's fix this together :)""")
# class-name -> index mapping used by the interpreter
interp.c2i
show_doc(ObjectDetectionInterpretation)
jekyll_warn("ObjectDetectionInterpretation is not implemented yet. Feel free to implement it :)")
```
## Undocumented Methods - Methods moved below this line will intentionally be hidden
## New Methods - Please document or move to the undocumented section
| github_jupyter |
# Graphical User Interface programming in Python
Goal: Writing a simple Graphical User Interface (GUI) with PyQt based on available widgets.
## Exercice
The exercice for this training is to create a GUI for calculating the diffraction image obtained from a 2D cristal composed on a square of NxN atoms using the Laue formula.
```python
def laue_image(ncells, h, k, oversampling):
[...]
```
The `laue_image` function calculates the diffraction image around the Bragg peak (`H`, `K`), actually (H-0.5…H+0.5, K-0.5…K+0.5) of a `ncells`x`ncells` 2D square cristal considering an `oversampling` factor.
This oversampling factor should be at least 2 to have 2 points per peak.
The Python/numpy implementation is available here: [laue.py](laue.py)
```
%matplotlib inline
from matplotlib import pyplot
from matplotlib.colors import LogNorm
import laue
# diffraction image of a 10x10-cell crystal around Bragg peak (H=0, K=4),
# with the minimum oversampling of 2 points per peak
result = laue.laue_image(ncells=10, h=0, k=4, oversampling=2)
pyplot.imshow(result, norm=LogNorm())
```
### Goal of the exercice
Write a GUI similar to this sketch to execute the Laue function and save its result :

# Qt and PyQt overview
### Qt
[Qt](https://doc.qt.io/qt-5/index.html) is a free and open-source widget toolkit for creating graphical user interfaces.
As a victim of its success, it is also used for developing cross-platform applications.
Written in C++, we are now at the 5th version, awaiting the 6th one. The first version was released in 1995!
The Qt company is employing ~300 people

Qt is divided into several basic modules. The main modules for GUI are:
* [Qt Core](https://doc.qt.io/qt-5/qtcore-index.html): Provides core non-GUI functionality, like signal and slots, properties, base classes of item models, serialization, etc.
* [Qt Gui](https://doc.qt.io/qt-5/qtgui-index.html): Extends QtCore with GUI functionality: Events, windows and screens, OpenGL and raster-based 2D painting, images.
* [Qt Widgets](https://doc.qt.io/qt-5/qtwidgets-index.html): Provides ready to use Widgets for your application, including also graphical elements for your UI.
Besides those modules that you will use today, Qt is offering modules for web, sql, multimedia...
### PyQt
Due to Qt's success and Python's popularity, Python bindings have been developed for Qt: PyQt5 for Qt5, PyQt4 for Qt4.
This lets users access the power of Qt through Python's higher-level abstractions.
**PyQt4 and Qt v4 are no longer supported**
* [pyqt5 on pypi](https://pypi.org/project/PyQt5/)
* [pyqt5 official site](https://www.riverbankcomputing.com/software/pyqt/)
```
from PyQt5 import QtCore
from PyQt5 import QtGui
from PyQt5 import QtWidgets
from PyQt5 import Qt
```
#### Documentation
Due to the documentation quality of PyQt5 and Qt, we usually refer to the [Qt documentation](https://doc.qt.io/qt-5/).
Do not worry, most of the API is the same.
#### Coding style
For Qt related classes you should use the [Qt coding style](https://wiki.qt.io/Qt_Coding_Style)
* Variables and functions start with a lower-case letter. Each consecutive word in a variable's name starts with an upper-case letter
* Classes always start with an upper-case letter.
* Acronyms are camel-cased (e.g. QXmlStreamReader, not QXMLStreamReader).
``` python
class MyWidget(Qt.QWidget):
def myFunction(self, myInput):
self.myInput = myInput
```
### PySide2
Nowadays, the Qt company has also developed its own python binding: [PySide2](https://wiki.qt.io/Qt_for_Python).
If you want a comparison between the two, you can read https://machinekoder.com/pyqt-vs-qt-for-python-pyside2-pyside/
*Usually people use wrappers like [QtPy](https://pypi.org/project/QtPy/) to use either PySide2 or PyQt5.*
Information on non commercial licenses:
* PySide2 is [LGPL](https://www.gnu.org/licenses/lgpl-3.0.en.html)
* PyQt5 is [GNU GPL v3](https://www.gnu.org/licenses/gpl-3.0.en.html) (more restrictive than LGPL, basically code using it should be licensed under a compatible license)
# Introduction to GUI programming
A GUI is an **interface** between users and a computer system/program.
It provides an **interaction** between both (mutual action/reaction relation).
The computer system/program reacts upon user actions and provides feedbacks...
## Human factors
For this interaction to be efficient, human factors and ergonomics are key.
This is not covered in this training. We will only introduce how to program a GUI application.
Yet, you can ask yourself a few questions:
- Who are the expected users? E.g., beginners or experts, frequency of usage.
- Does the GUI provide hints on how to proceed?
E.g., do not expect users to find a function that is only available as a keyboard shortcut.
## Event-driven programming
The GUI is waiting for user inputs to react upon (or for new information from the processing to be available).
### Polling versus events
Instead of checking if something happened on a regular basis ("polling"), a GUI program is waiting for notifications of user input or processing information.
This is based on the callback mechanism: "Don't call us, we'll call you back" (Hollywood principle).
This is the usual paradigm of GUI libraries.
### Event loop
The application execution is split in 2 stages:
- Initialization: Prepare the application, register callbacks to get notified.
- Execution of an event loop: Wait for events and process them in a loop.
As the chronological order of events matters, the processing of events runs in a single thread.
As a consequence:
- GUI libraries are single-threaded (unless exceptions).
- Event handling must not block.
The event loop is handled by the GUI libraries.
### Event dispatcher
GUI libraries provide an event dispatching mechanism to hide the event loop from the developer.
A GUI is composed of reusable building bricks ("widgets") which are objects.
The GUI library is in charge of dispatching the events to the right widget or to the right event handler.
This turns the problem of handling events from a global one (a single loop for the whole application) to a local self-contained one (at the level of a single widget).
### QApplication
The [QApplication](https://doc.qt.io/qt-5/qapplication.html#details) manages GUI control flow and relationship with the OS:
* Runs the event loop and dispatcher
* Handles the relation with the OS and window system: keyboard and mouse events, settings (look&feel, string localization)
* Manages Qt objects
**Note: There is only one QApplication per application.**
Doc: https://doc.qt.io/qt-5/qapplication.html#details
```
from PyQt5 import Qt
# Definitions: functions, classes
app = Qt.QApplication([])
# Initialization
app.exec_() # Event loop execution
# This blocks until the application quit
# with a call to Qt.QApplication.instance().quit()
# or all window to be closed
```
#### Qt-IPython integration
There is an IPython magic command to create the QApplication and execute it from a notebook
```
%gui qt
```
## A bit of software design
**Rule**: The processing code is strictly separated from the GUI and does not depend on it.
- Different concerns, different knowledge
- Testing
- Avoid circular dependencies
- GUI usually evolves faster than the processing part
- The processing code can be reused in a different context (batch processing, script, web...)
How?:
- Split the code in e.g., 2 files: `myprocessing.py` and `gui.py`
- `gui.py` has an `import myprocessing` statement
- `myprocessing.py` does **NOT** have an `import gui` statement
- Communication:
- GUI -> Processing: function calls
- Processing -> GUI: callback mechanism
# Qt mechanism and classes
### QObject
Main Qt classes inherit from the QObject class.
* **The instantiation of any QObject requires the creation of a QApplication**
* The QObject class allows instances to communicate using the **signals and slots** communication.
-> This mechanism is clearly event-driven oriented and is central in Qt.

The creation of a connection between two QObjects is made using **connect**. You can remove the connection using **disconnect**
``` python
subject_object.signal1.connect(observer_object.slot)
...
subject_object.signal1.disconnect(observer_object.slot)
```
#### signal / slot example
To understand the interest of the signal / slot communication we can see implementation of the [observer](https://en.wikipedia.org/wiki/Observer_pattern) pattern with QObjects.
The idea is that one object 'Subject' notifies a list of objects 'Observer' about its state.

The 'pyqt' implementation looks like:
```
class Subject(Qt.QObject):
"""Simple QObject with a state"""
sigStateChanged = Qt.pyqtSignal(str)
def setState(self, state):
print('subject state changed to', state)
self.sigStateChanged.emit(state)
class Observer(Qt.QObject):
"""Simple QObject, observing the state of a Subject"""
def __init__(self, name):
Qt.QObject.__init__(self)
self.name = name
def subjectObserveChangedHandler(self, state):
print('Observer ', self.name,
' has been informed that subject has now state', state)
subject = Subject()
observer0 = Observer(name='observer0')
# connect subject signal with observer slots
subject.sigStateChanged.connect(observer0.subjectObserveChangedHandler)
# then change the state of the subject
subject.setState('waiting')
subject.setState('working')
subject.sigStateChanged.disconnect(observer0.subjectObserveChangedHandler)
subject.setState('waiting')
```
If you want more details on:
* QObject: https://doc.qt.io/qt-5/qobject.html#details
* PyQt signal/slots: https://www.riverbankcomputing.com/static/Docs/PyQt5/signals_slots.html
* signal/slots: https://doc.qt.io/qt-5/signalsandslots.html
* connection type: https://doc.qt.io/qt-5/qt.html#ConnectionType-enum
# Qt widgets
Qt, among other, offers a large range of base widgets to create your GUI.
#### Hello world
If we want to create a simple label with 'hello word' text:
```
first_widget = Qt.QLabel('hello world')
first_widget.show()
```
#### Basic Qt widgets
The QWidget class provides the basic capability to render to the screen, and to handle user input events.
Widgets are classes inheriting from [QWidget](https://doc.qt.io/qt-5/qwidget.html).
We will present shortly the most used widgets. This part has been taken from the official Qt widget gallery: https://doc.qt.io/qt-5/gallery.html
The Qt implementation examples (calendar and styles) using PyQt5 are available from: https://github.com/baoboa/pyqt5/edit/master/examples/widgets

* (1) [QCheckBox](https://doc.qt.io/qt-5/qcheckbox.html) provides a checkbox with a text label.
* (2) [QRadioButton](https://doc.qt.io/qt-5/qradiobutton.html) provides a radio button with a text or pixmap label.
* (3) [QPushButton](https://doc.qt.io/qt-5/qpushbutton.html) provides a command button.
* (4) [QTabWidget](https://doc.qt.io/qt-5/qtabwidget.html) provides a stack of tabbed widgets.
* (5) [QTableWidget](https://doc.qt.io/qt-5/qtablewidget.html) provides a classic item-based table view.
* (6) [QScrollBar](https://doc.qt.io/qt-5/qscrollbar.html) provides a vertical or horizontal scroll bar.
* (7) [QProgressBar](https://doc.qt.io/qt-5/qprogressbar.html) provides a horizontal progress bar.
* (8) [QDateTimeEdit](https://doc.qt.io/qt-5/qdatetimeedit.html) provides a widget for editing dates and times.
* (9) [QSlider](https://doc.qt.io/qt-5/qslider.html) provides a vertical or horizontal slider.
* (10) [QDial](https://doc.qt.io/qt-5/qdial.html) provides a rounded range control (like a speedometer or potentiometer).
* (11) [QLineEdit](https://doc.qt.io/qt-5/qlineedit.html) provides a one-line text editor.

* (1) [QGroupBox](https://doc.qt.io/qt-5/qgroupbox.html) provides a group box frame with a title.
* (2) [QCalendarWidget](https://doc.qt.io/qt-5/qcalendarwidget.html) provides a monthly calendar widget that can be used to select dates.
* (3) [QLabel](https://doc.qt.io/qt-5/qlabel.html) provides a text or image display.
* (4) [QDateEdit](https://doc.qt.io/qt-5/qdateedit.html) provides a widget for editing dates.
* (5) [QComboBox](https://doc.qt.io/qt-5/qcombobox.html) provides a combined button and pop-up list.
#### QWidget properties
[QWidgets](https://doc.qt.io/qt-5/qwidget.html) have several properties.
Some interesting one:
* [visible](https://doc.qt.io/qt-5/qwidget.html#visible-prop): hide, show, setVisible, isVisible
* [toolTip](https://doc.qt.io/qt-5/qwidget.html#toolTip-prop): toolTip(), setToolTip(): Tooltip are displayed when the mouse fly over
* [windowTitle](https://doc.qt.io/qt-5/qwidget.html#windowTitle-prop): windowTitle(), setWindowTitle() - for top-level widgets
* [enabled](https://doc.qt.io/qt-5/qwidget.html#enabled-prop): enabled(), setEnabled()
* [size](https://doc.qt.io/qt-5/qwidget.html#size-prop): setFixedSize(), setWidth(), resize()...
### Exercise with QLabel
* create a QLabel and display it
* change the value and print the value using the [QLabel API](https://doc.qt.io/qt-5/qlabel.html)
```
# create a QLabel
mylabel = ...
# set the value of the QLabel
...
# show the QLabel
...
# print the QLabel text
print()
```
### Exercise with QLineEdit (1)
* Create a [QLineEdit](https://doc.qt.io/qt-5/qlineedit.html) and display it
* Use a [QIntValidator](https://doc.qt.io/qt-5/qintvalidator.html) to ensure the content of the [QLineEdit](https://doc.qt.io/qt-5/qlineedit.html) is a **positive** integer
* (optional) Add a toolTip to the QLineEdit
```
# QLineEdit creation
myLineEdit = ...
# defining the validator
...
# adding the validator to the QLineEdit
...
# show the QLineEdit
...
```
### Exercise with QLineEdit (2)
* Define a class `IntLineEdit` which inherits from [QLineEdit](https://doc.qt.io/qt-5/qlineedit.html) and creates the [QIntValidator](https://doc.qt.io/qt-5/qintvalidator.html) in the constructor.
* the constructor should take `bottom` and `top` as arguments.
```
class IntLineEdit(Qt.QLineEdit):
def __init__(self, parent=None):
super(IntLineEdit, self).__init__(parent)
widget = IntLineEdit()
widget.show()
```
### Exercise with QPushButton
* create a [QPushButton](https://doc.qt.io/qt-5/qpushbutton.html) with the text 'click me'
* print the text 'someone clicked me' each time the button is pressed
For this exercise you might refer to the:
* [QPushButton documentation](https://doc.qt.io/qt-5/qpushbutton.html)
* [AbstractButton signals documentation](https://doc.qt.io/qt-5/qabstractbutton.html#signals)
```
def print_callback():
print('someone clicked me')
# QPushButton creation
myButton = ...
# connect the button `pressed` signal with the callback
...
# show the button and test it
...
```
#### Dialogs
Qt is also providing a set of dialogs.
Dialogs are made for *short-term tasks and brief communications with the user*
**Dialogs can be modal (blocking) or not.**
-> If we want a modal dialog then we will call `exec_()` and wait for response. If not we will use `show()`
##### QMessageBox
To ask user question or give information you can use a [QMessageBox](https://doc.qt.io/qt-5/qmessagebox.html)
```
msg = Qt.QMessageBox()
msg.setIcon(Qt.QMessageBox.Warning)
msg.setText("This is a warning message")
msg.setInformativeText("this message concern QMessageBox")
msg.setWindowTitle("MessageBox warning")
msg.setDetailedText("Details of the message")
msg.exec_()
```
We can also obtain `QMessageBox` instances from the [QMessageBox](https://doc.qt.io/qt-5/qmessagebox.html) *static helper functions* like 'warning' or 'information':
```
button_id = Qt.QMessageBox.warning(None,
"MessageBox warning",
"This is a warning message")
button_id
```
This returns the ID of the [button pressed](https://doc.qt.io/qt-5/qmessagebox.html#StandardButton-enum)
##### QFileDialog
The [QFileDialog](https://doc.qt.io/qt-5/qfiledialog.html) can be used to get file(s) and or folder(s) path, existing or not.
```
dialog = Qt.QFileDialog()
dialog.setAcceptMode(Qt.QFileDialog.AcceptOpen)
dialog.setFileMode(Qt.QFileDialog.FileMode.ExistingFiles)
dialog.setNameFilters(["py (*.py)", "ipynb (*.ipynb)", "txt (*.txt)"])
if dialog.exec_():
print('selected files are', dialog.selectedFiles())
else:
print('user cancel file selection')
```
Here again, you can get some default QFileDialog from static functions:
```
filenames, filter_ = Qt.QFileDialog.getOpenFileNames(None, 'files to open')
print('File names:', filenames)
```
#### Other dialogs
There are other dialogs available in Qt:
* [QColorDialog](https://doc.qt.io/qt-5/qcolordialog.html): dialog widget for specifying colors.
* [QErrorMessage](https://doc.qt.io/qt-5/qerrormessage.html): error message display dialog.
* [QFontDialog](https://doc.qt.io/qt-5/qfontdialog.html): dialog widget for selecting a font.
* [QInputDialog](https://doc.qt.io/qt-5/qinputdialog.html): dialog to get a single value from the user.
* [QProgressDialog](https://doc.qt.io/qt-5/qprogressdialog.html): progress of an operation.
* [QWizard](https://doc.qt.io/qt-5/qwizard.html): framework for [wizards] (several pages dialogs, see https://doc.qt.io/qt-5/qtwidgets-dialogs-trivialwizard-example.html).
#### Advanced widgets

There is even more possibilities, like defining trees ([QTreeView](https://doc.qt.io/qt-5/qtreeview.html)), list ([QListWidget](https://doc.qt.io/qt-5/qlistwidget.html))...
But this part will not be covered today.
# A word on events
As said in the introduction, Qt dispatches events such as keyboard, mouse and windowing system events to the right QWidget.
QWidgets are notified of such events by calls to their event handling methods, whose names end with `Event`:
- [mousePressEvent](https://doc.qt.io/qt-5/qwidget.html#mousePressEvent)
- [keyPressEvent](https://doc.qt.io/qt-5/qwidget.html#keyPressEvent)
- [resizeEvent](https://doc.qt.io/qt-5/qwidget.html#resizeEvent)
- And more, see [QWidget events doc](https://doc.qt.io/qt-5/qwidget.html#events)
```
import random
class Button(Qt.QPushButton):
def __init__(self, parent=None):
super(Button, self).__init__("Catch me if you can", parent)
self.clicked.connect(self._clicked)
def _clicked(self):
print("You are smart or lucky!")
def enterEvent(self, event):
app = Qt.QApplication.instance()
desktop = app.desktop() # QDesktopWidget
rect = desktop.availableGeometry(self)
x = random.randint(rect.x(), rect.width() - self.width())
y = random.randint(rect.y(), rect.height() - self.height())
self.move(x, y)
button = Button()
button.show()
```
# Assembling widgets
How to go from single widgets to a consistent graphical user interface.
Objective: Describe and manage the geometric imbrication of widgets:
- Container widgets
- Layout
## Container widgets
- [QMainWindow](https://doc.qt.io/qt-5/qmainwindow.html): Main application window
- [QSplitter](https://doc.qt.io/qt-5/qsplitter.html): Lets the user control the size of child widgets
- [QStackedWidget](https://doc.qt.io/qt-5/qstackedwidget.html): Stack of widgets where only one widget is visible at a time
- [QTabWidget](https://doc.qt.io/qt-5/qtabwidget.html): Stack of tabbed widgets
- [QGroupBox](https://doc.qt.io/qt-5/qgroupbox.html): Box frame with a title.
### QMainWindow: Main application window

```
class MainWindow(Qt.QMainWindow):
def __init__(self, parent=None):
super(MainWindow, self).__init__(parent)
# Set central widget
label = Qt.QTextEdit("Type text here")
self.setCentralWidget(label)
# Add menu
menuBar = self.menuBar()
fileMenu = menuBar.addMenu("File")
editMenu = menuBar.addMenu("Edit")
# Add a toolbar
toolBar = self.addToolBar("Toolbar")
toolBar.addAction(Qt.QIcon.fromTheme("edit-undo"), "Undo")
toolBar.addAction(Qt.QIcon.fromTheme("edit-redo"), "Redo")
# Add a dock widget
dock = Qt.QDockWidget("Dock")
self.addDockWidget(Qt.Qt.RightDockWidgetArea, dock)
# Use status bar
self.statusBar().showMessage("Current status")
window = MainWindow()
window.show()
```
QMainWindow Layout:

Set the central widget: [QMainWindow.setCentralWidget](https://doc.qt.io/qt-5/qmainwindow.html#setCentralWidget)
Documentation: https://doc.qt.io/qt-5/qmainwindow.html
## QWidget's layout
[QWidget](https://doc.qt.io/qt-5/qwidget.html) base class can be used as a container with configurable layout (through the [QWidget.setLayout](https://doc.qt.io/qt-5/qwidget.html#setLayout) method).
The layout is responsible for automatically allocating space for widgets.
Layouts are classes that inherit from [QLayout](https://doc.qt.io/qt-5/qlayout.html).
This defines a spatial tree structure of widgets.
We now have 2 different hierarchical structures:
- The class inheritance
- The widget spatial imbrication.
### Basic layouts: Horizontal and vertical
- [QHBoxLayout](https://doc.qt.io/qt-5/qhboxlayout.html): Lines up widgets horizontally

- [QVBoxLayout](https://doc.qt.io/qt-5/qvboxlayout.html): Lines up widgets vertically

- [QBoxLayout](https://doc.qt.io/qt-5/qboxlayout.html): Lines up child widgets horizontally or vertically.
The [QBoxLayout.setDirection](https://doc.qt.io/qt-5/qboxlayout.html#setDirection) method allows to change it.
To populate the layout, use:
- [QBoxLayout.addWidget](https://doc.qt.io/qt-5/qboxlayout.html#addWidget)(widget, stretch=0): Appends a widget to the end of the layout
- [QBoxLayout.addStrech](https://doc.qt.io/qt-5/qboxlayout.html#addStretch)(strech=0): Adds a stretchable empty space
#### QBoxLayout example
```
%gui qt
from PyQt5 import Qt
class BoxWidget(Qt.QWidget): # Container widget
def __init__(self, parent=None):
super(BoxWidget, self).__init__(parent)
# Child widgets
label = Qt.QLabel("Value:")
lineEdit = Qt.QLineEdit("0")
button = Qt.QPushButton("Done")
# Create layout and add widgets to it
layout = Qt.QHBoxLayout()
# or: layout = Qt.QVBoxLayout()
layout.addWidget(label)
layout.addWidget(lineEdit)
layout.addWidget(button)
# layout.addStretch(1) # Add a stretch area to improve resizing
# set the widget's layout
self.setLayout(layout)
widget = BoxWidget()
widget.show()
```
### Advanced layouts : Grid and stack
- [QGridLayout](https://doc.qt.io/qt-5/qgridlayout.html): lays out widgets in a grid
To populate the layout, use the [`addWidget(widget, row, column, rowSpan, columnSpan)`](https://doc.qt.io/qt-5/qgridlayout.html#addWidget) method.

- [QFormLayout](https://doc.qt.io/qt-5/qformlayout.html): lays out widgets in a 2 columns grid
To populate the layout, use the [`addRow(label, widget)`](https://doc.qt.io/qt-5/qformlayout.html#addRow) or [`addRow(widget)`](https://doc.qt.io/qt-5/qformlayout.html#addRow-4) method.

- [QStackedLayout](https://doc.qt.io/qt-5/qstackedlayout.html): stack of widgets where only one widget is visible at a time
#### QGridLayout example
```
class GridWidget(Qt.QWidget): # Container widget
def __init__(self, parent=None):
super(GridWidget, self).__init__(parent)
# Child widgets
label = Qt.QLabel("Range:")
beginLineEdit = Qt.QLineEdit("0")
endLineEdit = Qt.QLineEdit("1")
button = Qt.QPushButton("Done")
# Create layout and add widgets to it
layout = Qt.QGridLayout()
layout.addWidget(label, 0, 0)
layout.addWidget(beginLineEdit, 0, 1)
layout.addWidget(endLineEdit, 0, 2)
layout.addWidget(button, 1, 0, 1, 3)
# set the widget's layout
self.setLayout(layout)
widget = GridWidget()
widget.show()
```
#### QFormLayout example
```
class FormWidget(Qt.QWidget):
def __init__(self, parent=None):
super(FormWidget, self).__init__(parent)
beginLineEdit = Qt.QLineEdit("0")
endLineEdit = Qt.QLineEdit("1")
button = Qt.QPushButton("Done")
# Create layout and add widgets to it
layout = Qt.QFormLayout(parent=self) # Giving the parent is the same as calling setLayout
layout.addRow("Min:", beginLineEdit)
layout.addRow("Max:", endLineEdit)
layout.addRow(button)
widget = FormWidget()
widget.show()
```
### Nested layout
For complex widget imbrication, you can use a hierarchy of widgets, each having a different layout.
It is also possible to nest layouts within each other (e.g., with [QBoxLayout.addLayout](https://doc.qt.io/qt-5/qboxlayout.html#addLayout)).
```
class NestedBoxLayoutWidget(Qt.QWidget):
def __init__(self, parent=None):
super(NestedBoxLayoutWidget, self).__init__(parent)
label = Qt.QLabel("Range:")
beginLineEdit = Qt.QLineEdit("0")
endLineEdit = Qt.QLineEdit("1")
button = Qt.QPushButton("Done")
# Create layout and add widgets to it
layout = Qt.QVBoxLayout(self)
layout.addWidget(label)
# Adding a nested horizontal layout
horizontalLayout = Qt.QHBoxLayout()
layout.addLayout(horizontalLayout)
horizontalLayout.addWidget(beginLineEdit)
horizontalLayout.addWidget(endLineEdit)
layout.addWidget(button)
layout.addStretch(1)
widget = NestedBoxLayoutWidget()
widget.show()
```
### Layout process
Each widget advertises:
- some spatial request: [sizeHint](https://doc.qt.io/qt-5/qwidget.html#sizeHint-prop), [minimumSizeHint](https://doc.qt.io/qt-5/qwidget.html#minimumSizeHint-prop), [minimumSize](https://doc.qt.io/qt-5/qwidget.html#minimumSize-prop),
[maximumSize](https://doc.qt.io/qt-5/qwidget.html#maximumSize-prop)
- how it accepts to be resized: [sizePolicy](https://doc.qt.io/qt-5/qwidget.html#sizePolicy-prop)
The layout manager takes this information into account to allocate a rectangle to each child widget.
Documentation on layout: https://doc.qt.io/qt-5/layout.html
## Exercise
### Layout
Write a form widget which allows the user to provide parameters for the calculation of the diffraction image obtained from a 2D square crystal using the Laue formula.
The Python/numpy implementation is available here: [laue.py](laue.py)
Sketch of the GUI:

Hint: You can create such a form with a [QWidget](https://doc.qt.io/qt-5/qwidget.html) with a [QFormLayout](https://doc.qt.io/qt-5/qformlayout.html), [QLineEdit](https://doc.qt.io/qt-5/qlineedit.html) or your own `IntLineEdit` widgets (and eventually [QLabel](https://doc.qt.io/qt-5/qlabel.html)) and a [QPushButton](https://doc.qt.io/qt-5/qpushbutton.html).
### Retrieve parameters
Add a `compute` method that retrieves parameters from the different widgets and prints the result of the `laue_image` function.
### Connect button signal
Connect the `compute` method to the [`clicked`](https://doc.qt.io/qt-5/qabstractbutton.html#clicked) [QPushButton](https://doc.qt.io/qt-5/qpushbutton.html) signal (see [QAbstractButton signals](https://doc.qt.io/qt-5/qabstractbutton.html#signals)) to run the computation when the user clicks on the button.
### Add a file dialog
Replace the printing of the result by saving to a file:
- Ask for a filename with [QFileDialog.getSaveFileName](https://doc.qt.io/qt-5/qfiledialog.html#getSaveFileName) static methods.
- Use [numpy.save](https://docs.scipy.org/doc/numpy/reference/generated/numpy.save.html) to save the result to a file.
[Solution](solution/app_mini.py)
# PyQt and Qt object life-cycle
### QWidget parent
When a QWidget **A<span>** is added to another QWidget **B<span>** or to its layout, **B<span>** becomes automatically the parent of **A<span>**.
To get the current QWidget parent, use the [QObject.parent](https://doc.qt.io/qt-5/qobject.html#parent) method.
```
lineEdit = Qt.QLineEdit("Input")
print("lineEdit's parent:", lineEdit.parent())
# Add the lineEdit to a QMainWindow
window = Qt.QMainWindow()
window.setCentralWidget(lineEdit)
window.show()
print("lineEdit's parent:", lineEdit.parent())
lineEdit.parent() is window
```
### (Py)Qt object instance destruction
With Qt, the object parenting set by the layout also handles the life-cycle (i.e., automatic destruction) of widget instances.
When a widget is destroyed, all its children are also destroyed...
With PyQt, this raises an issue as Python is also handling the life-cycle of instances (with reference counting).
```
lineEdit.text()
del window
print("lineEdit:", lineEdit)
lineEdit.text()
```
### (Py)Qt object instance destruction
A QWidget is a C++ Qt object.
To be accessible from Python, it is wrapped in a Python object by PyQt.
- The destruction of the Python PyQt object instance is handled by Python.
- The destruction of the C++ Qt object instance is handled by:
- **Python** if its `parent` is `None`.
- **Qt** if its `parent` is **not** `None`.
### QApplication/QWidget end of life
By default, the QApplication quits when all top windows/widgets are closed (see [QWidget.close](https://doc.qt.io/qt-5/qwidget.html#close)).
A QWidget can be destroyed when it is closed with:
[QWidget.setAttribute](https://doc.qt.io/qt-5/qwidget.html#setAttribute)([Qt.Qt.WA_DeleteOnClose](https://doc.qt.io/qt-5/qt.html#WidgetAttribute-enum)).
By default it is **NOT** destroyed when closed.
```
label = Qt.QLabel("Close me and I am destroyed")
label.setAttribute(Qt.Qt.WA_DeleteOnClose)
label.show()
label.text()
```
## Unhandled exceptions
With PyQt5 >= 5.5, an unhandled exception in Python terminates the application (see [doc](https://www.riverbankcomputing.com/static/Docs/PyQt5/incompatibilities.html#unhandled-python-exceptions)).
It is possible to override this behavior by setting Python's [sys.excepthook](https://docs.python.org/3/library/sys.html#sys.excepthook).
```
import sys, traceback
from PyQt5 import Qt
def excepthook(type_, value, tb):
message = '%s, %s, %s' % (type_, value, ''.join(traceback.format_tb(tb)))
print(message)
Qt.QMessageBox.critical(None, "Exception raised", message)
def clicked():
raise RuntimeError("Button clicked")
app = Qt.QApplication([])
sys.excepthook = excepthook
button = Qt.QPushButton('Press here')
button.clicked.connect(clicked)
button.show()
app.exec_()
```
## More exercises on the Laue example
Make one or several of the following add-on (not necessarily in order).
#### Add tooltips
Add tooltips (i.e., help messages displayed when the mouse stays still over a widget) on the different widgets to give extra information.
Hint: Use [QWidget.setToolTip](https://doc.qt.io/qt-5/qwidget.html#toolTip-prop) for widgets used in your application.
[Solution](solution/app_mini_tooltips.py)
#### Constrain user input
The expected inputs are:
* Number of cells: integer >= 2
* Oversampling: integer >= 2
* H: floating point value
* K: floating point value
If you didn't use the `IntLineEdit` in your code, for now you can give any (possibly invalid) input value in the [QLineEdit](https://doc.qt.io/qt-5/qlineedit.html)s.
##### 1. Use QLineEdit with a [QValidator](https://doc.qt.io/qt-5/qvalidator.html)
Add [QIntValidator](https://doc.qt.io/qt-5/qintvalidator.html) and [QDoubleValidator](https://doc.qt.io/qt-5/qdoublevalidator.html) to restrict the possible entries of the different [QLineEdit](https://doc.qt.io/qt-5/qlineedit.html).
Note: You can reuse the `IntLineEdit` widget from the QWidget exercise which deals with the [QIntValidator](https://doc.qt.io/qt-5/qintvalidator.html) to avoid code duplication.
[Solution](solution/app_mini_validator.py)
#### 2. Alternative: Replace QLineEdit widgets with QSpinBox
Replace [QLineEdit](https://doc.qt.io/qt-5/qlineedit.html) in the form with [QSpinBox](https://doc.qt.io/qt-5/qspinbox.html) and [QDoubleSpinBox](https://doc.qt.io/qt-5/qdoublespinbox.html) to make sure inputs are integers >=2 and floats.
[Solution](solution/app_mini_spinbox.py)
#### Display the output size
Add a [QLabel](https://doc.qt.io/qt-5/qlabel.html) displaying the expected output size (which depends on the number of unit cells or oversampling values) before the user presses the `Run` button.
Update the displayed value whenever the user changes the inputs.
In [laue.py](laue.py), this function returns the size of the output array of `laue_image` in each dimension:
```python
def laue_array_size(ncells, oversampling):
return ncells * oversampling
```
Hint: Connect to the appropriate [QLineEdit signal](https://doc.qt.io/qt-5/qlineedit.html#signals) to trigger the update of the displayed value.
[Solution](solution/app_mini_output_size.py)
#### Add a result preview
Add a widget to preview the result.
The [imageplot.py](imageplot.py) module provides a minimalistic `ImagePlot` widget that displays a 2D array (`ImagePlot.setData`) as an image with a gray colormap and a log scale:
```python
from imageplot import ImagePlot
...
plot = ImagePlot()
plot.setData(data)
...
```
[Solution](solution/app_mini_preview.py)
#### Split "Run and Save" button
Split the `Run and Save` button into 2 buttons: `Run` and `Save`.
The `Save` button should only be enabled once `Run` has been pressed once and some results are ready to be saved.
Hint: Use [QWidget.setEnabled](https://doc.qt.io/qt-5/qwidget.html#enabled-prop) and set it from the QPushButton's [clicked](https://doc.qt.io/qt-5/qabstractbutton.html#clicked) signal.
[Solution](solution/app_mini_run_and_save.py)
#### Using a QMainWindow and status bar
##### 1. Use QMainWindow
Embed the Laue form widget in a [QMainWindow](https://doc.qt.io/qt-5/qmainwindow.html). Use [QMainWindow.setCentralWidget](https://doc.qt.io/qt-5/qmainwindow.html#setCentralWidget).
##### 2. Add information in status bar
Add a message when the processing is done in QMainWindow's [statusBar](https://doc.qt.io/qt-5/qmainwindow.html#statusBar).
Hint: Use [QMainWindow.statusBar](https://doc.qt.io/qt-5/qmainwindow.html#statusBar) and [QStatusBar.showMessage](https://doc.qt.io/qt-5/qstatusbar.html#showMessage).
[Solution](solution/app_window.py)
#### Execute 'laue' process in a Thread
To prevent the GUI from freezing while the processing is running, the possibly long computation needs to run in a dedicated thread.
Instead of starting the computation when the `Run` button is pressed, start the processing in a [threading.Thread](https://docs.python.org/3/library/threading.html#thread-objects).
The issue is now to notify the GUI that the processing is completed.
You can pass a [Qt.pyqtSignal](https://doc.qt.io/qt-5/signalsandslots.html) `emit` method as a callback to this thread; it will be called once the processing is done.
Of course this signal should be connected to a slot function of your GUI.
[Solution](solution/app_mini_thread.py)
Example of complete application:
- [As a standalone script](solution/app_full.py)
- [As a script](solution/app_ui.py) using an [.ui file](solution/laue_app.ui) (see QtDesigner below)
# QtDesigner
[QtCreator](https://doc.qt.io/qtcreator/) is an integrated development environment (IDE) for creating cross platform applications using Qt.
[QtDesigner](https://doc.qt.io/qt-5/qtdesigner-manual.html) is one of the tools embedded in QtCreator for building GUIs.
From this tool you can compose QMainWindows, QWidgets or QDialogs for example and embed them in Python script.

## Demonstration: doing the Laue exercise with QtCreator
launch *qtcreator* application
``` bash
qtcreator
```

Then go to: new project, application => QtWidgetsApplication
Give a path and a name to your project.

We are only focusing on the `.ui` file, since you are not supposed to be C++ developer and we want a Python application.
Signals / slot connections will be managed in the Python script, not in C++.
Now you can add your widgets and subwidgets. **Name each widget** so you can retrieve them later.
Once created, select your widgets and add a layout.
We will add signals, slots and connections in the python file.

Now save your project, you will be able to modify it afterwards.
### Link .ui file with python script
On the web you will find several tutorials to convert the `.ui` files directly to `.py` files, like
``` bash
python -m PyQt5.uic.pyuic -x [FILENAME].ui -o [FILENAME].py
```
In this case, **beware not to modify the generated code**: See [How-to use the generated code](https://www.riverbankcomputing.com/static/Docs/PyQt5/designer.html#using-the-generated-code)
**It is best if you use the [uic](https://www.riverbankcomputing.com/static/Docs/PyQt5/designer.html#the-uic-module)** module from PyQt5.
This way you won't need to convert it each time you modify the `.ui` and you can embed the widget connections in the same Python file.
```
from PyQt5 import uic
help(uic.loadUi)
```
### Loading .ui
Example: [laue_widget.ui](examples/laue_widget.ui)
```
from PyQt5 import Qt, uic
class LaueMainWindow(Qt.QMainWindow):
    """Main window whose widgets are loaded from a Qt Designer .ui file."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Populate this very instance with the widgets described in the file.
        uic.loadUi(uifile="examples/laue_widget.ui", baseinstance=self)


window = LaueMainWindow()
window.setAttribute(Qt.Qt.WA_DeleteOnClose)
window.show()
print("ncells:", window._nCellsSpinBox.text(), "oversampling:", window._oversamplingSpinBox.value(),
      "H:", window._hLineEdit.text(), "K:", window._kLineEdit.text())
```
### Using inherited widgets in Qt designer
It is possible to use widgets that are not part of Qt (like `IntLineEdit`) in the designer, through **widget promotion**:
- In the designer, use the base Qt widget as a placeholder
- Configure this base widget to be replaced by the target one: widget context menu->**Promote to...**
- In the dialog, give the target widget name and the Python module it belongs to as the header file.
# Conclusion
(Py)Qt as most GUI library is:
- single-threaded
- event-driven
- object-oriented
It provides reusable building bricks (widgets and layout) to compose GUI.
Its functionalities and the level of control is huge, and so is its API.
The designer is an efficient way to build (and maintain) GUIs.
You can find more exercises here: https://pythonspot.com/gui/
## Final remark
Compare the code you've written during the exercise with the following cell, which provides the same functionality:
```
import numpy
import laue
numpy.save("result.npy", laue.laue_image(ncells=10, h=0, k=4, oversampling=2))
```
| github_jupyter |
```
# Optimus (dask backend) exploratory session: load a CSV and inspect column
# statistics, histograms and value frequencies.
%load_ext autoreload
%autoreload 2
import sys
sys.path.append("..")
from optimus import Optimus
op = Optimus("dask")
# NOTE(review): df is used here before it is assigned below -- the notebook
# cells were probably executed out of order.
df.cols.max()
# df = op.create.dataframe({"name": ["A1", "B2"]*20})
df = op.load.csv("store.csv", dtype="str")
df.cols.len("*").cols.max()
df.cols.min(compute=False)
df.cols.min(compute=True)
df
df.cols.hist("id")
%%time
df.cols.hist("id")
df.meta
df.cols.frequency("*", n=10, count_uniques=True, compute=True)
df.cols.names()
df.rows.count()
df.cols.hist("id")
df.cols.hist("price")
%%time
df.cols.hist()
# import sys,os,os.path
# os.environ
def xray(df, col_name):
    """Print every value in column *col_name* together with its Python type.

    Handy for spotting mixed-type columns while debugging.
    """
    column = df[col_name]
    for value in column:
        print(value, type(value))
# Optimus exploration: profiling, buffered windows and computed columns on a
# remote CSV, then a local transactions dataset.
op = Optimus("dask", threads_per_worker=8, n_workers=1, comm=True)
preview_df = op.load.file("http://159.65.217.17:5003/uploads/datasetFile-1591123129359.csv", n_rows=35).ext.cache()
import fastnumbers
fastnumbers.fast_float(None)
f = df.cols.set(value='df["ticket_price"]*df["discount"]', where='df["ticket_price"]!=None', output_cols="ticket_price").ext.cache()
_output = df.ext.profile(columns="*", output="json")
_output = {**preview_df.ext.to_json("*"), "meta": preview_df.meta.get() if (preview_df.meta and preview_df.meta.get) else {} }
_output = preview_df.ext.profile(columns="*", output="json")
df = op.load.file("http://159.65.217.17:5003/uploads/datasetFile-1591123129359.csv").ext.cache()
df = df.ext.repartition(8).ext.cache()
_output = df.ext.profile(columns="*", output="json")
_output = df.ext.set_buffer("*")
_output = df.ext.buffer_window("*", 0, 17).ext.to_json("*")
print(_output)
_output = df.ext.set_buffer("*")
_output = df.ext.buffer_window("*", 0, 17).cols.set(value='df["ticket_price"]', where='df["ticket_price"]!=None', output_cols="new ticket_price").ext.to_json("*")
_df_profile = df.ext.buffer_window("*").cols.set(value='df["ticket_price"]', where='df["ticket_price"]!=None', output_cols="new ticket_price")
_output = { "profile": _df_profile.ext.profile(["new ticket_price"], output="json")}
df.compute().ext.head()
_output = df.ext.buffer_window("*", 0, 17).cols.set(value='df["ticket_price"]*df["discount"]', where='df["ticket_price"]!=None', output_cols="new ticket_price").ext.to_json("*")
df.ext.buffer_window("*", 0, 17).cols.set(value='df["ticket_price"]*df["ticket_price"]',output_cols="new 1")
# Re-create the Optimus session with an explicit memory limit.
from optimus import Optimus
op = Optimus("dask", n_workers=1, threads_per_worker=8, processes=False, memory_limit="3G", comm=True)
%%time
df = op.load.file("data/dataset-transactions.csv").ext.cache()
# df = df.ext.optimize()
df = df.ext.repartition(1).ext.cache()
df.ext.display(20)
# cols_and_inferred_dtype = df.cols.infer_profiler_dtypes("*")
# df.cols.cast_to_profiler_dtypes(columns=cols_and_inferred_dtype).persist()
# # result = df.ext.profile(columns=columns, bins=bins, output=output, flush=flush, size=size)
# print(cols_and_inferred_dtype)
# df.cols.infer_profiler_dtypes("*")
p = df.ext.profile("*", flush=True)
df.meta.get()["profile"]["columns"]["customer_id"]
# df, p = df.ext.cast_and_profile("*")
assert(p["columns"]["ticket_price"]["stats"]["match"]==17)
assert(p["columns"]["ticket_price"]["stats"]["mismatch"]==0)
assert(p["columns"]["ticket_price"]["stats"]["missing"]==0)
df.cols.infer_profiler_dtypes("*")
df = df.cols.replace("discount", search=["%"], replace_by="", search_by="chars", ignore_case=True)
df.compute()
# df.cols.infer_profiler_dtypes("*")
from optimus.infer import Infer
df.ext.head("*",50).applymap(Infer.parse_pandas)
xray(df, "discount")
p = df.ext.profile("discount")
df.cols.infer_profiler_dtypes("*")
# df["transactoin_date"].compute()
df.compute()
xray(df, "discount")
# NOTE(review): "transactoin_date" is the dataset's own (misspelled) column
# name -- do not "fix" it here.
df = df.cols.unnest("transactoin_date", separator="/", splits=2, output_cols="transactoin_date").ext.cache()
_output = df.ext.profile(columns="*", output="json", flush=True)
df.meta.get()["profile"]["columns"]["customer_id"]
df.cols.infer_profiler_dtypes("*")
df.compute()
df.meta.get()["profile"]["columns"]["customer_id"]
df1 = df.cols.set(value='df["ticket_price"]*df["discount"]*10', output_cols="new ticket_price")
df.cols.names()
df1.compute()
df.cols.set(value='mask["ticket_price"]+mask["transactoin_date"]', where='df["ticket_price"]!=None', output_cols="new ticket_price_2").compute()
df = op.load.file("http://159.65.217.17:5003/uploads/datasetFile-1590599684769.csv").ext.cache()
df = df.ext.repartition(8).ext.cache()
_output = df.ext.cast_and_profile(columns="*", output="json")
unmasked = "DD/MM/YY"
masked = unmasked[1:4]
masked
print(_output)
# cols_and_inferred_dtype = df.cols.infer_profiler_dtypes("*")
# print(cols_and_inferred_dtype)
# df = df.cols.cast_to_profiler_dtypes(columns=cols_and_inferred_dtype).persist()
df.compute()
%%time
df.cols.cast(columns = {"INCIDENT_NUMBER":"int"}).compute()
%%time
df.ext.cast_and_profile(columns="*", flush=True, output="json")
df.cols.names()
# NOTE(review): astype() with no dtype argument raises TypeError in pandas.
df.OCCURRED_ON_DATE.astype()
%%time
import pandas as pd
from fastnumbers import fast_int
import pendulum
def func(value):
    # Parse a date/time string into a pendulum DateTime (used with
    # df.OCCURRED_ON_DATE.apply below).
    return pendulum.parse(value)
# Parse the date column through the helper above, then combine two computed
# series with dask.
df.OCCURRED_ON_DATE.apply(func, meta="object").compute()
df.compute()
a = df["INCIDENT_NUMBER"].compute()
b = df["OFFENSE_CODE"].compute()
from dask import dataframe as dd
dd.compute(a+b)
def func(value):
    # Identity passthrough (redefines the pendulum-parsing `func` above);
    # used with df.apply below.
    return value
# Date-part extraction experiments on the transactions dataset, then crime
# data casting, then small numpy scratch arrays for the addition helper below.
df.apply(func)
%%time
# _output = df.ext.cast_and_profile(columns="*", flush=True, output="json")
# df = df.cols.cast_to_profiler_dtypes(columns=cols_and_inferred_dtype).persist()
df.ext.display()
df.ext.partitions()
df.compute()
df.cols.year("")
print(_output)
import pendulum
pendulum.from_format("11 11 1980", "DD MM YYYY")
# df = op.load.file("data/test-types.csv").ext.cache()
df = op.load.file("data/dataset-transactions.csv").ext.cache()
a = df.cols.infer_profiler_dtypes("*")
print(a)
df = df.cols.cast_to_profiler_dtypes(columns={'transactoin_date': 'date'})
# df["a"] = df.transactoin_date.astype('M8[us]')
# b["transactoin_date"].dt.year
df.dtypes
df.cols.year("transactoin_date", "new1").compute()
df.cols.month("transactoin_date", "new1").compute()
df.cols.day("transactoin_date", "new1").compute()
df.ext.display(20)
df= df.cols.unnest("transactoin_date", separator="/", splits=3, output_cols="transactoin_date").ext.cache()
df.ext.profile(columns="*", infer=True, output="json", flush=True, bins=10)
df.ext.display()
df.cols.infer_profiler_dtypes("*")
df = op.load.file("http://159.65.217.17:5003/uploads/datasetFile-1590426328940.csv").ext.cache()
df = df.ext.optimize()
df = df.ext.repartition(1).ext.cache()
col_name = "customer_id"
df.cols.count_mismatch({col_name:"int","transactoin_date":"date"})
df = op.load.file("data/crime.csv").ext.cache()
%%time
df.cols.cast("INCIDENT_NUMBER","int").compute()
df.ext.display()
# Small numpy scratch arrays used by the guarded-addition helper below.
import numpy as np
a = np.array([1,2,3])
b = np.array([1,2,"a"])
def func(a, b):
    """Return ``a + b``, or ``np.nan`` when the operands cannot be added.

    Parameters
    ----------
    a, b : array_like or scalar
        Values to add; e.g. mixed numeric/string numpy arrays raise on ``+``.

    Returns
    -------
    The sum, or ``np.nan`` on failure.
    """
    try:
        return a + b
    except (TypeError, ValueError):
        # Replaces the original bare `except:` which also swallowed things
        # like KeyboardInterrupt. TypeError covers numpy's UFuncTypeError
        # (dtype mismatch); ValueError covers broadcasting failures.
        return np.nan
# Try the guarded addition, then peek at the first row as a numpy record.
func(a,b)
df[["INCIDENT_NUMBER","REPORTING_AREA"]].compute().to_numpy()[0]
def op(value):
    # Identity used row-wise via DataFrame.apply in `func` below.
    # NOTE(review): this shadows the Optimus instance `op` created earlier
    # in the notebook.
    return value
    # return value["INCIDENT_NUMBER"]
    # print(value)
    # try:
    # return value["INCIDENT_NUMBER"] + value["REPORTING_AREA"]
    # except():
    # return np.nan
def func(pdf):
    """Apply the row-wise ``op`` callable across one pandas partition."""
    return pdf.apply(op, axis=1)
# Apply `func` per partition.
# NOTE(review): numpy.core._exceptions is a private module path -- it can
# move between numpy releases.
df[["INCIDENT_NUMBER","REPORTING_AREA"]].map_partitions(func, meta=object).compute()
%%time
from numpy.core._exceptions import UFuncTypeError
def func(a, b):
    """Return ``a + b`` (may raise for incompatible operand types)."""
    return a + b


def myfunc(a, b, expr):
    """Return ``func(a, b)``, or ``np.nan`` when the addition fails.

    The original docstring ("Return a-b if a>b, otherwise return a+b")
    did not match the implementation; this one does.

    Parameters
    ----------
    a, b : scalar
        Operands passed straight to :func:`func`.
    expr : callable
        Unused; kept so existing ``np.vectorize(myfunc)`` call sites that
        pass the function as a third argument keep working.
    """
    try:
        return func(a, b)
    except TypeError:
        # numpy's UFuncTypeError subclasses TypeError, so dtype failures
        # are covered too (the original listed both redundantly).
        return np.nan
# Vectorize myfunc over a large mixed-type list, regex / date-parsing
# scratch, per-dtype mismatch probes, then a small dask dataframe for the
# reduction demo below.
vfunc = np.vectorize(myfunc)
print(func)
arr= [1, 2, 3,4,5,6,7,8,9,"A"]*1000000
b = vfunc(arr, 2, func)
# vfunc([1, 2, 3, "A"], 2)
len(arr)
df = df.ext.repartition(8)
import re
# NOTE(review): non-raw pattern string; prefer r"..." for regexes.
comp = re.compile("^(?=.)([+-]?([0-9]*)(\.([0-9]+))?)$")
%time df["INCIDENT_NUMBER"].str.match(comp).compute()
df.cols.names()
from dateutil.parser import parse as dparse
dparse("2018-09-02 13:00:00")
df.ext.display()
df.cols.infer_profiler_dtypes("*")
df.ext.display(20)
col_name ="customer_id"
# Probe how many values of the column match each profiler data type.
print(df.cols.count_mismatch({col_name:"int"}))
print(df.cols.count_mismatch({col_name:"decimal"}))
print(df.cols.count_mismatch({col_name:"boolean"}))
print(df.cols.count_mismatch({col_name:"date"}))
print(df.cols.count_mismatch({col_name:"array"}))
print(df.cols.count_mismatch({col_name:"object"}))
print(df.cols.count_mismatch({col_name:"gender"}))
print(df.cols.count_mismatch({col_name:"ip"}))
print(df.cols.count_mismatch({col_name:"url"}))
print(df.cols.count_mismatch({col_name:"email"}))
print(df.cols.count_mismatch({col_name:"credit_card_number"}))
print(df.cols.count_mismatch({col_name:"zip_code"}))
_output = df.ext.profile(columns=col_name, infer=True, output="json", flush=True)
# _output = df.ext.set_buffer("*")
# _output = df.ext.set_buffer("*")
# _output = df.ext.buffer_window("*", 0, 17).ext.to_json("*")
print(_output)
cols_and_inferred_dtype = df.cols.infer_profiler_dtypes("*")
print(cols_and_inferred_dtype)
mismatch = df.cols.count_mismatch(cols_and_inferred_dtype, infer=True, compute=True)
print(mismatch)
df = op.load.file("data/crime.csv").ext.cache()
df = df.ext.optimize().ext.cache()
df.cols.count_mismatch({"OFFENSE_DESCRIPTION":"string"})
df.ext.display()
import functools
import dask
import dask.dataframe as dd
import pandas as pd
pdf = pd.DataFrame({
    'x': range(0, 100),
    'y': range(0, 100),
    'z': range(0, 100)
})
ddf = dd.from_pandas(pdf, npartitions=8)
print('Number of partitions', ddf.npartitions)
def compute_stats(row):
    """Build a per-row stats dict: sum of x/y/z plus the row's min and max."""
    total = row['x'] + row['y'] + row['z']
    smallest = min(row)
    largest = max(row)
    return {'sum': total, 'min': smallest, 'max': largest}
def accum_stats(stats_accum, stats):
    """Fold one stats dict into a running sum/min/max accumulator."""
    merged = {}
    merged['sum'] = stats_accum['sum'] + stats['sum']
    merged['min'] = min(stats_accum['min'], stats['min'])
    merged['max'] = max(stats_accum['max'], stats['max'])
    return merged
def compute_stats_partition(pdf):
    # Per-partition step of the dask reduction: compute row-wise stats.
    pds = pdf.apply(compute_stats, axis=1)
    # Debug prints left in by the author.
    print(pdf)
    print("99999999999999999999999999999999999999999999999")
    # NOTE(review): debug early-return -- the functools.reduce line below is
    # unreachable. The intent was presumably to fold the partition with
    # accum_stats; delete the `return pds` to restore that. TODO confirm.
    return pds
    return functools.reduce(accum_stats, pds)
def merge_stats_series(pds):
    # Aggregation step of the dask reduction over per-partition results.
    print(pds)
    print("--")
    # NOTE(review): debug early-return -- the functools.reduce line below is
    # unreachable; delete the `return pds` to restore the intended fold.
    return pds
    return functools.reduce(accum_stats, pds)
# Run the partition/aggregate reduction defined above, then pull the first
# delayed row and compute it.
res = ddf.reduction(
    compute_stats_partition,
    merge_stats_series,
    meta={
        'sum': 'int64',
        'min': 'int64',
        'max': 'int64'
    })
# singleton dataframe to list of delayed objects
# where each row is a delayed object
# and in this case we just want the first one
delayed_dict = res.to_delayed()[0]
a = dd.compute(delayed_dict)
df = op.load.file("http://159.65.217.17:5003/uploads/datasetFile-1590447280612.csv").ext.cache()
df = df.ext.optimize()
df = df.ext.repartition(8).ext.cache()
_output = df.ext.profile(columns="*", infer=True, output="json")
print(_output)
##Bag
# each element is an integer
import dask.bag as db
b = db.from_sequence([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], npartitions=1)
b.take(3)
%%time
b.to_dataframe().compute()
# Check optimus' date-pattern builder against a sample string.
from optimus.helpers.functions import match_date
import re
pattern = match_date("dd MM yy h")
string = "11 05 1980 2"
prog = re.compile(pattern)
result = prog.match(string)
print(bool(result))
```
| github_jupyter |
```
# standard libraries
import pandas as pd
# Binance wrapper libraries
from binance.client import Client
from binance.websockets import BinanceSocketManager
def web_socket_modularized():
    """
    Signature: web_socket_modularized() -> 'BinanceSocketManager'
    Docstring:
    Deals with real-time data.
    Also takes care of plotting.
    It makes use of Binance's API ('https://api.binance.com/api/v3/')
    Returns
    -------
    BinanceSocketManager
    Example
    -------
    >>> web_socket_modularized()
    """
    # real-time data and chart
    # have to be global for real-time interaction
    # NOTE(review): rtchart is read below (candlestick trace at data[0],
    # axis updates) but never created in this function -- it must already
    # exist as a global plotly-style figure before calling. TODO confirm.
    global rtdata
    global rtchart
    # initialize client without API keys, as they are not needed for now
    client = Client("", "")
    # this function runs every time the socket receives new data
    def process_message(x):
        global rtdata
        global rtchart
        # get the last minute from the existing data
        t1 = pd.to_datetime(rtdata.tail(1).index.values[0])
        # get the last minute from the new data
        t2 = pd.to_datetime(x['k']['t'], unit='ms')
        # convert the new data (kline type) into a dataframe
        new_df = pd.DataFrame([x['k']])
        # change the data type for t
        new_df['t'] = pd.to_datetime(new_df['t'], unit='ms')
        # change the data type for T
        new_df['T'] = pd.to_datetime(new_df['T'], unit='ms')
        # change to index into datetime with frequency = minutes
        new_df.index = pd.DatetimeIndex(new_df.t, freq='min')
        # drop the t column as it is now the index
        new_df = new_df.drop('t', axis=1)
        # reindex the dataframe using the existing data as a reference
        # NOTE(review): reindex returns a new frame; this result is
        # discarded -- assign it back if the column order matters.
        new_df.reindex(columns=rtdata.columns)
        # if the timestamps are different then append new values
        if t1 != t2:
            rtdata = pd.concat([rtdata, new_df], axis=0)
        #if it's still the same minute then update the value
        #this way we can see every change even before the candle is over
        else:
            rtdata.loc[rtdata.index[-1]] = new_df.loc[new_df.index[-1]]
        # update the chart
        rtchart.data[0].x=rtdata.index
        rtchart.data[0].open=rtdata.o
        rtchart.data[0].high=rtdata.h
        rtchart.data[0].low=rtdata.l
        rtchart.data[0].close=rtdata.c
        # recenter the plot leaving some space for predictions
        rtchart.update_xaxes(range=[rtdata.index[-16],rtdata.index[-1] + pd.Timedelta(minutes=5)])
        pass
    # get the last 1 hour of 1-minute candles data
    klines = client.get_historical_klines("BTCUSDT", Client.KLINE_INTERVAL_1MINUTE, "1 hour ago UTC")
    # convert to a dataframe using the appropriate format provided by the API documentation
    rtdata = pd.DataFrame(columns=['t','o','h','l','c','v','T','q','n','V','Q','B'])
    # concatenate every candle
    rtdata = pd.concat([rtdata, pd.DataFrame([i for i in klines], columns=rtdata.columns)], axis=0)
    # change the data type for t
    rtdata['t'] = pd.to_datetime(rtdata['t'], unit='ms')
    # change the data type for T
    rtdata['T'] = pd.to_datetime(rtdata['T'], unit='ms')
    # change to index into datetime with frequency = minutes
    rtdata.index = pd.DatetimeIndex(rtdata.t, freq='min')
    # drop the t column as it is now the index
    rtdata = rtdata.drop('t', axis=1)
    # initialize a socket manager
    bm = BinanceSocketManager(client)
    # start the kline socket to get bitcoin data in realtime
    bm.start_kline_socket("BTCUSDT", process_message)
    # adjust the plot's y-range
    rtchart.update_yaxes(range=[rtdata['l'].tail(15).min(),rtdata['h'].tail(15).max()])
    # return the socket manager
    return(bm)
def start_socket():
    """
    Signature: start_socket() -> 'BinanceSocketManager'
    Docstring:
    Initializes a Socket Manager to receive real-time information from Binance.
    Returns
    -------
    BinanceSocketManager
    Examples
    --------
    >>> start_socket()
    """
    # Public endpoints only, so no API key/secret is required.
    client = Client("", "")
    bm = BinanceSocketManager(client)
    bm.start()
    return(bm)
def close_socket(x):
    """
    Signature: close_socket(x)
    Docstring:
    Closes a socket to stop the real-time data fetching.
    Parameters
    ----------
    x : BinanceSocketManager
        An instance of a BinanceSocketManager.
    Examples
    --------
    >>> close_socket(bm)
    """
    # Delegate to the manager's own close(); implicitly returns None
    # (the original's trailing `pass` was dead code).
    x.close()
```
| github_jupyter |
# Explore ways to read/write params to/from a file
```
from see import base_classes
from see.Segmentors import segmentor
from see.ColorSpace import colorspace
from see.Workflow import workflow
from see.Segment_Fitness import segment_fitness
# Register the pipeline stages on the workflow class, then instantiate it.
workflow.addalgos([colorspace, segmentor, segment_fitness])
wf = workflow()
```
----
# Use Pickle to wrap entire algorithm object
This works nicely, assuming the algorithm object is what we want to write. It doesn't work in the genetic algorithm, since an individual may just be a list. Although it works fairly well.
```
import pickle
def append_algo_pickle(fpop_file, algorithm):
    """Append *algorithm* to the pickle file *fpop_file*.

    Opens in binary-append mode so successive calls accumulate records that
    read_algo_pickle can load back one at a time.
    """
    # Context manager guarantees the handle is closed; the original left
    # the file handle open (resource leak).
    with open(fpop_file, 'ab') as filehandler:
        pickle.dump(algorithm, filehandler)
def read_algo_pickle(fpop_file):
    """Load every pickled record from *fpop_file* and return them as a list."""
    records = []
    with open(fpop_file, "rb") as openfile:
        while True:
            try:
                records.append(pickle.load(openfile))
            except EOFError:
                # End of file: all appended records have been read.
                break
    return records
# Round-trip the whole workflow object through the pickle file.
!rm pickle_test.pk
append_algo_pickle('pickle_test.pk', wf)
pop = read_algo_pickle('pickle_test.pk')
pop
!cat 'pickle_test.pk'
```
---
This version just saves the list
```
# This time pickle only the parameter list instead of the whole object.
!rm pickle_test.pk
append_algo_pickle('pickle_test.pk', wf.params.tolist())
pop = read_algo_pickle('pickle_test.pk')
pop
```
---
# Use JSON
Read in as a JSON file. This also doesn't work as well, since we can't append additional values to the list.
```
import json
def append_algo_json(fpop_file, algorithm):
    """Write *algorithm* to *fpop_file* as JSON.

    Note: despite the name, mode 'w' overwrites the file on every call --
    JSON has no cheap append-a-record format (see the notebook text).
    """
    # Bug fix: the original ignored fpop_file and always wrote to the
    # hard-coded 'json_test.json'.
    with open(fpop_file, 'w', encoding="utf8") as f:
        json.dump(algorithm, f)
def read_algo_json(fpop_file):
    """Read and return the JSON-encoded population stored in *fpop_file*."""
    with open(fpop_file, 'r') as handle:
        return json.load(handle)
# Round-trip the parameters through JSON.
!rm 'json_test.json'
append_algo_json('json_test.json', wf.params)
!cat 'json_test.json'
pop = read_algo_json('json_test.json')
```
----
# Basic Vector
This is a more direct way to write out the list. The nice thing about this format is that it is human readable. Maybe not as flexible, but easier to work with.
```
def write_algo_vector(fpop_file, algorithm):
    """Append *algorithm*'s parameter vector to *fpop_file*, one list per line."""
    params_list = algorithm.params.tolist()
    with open(fpop_file, 'a') as outfile:
        outfile.write(f'{params_list}\n')
def read_algo_vector(fpop_file):
    """Read a population file written by write_algo_vector.

    Returns a list with one parsed parameter list per line.
    """
    import ast

    inlist = []
    with open(fpop_file, 'r') as myfile:
        for line in myfile:
            # ast.literal_eval only accepts Python literals, unlike the
            # original eval(), which would execute arbitrary expressions
            # found in the file.
            inlist.append(ast.literal_eval(line))
    return inlist
# Round-trip the parameter vector through the plain-text list format.
!rm 'list_test.txt'
write_algo_vector('list_test.txt', wf)
# NOTE(review): case differs from 'list_test.txt' written above; this only
# works on case-insensitive filesystems.
!cat List_test.txt
params_as_list = read_algo_vector('list_test.txt')
params_as_list
```
----
# Basic Dictionary
Same idea as a list but as a dictionary. Has the same problem as the basic pickle.
```
def write_algo_dict(fpop_file, params):
    """Append *params* (any mapping) to *fpop_file* as one dict literal per line."""
    as_dict = dict(params)
    with open(fpop_file, 'a') as outfile:
        outfile.write(f'{as_dict}\n')
def read_algo_dict(fpop_file):
    """Read a population file written by write_algo_dict.

    Returns a list with one parsed parameter dict per line.
    """
    import ast

    inlist = []
    with open(fpop_file, 'r') as myfile:
        for line in myfile:
            # ast.literal_eval only accepts Python literals, unlike the
            # original eval(), which would execute arbitrary expressions
            # found in the file (same fix as read_algo_vector).
            inlist.append(ast.literal_eval(line))
    return inlist
# Round-trip the parameters through the plain-text dict format.
!rm 'dict_test.txt'
write_algo_dict('dict_test.txt', wf.params)
!cat dict_test.txt
# NOTE(review): this reuses read_algo_vector rather than read_algo_dict;
# both eval each line, so the result is the same.
params_as_dict = read_algo_vector('dict_test.txt')
params_as_dict
```
# Testing in SEE
```
# Load an example image and its ground-truth mask, build a SEE workflow,
# then round-trip a freshly evolved population through the vector file I/O.
from see import base_classes
import imageio
img = imageio.imread('Image_data/Examples/AA_Chameleon.jpg')
gmask = imageio.imread('Image_data/Examples/AA_Chameleon_GT.png')
mydata = base_classes.pipedata()
from see.Segmentors import segmentor
from see.ColorSpace import colorspace
from see.Workflow import workflow
from see.Segment_Fitness import segment_fitness
workflow.addalgos([colorspace, segmentor, segment_fitness])
wf = workflow()
print(wf)
from see import GeneticSearch
my_evolver = GeneticSearch.Evolver(workflow, mydata, pop_size=10)
test = my_evolver.newpopulation()
# Write every individual, read them back, and rebuild the population.
for ind in test:
    GeneticSearch.write_algo_vector("Dirktest.txt", ind)
test2 = GeneticSearch.read_algo_vector("Dirktest.txt")
test3 = my_evolver.copy_pop_list(test2)
type(test3[0])
!cat Dirktest.txt
from pathlib import Path
filen = Path('Dirktest.txt')
dir(filen)
filen.suffix
```
| github_jupyter |
# Simple kriging in Python
This follows a tutorial and code by Connor Johnson, in [his blog post](http://connor-johnson.com/2014/03/20/simple-kriging-in-python/). It is openly licensed under the MIT license.
Some more geostatistics resources:
- More from Connor Johnson: https://github.com/cjohnson318/geostatsmodels
- Another library: https://github.com/whimian/pyGeoStatistics
- HPGL: https://github.com/hpgl/hpgl
- From Clayton Deutsch's lab: http://www.ccgalberta.com/pygeostat/welcome.html
- Following a scikit-learn API: https://pypi.python.org/pypi/scikit-gstat/0.1.6
## What is kriging?
Kriging is a set of techniques for interpolation. It differs from other interpolation techniques in that it sacrifices smoothness for the integrity of sampled points. Most interpolation techniques will over or undershoot the value of the function at sampled locations, but kriging honors those measurements and keeps them fixed.
## Data
We use the data from **Geoff Bohling** at the Kansas Geological Survey. [Click here](http://people.ku.edu/~gbohling/geostats/index.html) then look for "My tutorial on reservoir modeling...". I'm using the `ZoneA.dat` file.
```
import numpy as np
%matplotlib inline
import matplotlib.pyplot as plt
import pandas as pd
from scipy.spatial.distance import pdist, squareform
# Read Geoff Bohling's ZoneA.dat: 10 header lines, then whitespace-separated
# numeric columns. Plot porosity on the x/y map.
with open('../data/ZoneA.dat', 'r') as f:
    z = f.readlines()
z = [i.strip().split() for i in z[10:]]
# `np.float` was a deprecated alias for the builtin `float` and was removed
# in NumPy 1.24 -- use `float` directly.
z = np.array(z, dtype=float)
z = pd.DataFrame(z, columns=['x', 'y', 'thk', 'por', 'perm', 'lperm', 'lpermp', 'lpermr'])
fig, ax = plt.subplots()
ax.scatter(z.x, z.y, c=z.por)
ax.set_aspect(1)
plt.xlim(-1500, 22000)
plt.ylim(-1500, 17500)
plt.xlabel('Easting [m]')
plt.ylabel('Northing [m]')
plt.title('Porosity %')
plt.show()
```
## The semivariogram
According to Connor, these formulations follow Geoff Bohling's and Clayton Deutsch's work.
The semivariogram encodes data about spatial variance over the region at a given distance or lag. We generally expect data points that are close together spatially to share other characteristics, and we expect points that are separated by greater distances to have lesser correlation. The semivariogram allows us to model the similarity points in a field as a function of distance. The semivariogram is given by,
$$ \hat{\gamma}(h) = \dfrac{1}{2N(h)} \displaystyle \sum_{N(h)} ( z_{i} - z_{j} )^{2} $$
Here, h is distance specified by the user, and z_{i} and z_{j} are two points that are separated spatially by h. The N(h) term is the number of points we have that are separated by the distance h. The semivariogram then is the sum of squared differences between values separated by a distance h. As an aside, contrast this with the formulation for variance,
$$ s = \dfrac{1}{N-1} \displaystyle \sum_{k=1}^{N} (z_{k} - \hat{\mu} )^{2} $$
Here, $N$ is the number of data points, $\hat{\mu}$ is the sample mean, and $z_{k}$ is a data point. For sample variance, we are taking the squared difference between data points and the mean, and in the semivariogram we are taking the squared difference between data points separated by distance $h$. We can write some functions to calculate the semivariogram at one lag, and then at multiple lags as follows.
```
def SVh(P, h, bw):
    """Experimental semivariogram for a single lag.

    P  : ndarray whose columns are (x, y, value)
    h  : lag distance
    bw : bandwidth; point pairs within [h - bw, h + bw] contribute
    """
    distances = squareform(pdist(P[:, :2]))
    n_points = distances.shape[0]
    squared_diffs = [
        (P[i, 2] - P[j, 2]) ** 2
        for i in range(n_points)
        for j in range(i + 1, n_points)
        if h - bw <= distances[i, j] <= h + bw
    ]
    return np.sum(squared_diffs) / (2.0 * len(squared_diffs))
def SV(P, hs, bw):
    """Experimental variogram for a collection of lags.

    Returns a 2xM array: lags in row 0, semivariances in row 1. Lags whose
    semivariance is not strictly positive (including NaN) are dropped.
    """
    semivariances = [SVh(P, lag, bw) for lag in hs]
    kept = [[lag, gamma] for lag, gamma in zip(hs, semivariances) if gamma > 0]
    return np.array(kept).T
```
The C() function is the covariance function, and will be used later. Let us now calculate and plot the semivariogram,
```
def C(P, h, bw):
    """Covariance at lag *h*: the sill (sample variance of the values)
    minus the semivariance; at h == 0 this is simply the sill."""
    sill = np.var(P[:, 2])
    return sill if h == 0 else sill - SVh(P, h, bw)
# Part of our data set recording porosity.
P = np.array(z[['x', 'y', 'por']])
# Bandwidth, plus or minus 250 meters.
# NOTE(review): SVh accepts pairs within [h - bw, h + bw], so bw = 500
# actually means plus or minus 500 meters -- the comment above disagrees
# with the code.
bw = 500
# Lags in 500 meter increments from zero to 10,000.
hs = np.arange(0, 10500, bw)
sv = SV( P, hs, bw )
# Make a plot.
plt.plot( sv[0], sv[1], '.-' )
plt.xlabel('Lag [m]')
plt.ylabel('Semivariance')
plt.title('Sample semivariogram') ;
plt.show()
```
## Modeling
Now that we’ve calculated the semivariogram, we will need to fit a model to the data. There are three popular models, the spherical, exponential, and the Gaussian. Here, we’ll implement the spherical model. First, we will present a function named opt() for determining the optimal value a for the spherical model.
```
def opt(func, x, y, C0, parameterRange=None, meshSize=1000):
    """Grid-search the model parameter *a* that minimizes mean squared error.

    Parameters
    ----------
    func : callable
        Model function ``func(x, a, C0)`` (e.g. ``spherical``).
    x, y : ndarray
        Lags and experimental semivariances (``sv[0]``, ``sv[1]``).
    C0 : float
        Sill value.
    parameterRange : sequence of two floats, optional
        Search interval for *a*; defaults to ``[x[1], x[-1]]``.
    meshSize : int
        Number of candidate values tried in the interval.

    Returns
    -------
    float
        The candidate *a* with the smallest mean squared error.
    """
    # `is None` instead of the original `== None`: identity is the correct
    # (and warning-free) way to detect a missing argument.
    if parameterRange is None:
        parameterRange = [x[1], x[-1]]
    a = np.linspace(parameterRange[0], parameterRange[1], meshSize)
    mse = np.zeros(meshSize)
    for i in range(meshSize):
        mse[i] = np.mean((y - func(x, a[i], C0)) ** 2.0)
    return a[mse.argmin()]
```
The opt() function finds the optimal parameter for fitting a spherical model to the semivariogram data. The spherical model is given by the function spherical(). On the last line we see that spherical() returns itself in a map() function, which seems odd. The idea is that the input h can be a single float value, or list or NumPy array of floats. If h is a single value, then line 9 is called. If h is a list or an array (an iterable) then line 17 is called, which applies line 9 to each value of h.
```
def spherical(h, a, C0):
    """Spherical semivariogram model.

    For a scalar lag ``h`` this returns ``C0*(1.5*h/a - 0.5*(h/a)**3)``
    inside the range ``a`` and the sill ``C0`` beyond it; for an array of
    lags the model is applied element-wise.
    """
    if np.ndim(h) != 0:
        # Vectorized case: broadcast a and C0, then evaluate per element.
        a_vec = np.ones(h.size) * a
        C0_vec = np.ones(h.size) * C0
        return np.array([spherical(hi, ai, ci)
                         for hi, ai, ci in zip(h, a_vec, C0_vec)])
    if h > a:
        return C0
    return C0 * (1.5 * h / a - 0.5 * (h / a) ** 3.0)
```
Next, `cvmodel()` fits a model to the semivariogram data and returns a covariance method named `covfct()`.
```
def cvmodel(P, model, hs, bw):
    """Fit *model* to the experimental semivariogram of *P*.

    Input:  (P)     ndarray, data
            (model) modeling function (spherical / exponential / gaussian)
            (hs)    distances
            (bw)    bandwidth
    Output: (covfct) function modeling the covariance
    """
    sv = SV(P, hs, bw)                    # experimental semivariogram
    C0 = C(P, hs[0], bw)                  # sill
    param = opt(model, sv[0], sv[1], C0)  # best-fit model parameter

    def covfct(h, a=param):
        # Covariance is the sill minus the modeled semivariance.
        return C0 - model(h, a, C0)

    return covfct
```
At this point we’ll plot our model and see if it represents our data well.
```
# Fit the spherical model and overlay it on the experimental semivariogram.
sp = cvmodel(P, model=spherical, hs=np.arange(0, 10500, 500), bw=500)
plt.plot( sv[0], sv[1], '.-' )
# NOTE(review): sp() returns the covariance (C0 - model), plotted here
# against the semivariance curve -- verify this is the intended overlay.
plt.plot( sv[0], sp( sv[0] ) ) ;
plt.title('Spherical Model')
plt.ylabel('Semivariance')
plt.xlabel('Lag [m]')
plt.show()
```
## Kriging
Now that we have a model for the semivariogram, we can write a function to perform the kriging. The fundamental relationship is a matrix equation,
$$ K \lambda = k \Rightarrow \lambda = K^{-1} k $$
Here, $K$ is a matrix of covariances calculated using the spherical model, $\lambda$ is a vector of simple kriging weights, and $k$ is the vector of covariances between the data points and an unsampled point. Our kriging function takes the data set `P`, the `model`, the distances `hs`, the bandwidth `bw`, the coordinates of the unsampled point `u`, and the number of surrounding points `N` to use in the calculation.
```
def krige(P, model, hs, bw, u, N):
    """Simple-kriging estimate at an unsampled point.

    Input:  (P)     ndarray, data with columns (x, y, value)
            (model) modeling function (spherical / exponential / gaussian)
            (hs)    kriging distances
            (bw)    kriging bandwidth
            (u)     unsampled point (x, y)
            (N)     number of neighboring points to consider

    Returns the kriging estimate as a float.
    """
    # Covariance function fitted to the experimental semivariogram.
    covfct = cvmodel(P, model, hs, bw)
    # Mean of the variable (simple kriging assumes a known mean).
    mu = np.mean(P[:, 2])

    # Distance between u and each data point; keep only the N nearest,
    # with the distance appended as a fourth column.
    d = np.sqrt((P[:, 0] - u[0]) ** 2.0 + (P[:, 1] - u[1]) ** 2.0)
    P = np.vstack((P.T, d)).T
    P = P[d.argsort()[:N]]

    # Covariances between the retained data points and the unsampled point.
    k = np.asarray(covfct(P[:, 3]))
    # Covariance matrix between the retained data points themselves.
    K = np.asarray(covfct(squareform(pdist(P[:, :2])).ravel())).reshape(N, N)

    # Kriging weights: solve K @ weights = k. np.linalg.solve replaces the
    # original's deprecated np.matrix plus explicit inverse and is more
    # numerically stable.
    weights = np.linalg.solve(K, k)

    # Estimate = weighted sum of residuals plus the mean.
    residuals = P[:, 2] - mu
    return float(np.dot(weights, residuals) + mu)
```
## Estimation
Here, we’ll calculate the kriging estimate at a number of unsampled points.
```
# Bounding box of the data, then the grid resolution for the estimates.
P[:,0].min(), P[:,0].max(), P[:,1].min(), P[:,1].max()
X0, X1 = 0, 20000
Y0, Y1 = 0, 16000
# Define the number of grid cells over which to make estimates.
# TODO: Vectorize this. I'll try numba/jit but I don't think it'll help.
# I think it can be vectorized with np.mgrid (better than np.meshgrid)
# Many points:
x, y = 100, 80
# Fewer points:
x, y = 50, 40
dx, dy = (X1-X0) / x, (Y1-Y0) / y
def stepwise(x, y):
    """Krige the estimate on an x-by-y grid, one cell at a time.

    Relies on the notebook-level globals P, hs, bw, dx and dy.
    """
    Z = np.zeros((y, x))
    for i in range(y):
        print(i, end=' ')  # crude progress indicator, one number per row
        for j in range(x):
            # Bug fix: the grid point is (east, north) = (dx*j, dy*i); the
            # original passed (dy*j, dx*i), which only gave the same result
            # because dx == dy for the grid sizes chosen above.
            Z[i, j] = krige(P, model=spherical, hs=hs, bw=bw, u=(dx * j, dy * i), N=16)
    return Z
# THIS IS SLOW
# Z = stepwise(x, y)
# NOTE(review): Z is referenced below but its computation above is commented
# out -- this cell raises NameError unless Z was computed earlier.
Z
extent = [X0, X1, Y0, Y1]
plt.imshow(Z, origin='lower', interpolation='none', extent=extent)
plt.scatter(z.x, z.y, s=2, c='w')
plt.show()
```
## Comparison to 2D Gaussian process regression
This needs writing.
https://stackoverflow.com/questions/41572058/how-to-correctly-use-scikit-learns-gaussian-process-for-a-2d-inputs-1d-output/43409379
| github_jupyter |
[](https://colab.research.google.com/github/giswqs/geemap/blob/master/examples/notebooks/tn_surface_water.ipynb)
# Automated mapping of surface water in the state of Tennessee using Google Earth Engine cloud computing
Author: Qiusheng Wu ([Website](https://wetlands.io) - [GitHub](https://github.com/giswqs))
<h1>Contents<span class="tocSkip"></span></h1>
<div class="toc"><ul class="toc-item"><li><span><a href="#Install-geemap" data-toc-modified-id="Install-geemap-1"><span class="toc-item-num">1 </span>Install geemap</a></span></li><li><span><a href="#Create-an-interactive-map" data-toc-modified-id="Create-an-interactive-map-2"><span class="toc-item-num">2 </span>Create an interactive map</a></span></li><li><span><a href="#Define-region-of-interest-(ROI)" data-toc-modified-id="Define-region-of-interest-(ROI)-3"><span class="toc-item-num">3 </span>Define region of interest (ROI)</a></span></li><li><span><a href="#Create-Landsat-timeseries" data-toc-modified-id="Create-Landsat-timeseries-4"><span class="toc-item-num">4 </span>Create Landsat timeseries</a></span></li><li><span><a href="#Calculate-Normalized-Difference-Water-Index-(NDWI)" data-toc-modified-id="Calculate-Normalized-Difference-Water-Index-(NDWI)-5"><span class="toc-item-num">5 </span>Calculate Normalized Difference Water Index (NDWI)</a></span></li><li><span><a href="#Extract-surface-water-extent" data-toc-modified-id="Extract-surface-water-extent-6"><span class="toc-item-num">6 </span>Extract surface water extent</a></span></li><li><span><a href="#Calculate-surface-water-areas" data-toc-modified-id="Calculate-surface-water-areas-7"><span class="toc-item-num">7 </span>Calculate surface water areas</a></span></li><li><span><a href="#Plot-temporal-trend" data-toc-modified-id="Plot-temporal-trend-8"><span class="toc-item-num">8 </span>Plot temporal trend</a></span></li></ul></div>
## Install geemap
```
# Installs geemap package
import subprocess
try:
import geemap
except ImportError:
print('geemap package not installed. Installing ...')
subprocess.check_call(["python", '-m', 'pip', 'install', 'geemap'])
import ee
import geemap
```
## Create an interactive map
```
# Create the interactive map widget (default world view).
Map = geemap.Map()
# Echo the widget so Jupyter renders the map as this cell's output.
Map
```
## Define region of interest (ROI)
```
# Load US state boundaries and keep only Tennessee as the region of interest.
states = ee.FeatureCollection('TIGER/2018/States')
roi = states.filter(ee.Filter.eq('NAME', 'Tennessee'))
Map.addLayer(roi, {}, "TN")
Map.centerObject(roi, 7)
```
## Create Landsat timeseries
```
# Build one annual Landsat composite per year over the ROI (1984-2020).
images = geemap.landsat_timeseries(
    roi=roi,
    start_year=1984,
    end_year=2020,
    start_date='01-01',
    end_date='12-31',
)
# Preview the earliest composite with a false-color (NIR/Red/Green) stretch.
vis_params = dict(bands=['NIR', 'Red', 'Green'], min=0, max=3000)
first = images.first()
Map.addLayer(first, vis_params, 'First image')
```
## Calculate Normalized Difference Water Index (NDWI)
```
def _to_ndwi(img):
    """Green/SWIR1 normalized difference, renamed to a single 'ndwi' band."""
    return img.normalizedDifference(['Green', 'SWIR1']).rename('ndwi')

ndwi_images = images.map(_to_ndwi)

# Light-to-dark blue color ramp for visualizing NDWI values.
ndwi_palette = [
    '#ece7f2', '#d0d1e6', '#a6bddb', '#74a9cf',
    '#3690c0', '#0570b0', '#045a8d', '#023858',
]

first_ndwi = ndwi_images.first()
Map.addLayer(first_ndwi, {'palette': ndwi_palette}, 'First NDWI')
```
## Extract surface water extent
```
def _water_only(img):
    """Keep only pixels with positive NDWI (treated here as surface water)."""
    return img.gt(0).selfMask()

water_images = ndwi_images.map(_water_only)
first_water = water_images.first()
Map.addLayer(first_water, {'palette': ['blue']}, 'First Water')
```
## Calculate surface water areas
```
def cal_area(img):
    """Attach the image's water surface area (km^2) as a 'water_area' property."""
    # Per-pixel area in km^2; img is the self-masked water mask, so the
    # product is non-zero only on water pixels.
    pixel_area = img.multiply(ee.Image.pixelArea()).divide(1e6)
    # Sum the per-pixel areas over the ROI at 1 km scale.
    img_area = pixel_area.reduceRegion(
        geometry=roi.geometry(),
        reducer=ee.Reducer.sum(),
        scale=1000,
        maxPixels=1e12,
    )
    return img.set({'water_area': img_area})

water_areas = water_images.map(cal_area)
# Pull the per-year results client-side: one {'ndwi': area_km2} dict per image.
water_stats = water_areas.aggregate_array('water_area').getInfo()
water_stats
```
## Plot temporal trend
```
import matplotlib.pyplot as plt

# Yearly water area (km^2); each stats entry is a {'ndwi': area} dict.
x = [*range(1984, 2021)]
y = [stats.get('ndwi') for stats in water_stats]

plt.bar(x, y, align='center', alpha=0.5)
# plt.xticks(y_pos, objects)
plt.title('Surface water dynamics in Tennessee')
plt.ylabel('Area (km2)')
plt.show()

Map.addLayerControl()
Map
```
| github_jupyter |

___
#### NAME:
#### STUDENT ID:
___
## Numpy Introduction
```
# Load required modules
import numpy as np
```
<br>
**1a) Create two numpy arrays called** ```a``` **and** ```b``` **where** ```a``` **should be all integers between 25-34 (inclusive), and** ```b``` **should be ten evenly spaced numbers between 1-6 (inclusive). Print** ```a``` **and** ```b```
```
# your work here
```
<br>
**1b) [Cube](https://numpy.org/doc/stable/reference/routines.math.html) (i.e. raise to the power of 3) all the elements in both** ```a``` **and** ```b``` **(element-wise). Store the results in two new arrays called** ```cubed_a``` **and** ```cubed_b```**. Print** ```cubed_a``` **and** ```cubed_b```
```
# your work here
```
<br>
**1c) [Add](https://numpy.org/doc/stable/reference/routines.math.html) the two cubed arrays (e.g.** ```[1,2] + [3,4] = [4,6]```**) and store the result in an array called** ```c```**. Print** ```c```
```
# your work here
```
<br>
**1d) [Sum](https://numpy.org/doc/stable/reference/routines.math.html) the elements with even indices of** ```c``` **and store the result in a variable called** ```d```**. Print** ```d```
```
# your work here
```
<br>
**1e) Take the element-wise [square root](https://numpy.org/doc/stable/reference/routines.math.html) of the** ```c``` **and store the result in an array called** ```e```**. Print** ```e```
```
# your work here
```
<br>
**1f) [Append](https://numpy.org/doc/stable/reference/routines.array-manipulation.html)** ```b``` **to** ```a```, **[reshape](https://numpy.org/doc/stable/reference/routines.array-manipulation.html) the appended array so that it is a 4x5, 2D array and store the results in a variable called** ```m```**. Print** ```m```
```
# your work here
```
<br>
**1g) Extract the third and the fourth column of the** ```m``` **matrix. Store the resulting 4x2 matrix in a new variable called** ```m2```**. Print** ```m2```
```
# your work here
```
<br>
**1h) Take the [dot product](https://numpy.org/doc/stable/reference/routines.array-manipulation.html) of** ```m``` **and** ```m2```**. Store the results in a matrix called** ```m3```**. Print** ```m3```
>**Note:** the Dot product of two matrices is given by
$$\large{A\cdot B = A^{T}B}$$
<br>
```
# your work here
```
<br>
**1i) [Round](https://numpy.org/doc/stable/reference/routines.math.html) the** ```m3``` **matrix to three decimal points. Store the result in place and print the new** ```m3```
```
# your work here
```
<br>
___
## NumPy and Masks
<br>
**2a) Create an array called** ```f``` **where the values are** ```cos(x)``` **for** ```x``` **from $0$ to $\pi$ with 50 [equally spaced values](https://numpy.org/doc/stable/reference/routines.array-creation.html) (inclusive). Print** ```f```
```
# your work here
```
<br>
**2b) Use a [mask](https://numpy.org/doc/stable/reference/maskedarray.html) to get an array that is** ```True``` **when** ```f >= 1/2``` **and** ```False``` **when** ```f < 1/2```**. Store the result in an array called** ```g```**. Print** ```g```
```
# your work here
```
<br>
**2c) Create an array called** ```h``` **that has only those values where** ```f>= 1/2```**. Print** ```h```
```
# your work here
```
<br>
___
## Exploratory Data Analysis (EDA) - Data Visualization
<br>
**3.1) Using the** ```df_google``` **data, plot the daily** ```High``` **value as a time series. Give your plot a title.**
>**Tip:**<br>
>* To view your current working directory enter ```%pwd``` in cell and run it.
>**To Do:**<br>
>1. Extract the data from the *'column'* ```high```<br>
>2. Use the datetime array, ```days```, with your extracted data to plot a lineplot (time series) using pyplot. Give your plot a title.
<br>
**Note:** If you are having a hard time extracting the correct data, take a look at the *References and Additional Resources* section of the main notebook. In particular, the section titled *More on Multi-Dimensional Arrays* should have some valuable information.
```
# Plotting Set Up
import pandas as pd
import matplotlib.pyplot as plt
# jupyter notebook magic to display plots in output
%matplotlib inline
# make the plots bigger
plt.rcParams['figure.figsize'] = (10,8)
path_to_file = 'df_google.csv'
data = np.genfromtxt(path_to_file, delimiter=',')
# time stamps
index = pd.date_range('1/2/2018', periods=525)
# convert to np array include yyyy-mm-dd but not time
days = np.array(index, dtype = 'datetime64[D]')
# sanity check
days[:5]
# your work here
```
<br>
___
## NumPy and 2 Variable Prediction
<br>
**Below we have created 2 NumPy arrays each of size 100 that represent the following:**<br>
> ```x``` (number of miles) ranges from 1 to 10 with a uniform noise of $(0, 1/2)$<br>
> ```y``` (money spent in dollars) will be from 1 to 20 with a uniform noise $(0, 1)$
```
# seed the random number generator with a fixed value
import numpy as np
np.random.seed(500)
x=np.linspace(1, 10, 100) + np.random.uniform(low = 0, high = 0.5, size = 100)
y=np.linspace(1, 20, 100) + np.random.uniform(low = 0, high = 1.0, size = 100)
# sanity check
print ('x = ', x[:10])
print ('\ny = ', y[:10])
```
<br>
**3a) Find the [expected](https://numpy.org/doc/stable/reference/routines.statistics.html) value of** ```x``` **and the expected value of** ```y```**. Store the results in two variables called** ```ex``` **and** ```ey```
```
# your work here
```
<br>
**3b) Find [variance](https://numpy.org/doc/stable/reference/routines.statistics.html) of** ```x``` **and** ```y```**. Store the results in two variables called** ```varx``` **and** ```vary```
```
# your work here
```
<br>
**3c) Find [co-variance](https://numpy.org/doc/stable/reference/routines.statistics.html) of** ```x``` **and** ```y```**. Store the result in a variable called** ```cov```
```
# your work here
```
<br>
**3d) Assume that the number of dollars spent in car fuel is only dependent on the miles driven -- a linear relationship. Write code that uses a linear predictor to calculate a predicted value of** ```y``` **for each** ```x``` **and store your predictions in an array called** ```y_pred```**. Print first 5 elements in** ```y_pred```<br>
$$\large{y_{_{predicted}} = f(x) = y_0 + kx}$$
<br>
```
# your work here
```
<br>
**3e) Put the prediction error into an array called** ```y_error```**. Print first 5 elements in** ```y_error```
```
# your work here
```
<br>
**3f) Write code that calculates the root mean square error (RMSE). Store the result in a variable called** ```rmse```**. Print** ```rmse```
<br>
$$\large{RMSE = \sqrt{\frac{\sum_{i=1}^{n} \left(y_{_{predicted}} - y_{_{actual}}\right)^{^2}}{n}}} $$
<br>
```
# your work here
```
___
### Deliverables
Please submit the following via the instructed method (lecture or Syllabus):
>(1) A copy of your work, either a downloaded notebook or a pdf, by the assignment deadline
<br>
**Note:** Don't forget to restart your kernel prior to extracting your data.
>```Kernel --> Restart Kernel and Run all Cells```<br>
>```File --> Export Notebooks As --> PDF``` (or as instructed)
___
| github_jupyter |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.