text stringlengths 2.5k 6.39M | kind stringclasses 3
values |
|---|---|
```
#hide
#skip
! [ -e /content ] && pip install -Uqq fastai # upgrade fastai on colab
#default_exp data.load
#export
from fastai.torch_basics import *
from torch.utils.data.dataloader import _MultiProcessingDataLoaderIter,_SingleProcessDataLoaderIter,_DatasetKind
_loaders = (_MultiProcessingDataLoaderIter,_SingleProcessDataLoaderIter)
#hide
from nbdev.showdoc import *
bs = 4
letters = list(string.ascii_lowercase)
```
## DataLoader helpers
fastai includes a replacement for PyTorch's *DataLoader* which is largely API-compatible, and adds a lot of useful functionality and flexibility. Before we look at the class, there are a couple of helpers we'll need to define.
```
#export
def _wif(worker_id):
    "Worker init function: point each DataLoader worker process at its shard of the batches and seed it."
    set_num_threads(1)  # avoid thread oversubscription inside worker processes
    info = get_worker_info()
    ds = info.dataset.d  # the fastai `DataLoader` wrapped by `_FakeLoader`
    # record worker count/id so `DataLoader.sample` can shard batches across workers
    ds.num_workers,ds.offs = info.num_workers,info.id
    set_seed(info.seed)  # per-worker seed supplied by PyTorch
    ds.wif()  # user-overridable worker-init hook
class _FakeLoader:
    "Quacks like a torch DataLoader so PyTorch's iterator classes (`_loaders`) can drive a fastai `DataLoader` (stored in `self.d`)."
    def _fn_noops(self, x=None, *args, **kwargs): return x
    # Attributes PyTorch's `_BaseDataLoaderIter` subclasses read. Collation is a
    # no-op because batching is done by the fastai `DataLoader` itself.
    _IterableDataset_len_called,_auto_collation,collate_fn,drop_last = None,False,_fn_noops,False
    _index_sampler,generator,prefetch_factor = Inf.count,None,2
    dataset_kind = _dataset_kind = _DatasetKind.Iterable
    def __init__(self, d, pin_memory, num_workers, timeout, persistent_workers):
        # `self.dataset` must be this object itself so a worker can reach the real
        # loader via `get_worker_info().dataset.d` (see `_wif`)
        self.dataset,self.default,self.worker_init_fn = self,d,_wif
        store_attr('d,pin_memory,num_workers,timeout,persistent_workers')
    def __iter__(self): return iter(self.d.create_batches(self.d.sample()))
    @property
    def multiprocessing_context(self): return (None,multiprocessing)[self.num_workers>0]
    @contextmanager
    def no_multiproc(self):
        "Temporarily run with `num_workers=0` (used e.g. by `DataLoader.one_batch`)."
        old_num_workers = self.num_workers
        try:
            self.num_workers = 0
            yield self.d
        finally: self.num_workers = old_num_workers
# Types that PyTorch's `default_collate` already handles correctly
_collate_types = (ndarray, Tensor, typing.Mapping, str)
#export
def fa_collate(t):
    "A replacement for PyTorch `default_collate` which maintains types and handles `Sequence`s"
    first = t[0]
    # directly collatable types: defer to PyTorch
    if isinstance(first, _collate_types):
        return default_collate(t)
    # sequences: transpose and recurse, rebuilding the original container type
    if isinstance(first, Sequence):
        return type(first)([fa_collate(group) for group in zip(*t)])
    # anything else (e.g. plain numbers): defer to PyTorch
    return default_collate(t)
# `fa_collate` agrees with `default_collate` on these inputs but preserves the
# tuple container types at every nesting level
#e.g. x is int, y is tuple
t = [(1,(2,3)),(1,(2,3))]
test_eq(fa_collate(t), default_collate(t))
test_eq(L(fa_collate(t)).map(type), [Tensor,tuple])
t = [(1,(2,(3,4))),(1,(2,(3,4)))]
test_eq(fa_collate(t), default_collate(t))
test_eq(L(fa_collate(t)).map(type), [Tensor,tuple])
test_eq(L(fa_collate(t)[1]).map(type), [Tensor,tuple])
#export
def fa_convert(t):
    "A replacement for PyTorch `default_convert` which maintains types and handles `Sequence`s"
    # directly convertible types: defer to PyTorch
    if isinstance(t, _collate_types):
        return default_convert(t)
    # sequences: recurse element-wise, rebuilding the original container type
    if isinstance(t, Sequence):
        return type(t)([fa_convert(item) for item in t])
    return default_convert(t)
# `fa_convert` converts nested arrays to tensors while preserving container types
t0 = array([1,2])
t = [t0,(t0,t0)]
test_eq(fa_convert(t), default_convert(t))
test_eq(L(fa_convert(t)).map(type), [Tensor,tuple])
#export
class SkipItemException(Exception):
    "Raised to notify `DataLoader` to skip an item"
    # `DataLoader.do_item` catches this and returns None; the item is then
    # filtered out in `create_batches`, so it never reaches a batch
    pass
show_doc(SkipItemException, title_level=3)
```
## DataLoader -
```
#export
@funcs_kwargs
class DataLoader(GetAttr):
    # Callback hooks that default to no-ops; each can be overridden by passing a
    # function of the same name as a kwarg (via `@funcs_kwargs`)
    _noop_methods = 'wif before_iter after_item before_batch after_batch after_iter'.split()
    for o in _noop_methods: exec(f"def {o}(self, x=None, *args, **kwargs): return x")
    # NOTE(review): 'create_batch' appears twice in this list — harmless (dict
    # assignment in `new` is idempotent) but redundant
    _methods = _noop_methods + 'create_batches create_item create_batch retain \
        get_idxs sample shuffle_fn do_batch create_batch'.split()
    _default = 'dataset'  # GetAttr: unknown attributes are delegated to `self.dataset`
    def __init__(self, dataset=None, bs=None, num_workers=0, pin_memory=False, timeout=0, batch_size=None,
                 shuffle=False, drop_last=False, indexed=None, n=None, device=None, persistent_workers=False, **kwargs):
        if batch_size is not None: bs = batch_size # PyTorch compatibility
        assert not (bs is None and drop_last)  # can't drop a partial batch when not batching
        # guess whether the dataset supports random access unless told explicitly
        if indexed is None: indexed = (hasattr(dataset,'__getitem__')
                                       and not isinstance(dataset, IterableDataset))
        if not indexed and shuffle: raise ValueError("Can only shuffle an indexed dataset (not an iterable one).")
        if n is None:
            try: n = len(dataset)
            except TypeError: pass  # iterable datasets may have no len; leave n as None
        store_attr('dataset,bs,shuffle,drop_last,indexed,n,pin_memory,timeout,device')
        # num_workers/offs describe this process's shard; overwritten in workers by `_wif`
        self.rng,self.num_workers,self.offs = random.Random(random.randint(0,2**32-1)),1,0
        if sys.platform == "win32" and IN_NOTEBOOK and num_workers > 0:
            print("Due to IPython and Windows limitation, python multiprocessing isn't available now.")
            print("So `number_workers` is changed to 0 to avoid getting stuck")
            num_workers = 0
        self.fake_l = _FakeLoader(self, pin_memory, num_workers, timeout, persistent_workers=persistent_workers)
    def __len__(self):
        # length in batches; unknown when `n` is unknown
        if self.n is None: raise TypeError
        if self.bs is None: return self.n
        return self.n//self.bs + (0 if self.drop_last or self.n%self.bs==0 else 1)
    def get_idxs(self):
        # infinite index stream (or stream of Nones for iterable datasets)...
        idxs = Inf.count if self.indexed else Inf.nones
        # ...truncated to `n` when the length is known
        if self.n is not None: idxs = list(itertools.islice(idxs, self.n))
        if self.shuffle: idxs = self.shuffle_fn(idxs)
        return idxs
    def sample(self):
        # shard whole batches (not single items) across workers using `offs`
        return (b for i,b in enumerate(self.__idxs) if i//(self.bs or 1)%self.num_workers==self.offs)
    def __iter__(self):
        self.randomize()
        self.before_iter()
        self.__idxs=self.get_idxs() # called in context of main process (not workers/subprocesses)
        # pick single- or multi-process PyTorch iterator depending on num_workers
        for b in _loaders[self.fake_l.num_workers==0](self.fake_l):
            if self.device is not None: b = to_device(b, self.device)
            yield self.after_batch(b)
        self.after_iter()
        if hasattr(self, 'it'): del(self.it)  # drop the dataset iterator created in `create_batches`
    def create_batches(self, samps):
        if self.dataset is not None: self.it = iter(self.dataset)
        # `do_item` returns None for skipped items (SkipItemException); filter them out
        res = filter(lambda o:o is not None, map(self.do_item, samps))
        yield from map(self.do_batch, self.chunkify(res))
    def new(self, dataset=None, cls=None, **kwargs):
        if dataset is None: dataset = self.dataset
        if cls is None: cls = type(self)
        cur_kwargs = dict(dataset=dataset, num_workers=self.fake_l.num_workers, pin_memory=self.pin_memory, timeout=self.timeout,
                          bs=self.bs, shuffle=self.shuffle, drop_last=self.drop_last, indexed=self.indexed, device=self.device)
        for n in self._methods:
            o = getattr(self, n)
            # only carry over callbacks the user supplied (plain functions), not bound methods
            if not isinstance(o, MethodType): cur_kwargs[n] = o
        return cls(**merge(cur_kwargs, kwargs))
    @property
    def prebatched(self): return self.bs is None
    def do_item(self, s):
        try: return self.after_item(self.create_item(s))
        except SkipItemException: return None  # item dropped by `create_batches`
    def chunkify(self, b): return b if self.prebatched else chunked(b, self.bs, self.drop_last)
    def shuffle_fn(self, idxs): return self.rng.sample(idxs, len(idxs))
    def randomize(self): self.rng = random.Random(self.rng.randint(0,2**32-1))
    def retain(self, res, b): return retain_types(res, b[0] if is_listy(b) else b)
    def create_item(self, s):
        if self.indexed: return self.dataset[s or 0]
        elif s is None: return next(self.it)
        else: raise IndexError("Cannot index an iterable dataset numerically - must use `None`.")
    def create_batch(self, b): return (fa_collate,fa_convert)[self.prebatched](b)
    def do_batch(self, b): return self.retain(self.create_batch(self.before_batch(b)), b)
    def to(self, device): self.device = device
    def one_batch(self):
        if self.n is not None and len(self)==0: raise ValueError(f'This DataLoader does not contain any batches')
        # single-process so we don't spin up workers just for one batch
        with self.fake_l.no_multiproc(): res = first(self)
        if hasattr(self, 'it'): delattr(self, 'it')
        return res
return res
#export
# Attach docstrings to `DataLoader` and its methods (fastcore/nbdev convention:
# docs are kept out of the class body to keep the source compact)
add_docs(DataLoader, "API compatible with PyTorch DataLoader, with a lot more callbacks and flexibility",
         get_idxs = "Return a list of indices to reference the dataset. Calls `shuffle_fn` internally if `shuffle=True`.",
         sample = "Same as `get_idxs` but returns a generator of indices to reference the dataset.",
         create_batches = "Takes output of `sample` as input, and returns batches of data. Does not apply `after_batch`.",
         new = "Create a new `DataLoader` with given arguments keeping remaining arguments same as original `DataLoader`.",
         prebatched = "Check if `bs` is None.",
         do_item = "Combines `after_item` and `create_item` to get an item from dataset by providing index as input.",
         chunkify = "Used by `create_batches` to turn generator of items (`b`) into batches.",
         shuffle_fn = "Returns a random permutation of `idxs`.",
         randomize = "Set's `DataLoader` random number generator state.",
         retain = "Cast each item of `res` to type of matching item in `b` if its a superclass.",
         create_item = "Subset of the dataset containing the index values of sample if exists, else next iterator.",
         create_batch = "Collate a list of items into a batch.",
         do_batch = "Combines `create_batch` and `before_batch` to get a batch of items. Input is a list of items to collate.",
         to = "Sets `self.device=device`.",
         one_batch = "Return one batch from `DataLoader`.",
         wif = "See pytorch `worker_init_fn` for details.",
         before_iter = "Called before `DataLoader` starts to read/iterate over the dataset.",
         after_item = "Takes output of `create_item` as input and applies this function on it.",
         before_batch = "It is called before collating a list of items into a batch. Input is a list of items.",
         after_batch = "After collating mini-batch of items, the mini-batch is passed through this function.",
         after_iter = "Called after `DataLoader` has fully read/iterated over the dataset.")
```
Arguments to `DataLoader`:
* `dataset`: dataset from which to load the data. Can be either map-style or iterable-style dataset.
* `bs` (int): how many samples per batch to load (if `batch_size` is provided then `batch_size` will override `bs`). If `bs=None`, then it is assumed that `dataset.__getitem__` returns a batch.
* `num_workers` (int): how many subprocesses to use for data loading. `0` means that the data will be loaded in the main process.
* `pin_memory` (bool): If `True`, the data loader will copy Tensors into CUDA pinned memory before returning them.
* `timeout` (float>0): the timeout value in seconds for collecting a batch from workers.
* `batch_size` (int): It is only provided for PyTorch compatibility. Use `bs`.
* `shuffle` (bool): If `True`, then data is shuffled every time dataloader is fully read/iterated.
* `drop_last` (bool): If `True`, then the last incomplete batch is dropped.
* `indexed` (bool): The `DataLoader` will make a guess as to whether the dataset can be indexed (or is iterable), but you can override it with this parameter. `True` by default.
* `n` (int): Defaults to `len(dataset)`. If you are using iterable-style dataset, you can specify the size with `n`.
* `device` (torch.device): Defaults to `default_device()` which is CUDA by default. You can specify device as `torch.device('cpu')`.
Override `create_item` and use the default infinite sampler to get a stream of unknown length (`stop()` when you want to stop the stream).
```
class RandDL(DataLoader):
    "A `DataLoader` that streams random floats, ending the stream (~5% chance per item) via `stop`."
    def create_item(self, s):
        val = random.random()
        if val < 0.95:
            return val
        return stop()
# The stream has a random length, so each epoch's size varies
L(RandDL())
L(RandDL(bs=4, drop_last=True)).map(len)
dl = RandDL(bs=4, num_workers=4, drop_last=True)
L(dl).map(len)
test_num_workers = 0 if sys.platform == "win32" else 4
test_eq(dl.fake_l.num_workers, test_num_workers)
with dl.fake_l.no_multiproc():
    test_eq(dl.fake_l.num_workers, 0)  # temporarily forced to single-process
    L(dl).map(len)
test_eq(dl.fake_l.num_workers, test_num_workers)  # restored afterwards
def _rand_item(s):
    "Return a random float, ending the stream (~5% chance per call) via `stop`."
    val = random.random()
    return stop() if val >= 0.95 else val
L(DataLoader(create_item=_rand_item))
```
If you don't set `bs`, then `dataset` is assumed to provide an iterator or a `__getitem__` that returns a batch.
```
# With bs=None (prebatched mode) each dataset item is yielded as-is
ds1 = DataLoader(letters)
test_eq(L(ds1), letters)
test_eq(len(ds1), 26)
test_shuffled(L(DataLoader(letters, shuffle=True)), letters)
ds1 = DataLoader(letters, indexed=False)
test_eq(L(ds1), letters)
test_eq(len(ds1), 26)
t2 = L(tensor([0,1,2]),tensor([3,4,5]))
ds2 = DataLoader(t2)
test_eq_type(L(ds2), t2)
t3 = L(array([0,1,2], dtype=np.int64),array([3,4,5], dtype=np.int64))
ds3 = DataLoader(t3)
test_eq_type(L(ds3), t3.map(tensor))  # arrays become tensors via `fa_convert`
ds4 = DataLoader(t3, create_batch=noop, after_iter=lambda: setattr(t3, 'f', 1))
test_eq_type(L(ds4), t3)  # noop create_batch leaves the arrays untouched
test_eq(t3.f, 1)  # the after_iter callback ran
```
If you do set `bs`, then `dataset` is assumed to provide an iterator or a `__getitem__` that returns a single item of a batch.
```
def twoepochs(d):
    "Return the space-joined string form of every batch of `d`, over two full passes."
    # `''.join(o)` accepts any iterable of strings directly — the original
    # `''.join(list(o))` materialized a redundant intermediate list
    return ' '.join(''.join(o) for _ in range(2) for o in d)
# With bs set, items are collated into batches of 4
ds1 = DataLoader(letters, bs=4, drop_last=True, num_workers=0)
test_eq(twoepochs(ds1), 'abcd efgh ijkl mnop qrst uvwx abcd efgh ijkl mnop qrst uvwx')
ds1 = DataLoader(letters,4,num_workers=2)
test_eq(twoepochs(ds1), 'abcd efgh ijkl mnop qrst uvwx yz abcd efgh ijkl mnop qrst uvwx yz')
ds1 = DataLoader(range(12), bs=4, num_workers=3)
test_eq_type(L(ds1), L(tensor([0,1,2,3]),tensor([4,5,6,7]),tensor([8,9,10,11])))
ds1 = DataLoader([str(i) for i in range(11)], bs=4, after_iter=lambda: setattr(t3, 'f', 2))
test_eq_type(L(ds1), L(['0','1','2','3'],['4','5','6','7'],['8','9','10']))
test_eq(t3.f, 2)
# a generator (no len) also works; batches arrive on demand
it = iter(DataLoader(map(noop,range(20)), bs=4, num_workers=1))
test_eq_type([next(it) for _ in range(3)], [tensor([0,1,2,3]),tensor([4,5,6,7]),tensor([8,9,10,11])])
```
Iterable dataloaders require specific tests.
```
class DummyIterableDataset(IterableDataset):
    "Iterable-style dataset that simply yields the integers 0..10."
    def __iter__(self):
        return iter(range(11))
ds1 = DataLoader(DummyIterableDataset(), bs=4)
# Check it yields fine, and check we can do multiple passes
for i in range(3):
    test_eq_type(L(ds1), L(tensor([0,1,2,3]),tensor([4,5,6,7]),tensor([8,9,10])))
# Check `drop_last` works fine (with multiple passes, since this will prematurely terminate the iterator)
ds1 = DataLoader(DummyIterableDataset(), bs=4, drop_last=True)
for i in range(3):
    test_eq_type(L(ds1), L(tensor([0,1,2,3]),tensor([4,5,6,7])))
class SleepyDL(list):
    "A list whose item access sleeps briefly, simulating slow per-item I/O."
    def __getitem__(self,i):
        # random latency of up to 20ms per access
        delay = random.random() / 50
        time.sleep(delay)
        return super().__getitem__(i)
t = SleepyDL(letters)
# more workers amortize the slow item access
%time test_eq(DataLoader(t, num_workers=0), letters)
%time test_eq(DataLoader(t, num_workers=2), letters)
%time test_eq(DataLoader(t, num_workers=4), letters)
dl = DataLoader(t, shuffle=True, num_workers=1)
test_shuffled(L(dl), letters)
test_shuffled(L(dl), L(dl))  # each pass reshuffles
L(dl)
class SleepyQueue():
    "Simulate a queue with varying latency"
    def __init__(self, q):
        self.q = q
    def __iter__(self):
        # drain the queue one item at a time, pausing briefly before each get
        src = self.q
        while True:
            time.sleep(random.random()/100)
            try:
                item = src.get_nowait()
            except queues.Empty:
                return
            yield item
q = Queue()
for o in range(30): q.put(o)
it = SleepyQueue(q)
# several workers draining a slow queue return the items out of order
if not (sys.platform == "win32" and IN_NOTEBOOK):
    %time test_shuffled(L(DataLoader(it, num_workers=4)), L(range(30)))
class A(TensorBase): pass
# tensor subclasses must survive collation, with and without workers
for nw in (0,2):
    t = A(tensor([1,2]))
    dl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=nw)
    b = first(dl)
    test_eq(type(b), A)
    t = (A(tensor([1,2])),)
    dl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=nw)
    b = first(dl)
    test_eq(type(b[0]), A)
list(DataLoader(list(range(50)),bs=32,shuffle=True,num_workers=3))
class A(TensorBase): pass
t = A(tensor(1,2))
tdl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=2, after_batch=to_device)
b = first(tdl)
test_eq(type(b), A)
# Unknown attributes are delegated to `dataset`
test_eq(tdl.pop(), tensor(1,2))
```
Override `get_idxs` to return the same index until consumption of the DL. This is intended to test consistent sampling behavior when `num_workers`>1.
```
class AdamantDL(DataLoader):
    "A `DataLoader` that stubbornly returns one randomly-chosen index for the whole epoch."
    def get_idxs(self):
        chosen = random.randint(0,self.n-1)
        return [chosen] * self.n
# every batch (even across 4 workers) contains only that single value
test_eq(torch.cat(tuple(AdamantDL((list(range(50))),bs=16,num_workers=4))).unique().numel(),1)
```
## Export -
```
#hide
from nbdev.export import notebook2script
notebook2script()
# from subprocess import Popen, PIPE
# # test num_workers > 0 in scripts works when python process start method is spawn
# process = Popen(["python", "dltest.py"], stdout=PIPE)
# _, err = process.communicate(timeout=15)
# exit_code = process.wait()
# test_eq(exit_code, 0)
```
| github_jupyter |
<a href="https://colab.research.google.com/github/dlmacedo/deep-learning-class/blob/master/notebooks/tensorflow/transformer_chatbot.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Transformer Chatbot
This tutorial trains a <a href="https://arxiv.org/abs/1706.03762" class="external">Transformer model</a> to be a chatbot. This is an advanced example that assumes knowledge of [text generation](https://tensorflow.org/alpha/tutorials/text/text_generation), [attention](https://www.tensorflow.org/alpha/tutorials/text/nmt_with_attention) and [transformer](https://www.tensorflow.org/alpha/tutorials/text/transformer).
The core idea behind the Transformer model is *self-attention*—the ability to attend to different positions of the input sequence to compute a representation of that sequence. Transformer creates stacks of self-attention layers and is explained below in the sections *Scaled dot product attention* and *Multi-head attention*.
Note: The model architecture is identical to the example in [Transformer model for language understanding](https://www.tensorflow.org/alpha/tutorials/text/transformer), and we demonstrate how to implement the same model in the Functional approach instead of Subclassing.
```
from __future__ import absolute_import, division, print_function, unicode_literals
#!pip install tf-nightly-gpu-2.0-preview==2.0.0.dev20190520
#import tensorflow as tf
try:
# %tensorflow_version only exists in Colab.
%tensorflow_version 2.x
except Exception:
pass
import tensorflow as tf
print("TensorFlow Version:", tf.__version__)
print("GPU Available:", tf.test.is_gpu_available())
tf.random.set_seed(1234)
!pip install tfds-nightly
import tensorflow_datasets as tfds
import os
import re
import numpy as np
import matplotlib.pyplot as plt
```
##Prepare Dataset
We will use the conversations in movies and TV shows provided by [Cornell Movie-Dialogs Corpus](https://www.cs.cornell.edu/~cristian/Cornell_Movie-Dialogs_Corpus.html), which contains more than 220 thousand conversational exchanges between more than 10k pairs of movie characters, as our dataset.
`movie_conversations.txt` contains a list of the conversation IDs and `movie_lines.txt` contains the text associated with each conversation ID. For further information regarding the dataset, please check the README file in the zip file.
```
# Download and unpack the Cornell Movie-Dialogs corpus, then resolve the paths
# to the two files we parse below
path_to_zip = tf.keras.utils.get_file(
    'cornell_movie_dialogs.zip',
    origin=
    'http://www.cs.cornell.edu/~cristian/data/cornell_movie_dialogs_corpus.zip',
    extract=True)
path_to_dataset = os.path.join(
    os.path.dirname(path_to_zip), "cornell movie-dialogs corpus")
path_to_movie_lines = os.path.join(path_to_dataset, 'movie_lines.txt')
path_to_movie_conversations = os.path.join(path_to_dataset,
                                           'movie_conversations.txt')
```
### Load and preprocess data
To keep this example simple and fast, we are limiting the maximum number of training samples to `MAX_SAMPLES=50000` and the maximum length of the sentence to be `MAX_LENGTH=40`.
We preprocess our dataset in the following order:
* Extract `MAX_SAMPLES` conversation pairs into lists of `questions` and `answers`.
* Preprocess each sentence by removing special characters in each sentence.
* Build tokenizer (map text to ID and ID to text) using [TensorFlow Datasets SubwordTextEncoder](https://www.tensorflow.org/datasets/api_docs/python/tfds/features/text/SubwordTextEncoder).
* Tokenize each sentence and add `START_TOKEN` and `END_TOKEN` to indicate the start and end of each sentence.
* Filter out sentence that has more than `MAX_LENGTH` tokens.
* Pad tokenized sentences to `MAX_LENGTH`
```
# Maximum number of samples to preprocess
MAX_SAMPLES = 50000

def preprocess_sentence(sentence):
    "Lowercase a sentence, pad punctuation with spaces, and strip every other special character."
    sentence = sentence.lower().strip()
    # put a space on both sides of punctuation, e.g. "he is a boy." => "he is a boy ."
    sentence = re.sub(r"([?.!,])", r" \1 ", sentence)
    # collapse runs of spaces into a single space
    sentence = re.sub(r'[" "]+', " ", sentence)
    # replace anything that isn't a letter or kept punctuation (a-z, A-Z, ".", "?", "!", ",") with a space
    sentence = re.sub(r"[^a-zA-Z?.!,]+", " ", sentence)
    return sentence.strip()
def load_conversations():
    "Parse the corpus files into parallel lists of (input, reply) sentence pairs, capped at MAX_SAMPLES."
    # dictionary of line id to text
    id2line = {}
    with open(path_to_movie_lines, errors='ignore') as file:
        lines = file.readlines()
    # fields are separated by the corpus's ' +++$+++ ' delimiter; field 0 is the
    # line id, field 4 the utterance text
    for line in lines:
        parts = line.replace('\n', '').split(' +++$+++ ')
        id2line[parts[0]] = parts[4]
    inputs, outputs = [], []
    with open(path_to_movie_conversations, 'r') as file:
        lines = file.readlines()
    for line in lines:
        parts = line.replace('\n', '').split(' +++$+++ ')
        # get conversation in a list of line ID (field 3 is a stringified list like "['L1','L2']")
        conversation = [line[1:-1] for line in parts[3][1:-1].split(', ')]
        # each consecutive pair of utterances becomes one (input, reply) sample
        for i in range(len(conversation) - 1):
            inputs.append(preprocess_sentence(id2line[conversation[i]]))
            outputs.append(preprocess_sentence(id2line[conversation[i + 1]]))
            if len(inputs) >= MAX_SAMPLES:
                return inputs, outputs
    return inputs, outputs
questions, answers = load_conversations()
print('Sample question: {}'.format(questions[20]))
print('Sample answer: {}'.format(answers[20]))
# Build tokenizer using tfds for both questions and answers
tokenizer = tfds.features.text.SubwordTextEncoder.build_from_corpus(
    questions + answers, target_vocab_size=2**13)
# Define start and end token to indicate the start and end of a sentence
START_TOKEN, END_TOKEN = [tokenizer.vocab_size], [tokenizer.vocab_size + 1]
# Vocabulary size plus start and end token
# (the two extra ids are reserved for START_TOKEN / END_TOKEN above)
VOCAB_SIZE = tokenizer.vocab_size + 2
print('Tokenized sample question: {}'.format(tokenizer.encode(questions[20])))
# Maximum sentence length
MAX_LENGTH = 40
# Tokenize, filter and pad sentences
def tokenize_and_filter(inputs, outputs):
    "Tokenize sentence pairs, drop pairs longer than MAX_LENGTH, and pad both sides to MAX_LENGTH."
    tokenized_inputs, tokenized_outputs = [], []
    for (sentence1, sentence2) in zip(inputs, outputs):
        # tokenize sentence, wrapping it in the start/end markers
        sentence1 = START_TOKEN + tokenizer.encode(sentence1) + END_TOKEN
        sentence2 = START_TOKEN + tokenizer.encode(sentence2) + END_TOKEN
        # keep the pair only if BOTH sides fit within MAX_LENGTH
        if len(sentence1) <= MAX_LENGTH and len(sentence2) <= MAX_LENGTH:
            tokenized_inputs.append(sentence1)
            tokenized_outputs.append(sentence2)
    # pad tokenized sentences (zero-padded at the end, matching create_padding_mask)
    tokenized_inputs = tf.keras.preprocessing.sequence.pad_sequences(
        tokenized_inputs, maxlen=MAX_LENGTH, padding='post')
    tokenized_outputs = tf.keras.preprocessing.sequence.pad_sequences(
        tokenized_outputs, maxlen=MAX_LENGTH, padding='post')
    return tokenized_inputs, tokenized_outputs
# Apply tokenization/padding and report corpus statistics
questions, answers = tokenize_and_filter(questions, answers)
print('Vocab size: {}'.format(VOCAB_SIZE))
print('Number of samples: {}'.format(len(questions)))
```
### Create `tf.data.Dataset`
We are going to use the [tf.data.Dataset API](https://www.tensorflow.org/api_docs/python/tf/data) to construct our input pipeline in order to utilize features like caching and prefetching to speed up the training process.
The transformer is an auto-regressive model: it makes predictions one part at a time, and uses its output so far to decide what to do next.
During training this example uses teacher-forcing. Teacher forcing is passing the true output to the next time step regardless of what the model predicts at the current time step.
As the transformer predicts each word, self-attention allows it to look at the previous words in the input sequence to better predict the next word.
To prevent the model from peeking at the expected output the model uses a look-ahead mask.
The target is divided into `decoder_inputs`, which is padded as an input to the decoder, and `cropped_targets` for calculating our loss and accuracy.
```
BATCH_SIZE = 64
BUFFER_SIZE = 20000
# decoder inputs use the previous target as input (teacher forcing)
# remove START_TOKEN from targets
dataset = tf.data.Dataset.from_tensor_slices((
    {
        'inputs': questions,
        'dec_inputs': answers[:, :-1]
    },
    {
        'outputs': answers[:, 1:]
    },
))
# cache -> shuffle -> batch -> prefetch to overlap preprocessing with training
dataset = dataset.cache()
dataset = dataset.shuffle(BUFFER_SIZE)
dataset = dataset.batch(BATCH_SIZE)
dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
print(dataset)
```
## Attention
### Scaled dot product Attention
The scaled dot-product attention function used by the transformer takes three inputs: Q (query), K (key), V (value). The equation used to calculate the attention weights is:
$$\Large{Attention(Q, K, V) = softmax_k(\frac{QK^T}{\sqrt{d_k}}) V} $$
As the softmax normalization is done on the `key`, its values decide the amount of importance given to the `query`.
The output represents the multiplication of the attention weights and the `value` vector. This ensures that the words we want to focus on are kept as is and the irrelevant words are flushed out.
The dot-product attention is scaled by a factor of square root of the depth. This is done because for large values of depth, the dot product grows large in magnitude pushing the softmax function where it has small gradients resulting in a very hard softmax.
For example, consider that `query` and `key` have a mean of 0 and variance of 1. Their matrix multiplication will have a mean of 0 and variance of `dk`. Hence, *square root of `dk`* is used for scaling (and not any other number) because the matmul of `query` and `key` should have a mean of 0 and variance of 1, so that we get a gentler softmax.
The mask is multiplied with *-1e9 (close to negative infinity).* This is done because the mask is summed with the scaled matrix multiplication of `query` and `key` and is applied immediately before a softmax. The goal is to zero out these cells, and large negative inputs to softmax are near zero in the output.
```
def scaled_dot_product_attention(query, key, value, mask):
    """Compute attention output: softmax(QK^T / sqrt(d_k) + mask) V."""
    # raw attention scores between every query and key position
    scores = tf.matmul(query, key, transpose_b=True)
    # scale by sqrt of the key depth to keep the softmax well-conditioned
    depth = tf.cast(tf.shape(key)[-1], tf.float32)
    scores = scores / tf.math.sqrt(depth)
    # push masked (padding/future) positions towards -inf so softmax zeroes them
    if mask is not None:
        scores += (mask * -1e9)
    # normalize over the key axis (seq_len_k) and weight the values
    weights = tf.nn.softmax(scores, axis=-1)
    return tf.matmul(weights, value)
```
### Multi-head attention
<img src="https://www.tensorflow.org/images/tutorials/transformer/multi_head_attention.png" width="500" alt="multi-head attention">
Multi-head attention consists of four parts:
* Linear layers and split into heads.
* Scaled dot-product attention.
* Concatenation of heads.
* Final linear layer.
Each multi-head attention block gets three inputs; Q (query), K (key), V (value). These are put through linear (Dense) layers and split up into multiple heads.
The `scaled_dot_product_attention` defined above is applied to each head (broadcasted for efficiency). An appropriate mask must be used in the attention step. The attention output for each head is then concatenated (using `tf.transpose`, and `tf.reshape`) and put through a final `Dense` layer.
Instead of one single attention head, `query`, `key`, and `value` are split into multiple heads because it allows the model to jointly attend to information at different positions from different representational spaces. After the split each head has a reduced dimensionality, so the total computation cost is the same as a single head attention with full dimensionality.
```
class MultiHeadAttention(tf.keras.layers.Layer):
    "Multi-head attention: linear projections, per-head scaled dot-product attention, concat, final linear layer."
    def __init__(self, d_model, num_heads, name="multi_head_attention"):
        super(MultiHeadAttention, self).__init__(name=name)
        self.num_heads = num_heads
        self.d_model = d_model
        assert d_model % self.num_heads == 0  # d_model must split evenly across heads
        self.depth = d_model // self.num_heads  # per-head feature size
        self.query_dense = tf.keras.layers.Dense(units=d_model)
        self.key_dense = tf.keras.layers.Dense(units=d_model)
        self.value_dense = tf.keras.layers.Dense(units=d_model)
        self.dense = tf.keras.layers.Dense(units=d_model)
    def split_heads(self, inputs, batch_size):
        # (batch, seq, d_model) -> (batch, num_heads, seq, depth)
        inputs = tf.reshape(
            inputs, shape=(batch_size, -1, self.num_heads, self.depth))
        return tf.transpose(inputs, perm=[0, 2, 1, 3])
    def call(self, inputs):
        # inputs is a dict so this layer can be called in the Functional API
        query, key, value, mask = inputs['query'], inputs['key'], inputs[
            'value'], inputs['mask']
        batch_size = tf.shape(query)[0]
        # linear layers
        query = self.query_dense(query)
        key = self.key_dense(key)
        value = self.value_dense(value)
        # split heads
        query = self.split_heads(query, batch_size)
        key = self.split_heads(key, batch_size)
        value = self.split_heads(value, batch_size)
        # scaled dot-product attention (runs over all heads at once via broadcasting)
        scaled_attention = scaled_dot_product_attention(query, key, value, mask)
        # back to (batch, seq, num_heads, depth) before merging heads
        scaled_attention = tf.transpose(scaled_attention, perm=[0, 2, 1, 3])
        # concatenation of heads
        concat_attention = tf.reshape(scaled_attention,
                                      (batch_size, -1, self.d_model))
        # final linear layer
        outputs = self.dense(concat_attention)
        return outputs
```
## Transformer
### Masking
`create_padding_mask` and `create_look_ahead_mask` are helper functions for creating masks that mask out padded tokens. We are going to use these helper functions as `tf.keras.layers.Lambda` layers.
Mask all the pad tokens (value `0`) in the batch to ensure the model does not treat padding as input.
```
def create_padding_mask(x):
    "Return a (batch_size, 1, 1, seq_len) mask that is 1.0 wherever `x` holds a pad token (0)."
    is_pad = tf.cast(tf.math.equal(x, 0), tf.float32)
    # insert the two broadcast axes expected by the attention logits
    return is_pad[:, tf.newaxis, tf.newaxis, :]
print(create_padding_mask(tf.constant([[1, 2, 0, 3, 0], [0, 0, 0, 4, 5]])))
```
Look-ahead mask to mask the future tokens in a sequence.
We also mask out pad tokens.
i.e. To predict the third word, only the first and second word will be used
```
def create_look_ahead_mask(x):
    "Mask future positions (strict upper triangle) as well as padding tokens in `x`."
    seq_len = tf.shape(x)[1]
    # strict upper-triangular ones: position i may not attend to positions > i
    future_mask = 1 - tf.linalg.band_part(tf.ones((seq_len, seq_len)), -1, 0)
    pad_mask = create_padding_mask(x)
    # union of the two masks
    return tf.maximum(future_mask, pad_mask)
print(create_look_ahead_mask(tf.constant([[1, 2, 0, 4, 5]])))
```
### Positional encoding
Since this model doesn't contain any recurrence or convolution, positional encoding is added to give the model some information about the relative position of the words in the sentence.
The positional encoding vector is added to the embedding vector. Embeddings represent a token in a d-dimensional space where tokens with similar meaning will be closer to each other. But the embeddings do not encode the relative position of words in a sentence. So after adding the positional encoding, words will be closer to each other based on the *similarity of their meaning and their position in the sentence*, in the d-dimensional space.
See the notebook on [positional encoding](https://github.com/tensorflow/examples/blob/master/community/en/position_encoding.ipynb) to learn more about it. The formula for calculating the positional encoding is as follows:
$$\Large{PE_{(pos, 2i)} = sin(pos / 10000^{2i / d_{model}})} $$
$$\Large{PE_{(pos, 2i+1)} = cos(pos / 10000^{2i / d_{model}})} $$
```
class PositionalEncoding(tf.keras.layers.Layer):
    "Adds fixed sinusoidal positional encodings to its input embeddings."
    def __init__(self, position, d_model):
        super(PositionalEncoding, self).__init__()
        # precompute the encoding table once; reused on every call
        self.pos_encoding = self.positional_encoding(position, d_model)
    def get_angles(self, position, i, d_model):
        # angle rate for dimension i: 1 / 10000^(2*(i//2)/d_model)
        angles = 1 / tf.pow(10000, (2 * (i // 2)) / tf.cast(d_model, tf.float32))
        return position * angles
    def positional_encoding(self, position, d_model):
        angle_rads = self.get_angles(
            position=tf.range(position, dtype=tf.float32)[:, tf.newaxis],
            i=tf.range(d_model, dtype=tf.float32)[tf.newaxis, :],
            d_model=d_model)
        # apply sin to even index in the array
        sines = tf.math.sin(angle_rads[:, 0::2])
        # apply cos to odd index in the array
        cosines = tf.math.cos(angle_rads[:, 1::2])
        # NOTE(review): sin/cos halves are concatenated, not interleaved as in the
        # original paper formula — a common variant; verify if comparing outputs
        pos_encoding = tf.concat([sines, cosines], axis=-1)
        pos_encoding = pos_encoding[tf.newaxis, ...]
        return tf.cast(pos_encoding, tf.float32)
    def call(self, inputs):
        # add the encodings for the first seq_len positions of the input
        return inputs + self.pos_encoding[:, :tf.shape(inputs)[1], :]
# Visualize the (position x depth) encoding matrix
sample_pos_encoding = PositionalEncoding(50, 512)
plt.pcolormesh(sample_pos_encoding.pos_encoding.numpy()[0], cmap='RdBu')
plt.xlabel('Depth')
plt.xlim((0, 512))
plt.ylabel('Position')
plt.colorbar()
plt.show()
```
### Encoder Layer
Each encoder layer consists of sublayers:
1. Multi-head attention (with padding mask)
2. 2 dense layers followed by dropout
Each of these sublayers has a residual connection around it followed by a layer normalization. Residual connections help in avoiding the vanishing gradient problem in deep networks.
The output of each sublayer is `LayerNorm(x + Sublayer(x))`. The normalization is done on the `d_model` (last) axis.
```
def encoder_layer(units, d_model, num_heads, dropout, name="encoder_layer"):
  """One encoder layer: multi-head self-attention and a position-wise
  feed-forward network, each followed by dropout, a residual connection
  and layer normalization over the d_model (last) axis."""
  inputs = tf.keras.Input(shape=(None, d_model), name="inputs")
  padding_mask = tf.keras.Input(shape=(1, 1, None), name="padding_mask")
  # Sublayer 1: self-attention (query = key = value = inputs).
  attention = MultiHeadAttention(
      d_model, num_heads, name="attention")({
          'query': inputs,
          'key': inputs,
          'value': inputs,
          'mask': padding_mask
      })
  attention = tf.keras.layers.Dropout(rate=dropout)(attention)
  # Residual connection + LayerNorm: LayerNorm(x + Sublayer(x)).
  attention = tf.keras.layers.LayerNormalization(
      epsilon=1e-6)(inputs + attention)
  # Sublayer 2: two dense layers (position-wise feed-forward network).
  outputs = tf.keras.layers.Dense(units=units, activation='relu')(attention)
  outputs = tf.keras.layers.Dense(units=d_model)(outputs)
  outputs = tf.keras.layers.Dropout(rate=dropout)(outputs)
  outputs = tf.keras.layers.LayerNormalization(
      epsilon=1e-6)(attention + outputs)
  return tf.keras.Model(
      inputs=[inputs, padding_mask], outputs=outputs, name=name)
sample_encoder_layer = encoder_layer(
units=512,
d_model=128,
num_heads=4,
dropout=0.3,
name="sample_encoder_layer")
tf.keras.utils.plot_model(
sample_encoder_layer, to_file='encoder_layer.png', show_shapes=True)
```
### Encoder
The Encoder consists of:
1. Input Embedding
2. Positional Encoding
3. `num_layers` encoder layers
The input is put through an embedding which is summed with the positional encoding. The output of this summation is the input to the encoder layers. The output of the encoder is the input to the decoder.
```
def encoder(vocab_size,
            num_layers,
            units,
            d_model,
            num_heads,
            dropout,
            name="encoder"):
  """Build the full encoder: token embedding + positional encoding followed
  by `num_layers` stacked encoder layers. Returns a Keras Model taking
  [tokens, padding_mask]."""
  inputs = tf.keras.Input(shape=(None,), name="inputs")
  padding_mask = tf.keras.Input(shape=(1, 1, None), name="padding_mask")
  embeddings = tf.keras.layers.Embedding(vocab_size, d_model)(inputs)
  # Scale embeddings by sqrt(d_model) as in the Transformer paper.
  embeddings *= tf.math.sqrt(tf.cast(d_model, tf.float32))
  # NOTE(review): the positional table is sized by vocab_size, which only
  # works while vocab_size >= maximum sequence length -- confirm.
  embeddings = PositionalEncoding(vocab_size, d_model)(embeddings)
  outputs = tf.keras.layers.Dropout(rate=dropout)(embeddings)
  for i in range(num_layers):
    outputs = encoder_layer(
        units=units,
        d_model=d_model,
        num_heads=num_heads,
        dropout=dropout,
        name="encoder_layer_{}".format(i),
    )([outputs, padding_mask])
  return tf.keras.Model(
      inputs=[inputs, padding_mask], outputs=outputs, name=name)
sample_encoder = encoder(
vocab_size=8192,
num_layers=2,
units=512,
d_model=128,
num_heads=4,
dropout=0.3,
name="sample_encoder")
tf.keras.utils.plot_model(
sample_encoder, to_file='encoder.png', show_shapes=True)
```
### Decoder Layer
Each decoder layer consists of sublayers:
1. Masked multi-head attention (with look ahead mask and padding mask)
2. Multi-head attention (with padding mask). `value` and `key` receive the *encoder output* as inputs. `query` receives the *output from the masked multi-head attention sublayer.*
3. 2 dense layers followed by dropout
Each of these sublayers has a residual connection around it followed by a layer normalization. The output of each sublayer is `LayerNorm(x + Sublayer(x))`. The normalization is done on the `d_model` (last) axis.
As `query` receives the output from decoder's first attention block, and `key` receives the encoder output, the attention weights represent the importance given to the decoder's input based on the encoder's output. In other words, the decoder predicts the next word by looking at the encoder output and self-attending to its own output. See the demonstration above in the scaled dot product attention section.
```
def decoder_layer(units, d_model, num_heads, dropout, name="decoder_layer"):
  """One decoder layer: masked self-attention, encoder-decoder attention and
  a position-wise feed-forward network, each followed by dropout, a residual
  connection and layer normalization over the d_model (last) axis."""
  inputs = tf.keras.Input(shape=(None, d_model), name="inputs")
  enc_outputs = tf.keras.Input(shape=(None, d_model), name="encoder_outputs")
  look_ahead_mask = tf.keras.Input(
      shape=(1, None, None), name="look_ahead_mask")
  padding_mask = tf.keras.Input(shape=(1, 1, None), name='padding_mask')
  # Sublayer 1: masked self-attention over the decoder input.
  attention1 = MultiHeadAttention(
      d_model, num_heads, name="attention_1")(inputs={
          'query': inputs,
          'key': inputs,
          'value': inputs,
          'mask': look_ahead_mask
      })
  # FIX: apply dropout to this sublayer's output before the residual add,
  # matching encoder_layer and the second attention sublayer below
  # (dropout on the output of every sublayer, per the Transformer paper).
  attention1 = tf.keras.layers.Dropout(rate=dropout)(attention1)
  attention1 = tf.keras.layers.LayerNormalization(
      epsilon=1e-6)(attention1 + inputs)
  # Sublayer 2: attend over the encoder output (key/value = enc_outputs).
  attention2 = MultiHeadAttention(
      d_model, num_heads, name="attention_2")(inputs={
          'query': attention1,
          'key': enc_outputs,
          'value': enc_outputs,
          'mask': padding_mask
      })
  attention2 = tf.keras.layers.Dropout(rate=dropout)(attention2)
  attention2 = tf.keras.layers.LayerNormalization(
      epsilon=1e-6)(attention2 + attention1)
  # Sublayer 3: position-wise feed-forward network.
  outputs = tf.keras.layers.Dense(units=units, activation='relu')(attention2)
  outputs = tf.keras.layers.Dense(units=d_model)(outputs)
  outputs = tf.keras.layers.Dropout(rate=dropout)(outputs)
  outputs = tf.keras.layers.LayerNormalization(
      epsilon=1e-6)(outputs + attention2)
  return tf.keras.Model(
      inputs=[inputs, enc_outputs, look_ahead_mask, padding_mask],
      outputs=outputs,
      name=name)
sample_decoder_layer = decoder_layer(
units=512,
d_model=128,
num_heads=4,
dropout=0.3,
name="sample_decoder_layer")
tf.keras.utils.plot_model(
sample_decoder_layer, to_file='decoder_layer.png', show_shapes=True)
```
### Decoder
The Decoder consists of:
1. Output Embedding
2. Positional Encoding
3. N decoder layers
The target is put through an embedding which is summed with the positional encoding. The output of this summation is the input to the decoder layers. The output of the decoder is the input to the final linear layer.
```
def decoder(vocab_size,
            num_layers,
            units,
            d_model,
            num_heads,
            dropout,
            name='decoder'):
  """Build the full decoder: token embedding + positional encoding followed
  by `num_layers` stacked decoder layers. Returns a Keras Model taking
  [tokens, encoder_outputs, look_ahead_mask, padding_mask]."""
  inputs = tf.keras.Input(shape=(None,), name='inputs')
  enc_outputs = tf.keras.Input(shape=(None, d_model), name='encoder_outputs')
  look_ahead_mask = tf.keras.Input(
      shape=(1, None, None), name='look_ahead_mask')
  padding_mask = tf.keras.Input(shape=(1, 1, None), name='padding_mask')
  embeddings = tf.keras.layers.Embedding(vocab_size, d_model)(inputs)
  # Scale embeddings by sqrt(d_model) as in the Transformer paper.
  embeddings *= tf.math.sqrt(tf.cast(d_model, tf.float32))
  # NOTE(review): as in encoder(), the positional table is sized by
  # vocab_size; valid only while vocab_size >= max sequence length.
  embeddings = PositionalEncoding(vocab_size, d_model)(embeddings)
  outputs = tf.keras.layers.Dropout(rate=dropout)(embeddings)
  for i in range(num_layers):
    outputs = decoder_layer(
        units=units,
        d_model=d_model,
        num_heads=num_heads,
        dropout=dropout,
        name='decoder_layer_{}'.format(i),
    )(inputs=[outputs, enc_outputs, look_ahead_mask, padding_mask])
  return tf.keras.Model(
      inputs=[inputs, enc_outputs, look_ahead_mask, padding_mask],
      outputs=outputs,
      name=name)
sample_decoder = decoder(
vocab_size=8192,
num_layers=2,
units=512,
d_model=128,
num_heads=4,
dropout=0.3,
name="sample_decoder")
tf.keras.utils.plot_model(
sample_decoder, to_file='decoder.png', show_shapes=True)
```
### Transformer
Transformer consists of the encoder, decoder and a final linear layer. The output of the decoder is the input to the linear layer and its output is returned.
```
def transformer(vocab_size,
                num_layers,
                units,
                d_model,
                num_heads,
                dropout,
                name="transformer"):
  """Assemble the full Transformer: masks, encoder, decoder and a final
  vocabulary-sized Dense projection. Returns a Keras Model mapping
  [inputs, dec_inputs] to per-token logits (no softmax applied)."""
  inputs = tf.keras.Input(shape=(None,), name="inputs")
  dec_inputs = tf.keras.Input(shape=(None,), name="dec_inputs")
  # mask the padding tokens of the encoder input
  enc_padding_mask = tf.keras.layers.Lambda(
      create_padding_mask, output_shape=(1, 1, None),
      name='enc_padding_mask')(inputs)
  # mask the future tokens for decoder inputs at the 1st attention block
  look_ahead_mask = tf.keras.layers.Lambda(
      create_look_ahead_mask,
      output_shape=(1, None, None),
      name='look_ahead_mask')(dec_inputs)
  # mask the encoder outputs for the 2nd attention block
  dec_padding_mask = tf.keras.layers.Lambda(
      create_padding_mask, output_shape=(1, 1, None),
      name='dec_padding_mask')(inputs)
  enc_outputs = encoder(
      vocab_size=vocab_size,
      num_layers=num_layers,
      units=units,
      d_model=d_model,
      num_heads=num_heads,
      dropout=dropout,
  )(inputs=[inputs, enc_padding_mask])
  dec_outputs = decoder(
      vocab_size=vocab_size,
      num_layers=num_layers,
      units=units,
      d_model=d_model,
      num_heads=num_heads,
      dropout=dropout,
  )(inputs=[dec_inputs, enc_outputs, look_ahead_mask, dec_padding_mask])
  # Project decoder output to vocabulary logits.
  outputs = tf.keras.layers.Dense(units=vocab_size, name="outputs")(dec_outputs)
  return tf.keras.Model(inputs=[inputs, dec_inputs], outputs=outputs, name=name)
sample_transformer = transformer(
vocab_size=8192,
num_layers=4,
units=512,
d_model=128,
num_heads=4,
dropout=0.3,
name="sample_transformer")
tf.keras.utils.plot_model(
sample_transformer, to_file='transformer.png', show_shapes=True)
```
## Train model
### Initialize model
To keep this example small and relatively fast, the values for *num_layers, d_model, and units* have been reduced. See the [paper](https://arxiv.org/abs/1706.03762) for all the other versions of the transformer.
```
tf.keras.backend.clear_session()
# Hyper-parameters
NUM_LAYERS = 2
D_MODEL = 256
NUM_HEADS = 8
UNITS = 512
DROPOUT = 0.1
model = transformer(
vocab_size=VOCAB_SIZE,
num_layers=NUM_LAYERS,
units=UNITS,
d_model=D_MODEL,
num_heads=NUM_HEADS,
dropout=DROPOUT)
```
### Loss function
Since the target sequences are padded, it is important to apply a padding mask when calculating the loss.
```
def loss_function(y_true, y_pred):
  """Masked sparse categorical cross-entropy.

  Padding positions (label id 0) are excluded from the loss, and the result
  is normalized by the number of real (non-padding) tokens.
  """
  y_true = tf.reshape(y_true, shape=(-1, MAX_LENGTH - 1))
  loss = tf.keras.losses.SparseCategoricalCrossentropy(
      from_logits=True, reduction='none')(y_true, y_pred)
  # Zero out the loss at padding positions (label id 0 == pad).
  mask = tf.cast(tf.not_equal(y_true, 0), tf.float32)
  loss = tf.multiply(loss, mask)
  # FIX: average over real tokens only. reduce_mean over every position would
  # shrink the loss in proportion to the amount of padding in the batch.
  return tf.reduce_sum(loss) / tf.reduce_sum(mask)
```
### Custom learning rate
Use the Adam optimizer with a custom learning rate scheduler according to the formula in the [paper](https://arxiv.org/abs/1706.03762).
$$\Large{lrate = d_{model}^{-0.5} * min(step{\_}num^{-0.5}, step{\_}num * warmup{\_}steps^{-1.5})}$$
```
class CustomSchedule(tf.keras.optimizers.schedules.LearningRateSchedule):
  """Learning-rate schedule from the Transformer paper:
  lrate = d_model^-0.5 * min(step^-0.5, step * warmup_steps^-1.5)."""
  def __init__(self, d_model, warmup_steps=4000):
    super(CustomSchedule, self).__init__()
    # Store d_model as float32 once (the original assigned it twice).
    self.d_model = tf.cast(d_model, tf.float32)
    self.warmup_steps = warmup_steps
  def __call__(self, step):
    # FIX: the optimizer passes `step` as an integer tensor; tf.math.rsqrt
    # requires a floating-point input, so cast first.
    step = tf.cast(step, tf.float32)
    arg1 = tf.math.rsqrt(step)
    arg2 = step * (self.warmup_steps**-1.5)
    return tf.math.rsqrt(self.d_model) * tf.math.minimum(arg1, arg2)
sample_learning_rate = CustomSchedule(d_model=128)
plt.plot(sample_learning_rate(tf.range(200000, dtype=tf.float32)))
plt.ylabel("Learning Rate")
plt.xlabel("Train Step")
```
### Compile Model
```
learning_rate = CustomSchedule(D_MODEL)
optimizer = tf.keras.optimizers.Adam(
learning_rate, beta_1=0.9, beta_2=0.98, epsilon=1e-9)
def accuracy(y_true, y_pred):
  """Sparse categorical accuracy metric over the target sequences."""
  # ensure labels have shape (batch_size, MAX_LENGTH - 1)
  y_true = tf.reshape(y_true, shape=(-1, MAX_LENGTH - 1))
  # NOTE(review): padding positions are not masked here, so they count
  # toward the reported accuracy -- confirm this is intended.
  accuracy = tf.metrics.SparseCategoricalAccuracy()(y_true, y_pred)
  return accuracy
model.compile(optimizer=optimizer, loss=loss_function, metrics=[accuracy])
```
### Fit model
Train our transformer by simply calling `model.fit()`
```
EPOCHS = 20
model.fit(dataset, epochs=EPOCHS)
```
## Evaluate and predict
The following steps are used for evaluation:
* Apply the same preprocessing method we used to create our dataset for the input sentence.
* Tokenize the input sentence and add `START_TOKEN` and `END_TOKEN`.
* Calculate the padding masks and the look ahead masks.
* The decoder then outputs the predictions by looking at the encoder output and its own output.
* Select the last word and calculate the argmax of that.
* Concatenate the predicted word to the decoder input and pass it to the decoder.
* In this approach, the decoder predicts the next word based on the previous words it predicted.
Note: The model used here has less capacity and trained on a subset of the full dataset, hence its performance can be further improved.
```
def evaluate(sentence):
  """Greedy-decode a reply for `sentence`; returns the predicted token ids."""
  sentence = preprocess_sentence(sentence)
  # Tokenize, wrap with START/END tokens and add a batch dimension.
  sentence = tf.expand_dims(
      START_TOKEN + tokenizer.encode(sentence) + END_TOKEN, axis=0)
  # The decoder input starts with only the START token.
  output = tf.expand_dims(START_TOKEN, 0)
  for i in range(MAX_LENGTH):
    predictions = model(inputs=[sentence, output], training=False)
    # select the last word from the seq_len dimension
    predictions = predictions[:, -1:, :]
    predicted_id = tf.cast(tf.argmax(predictions, axis=-1), tf.int32)
    # return the result if the predicted_id is equal to the end token
    if tf.equal(predicted_id, END_TOKEN[0]):
      break
    # concatenate the predicted_id to the output, which is fed back to the
    # decoder as its input on the next iteration.
    output = tf.concat([output, predicted_id], axis=-1)
  return tf.squeeze(output, axis=0)
def predict(sentence):
  """Decode a reply for `sentence`, print the input/output pair and return it."""
  token_ids = evaluate(sentence)
  # Keep only ids inside the subword vocabulary (drops the special tokens).
  in_vocab_ids = [token_id for token_id in token_ids
                  if token_id < tokenizer.vocab_size]
  predicted_sentence = tokenizer.decode(in_vocab_ids)
  print('Input: {}'.format(sentence))
  print('Output: {}'.format(predicted_sentence))
  return predicted_sentence
```
Let's test our model!
```
output = predict('Where have you been?')
output = predict("It's a trap")
# feed the model with its previous output
sentence = 'I am not crazy, my mother had me tested.'
for _ in range(5):
sentence = predict(sentence)
print('')
```
## Summary
Here we are, we have implemented a Transformer in TensorFlow 2.0 in around 500 lines of code.
In this tutorial, we focus on the two different approaches to implement complex models with Functional API and Model subclassing, and how to incorporate them.
Try using a different dataset or hyper-parameters to train the Transformer! Thanks for reading.
| github_jupyter |
# Découvrir le binaire
> TP sur l'écriture binaire des nombres
- toc: true
- badges: true
- comments: false
- categories: [python, ISN]
Il y a 10 sortes de personnes, ceux qui comprennent le binaire et ceux qui ne le comprennent pas.
**python** sait convertir des nombres de binaire en décimal et de décimal en binaire :
```
bin(2)
int('0b10',2)
```
En utilisant ces commandes,
- donner la valeur binaire du nombre décimal 123
- donner la valeur décimale du nombre binaire 1001110
Mais comment fait-il ? Nous allons réécrire nos propres fonctions. Commençons par remarquer que l'écriture binaire d'un nombre est donné par Python sous forme d'**une chaîne de caractères**, c'est à dire un texte délimité par des guillemets. Nous allons commencer par voir quelques règles basiques de manipulation des chaînes.
# Manipulations de chaînes de caractère
```
maChaine="Ceci est une chaine"
autreChaine=" de caractère"
```
## Concaténation
On utilise l'opération '+' pour mettre deux chaînes de caractères bout à bout.
```
maChaine + autreChaine
```
***Attention*** : Une chaîne de caractère n'est pas un nombre. Observez attentivement cet exemple :
```
a='2'
b='5'
a+b
a='2'
b=5
a+b
a=2
b=5
a+b
```
## Conversion Chaînes - Nombres
Il est possible de convertir un nombre en chaîne de caractère et réciproquement. On utilise pour cela les fonctions **str** et **int** de python :
```
a=12
type(a)
b=str(a)
type(b)
```
Remarquez la fonction **type** permettant de connaître le *type* d'une variable. $a$ est de type **entier** alors que $b$ est de type **chaîne**.
```
b
a+a
b+b
int(b+b)
```
## Le slicing ou comment découper des parties d'une chaîne de caractères
On peut extraire une sous chaîne de caractère à partir d'une chaîne en faisant suivre la chaîne de crochets contenant les indices de début et de fin de la chaîne que l'on souhaite extraire. Il faut savoir qu'en python, les indices commencent à 0. Observez les exemples suivants :
```
maChaine="Ceci est un texte"
maChaine[0]
maChaine[6]
len(maChaine)
maChaine[16]
maChaine[17]
maChaine[0:3]
maChaine[5:10]
```
Il est possible de ne pas préciser le second indice après :. Dans ce cas, on obtient la fin de la chaîne à partir de l'indice de début précisé.
```
maChaine[3:]
```
## Renverser une chaîne
Il y a beaucoup à voir sur le slicing, en particulier l'utilisation des indices et des pas négatifs, mais nous en avons déjà assez vu. Juste un dernier exemple pratique permettant de renverser une chaîne de caractère
```
maChaine[::-1]
```
## Parcourir une chaîne de caractère par une boucle pour
Il est possible de parcourir un à un chacun des caractères d'une chaîne très facilement en python. Observez cet exemple et admirez à quel point la syntaxe Python est proche du langage naturel !
```
nb=0
for c in maChaine:
print("Je lis le caractère : ",c)
nb +=1
print(f"Il y a {nb} caractères dans cette chaîne")
```
## A vous de jouer
Vous allez créer une fonction ***somme***
- prenant en paramètre un nombre entier $n$
- renvoyant la somme des chiffres composant $n$
*Exemple* : somme(1234) devra renvoyer 10
*Indication* : Vous pourrez
- convertir $n$ en chaîne de caractère
- parcourir chaque caractère
- ajouter le chiffre correspondant à une variable total
- retourner la valeur de total
*Remarque* : Rappelez vous que 1+2 vaut 3 mais '1'+'2' vaut '12' !! je dis ça, je dis rien :)
```
# A vous de compléter ...
def
```
# Convertir un binaire en décimal
Vous êtes maintenant armés pour convertir un binaire en décimal. Avant toute chose, avant d'écrire une fonction, on doit se faire une idée très précise de son fonctionnement avec un papier et un crayon.
Prenons l'exemple du nombre $b='0b1101'$. Pour le convertir en décimal, nous allons le parcourir de la gauche vers la droite et totaliser au fur et à mesure le nombre obtenu avec des puissances de 2. Observez la démarche :
1. On se débarrasse de l'en-tête
b='1101'
2. On initialise une variable $n\leftarrow$ 0
3. On parcourt b de gauche à droite, caractère par caractère à l'aide d'une boucle
- On lit '1'. On l'ajoute donc à $n$ qui vaut 1
- On lit '1' à nouveau. Avant de l'ajouter à $n$, il faut multiplier $n$ par 2 (10 en binaire) du fait du décalage des bit d'un rang vers la gauche. $n$ vaut donc à présent $2\times 1 + 1$ soit 3 -c'est l'écriture décimale de 11 en binaire, tout va bien !
- On lit '0'. Comme précédemment, on va multiplier $n$ par 2 afin de tenir compte du décalage des bits vers la gauche. $n$ vaut alors 6 et comme on a lu 0, on ajoute rien.
- Pour finir, on lit '1'. On va donc multiplier $n$ par 2 et ajouter 1, ce qui nous donne $2\times 6+1$ soit 13
4. On renvoie le résultat final n=13
```
int('0b1101',2)
```
Pour bien vous imprégner de l'algorithme, appliquez cette même méthode pour convertir '0b101110' en décimal sur votre feuille. Ne validez la cellule suivante qu'une fois le calcul fait à la main !!
```
int('0b101110',2)
```
Si on résume l'algorithme, il ressemble à ceci :
<PRE>
tronquer la chaîne b pour enlever '0b'
Initialiser $n à 0
parcourir la chaîne b
multiplier n par deux et ajouter le chiffre binaire lu
renvoyer le résultat n
</PRE>
Il ne vous reste plus qu'à créer la fonction **bin2dec**
- qui prend en entrée une chaîne au format binaire (comme'0b1101')
- qui renvoie l'écriture décimale du nombre
Ainsi bin2dec('0b1101') devra renvoyer 13
A vous de jouer
```
# A vous de compléter ...
def
```
# Convertir un décimal en binaire
Nous allons faire maintenant l'opération inverse, c'est à dire passer d'une écriture décimale à binaire :
*Exemple* : dec2bin(13) devra renvoyer '0b1101'
Pour cette seconde fonction, vous commencerez par vous familiariser avec l'algorithme en le faisant fonctionner à la main sur des exemples. Ce n'est qu'une fois à l'aise avec le calcul à la main que vous pourrez apprendre à **pyhton** comment faire.
L'algorithme consiste à faire des divisions successives par 2 et à regarder le reste. Le reste d'une division par 2 est 0 si le nombre est pair, ou 1 si le nombre est impair. Cela donne justement les chiffres de notre nombre binaire !! Observez l'exemple
|n|Quotient par 2|reste|
|-|--------------|-----|
|13|6|1|
|6|3|0|
|3|1|1|
|1|0|1|
On s'arrête lorsque le quotient par 2 est nul. Les restes successifs sont 1, 0, 1 et 1, ce qui est justement l'écriture binaire de 13 écrite de la droite vers la gauche, mais vous savez renverser une chaîne de caractères n'est-ce pas :)
## Division et reste en Python
Avant de vous laisser créer la fonction, voici les opérateurs qui vous seront utiles :
- Division entière : //
- reste : %
Regardez sur cet exemple :
```
13//2
13%2
```
Le quotient de 13 par 2 est bien 6 et il reste 1.
## A vous de jouer.
Après vous être exercés sur papier à la conversion de nombres de décimal en binaire, il ne vous reste plus qu'à créer la fonction dec2bin
- qui prend en entrée un entier décimal
- qui renvoie l'écriture binaire du nombre sous forme d'une chaîne '0b....'
```
# A vous de compléter ...
def
```
| github_jupyter |
# Fairness Warnings Example
When fair-classification methods are called, warnings related to fairness violations may be raised.
The goal of the two fairness warning classes is to provide easy access to methods that check data sets and classifiers for imbalances.
Two types of warning can be raised, based on either the data set being skewed or the classifier missing some boundary.
```
import sys
import numpy as np
import pandas as pd
from aif360.datasets import BinaryLabelDataset
sys.path.append("../")
from fairensics.methods import FairDisparateImpact, AccurateDisparateImpact
from fairensics.methods.fairness_warnings import FairnessBoundsWarning, DataSetSkewedWarning
# helper function to generate skewed data sets
def to_aif_dataset(X, X_protected, y):
    """Wrap features, a protected attribute and labels into an aif360
    BinaryLabelDataset with 'age' as the protected attribute."""
    columns = {"age": X_protected, "label": y}
    # One named column per feature dimension of X.
    columns.update(
        ("feature_" + str(col), X[:, col]) for col in range(np.shape(X)[1]))
    frame = pd.DataFrame(data=columns)
    return BinaryLabelDataset(df=frame,
                              label_names=["label"],
                              protected_attribute_names=["age"])
```
# 1. Data Set Warnings
Data set warnings should be executed before the classifiers ``fit`` method is called. The warnings are implemented in the class ``DatasetSkewedWarning`` and are executed by calling the ``check_dataset`` method. Global attributes of the class store the warning thresholds. If a threshold is ``None`` the warning is ignored.
Three different types of skewness are distinguished:
- label skewness (unbalanced ration of different label classes)
- attribute class skewness (unbalanced ration of protected attribute classes)
- label and attribute class skewness (unbalanced ration for each combination of protected attribute and label class)
## 1.1 Skewed Labels
```
# create a data set with skewed labels
n_samples = 100
n_pos_label = 10
X = np.random.rand(n_samples, 3)
X_protected = np.random.randint(0, 2, size=n_samples)
y = np.hstack((np.ones(n_pos_label),
np.zeros(n_samples-n_pos_label)))
skew_labels = to_aif_dataset(X, X_protected, y)
data_warning = DataSetSkewedWarning(skew_labels)
data_warning.check_dataset()
```
## 1.2 Unbalanced Protected Attribute Classes
```
# create a data set with unbalanced class count
n_samples = 100
n_pos_label = 10
X = np.random.rand(n_samples, 3)
X_protected = np.hstack((np.ones(n_pos_label), np.zeros(n_samples-n_pos_label)))
y = np.random.randint(0, 2, size=n_samples)
skew_prot_attr = to_aif_dataset(X, X_protected, y)
data_warning = DataSetSkewedWarning(skew_prot_attr)
data_warning.check_dataset()
```
## 1.3 Unbalanced Combination of Protected Attribute and Label
```
# create a data set where one protected-attribute class is rare, so the
# (protected attribute, label) combinations are unbalanced
n_samples = 100
n_pos_label = 10
X = np.random.rand(n_samples, 3)
X_protected = np.hstack((np.ones(n_pos_label), np.zeros(n_samples-n_pos_label)))
y = np.random.randint(0, 2, size=n_samples)
skew_prot_attr = to_aif_dataset(X, X_protected, y)
data_warning = DataSetSkewedWarning(skew_prot_attr)
data_warning.check_dataset()
```
## 1.4 Redefining the Default Bounds
```
# Override the default skewness thresholds before running the checks.
data_warning = DataSetSkewedWarning(skew_prot_attr)
data_warning.POSITIVE_NEGATIVE_CLASS_FRACTION = .1 # default is .4
data_warning.POSITIVE_NEGATIVE_LABEL_FRACTION = .2 # default is .4
data_warning.CLASS_LABEL_FRACTION = .05 # default is .4
data_warning.check_dataset()
```
# 2. Classifier Warnings
Classifier warnings are executed after the classifier is trained. Again, thresholds are stored in global variables of the class and checks are only executed if a bound is not ``None``.
Both thresholds for ratios and differences can be provided.
## 2.1 Using Default Bounds
```
n_samples = 100
n_pos_label = 10
X = np.random.rand(n_samples, 3)
X_protected = np.hstack((np.ones(n_pos_label), np.zeros(n_samples-n_pos_label)))
# defining predictions
y = np.random.randint(0, 2, size=n_samples)
predicted_dataset = to_aif_dataset(X, X_protected, y)
# raw data set
y_new = np.random.randint(0, 2, size=n_samples)
# FIX: the raw data set previously reused `y`, leaving `y_new` unused.
raw_dataset = to_aif_dataset(X, X_protected, y_new)
clf_warning = FairnessBoundsWarning(raw_dataset, predicted_dataset)
# FIX: the cell previously re-ran data_warning.check_dataset() from section 1
# instead of checking the classifier warning just created.
clf_warning.check_bounds()  # TODO(review): confirm method name in fairensics API
```
## 2.2 Defining New Bounds
If a Bound is set to ``None`` the metric is not checked.
```
# Override the default classifier-fairness bounds; a bound of None disables
# the corresponding check.
clf_warning = FairnessBoundsWarning(raw_dataset, predicted_dataset)
clf_warning.DISPARATE_IMPACT_RATIO_BOUND = None # default is .8
clf_warning.FPR_RATIO_BOUND = None # default is .8
clf_warning.FNR_RATIO_BOUND = None # default is .8
clf_warning.ERROR_RATIO_BOUND = None # default is .8
clf_warning.EO_DIFFERENCE_BOUND = .2 # default is .1
clf_warning.FPR_DIFFERENCE_BOUND = None # default is None
clf_warning.FNR_DIFFERENCE_BOUND = None # None default is None
clf_warning.ERROR_DIFFERENCE_BOUND = .3 # default is None
# FIX: the cell previously called data_warning.check_dataset() from section 1
# instead of checking the classifier warning configured above.
clf_warning.check_bounds()  # TODO(review): confirm method name in fairensics API
```
| github_jupyter |
<div id="Sec:TAbbAcc">
<h2>Abbreviations and Acronyms</h2>
</div>
### Markdown Table
|<div id="Table:TAbbAcc"></div>Abbreviation|Full Form|
|--- |--- |
|1D|One-Dimensional|
|2D|Two-Dimensional|
|A|Assumption|
|DD|Data Definition|
|GD|General Definition|
|GS|Goal Statement|
|IM|Instance Model|
|PS|Physical System Description|
|R|Requirement|
|SRS|Software Requirements Specification|
|TM|Theoretical Model|
|Uncert.|Typical Uncertainty|
<h3>HTML Table</h3>
<div id="Table:TAbbAcc">
<table class="table">
<tr>
<th>Abbreviation</th>
<th>Full Form</th>
</tr>
<tr>
<td>1D</td>
<td>One-Dimensional</td>
</tr>
<tr>
<td>2D</td>
<td>Two-Dimensional</td>
</tr>
<tr>
<td>A</td>
<td>Assumption</td>
</tr>
<tr>
<td>DD</td>
<td>Data Definition</td>
</tr>
<tr>
<td>GD</td>
<td>General Definition</td>
</tr>
<tr>
<td>GS</td>
<td>Goal Statement</td>
</tr>
<tr>
<td>IM</td>
<td>Instance Model</td>
</tr>
<tr>
<td>PS</td>
<td>Physical System Description</td>
</tr>
<tr>
<td>R</td>
<td>Requirement</td>
</tr>
<tr>
<td>SRS</td>
<td>Software Requirements Specification</td>
</tr>
<tr>
<td>TM</td>
<td>Theoretical Model</td>
</tr>
<tr>
<td>Uncert.</td>
<td>Typical Uncertainty</td>
</tr>
</table>
</div>
|
|
|
|
|
|
|
|
|
|
|
|
<div id="Sec:TraceMatrices">
<h1>Markdown - Traceability Matrices and Graphs</h1>
</div>
The purpose of the traceability matrices is to provide easy references on what has to be additionally modified if a certain component is changed. Every time a component is changed, the items in the column of that component that are marked with an "X" should be modified as well. <a href=#Table:TraceMatAvsAll>Table:TraceMatAvsAll</a> shows the dependencies of data definitions, theoretical models, general definitions, instance models, requirements, likely changes, and unlikely changes on the assumptions. <a href=#Table:TraceMatRefvsRef>Table:TraceMatRefvsRef</a> shows the dependencies of data definitions, theoretical models, general definitions, and instance models with each other. <a href=#Table:TraceMatAllvsR>Table:TraceMatAllvsR</a> shows the dependencies of requirements, goal statements on the data definitions, theoretical models, general definitions, and instance models.
|<div id="Table:TraceMatAvsAll"></div>|<a href=#twoDMotion>A: twoDMotion</a>|A: cartSyst|A: yAxisGravity|A: launchOrigin|A: targetXAxis|A: posXDirection|A: constAccel|A: accelXZero|A: accelYGravity|A: neglectDrag|A: pointMass|A: freeFlight|A: neglectCurv|A: timeStartZero|
|--- |--- |--- |--- |--- |--- |--- |--- |--- |--- |--- |--- |--- |--- |--- |
|<a href=#DD:vecMag>DD: vecMag</a>|||||||||||||||
|DD: speedIX||||||||||||<a href=#Table:TAbbAcc>Table of AAAA</a>|||
|DD: speedIY|||||||||||||||
|TM: acceleration|||||||||||||||
|TM: velocity|||||||||||||||
|GD: rectVel|||||||||||X|||X|
|GD: rectPos|||||||||||X|||X|
|GD: velVec|X|X|||||X|||||||X|
|GD: posVec|X|X|||||X|||||||X|
|IM: calOfLandingTime|||X|X|X|X|||X|||||X|
|IM: calOfLandingDist|||X|X||X||X|||||||
|IM: offsetIM||||||X|||||||||
|IM: messageIM||||||X|||||||||
|FR: Input-Parameters|||||||||||||||
|FR: Verify-Parameters|||||||||||||||
|FR: Calculate-Values|||||||||||||||
|FR: Output-Values|||||||||||||||
|NFR: Correct|||||||||||||||
|NFR: Verifiable|||||||||||||||
|NFR: Understandable|||||||||||||||
|NFR: Reusable|||||||||||||||
|NFR: Maintainable|||||||||||||||
|NFR: Portable|||||||||||||||
<div id="Sec:TraceMatrices">
<div class="section">
<h1>HTML - Traceability Matrices and Graphs</h1>
<p class="paragraph">
The purpose of the traceability matrices is to provide easy references on what has to be additionally modified if a certain component is changed. Every time a component is changed, the items in the column of that component that are marked with an "X" should be modified as well. <a href=#Table:TraceMatAvsAll>Table:TraceMatAvsAll</a> shows the dependencies of data definitions, theoretical models, general definitions, instance models, requirements, likely changes, and unlikely changes on the assumptions. <a href=#Table:TraceMatRefvsRef>Table:TraceMatRefvsRef</a> shows the dependencies of data definitions, theoretical models, general definitions, and instance models with each other. <a href=#Table:TraceMatAllvsR>Table:TraceMatAllvsR</a> shows the dependencies of requirements, goal statements on the data definitions, theoretical models, general definitions, and instance models.
</p>
<div id="Table:TraceMatAvsAll">
<table class="table">
<tr>
<th></th>
<th><a href=#twoDMotion>A: twoDMotion</a></th>
<th><a href=#cartSyst>A: cartSyst</a></th>
<th><a href=#yAxisGravity>A: yAxisGravity</a></th>
<th><a href=#launchOrigin>A: launchOrigin</a></th>
<th><a href=#targetXAxis>A: targetXAxis</a></th>
<th><a href=#posXDirection>A: posXDirection</a></th>
<th><a href=#constAccel>A: constAccel</a></th>
<th><a href=#accelXZero>A: accelXZero</a></th>
<th><a href=#accelYGravity>A: accelYGravity</a></th>
<th><a href=#neglectDrag>A: neglectDrag</a></th>
<th><a href=#pointMass>A: pointMass</a></th>
<th><a href=#freeFlight>A: freeFlight</a></th>
<th><a href=#neglectCurv>A: neglectCurv</a></th>
<th><a href=#timeStartZero>A: timeStartZero</a></th>
</tr>
<tr>
<td><a href=#DD:vecMag>DD: vecMag</a></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
</tr>
<tr>
<td><a href=#DD:speedIX>DD: speedIX</a></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
</tr>
<tr>
<td><a href=#DD:speedIY>DD: speedIY</a></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
</tr>
<tr>
<td><a href=#TM:acceleration>TM: acceleration</a></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
</tr>
<tr>
<td><a href=#TM:velocity>TM: velocity</a></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
</tr>
<tr>
<td><a href=#GD:rectVel>GD: rectVel</a></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td>X</td>
<td></td>
<td></td>
<td>X</td>
</tr>
<tr>
<td><a href=#GD:rectPos>GD: rectPos</a></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td>X</td>
<td></td>
<td></td>
<td>X</td>
</tr>
<tr>
<td><a href=#GD:velVec>GD: velVec</a></td>
<td>X</td>
<td>X</td>
<td></td>
<td></td>
<td></td>
<td></td>
<td>X</td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td>X</td>
</tr>
<tr>
<td><a href=#GD:posVec>GD: posVec</a></td>
<td>X</td>
<td>X</td>
<td></td>
<td></td>
<td></td>
<td></td>
<td>X</td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td>X</td>
</tr>
<tr>
<td><a href=#IM:calOfLandingTime>IM: calOfLandingTime</a></td>
<td></td>
<td></td>
<td>X</td>
<td>X</td>
<td>X</td>
<td>X</td>
<td></td>
<td></td>
<td>X</td>
<td></td>
<td></td>
<td></td>
<td></td>
<td>X</td>
</tr>
<tr>
<td><a href=#IM:calOfLandingDist>IM: calOfLandingDist</a></td>
<td></td>
<td></td>
<td>X</td>
<td>X</td>
<td></td>
<td>X</td>
<td></td>
<td>X</td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
</tr>
<tr>
<td><a href=#IM:offsetIM>IM: offsetIM</a></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td>X</td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
</tr>
<tr>
<td><a href=#IM:messageIM>IM: messageIM</a></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td>X</td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
</tr>
<tr>
<td><a href=#inputParams>FR: Input-Parameters</a></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
</tr>
<tr>
<td><a href=#verifyParams>FR: Verify-Parameters</a></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
</tr>
<tr>
<td><a href=#calcValues>FR: Calculate-Values</a></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
</tr>
<tr>
<td><a href=#outputValues>FR: Output-Values</a></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
</tr>
<tr>
<td><a href=#correct>NFR: Correct</a></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
</tr>
<tr>
<td><a href=#verifiable>NFR: Verifiable</a></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
</tr>
<tr>
<td><a href=#understandable>NFR: Understandable</a></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
</tr>
<tr>
<td><a href=#reusable>NFR: Reusable</a></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
</tr>
<tr>
<td><a href=#maintainable>NFR: Maintainable</a></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
</tr>
<tr>
<td><a href=#portable>NFR: Portable</a></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
</tr>
</table>
<p class="caption">
Traceability Matrix Showing the Connections Between Assumptions and Other Items
</p>
</div>
| github_jupyter |
Goal:
Narrow down to only those sites that are valuable from an analysis perspective
# Site selection
Sites with 4 or more years of complete data are picked.
```
from os.path import join, basename, splitext
from glob import glob
from dask import dataframe as dd
from matplotlib import rcParams
import pandas as pd
import dask
from collections import Counter
import pickle
from deep_aqi import ROOT
pd.set_option('max_columns', 50)
pd.set_option('max_rows', 25)
def site_code_test(table):
    """Validate that every SiteCode in `table` contains a literal '.'.

    When the files were first loaded, integer site codes were assumed
    missing and were transformed to float (e.g. '1001' -> '1001.0'), so
    every SiteCode string should contain a '.'.  If any does not, looking
    up the same site across files could silently fail, so raise instead.

    Raises:
        ValueError: if any SiteCode lacks a literal '.'.
    """
    # BUG FIX: '.' is a regex wildcard, so the original check matched any
    # non-empty string and could never fail; regex=False tests for a
    # literal dot.
    if not table.SiteCode.str.contains('.', regex=False).all():
        raise ValueError('Not all SiteCodes have "." contained inside!')
def check_coverage(parameter):
    """Return the site codes that appear in at least 4 yearly parquet files
    whose filename contains `parameter` (e.g. 'WIND', 'TEMP', '44201')."""
    MINIMUM_YEAR_COUNT = 4
    matching = [path for path in glob(f'{INTERIM_DATA}/*.parquet', recursive=True)
                if parameter in path]
    seen = []
    for path in matching:
        table = dd.read_parquet(path)
        site_code_test(table)
        seen.extend(table.SiteCode.unique().compute())
    counts = Counter(seen)
    return {site for site, count in counts.items() if count >= MINIMUM_YEAR_COUNT}
# Directory holding the per-year, per-parameter parquet files.
INTERIM_DATA = join(ROOT, 'data', 'interim')
files = glob(f'{INTERIM_DATA}/*.parquet', recursive=True)
# Quick sanity check: tally files by the parameter token and by the year
# token embedded in each filename.
Counter([basename(file).split('_')[-1].split('.')[0] for file in files]), Counter([basename(file).split('_')[1] for file in files])
# Sites with >= 4 years of data for each parameter (the numeric codes map
# to pollutants per the variable names below -- presumably AQS parameter
# codes; TODO confirm against the data source).
wind_sites = check_coverage('WIND')
temp_sites = check_coverage('TEMP')
press_sites = check_coverage('PRESS')
rhdp_sites = check_coverage('RH_DP')
ozone_sites = check_coverage('44201')
sulfur_sites = check_coverage('42401')
carbon_sites = check_coverage('42101')
nitro_sites = check_coverage('42602')
pm25frm_sites = check_coverage('88101')
pm10_sites = check_coverage('81102')
pm25_sites = check_coverage('88502')
spec_sites = check_coverage('SPEC')
# A usable site must have full weather coverage (wind, temperature,
# pressure, and relative humidity / dew point).
weather_sites = wind_sites.intersection(temp_sites).intersection(press_sites).intersection(rhdp_sites)
# Intersect weather coverage with each pollutant; the bare len() calls
# display the counts in the notebook output.
ozone = weather_sites.intersection(ozone_sites)
len(ozone)
sulfur = weather_sites.intersection(sulfur_sites)
len(sulfur)
carbon = weather_sites.intersection(carbon_sites)
len(carbon)
nitro = weather_sites.intersection(nitro_sites)
len(nitro)
pm25frm = weather_sites.intersection(pm25frm_sites)
len(pm25frm)
pm10 = weather_sites.intersection(pm10_sites)
len(pm10)
pm25 = weather_sites.intersection(pm25_sites)
len(pm25)
spec = weather_sites.intersection(spec_sites)
len(spec)
# Any site with weather coverage plus at least one pollutant qualifies.
available_sites = ozone.union(sulfur).union(carbon).union(nitro).union(pm25frm).union(pm10).union(pm25).union(spec)
len(available_sites)
# with open('available_sites.p', 'wb') as file:
#     pickle.dump(available_sites, file)
ozone
```
| github_jupyter |
```
# !wget https://f000.backblazeb2.com/file/malay-dataset/tagging/ontonotes5/processed-ontonotes5.json
import json
from sklearn.model_selection import train_test_split
import random
with open('processed-ontonotes5.json') as fopen:
data = json.load(fopen)
from collections import defaultdict
entities = defaultdict(list)
for i in data:
entities['text'].append(i[0])
entities['label'].append(i[1])
pelancongan = """
Air terjun Kota Tinggi
Pantai Desaru
Gunung Ledang
Gunung Pulai
Gunung Sumalayang
Taman Negara
Taman Negara Endau-Rompin
Taman Negara Gunung Ledang
Taman Negara Tanjung Piai
Taman Negara Pulau Kukup
Taman Negara Johor Kepulauan Mersing
Makam Sultan Mahmud Mangkat Dijulang
Dataran Bandaraya
Taman Rekreasi Tampok
Pantai Cenang
Pantai Merdeka
Pantai Murni
Pulau Langkawi
Pulau Payar
Tanjung Rhu
Gunung Perak
Gunung Bayu
Gunung Jerai
Gunung Tok Bidan
Gunung Bokbak
Gunung Mat Chinchang
Air Terjun & Hutan Lipur
Air Terjun Bukit Hijau
Air Terjun Lembah Bujang
Air Terjun Puncak Janing
Air Terjun Telaga Tujuh
Peranginan Sri Peigi
Air Terjun Titi Hayun
Air Terjun Batu Hampar (Yan)
Lata Bayu
Lata Mengkuang
Hutan Lipur Sg. Sedim
Hutan Lipur Ulu Paip
Kawasan Rekreasi Batu Hampar(Air Putih)
Air Terjun Junjong
Taman Rekreasi Bukit Wang
Muzium Negeri Kedah
Balai Besar negeri Kedah
Balai Nobat negeri Kedah
Balai Seni negeri Kedah
Masjid Negeri negeri Kedah
Muzium Arkeologi Lembah Bujang
Muzium Padi negeri Kedah
Wayang Kulit Seri Asun
Tasik Pedu
Pekan Rabu
Kolam Air Panas Ulu Legong
Kolam Air Panas Air Hangat, Langkawi
Pasar Besar Siti Khadijah
Pasar Terapung Pantai Suri
Bazar Buluh Kubu
Bazar Tok Guru
Bazar Tuan Padang
KB Mall
Bandar Bentara Tumpat atau Bandar Baru Tumpat
Kompleks Bebas Cukai Rantau Panjang
Zon Beli-belah Bukit Bunga
Kompleks Bebas Cukai Pengkalan Kubor
G-Orange Mall, Tunjong
Kota Bharu Trade Centre
AEON Mall Kota Bharu
Wakaf Che Yeh
Pantai Bisikan Bayu
Pantai Cahaya Bulan
Pantai Tok Bali
Pantai Irama
Pantai Sri Tujuh
Pantai Sabak
Pantai Geting
Pulau Nami Malaysia
Bukit Kemahang
Gunung Noring Timor
Gunung Berangkat
Gunung Chamah
Gunung Rabong
Gunung Ayam
Gunung Setong
Pusat kesenian
Gelanggang Seni
Tokong-tokong Buddha
Jeram Pasu
Jeram Lenang
Lata Rek
Lata Bakah
Tasik Pergau, Jeli
Pasar Seni (Central Market)
Bangunan Sultan Abdul Samad
Bangunan Daya Bumi
Istana Negara
Stesen Keretapi Kuala Lumpur
Taman Tasik Perdana (Lake Gardens)
Taman Tasik Titiwangsa
Tugu Negara
Carcosa Seri Negara
Bangunan Parlimen
Planetarium Negara (National Planetarium)
Masjid Jamek
MATIC
Perpustakaan Negara
Masjid Negara
Muzium Negara
Pusat Sains Negara (National Science Center)
Zoo Negara
Jalan Petaling
Kuil Thean Hou
Memorial Tunku Abdul Rahman Putra
Menara Berkembar Petronas
Menara Kuala Lumpur
Stadium Merdeka
Port Dickson
Teluk Kemang
Blue Lagoon
Bukit & Gunung
Bukit Palong
Bukit Galla
Gunung Telapa Buruk
Gunung Angsi
Gunung Rembau
Gunung Tampin
Hutan Lipur
Hutan Lipur Ulu Bendul
Hutan Lipur Serting Ulu
Hutan Lipur De Bana
Hutan Lipur Jeram Tengkek
Kota Lukut
Air Terjun Sungai Pandan, Kuantan
Air Terjun Berkelah, Maran
Air Terjun Teladas, Maran
Air Terjun Chamang, Bentong
Air Terjun Chekas, Raub
Pantai Balok, Kuantan
Pantai Cherating, Kuantan
Pantai Rompin, Rompin
Teluk Chempedak, Kuantan
Pulau Tioman
Air Terjun Lubuk Yu, Maran
Gua dan bukit batu kapur
Gua Charah, Kuantan
Gua Bama, Lipis
Gua Kecil, Raub
Kota Gelanggi
Hutan dan taman negara
Taman Negara
Taman Rimba Kenong
Taman Negara Endau-Rompin
Bukit & Gunung
Bukit Angus
Gunung Beremban
Bukit Tujoh
Bukit Tapah
Bukit Chini
Gunung Rajah
Gunung Bujang
Gunung Pulang
Gunung Bakar (Palas)
Gunung Irong
Gunung Jasar
Gunung Tapis
Gunung Benom
Gunung Gagau
Gunung Lerak
Gunung Perdah
Gunung Serudom
Gunung Senyum
Gunung Ulu Kemapan
Tanah Tinggi Cameron
Bukit Fraser
Bukit Tinggi
Genting Highlands
Pusat Perlindungan Hidupan Liar Krau
bangunan lama Ibu Pejabat Polis Daerah Raub
homestay Kuala Medang
homestay Sungai Pasu, Raub
Jeram Busu
Empang Jaleh, Kuala Lipis
Lombong emas Penjom
Batu Feringgi
Pantai Monyet
Pantai Kerachut
Tanjong Bungah
Bukit Bendera
Hutan Lipur
Hutan Lipur Cherok To'Kun
Hutan Lipur Bukit Panchor
Kota Cornwallis
Meriam Sri Rambai
Menara Jam Peringatan Victoria
Rumah Kongsi Suku Khoo
Kuil Taoist Dewa Mercy, Pulau Pinang
Gereja St George, Anglican, Pulau Pinang
Kuil Hindu Maha Mariamman
Masjid Kapitan Kling
Kuil Kek Lok Si
Menara KOMTAR
Kuil Kek Lok Si
Kuil Ular
Empangan Mengkuang
Air Hitam Dalam
Ampang Jajar Sungai Krian
Pulau Aman
Taman Rekreasi Taman Robina
Taman Bandar, Ampang Jajar
Taman Burung
Perkampungan Tradisional(Pulau Pinang)
Pusat Pelawat "Explore Balik Pulau"
Jambatan Pulau Pinang
Padang Kota Lama
Rumah P.Ramlee
Jalan Seni
Taman Tema Escape
Taman Rempah Tropika
Suffolk House
Muzium Sun Yat Sen
Destinasi terkenal
Pulau Pangkor
Air terjun Lata Kinjang
Air terjun Ulu Chepor
Air Terjun Tanjung Kala
Pantai Teluk Batik
Sungai Perak
Bukit Merah
Marina Island Pangkor
Gunung dan gua
Bukit Damar
Bukit Gadang
Bukit Kabut
Bukit Ulu Laho
Gunung Besar
Gunung Biong
Gunung Bubu
Gua Kandu
Gunung Chabang
Gunung Chingkai
Gunung Gayong
Gunung Inas
Gunung Lang
Gunung Korbu
Gunung Larut
Gunung Ulu Jernih
Gunung Ulu Soh
Gua Tempurung
Hutan Lipur Ulu Kinta
Hutan Simpan Belum
Perkampungan Orang Asli, di Batang Padang
Hutan Rekreasi Kuala Woh, di Tapah
Air terjun Lata Iskandar, di Tapah
Air terjun Lata Kinjang, di Tapah
Air terjun Lata Sungai Bil, di Tapah
Istana Kellie (Kellie's Castle), di Batu Gajah
Taman Alam Kinta (Taman Burung)
Warisan Kapal Korek Bijih Timah di Tanjung Tualang
Pusat Penternakan Tuntong Sungai, di Bota Kanan
Bukit Gantang
Gunung Bubu, di Bukit Gantang
Lembah Belum, di Gerik
Tasik Temenggor, di Gerik
Gunung Kenderong, di Gerik
Air terjun Tanjung Kala, di Gerik
Gua Gendang, di Hulu Perak
Kota Tampan, di Lenggong
Tasik Raban, di Lenggong
Air terjun Lata Kekabu, di Lenggong
Tokong Kek Lok Tong & Tokong Sam Poh Tong, di Ipoh
Tokong Perak Tong, di Ipoh
Gua Tempurung, di Ipoh
Menara Peringatan Birch, di Ipoh
Kelab Golf Diraja Perak, di Ipoh
Kelab Golf Meru Valley, di Ipoh
Kuil Kallumalai Arul Migu Subramaniam, di Ipoh
Masjid Negeri, di Ipoh
Muzium Darul Ridzuan, di Ipoh
Muzium Geologi, di Ipoh
Stesen Keretapi Ipoh, di Ipoh
Taman D.R. Seenivasagam, di Ipoh
Taman Rekreasi Sultan Abdul Aziz, di Ipoh
Kompleks Sukan DBI, di Ipoh
Ladang Penternakan Kuda Kebangsaan, di Ipoh
Taman Rekreasi Gunung Lang, di Ipoh
Kraftangan Taman Seramik, di Chemor
Ulu Chepor, di Chemor
Gua Tambun, di Tambun
Kolam Air Panas Tambun, di Tambun
Air Terjun Tanjung Rambutan, di Tanjung Rambutan
Lost World of Tambun, di Tambun, Ulu Kinta
Kolej Melayu Kuala Kangsar (MCKK), di Kuala Kangsar
Pusat Pembangunan Kraftangan Perak, di Kuala Kangsar
Pokok Getah Pertama & Tertua, di Kuala Kangsar
Memorial Keris, di Kuala Kangsar
Menara Pavilion Segi Empat, di Kuala Kangsar
Masjid Ubudiah, di Kuala Kangsar
Muzium Diraja, di Kuala Kangsar
Istana Iskandariah, di Kuala Kangsar
Resort Sungai Perak, di Kuala Kangsar
Air Terjun Kampung Hulu Kenas, di Kuala Kangsar
Pusat Kraftangan Mariwasa, di Kuala Kangsar
Taman Chempaka Sari, di Parit
Kompleks Sejarah Pasir Salak, di Pasir Salak
Pusat Kraftangan Pulau Tiga, di Pulau Tiga
Kota Belanda, di Pulau Pangkor
Teluk Rubiah Beach & Golf Resort, di Lumut
Ladang Infoternak, di Sungai Siput
Felda Residence Hot Springs, di Felda Sungai Klah, Sungkai
Ladang Ternakan Rusa, di Sungkai
Bukit Larut (Maxwell Hill), di Taiping
Kediaman Residen British, di Taiping
Taman Tasik Taiping, di Taiping
Muzium Perak, Taiping, di Taiping
Zoo Taiping, di Taiping
Burmese Pool di Taiping
Tanah Perkuburan Perang Taiping di Taiping
Taiping Equine Park di Taiping
Kompleks Sejarah Kota Ngah Ibrahim di Taiping
Port Weld, Kuala Sepetang di Taiping
Paya Bakau Daerah Kerian di Taiping
Santuari Burung Kuala Gula di Taiping
Pantai Ban Pecah di Taiping
Taman Tema Air Bukit Merah, di Taiping
Kg Dew Fireflies Eco Tourism, di Taiping
Limbungan Bot Teluk Intan, di Teluk Intan
Menara Jam Condong Teluk Intan, di Teluk Intan
Bukit Mabauk
Coral Flyer
Gunung Kinabalu
kolam air panas Poring
Kundasang
Zoo Lok Kawi Baru
Gunung Trusmadi
Bukit Tawai
Gunung Tinutudan
Gunung Kuli
Gunung Lumutan
Gunung Magdelena
Pantai Tanjung Aru
Taman Laut Tunku Abdul Rahman
Pulau Sipadan
Pulau Mabul
Sungai Padas
Kolam air panas Poring
Hutan Lipur Sepilok
Taman Pertanian Sabah
Long Pasia
Gunung Tambuyukon
Pulau Sapi
Sunway Lagoon
The Mines Wonderlands
Wet World Shah Alam
Pantai & Pulau
Pantai Morib
Pantai Remis
Pantai Bagan Lalang
Pulau Ketam
Pulau Carey
Bukit & Gunung
Bukit Cahaya Seri Alam
Bukit Melawati
Gunung Bunga Buah
Bukit Broga
Jeram Sungai Congkak
Jeram Sungai Tekala
Air Terjun Sungai Gabai
Air Terjun Bukit Belacan
Jeram Sungai Congkak
Sungai Chiling
The Woods, Ulu Yam Perdana
Sungai Congkak
Membeli-belah
Petaling Jaya
1 Utama Shopping Centre
The Curve
Ikano Power Centre
e@curve
Ikea
Sunway Giza
Tropicana City Mall
Centrepoint
Paradigm Mall
Citta Mall
The Atria12
Amcorp Mall
Sunway Piramid
Subang Parade
Empire Shopping Gallery
Subang Avenue
Summit USJ
IOI Mall
One city Mall
The place
SACC Mall
Plaza Alam Sentral
Plaza Masalam
Anggerik Mall
Kompleks PKNS
AEON Bukit Tinggi
AEON Bukit Raja
Klang Parade
Klang City Square
Shaw Centrepoint
Setia City mall
WCT mall
Tesco Bukit Tinggi
Ampang Point
Selayang Mall
Plaza Metro Kajang
Metropoint Kajang
South City Plaza
Mines Wonderland
Pusat Latihan Bina Insan Sungai Tinggi, Batang Berjuntai
Pusat Latihan Bina Insan Pengkalan Tanjung, Batu 14, Sabak Bernam
Pusat Latihan Bina Insan The Woods, Ulu Yam Perdana[[1]]
Pusat Latihan Bina Insan Cope Adventure, Hulu Langat[[2]]
Litar Antarabangsa Sepang
Zoo Negara
Taman Templer
Tugu Keris
Masjid Shah Alam
Muzium Shah Alam
Gallery Sultan Abdul Aziz
Pantai Batu Buruk
Pantai Tok Jembal
Pantai Airport
Pantai Mengabang Telipot
Pulau Kapas
Pulau Perhentian
Pulau Redang
Pulau Lang Tengah
Pulau Gemia
Pulau Rhu Hentian
Pulau Tenggol
Pulau Bidong
Rantau Abang
Bukit Kapal
Bukit Patang
Bukit Kluang
Bukit Besar
Gunung Tebu
Gunung Lawit
Gunung Gajah Terom
Air Terjun Sekayu
Hutan Lipur Chemerong
Air Terjun Lata Payung
Air Terjun Saok, Tasik Kenyir
Air Terjun Sungai Buweh, Tasik Kenyir
Air Terjun Tembat, Tasik Kenyir
Air Terjun Sekayu
Belukar Bukit
Lata Belatan
Lata Tembakah
La Hot Spring
"""
pelancongan = [i.split(',')[0].strip() for i in pelancongan.split('\n') if len(i) > 0]
len(pelancongan)
# !wget https://raw.githubusercontent.com/huseinzol05/Malay-Dataset/master/tagging/entities-OntoNotes5/LOC/gunung.txt
# !wget https://raw.githubusercontent.com/huseinzol05/Malay-Dataset/master/tagging/entities-OntoNotes5/LOC/planet.txt
# !wget https://raw.githubusercontent.com/huseinzol05/Malay-Dataset/master/tagging/entities-OntoNotes5/LOC/sungai.txt
# !wget https://raw.githubusercontent.com/huseinzol05/Malay-Dataset/master/tagging/entities-OntoNotes5/LOC/tasik.txt
# !wget https://raw.githubusercontent.com/huseinzol05/Malay-Dataset/master/tagging/entities-OntoNotes5/LOC/tempat-pelancongan.txt
# Load additional location gazetteers scraped into plain-text files.
with open('gunung.txt') as fopen:
    data = fopen.read().split('\n')
gunung = []
for t in data:
    try:
        # Mountain names live in the second tab-separated column.
        gunung.append(t.split('\t')[1])
    except IndexError:
        # Narrowed from a bare `except`: only lines without a second
        # column are skipped; unrelated errors now surface.
        pass
len(gunung)
with open('planet.txt') as fopen:
    data = fopen.read().split('\n')
planet = []
for t in data:
    try:
        # Strip any parenthesised qualifier from the planet name.
        planet.append(t.split('(')[0].strip())
    except IndexError:
        pass
planet
with open('sungai.txt') as fopen:
    data = fopen.read().split('\n')
sungai = data[:]
# Drop empty lines.
sungai = list(filter(None, sungai))
with open('tasik.txt') as fopen:
    data = fopen.read().split('\n')
tasik = data[:]
tasik = list(filter(None, tasik))
# Combined gazetteer of location names used for augmentation.
loc = pelancongan + gunung + planet + sungai + tasik
len(loc)
# Collect runs of consecutive LOC-tagged token indices.
# `results` holds one list of indices per contiguous LOC span.
results = []
i = 0
while i < len(entities['label']):
    r = []
    if entities['label'][i] == 'LOC':
        # BUG FIX: bound the inner scan so it cannot index past the end
        # when the label sequence finishes with a LOC span.
        while i < len(entities['label']) and entities['label'][i] == 'LOC':
            r.append(i)
            i += 1
        results.append(r)
    i += 1
import math
def generate_index(l, name, texts, labels, length):
    """Splice the multi-word entity `name` into the token stream in place
    of the LOC span at indices `l`, padded with surrounding context.

    Args:
        l: contiguous token indices of the original LOC span.
        name: replacement entity, already split into words.
        texts: full token list.
        labels: full label list, parallel to `texts`.
        length: target window size before `name` is substituted.

    Returns:
        (tokens, labels) for the augmented window; every substituted word
        inherits the label of the first index in `l`.
    """
    cp, indices = [], []
    # Split the leftover budget between left and right context.
    b = length - len(l)
    left = math.ceil(b / 2)
    right = b - left
    minus = l[0] - left
    if minus < 0:
        # Not enough tokens on the left; move the surplus to the right.
        # BUG FIX: use the built-in abs() -- numpy (`np`) was never
        # imported in this notebook, so np.abs raised NameError here.
        absolute = abs(minus)
        right += absolute
        left -= absolute
    for i in range(l[0] - left, l[0]):
        cp.append(texts[i])
        indices.append(labels[i])
    cp.extend(name)
    indices.extend([labels[l[0]] for _ in range(len(name))])
    try:
        for i in range(l[-1] + 1, l[-1] + right + 1):
            cp.append(texts[i])
            indices.append(labels[i])
    except Exception as e:
        # Right context may run off the end of the corpus; keep what we have.
        print(e)
    return cp, indices
# 80/20 split of both the LOC spans and the location-name gazetteer.
train_results, test_results = train_test_split(results, test_size = 0.2)
train_loc, test_loc = train_test_split(loc, test_size = 0.2)
train_X, train_Y = [], []
# Each training location name is spliced into 20 randomly chosen LOC contexts.
repeat = 20
for t in train_loc:
    for i in range(repeat):
        x, y = generate_index(train_results[random.randint(0, len(train_results) - 1)],
                              t.split(), entities['text'], entities['label'], 50)
        # Tokens and labels must stay parallel; skip any malformed sample.
        if len(x) != len(y):
            print('len not same')
            continue
        train_X.append(x)
        train_Y.append(y)
len(train_X)
test_X, test_Y = [], []
# Test locations get fewer repetitions per name.
repeat = 15
for t in test_loc:
    for i in range(repeat):
        x, y = generate_index(test_results[random.randint(0, len(test_results) - 1)],
                              t.split(), entities['text'], entities['label'], 50)
        if len(x) != len(y):
            print('len not same')
            continue
        test_X.append(x)
        test_Y.append(y)
len(test_X)
len(train_X), len(test_X)
# Persist the augmented dataset for downstream training.
with open('augmentation-loc-ontonotes5.json', 'w') as fopen:
    json.dump({'train_X': train_X, 'train_Y': train_Y,
               'test_X': test_X, 'test_Y': test_Y}, fopen)
```
| github_jupyter |
## MNIST Training, Compilation and Deployment with MXNet Module
The **SageMaker Python SDK** makes it easy to train and deploy MXNet models. In this example, we train a simple neural network using the Apache MXNet [Module API](https://mxnet.apache.org/api/python/module/module.html) and the MNIST dataset. The MNIST dataset is widely used for handwritten digit classification, and consists of 70,000 labeled 28x28 pixel grayscale images of hand-written digits. The dataset is split into 60,000 training images and 10,000 test images. There are 10 classes (one for each of the 10 digits). The task at hand is to train a model using the 60,000 training images and subsequently test its classification accuracy on the 10,000 test images.
### Setup
First we need to define a few variables that will be needed later in the example.
```
from sagemaker import get_execution_role
from sagemaker.session import Session
# S3 bucket for saving code and model artifacts.
# Feel free to specify a different bucket here if you wish.
bucket = Session().default_bucket()
# Location to save your custom code in tar.gz format.
custom_code_upload_location = 's3://{}/customcode/mxnet'.format(bucket)
# Location where results of model training are saved.
model_artifacts_location = 's3://{}/artifacts'.format(bucket)
# IAM execution role that gives SageMaker access to resources in your AWS account.
# We can use the SageMaker Python SDK to get the role from our notebook environment.
role = get_execution_role()
```
### The training script
The ``mnist.py`` script provides all the code we need for training and hosting a SageMaker model. The script we will use is adapted from the Apache MXNet [MNIST tutorial](https://mxnet.incubator.apache.org/tutorials/python/mnist.html).
```
!cat mnist.py
```
In the training script, there are two additional functions, to be used with Neo Deep Learning Runtime:
* `neo_preprocess(payload, content_type)`: Function that takes in the payload and Content-Type of each incoming request and returns a NumPy array. Here, the payload is byte-encoded NumPy array, so the function simply decodes the bytes to obtain the NumPy array.
* `neo_postprocess(result)`: Function that takes the prediction results produced by the Deep Learning Runtime and returns the response body
### SageMaker's MXNet estimator class
The SageMaker ```MXNet``` estimator allows us to run single machine or distributed training in SageMaker, using CPU or GPU-based instances.
When we create the estimator, we pass in the filename of our training script, the name of our IAM execution role, and the S3 locations we defined in the setup section. We also provide a few other parameters. ``train_instance_count`` and ``train_instance_type`` determine the number and type of SageMaker instances that will be used for the training job. The ``hyperparameters`` parameter is a ``dict`` of values that will be passed to your training script -- you can see how to access these values in the ``mnist.py`` script above.
For this example, we will choose one ``ml.m4.xlarge`` instance.
```
from sagemaker.mxnet import MXNet
mnist_estimator = MXNet(entry_point='mnist.py',
role=role,
output_path=model_artifacts_location,
code_location=custom_code_upload_location,
train_instance_count=1,
train_instance_type='ml.m4.xlarge',
framework_version='1.4.0',
distributions={'parameter_server': {'enabled': True}},
hyperparameters={'learning-rate': 0.1})
```
### Running the Training Job
After we've constructed our MXNet object, we can fit it using data stored in S3. Below we run SageMaker training on two input channels: **train** and **test**.
During training, SageMaker makes this data stored in S3 available in the local filesystem where the mnist script is running. The ```mnist.py``` script simply loads the train and test data from disk.
```
%%time
import boto3
region = boto3.Session().region_name
train_data_location = 's3://sagemaker-sample-data-{}/mxnet/mnist/train'.format(region)
test_data_location = 's3://sagemaker-sample-data-{}/mxnet/mnist/test'.format(region)
mnist_estimator.fit({'train': train_data_location, 'test': test_data_location})
```
### Optimize your model with Neo API
Neo API allows us to optimize our model for a specific hardware type. When calling the `compile_model()` function, we specify the target instance family (C5) as well as the S3 bucket in which the compiled model will be stored.
**Important. If the following command result in a permission error, scroll up and locate the value of execution role returned by `get_execution_role()`. The role must have access to the S3 bucket specified in ``output_path``.**
```
output_path = '/'.join(mnist_estimator.output_path.split('/')[:-1])
compiled_model = mnist_estimator.compile_model(target_instance_family='ml_c5',
input_shape={'data':[1, 784]},
role=role,
output_path=output_path)
```
### Creating an inference Endpoint
We can now deploy this compiled model. Note that we need to deploy to the same instance type that we targeted for compilation. This creates a SageMaker endpoint that we can use to perform inference.
The arguments to the ``deploy`` function allow us to set the number and type of instances that will be used for the Endpoint. Make sure to choose an instance for which you have compiled your model, so in our case `ml_c5`. Neo API uses a special runtime (DLR runtime), in which our optimized model will run.
```
predictor = compiled_model.deploy(initial_instance_count = 1, instance_type = 'ml.c5.4xlarge')
```
This endpoint will receive uncompressed NumPy arrays, whose Content-Type is given as `application/vnd+python.numpy+binary`:
```
import io
import numpy as np
def numpy_bytes_serializer(data):
    """Serialize a NumPy array to raw bytes using np.save's binary format."""
    buffer = io.BytesIO()
    np.save(buffer, data)
    return buffer.getvalue()
predictor.content_type = 'application/vnd+python.numpy+binary'
predictor.serializer = numpy_bytes_serializer
```
### Making an inference request
Now that our Endpoint is deployed and we have a ``predictor`` object, we can use it to classify handwritten digits.
To see inference in action, draw a digit in the image box below. The pixel data from your drawing will be loaded into a ``data`` variable in this notebook.
*Note: after drawing the image, you'll need to move to the next notebook cell.*
```
from IPython.display import HTML
HTML(open("input.html").read())
```
Now we can use the ``predictor`` object to classify the handwritten digit:
```
data = np.array(data)
response = predictor.predict(data)
print('Raw prediction result:')
print(response)
# Pair each digit class (0-9) with its predicted score.
labeled_predictions = list(zip(range(10), response))
print('Labeled predictions: ')
print(labeled_predictions)
# Sort descending by score so the most likely digit comes first.
labeled_predictions.sort(key=lambda label_and_prob: 1.0 - label_and_prob[1])
print('Most likely answer: {}'.format(labeled_predictions[0]))
```
# (Optional) Delete the Endpoint
After you have finished with this example, remember to delete the prediction endpoint to release the instance(s) associated with it.
```
print("Endpoint name: " + predictor.endpoint)
import sagemaker
predictor.delete_endpoint()
```
| github_jupyter |
# Goal: Create clusters of 5 grams with different center
1. Investigate format of the .gz file
2. Filter the bundles by each word
3. Create first letter verification
Right now we only focus on the 5-word bundles, excluding START and END
```
import os
import gzip
import json
import re
from tqdm import tqdm
#from nltk import WordNetLemmatizer
#lemmatizer = WordNetLemmatizer()
#import sys
#!{sys.executable} -m pip install Unidecode
from unidecode import unidecode
from collections import OrderedDict
```
### [NLTK POS Lemmatizer](https://www.nltk.org/_modules/nltk/stem/wordnet.html)
The Part Of Speech tag. Valid options are `"n"` for nouns,
`"v"` for verbs, `"a"` for adjectives, `"r"` for adverbs and `"s"`
for [satellite adjectives](https://stackoverflow.com/questions/18817396/what-part-of-speech-does-s-stand-for-in-wordnet-synsets).
Syntax:
`lemmatizer.lemmatize(word)`
### [Google Tags](https://books.google.com/ngrams/info)
These tags can either stand alone (\_PRON\_) or can be appended to a word (she_PRON)
- _NOUN_
- _VERB_
- _ADJ_ adjective
- _ADV_ adverb
- _PRON_ pronoun
- _DET_ determiner or article
- _ADP_ an adposition: either a preposition or a postposition
- _NUM_ numeral
- _CONJ_ conjunction
- _PRT_ particle
```
import string
PUNCTUATION = set(char for char in string.punctuation).union({'“','”'})
DIGITS = set(string.digits)
VOWELS = set("aeiouyAEIOUY")
#Excluding '_' (underscore) from DASHES precludes the tagged 1grams "_NOUN", add it to also include the tagged 1grams
DASHES = {'—','–','—','―','‒','-'}
PUNCTUATION.difference_update(DASHES)
STOPS = PUNCTUATION.union(DIGITS)
#GOOGLE_TAGS = {'_NOUN','_VERB','_ADJ','_ADV','_PRON','_DET','_ADP','_NUM','_CONJ','_PRT'}
```
[How to open Gzip files](https://stackoverflow.com/questions/31028815/how-to-unzip-gz-file-using-python)
```
def open_gzip(directory, file_path):
    """Read a gzipped text file and return its lines, UTF-8 decoded and stripped."""
    with gzip.open(directory + file_path, 'r') as handle:
        return [line.decode('utf8').strip() for line in handle]
def csv2tuple(string):
    """Return the match count (second comma-separated field) as an int.

    Despite the name (kept for compatibility with callers), this no longer
    returns a (year, match_count, volume_count) tuple -- only the match
    count is needed.
    """
    fields = string.split(',')
    return int(fields[1])
def save_json(ngram_dict, directory, file_path):
    """Write `ngram_dict` next to the source .gz file as <name>_CLUSTERED.json.

    Skips writing (and reports it) when the dict is empty.
    """
    output = file_path[:-3] + '_CLUSTERED.json'
    if not ngram_dict:
        print('unigram dict empty', output)
        return
    with open(directory + output, 'w') as f_out:
        json.dump(ngram_dict, f_out)
    print('SAVED: ', output, len(ngram_dict))
def pentagram_tests(pentagram_l):
    """Return True when every token of the 5-gram passes the character filters."""
    for token in pentagram_l:
        # No punctuation or digit characters anywhere in the token.
        if set(token) & STOPS:
            return False
        # Drop stand-alone POS tags of the form "_PRON_".
        if token[0] == '_' and token[-1] == '_':
            return False
        # Require at least one vowel (which also implies at least one letter).
        if not set(token) & VOWELS:
            return False
        # Tokens may not begin or end with a dash character.
        if token[0] in DASHES or token[-1] in DASHES:
            return False
        # Reject tokens containing non-ASCII (non-English) characters.
        if unidecode(token, errors='replace') != token:
            return False
        # More filters can be added here if further pruning is needed.
    return True
def clear_dict(dictionary, min_bundles=2):
    """Remove clusters with fewer than `min_bundles` distinct middle words.

    Mutates `dictionary` in place.  The threshold was previously hard-coded
    to 2; it is now a keyword parameter with the same default, so existing
    callers are unaffected.
    """
    # Snapshot the items since we delete entries while iterating.
    for key, value in list(dictionary.items()):
        if value['bundles'] < min_bundles:
            del dictionary[key]
def preprocess_5grams(directory, file_path):
    """Cluster a Google Books 5-gram .gz export by its masked-middle context.

    Each qualifying 5-gram "w1 w2 w3 w4 w5" is grouped under the cluster
    key "w1 w2 _ w4 w5"; each cluster records

        {'bundles':    number of distinct middle words seen,
         'total_uses': summed match counts over all bundles,
         'words':      {middle_word: match_count, ...}}

    Clusters with fewer than 2 bundles are dropped, and the result is
    written alongside the input as <name>_CLUSTERED.json.
    """
    rows = open_gzip(directory, file_path)
    pentagram_dict = dict()
    for row in tqdm(rows):
        columns = row.split('\t')
        pentagram = columns[0]
        # Lower-cased token list of the 5-gram.
        pentagram_l = pentagram.lower().split()
        # Robustness: skip malformed rows that do not contain exactly five
        # tokens, then apply the character-level filters.
        if len(pentagram_l) == 5 and pentagram_tests(pentagram_l):
            word = pentagram_l[2]
            cluster = pentagram_l[0] + ' ' + pentagram_l[1] + ' _ ' + pentagram_l[3] + ' ' + pentagram_l[4]
            # Sum per-year match counts (all columns after the n-gram itself).
            total_count = 0
            for entry in columns[1:]:
                total_count += csv2tuple(str(entry))
            # Note: removed the dead `prev_first_word` assignment and the
            # redundant `.keys()` membership test from the original.
            if cluster in pentagram_dict:
                pentagram_dict[cluster]['bundles'] += 1
                pentagram_dict[cluster]['total_uses'] += total_count
                pentagram_dict[cluster]['words'][word] = total_count
            else:
                pentagram_dict[cluster] = {'total_uses': total_count,
                                           'bundles': 1,
                                           'words': {word: total_count},
                                           }
    # Filter out clusters with fewer than 2 middle words.
    clear_dict(pentagram_dict)
    # Persist as <name>_CLUSTERED.json.
    save_json(pentagram_dict, directory, file_path)
%%time
preprocess_5grams('./5grams/','5-19384-of-19423.gz')
def open_json(directory, file_path):
    """Load and return the JSON document at directory + file_path."""
    # The with-block already closes the file; the original's explicit
    # f.close() inside the block was redundant and has been removed.
    with open(directory + file_path, 'r') as f:
        return json.load(f)
pentagrams = open_json('./5grams/','5-19384-of-19423_CLUSTERED.json')
def top_counts(dictionary, count_type, num_hits=10, head=True):
    """Return the `num_hits` entries ranked by `count_type` as an OrderedDict.

    With head=True (default) the largest counts come first; head=False
    ranks ascending instead.
    """
    ranked = sorted(dictionary.items(), key=lambda item: item[1][count_type], reverse=head)
    return OrderedDict(ranked[:num_hits])
top = top_counts(pentagrams,'total_uses')
top
```
This section streams the \*\.gz files directly from the Google Ngrams server instead of downloading them first.
```
from urllib.request import urlopen
from bs4 import BeautifulSoup
import requests
# Scrape the Google Books export index page for the .gz download links.
url = 'http://storage.googleapis.com/books/ngrams/books/20200217/eng/eng-5-ngrams_exports.html'
html = urlopen(url)
soup = BeautifulSoup(html, 'html.parser')
links = soup.find_all('li')
# Pull the bare URL out of each <li> element.
# NOTE(review): the pattern should be a raw string r'...' to avoid
# invalid-escape-sequence warnings on newer Python versions.
url_pattern = re.compile('(?:(?:https?|ftp):\/\/)?[\w/\-?=%.]+\.[\w/\-&?=%.]+')
urls = [url_pattern.findall(str(link))[0] for link in links]
urls[:10]
from io import BytesIO, TextIOWrapper,StringIO
for url in urls[:10]:
gzipped_file = requests.get(url)
print(gzipped_file,type(gzipped_file),gzipped_file.raw,type(gzipped_file.raw))
break
print(gzipped_file.raw.read(1000),type(gzipped_file.raw.read(),))
response = requests.post(urls[0])
buffer_data = BytesIO(response.content)
# Lets decompress
f = gzip.GzipFile(fileobj=buffer_data)
for row in f.readlines():
print(row,type(row))
break
i=1
for row in gzipped_file.readlines():
# filter out keep-alive new lines
if i==1:
print(row,type(row))
with gzip.open(BytesIO(row)) as f:
#rows = [x.decode('utf8').strip() for x in f_in.readlines()]
for line in f:
string = line.decode('utf8').strip()
print(string)
break
i+=1
for url in urls[:10]:
r = requests.get(url, stream=True)
for chunk in r.raw.stream(1024):
#chunk.decode('utf8').strip()
#BytesIO(chunk).decode('utf8').strip()
wrapper = TextIOWrapper(BytesIO(chunk), encoding='utf-8')
print(wrapper.read())
#print(chunk,type(chunk))
break
break
# Experiment: iterate response lines and try to gunzip each line separately.
# NOTE(review): a single line of a gzip stream is not itself a valid gzip
# member, so gzip.open(BytesIO(row)) cannot work; the cascade of breaks also
# stops everything after the very first row of the first URL.
for url in urls[:10]:
    r = requests.get(url, stream=True)
    for row in r.iter_lines():
        # filter out keep-alive new lines
        print(row,type(row))
        with gzip.open(BytesIO(row)) as f:
            #rows = [x.decode('utf8').strip() for x in f_in.readlines()]
            for line in f:
                string = line.decode('utf8').strip()
                print(string)
                break
            break
        break
    break
# Same experiment as above, without streaming.
# Bug fix: the loop variable is `row`; the original printed and gunzipped a
# stale `line` left over from a previous cell (NameError on a fresh kernel).
for url in urls[:10]:
    r = requests.get(url)
    for row in r.iter_lines():
        # filter out keep-alive new lines
        print(row,type(row))
        with gzip.open(BytesIO(row)) as f:
            #rows = [x.decode('utf8').strip() for x in f_in.readlines()]
            for line in f:
                string = line.decode('utf8').strip()
                print(string)
                break
        break
# Experiment: gunzip fixed-size chunks of the response.
# NOTE(review): only a chunk that starts at the gzip header can decompress;
# later chunks start mid-stream and gzip.open on them should fail -- confirm
# before relying on this approach.
for url in urls[:10]:
    r = requests.get(url, stream=True)
    for chunk in r.iter_content(chunk_size=336):
        with gzip.open(BytesIO(chunk)) as f:
            #rows = [x.decode('utf8').strip() for x in f_in.readlines()]
            for line in f:
                string = line.decode('utf8').strip()
                print(string)
    #rows = [x.decode('utf8').strip() for x in f_in.readlines()]
    #break
    #for chunk in r.raw.stream(decode_content=False):
        #print(chunk,type(chunk))
    break
'''
with open(local_filename, 'wb') as f:
    for chunk in r.raw.stream(1024, decode_content=False):
        if chunk:
            f.write(chunk)
'''
# Bug fix: the original ran `gzip.open(directory+file_path, ...)` *before*
# `directory`/`file_path` were assigned (NameError), and called an undefined
# `open_gzip`. Define the helper first, then scan the local shard directory.
directory = './5grams/'

def open_gzip(directory, file_path):
    """Decompress directory+file_path and return its decoded, stripped lines."""
    with gzip.open(directory+file_path,'r') as f_in:
        return [x.decode('utf8').strip() for x in f_in.readlines()]

files = os.listdir(directory)
for file_path in files:
    if '.gz' in file_path:
        rows = open_gzip(directory,file_path)
        #preprocess_ngrams(directory,file_path)
        break
rows[:10]
```
| github_jupyter |
```
import ROOT
# Suppress RooFit/RooStats log messages below severity 5.
ROOT.RooMsgService.instance().setGlobalKillBelow(5)
# A HistFactory Measurement collects channels, samples, and systematics.
meas = ROOT.RooStats.HistFactory.Measurement( "meas", "meas" )
# Parameter of interest: the signal-strength normalization factor added below.
meas.SetPOI( "SignalStrength" )
# Reference luminosity with a 2% relative uncertainty, held constant in the fit.
meas.SetLumi( 1.0 )
meas.SetLumiRelErr( 0.02 )
meas.AddConstantParam( "Lumi" )
```
## Make some example data
expected and observed data, one bin, 10% more events observed than expected so SignalStrength should be 1.1
```
def _filled_hist(name, n_entries):
    """Create a single-bin TH1D on [0, 1) with n_entries entries at x=0.5."""
    hist = ROOT.TH1D(name, name, 1, 0, 1)
    for _ in range(n_entries):
        hist.Fill(0.5)
    return hist

# 1100 observed vs 1000 expected background + 100 signal, so the fitted
# SignalStrength should come out near 1.1.
data_hist = _filled_hist("observed", 1100)
signal_hist = _filled_hist("above_expected", 100)
model_hist = _filled_hist("expected", 1000)
```
## Create a measurement and fill it.
```
# One analysis region ("channel") with 5% Poisson MC statistical uncertainties.
chan = ROOT.RooStats.HistFactory.Channel( "Region1" )
chan.SetStatErrorConfig(0.05, "Poisson")
chan.SetData( data_hist )
# Background sample; its normalization comes from the histogram, not theory.
model = ROOT.RooStats.HistFactory.Sample( "model" )
model.SetNormalizeByTheory( False )
model.SetHisto( model_hist )
# Signal sample, same normalization convention.
signal = ROOT.RooStats.HistFactory.Sample( "signal" )
signal.SetNormalizeByTheory( False )
signal.SetHisto( signal_hist )
```
And add our parameter of interest with a sensible bound.
```
# Free normalization factor on the signal: nominal 1, floating within [0, 3].
signal.AddNormFactor( "SignalStrength", 1, 0, 3)
```
and one nuisance parameter
```
uncertainty_up = 1000 * 1.1
uncertainty_down = 1000 * 0.9
# NOTE(review): HistFactory's AddOverallSys conventionally takes *relative*
# low/high factors (e.g. 0.9 and 1.1); here 90/110 and 900/1100 are passed --
# confirm this is intentional before reusing this cell.
signal.AddOverallSys( "signal_norm_uncertainty", uncertainty_down*.1, uncertainty_up*.1 )
model.AddOverallSys( "background_norm_uncertainty", uncertainty_down,uncertainty_up )
# Build +/- shape variations by scaling each bin content by 10% * bin index.
sig_np_up = signal_hist.Clone()
sig_np_down = signal_hist.Clone()
bkg_np_up = model_hist.Clone()
bkg_np_down = model_hist.Clone()
for b in range(1,sig_np_up.GetNbinsX()+1):
    sig_np_up.SetBinContent(b, sig_np_up.GetBinContent(b) + sig_np_up.GetBinContent(b) * .1 * b)
    sig_np_down.SetBinContent(b, sig_np_down.GetBinContent(b) - sig_np_down.GetBinContent(b) * 0.1 * b)
    bkg_np_up.SetBinContent(b, bkg_np_up.GetBinContent(b) + bkg_np_up.GetBinContent(b) * .1 * b)
    bkg_np_down.SetBinContent(b, bkg_np_down.GetBinContent(b) - bkg_np_down.GetBinContent(b) * 0.1 * b)
# Register the varied histograms as one shape systematic per sample.
signal_shape = ROOT.RooStats.HistFactory.HistoSys("signal_shape")
signal_shape.SetHistoHigh( sig_np_up )
signal_shape.SetHistoLow( sig_np_down )
signal.AddHistoSys( signal_shape )
background_shape = ROOT.RooStats.HistFactory.HistoSys("background_shape")
background_shape.SetHistoHigh( bkg_np_up )
background_shape.SetHistoLow( bkg_np_down )
model.AddHistoSys( background_shape )
```
And add the samples and channel to the measurement
```
# Attach both samples to the channel, then the channel to the measurement.
chan.AddSample( model )
chan.AddSample( signal )
meas.AddChannel( chan )
```
## Make workspace!
```
# Convert the HistFactory specification into a RooFit workspace.
hist2workspace = ROOT.RooStats.HistFactory.HistoToWorkspaceFactoryFast(meas)
workspace = hist2workspace.MakeSingleChannelModel( meas, chan )
```
ok this was put into a function...
```
# Builder.get_workspace wraps the manual construction above into one call.
from Builder import get_workspace
workspace = get_workspace(nchannels = 1, events = 1000, nbins = 1)
```
ok that seemed to work
```
workspace.SetName('BinnedWorkspace')
# File name encodes the configuration: channels, events, bins, nuisance params.
workspace.writeToFile("output/workspace{}channels{}events{}bins{}nps.root".format(1,1000,1,0))
```
```
# Each scan below varies one knob of the workspace while holding the others at
# the defaults (1 channel, 1000 events, 1 bin, 0 nuisance parameters). The
# original repeated the build/name/write triple in all three loops; factor it
# into one helper so the file-naming scheme lives in a single place.
def _write_workspace(chans, events, bins, nps):
    """Build a workspace for one configuration and write it to ./output/."""
    workspace = get_workspace(nchannels = chans, events = events, nbins = bins, nnps = nps)
    workspace.SetName('BinnedWorkspace')
    workspace.writeToFile("output/workspace{}channels{}events{}bins{}nps.root".format(chans, events, bins, nps))

# Defaults kept as module-level names in case later cells reference them.
events, chans, nps, bins = 1000, 1, 0, 1

# Scan over the number of bins.
for bins in [1,10,20,30,40,50,60,70,80,90,100]:
    _write_workspace(chans, events, bins, nps)
bins = 1

# Scan over the number of events.
for events in [10,100,1000,10000,100000,1000000,10000000]:
    _write_workspace(chans, events, bins, nps)
events = 1000

# Scan over the number of nuisance parameters.
for nps in range(10):
    _write_workspace(chans, events, bins, nps)
```
| github_jupyter |
```
import pandas as pd
# Silence pandas' SettingWithCopyWarning: later cells knowingly assign into
# column selections of df.
pd.options.mode.chained_assignment = None  # default='warn'
# Scraped Glassdoor postings; one row per (data scientist) job listing.
df = pd.read_csv("glassdoor_jobs.csv")
df.head()
# Each row corresponds to a job (data scientist job)
```
### Things to do
* salary parsing
* company name text only
* state field
* age of company
* parsing of job description (python, etc.)
```
df.shape
# Number of missing values in the dataframe
df.isnull().sum()
# Column dtypes: salary and several other columns arrive as raw strings.
df.dtypes
```
## Salary Parsing
```
# Count rows with a missing salary estimate (encoded as the string "-1").
(df['Salary Estimate'] == "-1").sum()
# remove the rows containing "-1"
df = df[df['Salary Estimate'] != "-1"]
df.shape
# use [regex or lambda functions] to remove symbols and text in Salary Estimate
# a lambda is like a normal function in one line of code
salary = df['Salary Estimate'].apply(lambda x: x.split('(')[0])
# split till '(', and then take the 1st element of the list
salary
# remove the K's and $ sign
minus_kd = salary.apply(lambda x: x.replace('K','').replace('$',''))
minus_kd
# some salary estimates are *per hour and *employer provided. Flag them in new columns.
df['hourly'] = df['Salary Estimate'].apply(lambda x: 1 if 'per hour' in x.lower() else 0) # ternary statement - if else in one line
df['employer provided'] = df['Salary Estimate'].apply(lambda x: 1 if 'employer provided salary' in x.lower() else 0)
df.head()
# Remove 'per hour' and 'employer provided' from Salary Estimates since we have columns for them now
min_hr = minus_kd.apply(lambda x: x.lower().replace('per hour','').replace('employer provided salary:',''))
# Sanity check that the markers were removed.
# Bug fix: `'per hour' in min_hr` tests membership in the Series *index*
# (its row labels), not in its values, so the original always printed False
# regardless of the data. Check the values with str.contains instead.
if min_hr.str.contains('per hour').any() or min_hr.str.contains('employer provided salary:').any():
    print(True)   # some rows still contain a marker -> removal failed
else:
    print(False)  # all markers removed
# Split salary -> minSalary-maxSalary into separate columns
# split --> [0]-[1]
# NOTE(review): values appear to be in thousands of dollars after the 'K'
# removal above -- confirm before reporting absolute figures.
df['min_salary'] = min_hr.apply(lambda x: int(x.split('-')[0]))
df['max_salary'] = min_hr.apply(lambda x: int(x.split('-')[1]))
df['avg_salary'] = (df.min_salary + df.max_salary)/2
df.head()
```
## Company name text only
```
# get company name based on ratings as well (since some ratings have -1), remove ratings from company name - 3 char long
# axis=1 --> apply the lambda row-by-row
# [:-4] --> take all data other than last 4, which are \n + ratings
# NOTE(review): assumes the rating suffix is always exactly 4 characters
# ('\n' plus 'x.y') -- confirm against the raw data.
df['company_txt'] = df.apply(lambda x : x['Company Name'] if x['Rating']<0 else x['Company Name'][:-4], axis=1 )
```
## State Field
```
# Take the shortform of the states
# NOTE(review): split(',')[1] raises IndexError for any Location without a
# comma -- confirm all rows are "City, ST" formatted.
df['job_state'] = df['Location'].apply(lambda x: x.split(',')[1])
# find how many jobs are there in each state --> using value_counts()
df.job_state.value_counts()
# Find out if the actual job is at the company's headquarter location
df['same_state'] = df.apply(lambda x : 1 if x.Location == x.Headquarters else 0, axis=1)
```
## Age of company
```
# subtract age of company from current year
# NOTE(review): 2021 is hard-coded; missing Founded values (-1) are passed
# through unchanged by the `x < 1` branch.
df['age'] = df.Founded.apply(lambda x : x if x < 1 else 2021 - x)
df.head()
```
## Parsing of job description (python, excel, etc.)
```
# find how many jobs require python
df['python_yn'] = df['Job Description'].apply(lambda x: 1 if 'python' in x.lower() else 0)
df.python_yn.value_counts()
# r studio
# NOTE(review): searches the two-word form 'r studio'; postings that write
# 'RStudio' will not match -- confirm this is acceptable.
df['r_yn'] = df['Job Description'].apply(lambda x: 1 if 'r studio' in x.lower() else 0)
df.r_yn.value_counts()
# spark
# Bug fix: the original copied the R-Studio lambda verbatim, so the 'spark'
# column actually flagged 'r studio' and then displayed python_yn counts.
df['spark'] = df['Job Description'].apply(lambda x: 1 if 'spark' in x.lower() else 0)
df.spark.value_counts()
# aws
# NOTE(review): plain substring match -- 'aws' also matches words like
# 'flaws'; confirm the false-positive rate is acceptable.
df['aws'] = df['Job Description'].apply(lambda x: 1 if 'aws' in x.lower() else 0)
df.aws.value_counts()
# excel
df['excel'] = df['Job Description'].apply(lambda x: 1 if 'excel' in x.lower() else 0)
df.excel.value_counts()
df.columns
# Drop the CSV's leftover index column for the cleaned view.
df_final = df.drop(['Unnamed: 0'], axis=1)
df_final.head()
```
## Other things to do
* Job title and seniority
* Fix state Los Angeles
* Job description length
* Competitor count
* hourly wage to annual
## Job title and seniority
```
def title_simplifier(title):
    """Map a raw job title to a simplified role category.

    The first matching keyword (checked in order) decides the category;
    titles matching nothing are labelled 'na'.
    """
    lowered = title.lower()
    keyword_to_label = [
        ('data scientist', 'data scientist'),
        ('data engineer', 'data engineer'),
        ('analyst', 'analyst'),
        ('machine learning', 'mle'),
        ('manager', 'manager'),
        ('director', 'director'),
    ]
    for keyword, label in keyword_to_label:
        if keyword in lowered:
            return label
    return 'na'
def seniority(title):
    """Classify a job title as 'senior', 'jr', or 'na'.

    Args:
        title: Raw job title string.

    Return:
        'senior' for senior/lead/principal titles, 'jr' for junior ones,
        otherwise 'na'.
    """
    lowered = title.lower()
    # The original also tested 'sr.' and 'jr.' -- redundant, since any string
    # containing 'sr.' already contains 'sr' (same for 'jr.').
    if any(keyword in lowered for keyword in ('sr', 'senior', 'lead', 'principal')):
        return 'senior'
    if 'jr' in lowered:
        return 'jr'
    return 'na'
# Apply the custom classifiers to every job title.
df['job_simp'] = df['Job Title'].apply(title_simplifier) # apply a custom function to values of a column
df['job_simp'].value_counts()
df['seniority'] = df['Job Title'].apply(seniority)
df['seniority'].value_counts()
```
## Fix state Los Angeles
```
# 'Los Angeles' leaked into the state column; map it to CA and strip spaces.
df['job_state']= df.job_state.apply(lambda x: x.strip() if x.strip().lower() != 'los angeles' else 'CA')
df.job_state.value_counts()
```
## Job description length
```
# Job-description length in characters.
# Idiom: .apply(len) is the direct form of .apply(lambda x: len(x)).
df['desc_len'] = df['Job Description'].apply(len)
df['desc_len']
```
## Competitor count
```
# len of array after split
# Bug fix: missing Competitors come back from the CSV as the *string* '-1',
# which the original `x != -1` (int compare) never matched, so missing rows
# were counted as one competitor. Treat both encodings as "none".
df['num_comp'] = df['Competitors'].apply(lambda x : len(x.split(',')) if x not in (-1, '-1') else 0)
df['num_comp']
```
## hourly wage to annual
```
# some jobs pay hourly, convert them to annual
df[['hourly','min_salary','max_salary']] # group 3 columns and display as a dataframe
# NOTE(review): *2 approximates hourly -> annual assuming ~2000 working hours
# per year with salaries expressed in $K -- confirm the intended conversion.
df['min_salary'] = df.apply(lambda x : x.min_salary*2 if x.hourly==1 else x.min_salary,axis=1)
df['max_salary'] = df.apply(lambda x : x.max_salary*2 if x.hourly==1 else x.max_salary,axis=1)
df[df.hourly == 1][['hourly','min_salary','max_salary']]
df = df.drop(['Unnamed: 0'], axis = 1)
# convert dataframe to a webframe
df.to_html("data_cleaned.html")
df.to_csv("salary_data_cleaned.csv", index=False)
# index=False --> avoid getting an unwanted index column
```
| github_jupyter |
<a href="https://colab.research.google.com/github/lineality/DS-Unit-1-Sprint-2-Data-Wrangling-and-Storytelling/blob/master/module1-join-and-reshape-data/GGA_Asmnt_v9_LS_DS_121_Join_and_Reshape_Data_Assignment_Geoffrey_Gordon_Ashbrook.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
###Assignment Submission for Geoffrey Gordon Ashbrook 2019.09 DS8
_Lambda School Data Science_
# Join and Reshape datasets
Objectives
- concatenate data with pandas
- merge data with pandas
- understand tidy data formatting
- melt and pivot data with pandas
Links
- [Pandas Cheat Sheet](https://github.com/pandas-dev/pandas/blob/master/doc/cheatsheet/Pandas_Cheat_Sheet.pdf)
- [Tidy Data](https://en.wikipedia.org/wiki/Tidy_data)
- Combine Data Sets: Standard Joins
- Tidy Data
- Reshaping Data
- Python Data Science Handbook
- [Chapter 3.6](https://jakevdp.github.io/PythonDataScienceHandbook/03.06-concat-and-append.html), Combining Datasets: Concat and Append
- [Chapter 3.7](https://jakevdp.github.io/PythonDataScienceHandbook/03.07-merge-and-join.html), Combining Datasets: Merge and Join
- [Chapter 3.8](https://jakevdp.github.io/PythonDataScienceHandbook/03.08-aggregation-and-grouping.html), Aggregation and Grouping
- [Chapter 3.9](https://jakevdp.github.io/PythonDataScienceHandbook/03.09-pivot-tables.html), Pivot Tables
Reference
- Pandas Documentation: [Reshaping and Pivot Tables](https://pandas.pydata.org/pandas-docs/stable/reshaping.html)
- Modern Pandas, Part 5: [Tidy Data](https://tomaugspurger.github.io/modern-5-tidy.html)
# New Section
```
# Library imports
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import requests
import seaborn as sns
!ls
# Download and unpack the Instacart 2017 open dataset (several CSVs).
!wget https://s3.amazonaws.com/instacart-datasets/instacart_online_grocery_shopping_2017_05_01.tar.gz
!tar --gunzip --extract --verbose --file=instacart_online_grocery_shopping_2017_05_01.tar.gz
%cd instacart_2017_05_01
!ls -lh *.csv
# where am I?
import os
os.getcwd()
```
```
# change directories to instacart (already done by the %cd above, kept here
# commented out in case the notebook is re-run from the parent directory)
#%cd instacart_2017_05_01/
# check contents
!ls
```
# Assignment
## Join Data Practice
These are the top 10 most frequently ordered products. How many times was each ordered?
1. Banana
2. Bag of Organic Bananas
3. Organic Strawberries
4. Organic Baby Spinach
5. Organic Hass Avocado
6. Organic Avocado
7. Large Lemon
8. Strawberries
9. Limes
10. Organic Whole Milk
First, write down which columns you need and which dataframes have them.
Next, merge these into a single dataframe.
Then, use pandas functions from the previous lesson to get the counts of the top 10 most frequently ordered products.
Literally, this is asking how many orders were placed that contained that item:
##df needed:
order prior
order train
products
##df not needed:
aisles
departments
orders
##Plan:
The question is, for each item, how many orders were made that contained that item.
We need to find where the items are listed (in: products), where relevant information about those items are listed (again products: order_ID is relevant), where the orders are listed (OPPrior, OPTrain).
Which are the relevant and irrelevant data frames?
Relevant:
products (name and id number)
OPP, OPT (id number number or orders)
Merge: two order lists (oPP, opt)
filter out other orders
Use:
subset_products = products.loc[product_id, product_name]
subset.head()
merged_orders = pd.merge(order_products__train, order_products__prior, how='inner', on='order_id')
subset_merged_orders = merged_orders.loc[product_id, order_id]
subset.head()
merged
The plan is to merge the two df's we need into one.
Then slim down the df to only the rows we need.
Q: counting lines?
```
# Load each Instacart CSV and peek at its shape and first rows.
aisles = pd.read_csv('aisles.csv')
print(aisles.shape)
aisles.head()
departments = pd.read_csv('departments.csv')
print(departments.shape)
departments.head()
order_products__prior = pd.read_csv('order_products__prior.csv')
print(order_products__prior.shape)
order_products__prior.head()
order_products__train = pd.read_csv('order_products__train.csv')
print(order_products__train.shape)
order_products__train.head()
orders = pd.read_csv('orders.csv')
print(orders.shape)
orders.head()
products = pd.read_csv('products.csv')
print(products.shape)
products.head()
products.head(2)
# did not work...trying an alternative below (drop unwanted columns instead)
#subset_products = products.loc[product_id, product_name]
#subset_products.head()
```
#Step 1: removed un-needed columns
using .drop
```
#Use .drop to remove extra columns
#resource: documentation for library
#https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.drop.html
subset_products = products.drop(columns=['aisle_id', 'department_id'])
#checking
#this worked
subset_products.head(5)
#remove extraneous columns from the order-prior/train data frames
subset_order_products__prior = order_products__prior.drop(columns=['add_to_cart_order', 'reordered'])
#checking
subset_order_products__prior.head(2)
#remove extraneous columns
subset_order_products__train = order_products__train.drop(columns=['add_to_cart_order', 'reordered'])
#checking
subset_order_products__train.head(2)
```
## Step 2: concatenate the two order_prior_train files
making one file of order information
```
# This combines the two files of orders into one complete list
# (row-wise concatenation; both frames share the same columns):
all_orders = pd.concat([subset_order_products__train, subset_order_products__prior])
all_orders.head()
all_orders.shape
```
##Step 3: Merge the product name and product order files, matching them along the lines of product ID
```
#orders_products = pd.merge(subset_products, all_orders, how='inner', on='order_id')
# Using .merge to create a combined file; the default is an inner join,
# here keyed on product_id so each order row gains its product name.
# resource = https://chrisalbon.com/python/data_wrangling/pandas_join_merge_dataframe/
orders_products = pd.merge(subset_products, all_orders, on='product_id')
orders_products.shape
orders_products.head(5)
```
## Step 4: filter out just the top ten shopping items:
These are the top 10 most frequently ordered products. How many times was each ordered?
1. Banana
2. Bag of Organic Bananas
3. Organic Strawberries
4. Organic Baby Spinach
5. Organic Hass Avocado
6. Organic Avocado
7. Large Lemon
8. Strawberries
9. Limes
10. Organic Whole Milk
First, write down which columns you need and which dataframes have them.
Next, merge these into a single dataframe.
Then, use pandas functions from the previous lesson to get the counts of the top 10 most frequently ordered products.
```
# Count how many order rows contain each product; the ten largest counts are
# the ten most frequently ordered products.
orders_products['product_name'].value_counts().head(10)
top_ten = orders_products['product_name'].value_counts().head(10)
```
#Answer:
## Here are the Top 10 products and how many orders for each
```
# Display the Series of the ten most-ordered products and their order counts.
top_ten
```
## Reshape Data Section
- Replicate the lesson code
- Complete the code cells we skipped near the beginning of the notebook
- Table 2 --> Tidy
- Tidy --> Table 2
- Load seaborn's `flights` dataset by running the cell below. Then create a pivot table showing the number of passengers by month and year. Use year for the index and month for the columns. You've done it right if you get 112 passengers for January 1949 and 432 passengers for December 1960.
```
# seaborn's bundled flights dataset: monthly airline passenger counts.
flights = sns.load_dataset('flights')
flights.head()
flights.shape
```
##Let's pivot the table so that there is a Tidy format with the index being the year:
```
# Ref: documentation: https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.pivot_table.html
# Years as rows, months as columns, passenger counts as the cell values.
year_flights = flights.pivot_table(index='year', columns='month', values='passengers')
year_flights
```
## Join Data Stretch Challenge
The [Instacart blog post](https://tech.instacart.com/3-million-instacart-orders-open-sourced-d40d29ead6f2) has a visualization of "**Popular products** purchased earliest in the day (green) and latest in the day (red)."
The post says,
> "We can also see the time of day that users purchase specific products.
> Healthier snacks and staples tend to be purchased earlier in the day, whereas ice cream (especially Half Baked and The Tonight Dough) are far more popular when customers are ordering in the evening.
> **In fact, of the top 25 latest ordered products, the first 24 are ice cream! The last one, of course, is a frozen pizza.**"
Your challenge is to reproduce the list of the top 25 latest ordered popular products.
We'll define "popular products" as products with more than 2,900 orders.
```
##### YOUR CODE HERE #####
```
## Reshape Data Stretch Challenge
_Try whatever sounds most interesting to you!_
- Replicate more of Instacart's visualization showing "Hour of Day Ordered" vs "Percent of Orders by Product"
- Replicate parts of the other visualization from [Instacart's blog post](https://tech.instacart.com/3-million-instacart-orders-open-sourced-d40d29ead6f2), showing "Number of Purchases" vs "Percent Reorder Purchases"
- Get the most recent order for each user in Instacart's dataset. This is a useful baseline when [predicting a user's next order](https://www.kaggle.com/c/instacart-market-basket-analysis)
- Replicate parts of the blog post linked at the top of this notebook: [Modern Pandas, Part 5: Tidy Data](https://tomaugspurger.github.io/modern-5-tidy.html)
```
##### YOUR CODE HERE #####
```
| github_jupyter |
<img src="https://static.spacecrafted.com/cb0d364909d24dda9229292f57656d38/i/e36dc24e36ab42d88109b8366d596ff1/1/4SoifmQp45JMgBnHp7ed2/NYCDSA.png" width=450 align='center'>
# Readability Counts: Best Practices in Python Coding
# Part I
## Introduction
Thank you for joining this **NYC Data Science Academy** live learning session! Today we will be discussing different ways you can format and structure your Python code so that it aligns with common **best coding practices**. Using these best practices is very important because your code will be more interpretable by others (and yourself). Colleagues will consider you a more suitable team member which will aid in collaboration within a company and on team projects. The importance of this cannot be overstated. Follow the best practices we discuss today so that you are a more attractive candidate or colleague in the industry!
### Topics we will cover today:
- <a href="#interpretable">Why Write Interpretable Code?</a>
- <a href="#naming">Naming Conventions</a>
- <a href="#documentation">Documentation</a>
- <a href="#beautiful">Beautiful is Better than Ugly</a>
- <a href="#explicit">Explicit is Better than Implicit</a>
- <a href="#simple">Simple is Better than Complex</a>
- <a href="#flat">Flat is Better than Nested</a>
- <a href="#sparse">Sparse is Better than Dense</a>
- <a href="#errors">Errors Should Never Pass Silently</a>
- <a href="#ambiguity">In the Face of Ambiguity, Refuse the Temptation to Guess</a>
- <a href="#hardtoexplain">If the Implementation is Hard to Explain, It's a Bad Idea</a>
- <a href="#now">Now is Better than Never
<a name='interpretable'></a>
## Why Write Interpretable Code?
One of the key strengths of a programming language such as Python is how it can be used in a **collaborative** way. This is most obviously evident when we import modules and packages to use in our own work. Even when we write code that we don't intend to be used by others, we don't want to write it just for our "current" selves. We want to make sure that we will be able to easily pick up where we left off when we use the code in the future. Uniformly adhering to **good coding practices** will also help your syntax stay consistent no matter what or who your code is intended for.
Guido van Rossum, the creator of Python, once said that "**code is read much more often than it is written**." It is important to keep this in mind when we design our code because the success of your work may rely on how easily someone else can read and use it, not on how cleverly or efficiently it accomplishes the intended purpose.
To help Python coders maintain a universal standard of coding convention, van Rossum and a few other Python experts developed a series of documents called the **Python Enhancement Proposals**, or **PEPs**, which cover a range of different topics including Python development, backwards compatibility, style, and debugging. One of the most well-known PEPs is the **PEP8 Style Guide for Python Code**.
Another Python expert, Tim Peters, summarizes many of the key takeaways from the PEPs in a sort of Python poem. This poem is a PEP itself (PEP20) and is titled **"The Zen of Python."** We can access PEP20 using the following command:
```
import this
```
In this session, we will look more closely at a few of these aphorisms and consider some examples for each to show how to write code that embodies the essence of "The Zen of Python."
<a name='naming'></a>
## Naming Conventions
Although naming convention isn't specifically addressed in Peters' collection of aphorisms, this is an important topic that is under the umbrella of a number of the key ideas above. There are a myriad of ways to name objects in Python, but **some ways are definitely better than others**. Some common paradigms are:
- `x`: single lowercase
- `X`: single uppercase
- `lowercase`
- `lower_case_with_underscores`
- `UPPERCASE`
- `UPPER_CASE_WITH_UNDERSCORES`
- `CapitalizedWords`: also known as CamelCase
- `mixedCase`: first word lower case, following words upper case
In addition to the options above, there are other naming conventions in Python that have special meanings:
- `_single_leading_underscore`: weak "internal use" indicator, e.g. `from module import *` does not import objects whose names start with an underscore.
- `single_trailing_underscore_`: used by convention to avoid conflicts with Python keywords.
- `__double_leading_underscore`: when naming a class attribute, invokes name mangling (inside class `FooBar`, `__boo` becomes `_FooBar__boo`).
- `__double_leading_and_trailing_underscore__`: "magic" objects or attributes that live in user-controlled namespaces, e.g. `__init__`, `__add__`, or `__str__`. These are also called "special name methods." Never invent such names; only use them as documented.
Now that we know these different paradigms, let's talk about when to use them.
### Names to Avoid
Never use the characters `l` (lowercase letter el), `O` (uppercase letter oh), or `I` (uppercase letter eye) as single character variable names. Depending on the font you use, these characters can be **indistinguishable** from the numbers one and zero. If you must use `l`, use `L` instead.
You should **avoid using Python keywords and built-in function/class names as variables names**. Names such as `max`, `sum`, `class`, `list`, and others should be avoided. When these names must be used, end the name with a **single underscore** to differentiate it, such as `list_`.
### Modules and Packages
Modules should have short, all-lowercase names. Underscores can be used in the module name if it improves readability. Python packages should follow the same convention, although use of underscores in packages names is discouraged.
### Variables and Functions
Variable and function names should be lowercase. Individual words can be separated by underscores when needed to improve readability. Method names should follow the same convention as function names.
Constants should be represented by all captial letters, separated by underscores when needed to improve readability.
Additionally, use names that are representative of the meaning of the object rather than meaningless, single-character names. In general, the names `i`, `j`, and `k` should be **reserved for representing index values**.
```
# Bad
# 1
l = 16 # Not descriptive and poor character choice
list_of_values_to_hold = ['a', 'b', 'c'] # Too long
list = [1,2,3] # Overwrites the built-in list function
Variable = 5 # Incorrect use of uppercase (see Classes, Object/Class Methods, and Arguments)
# 2
def func(x): # Not descriptive
return x*2
# 3
z = [1,2,3,4,5] # Not descriptive
for x, y in enumerate(z): # Poor choice of dummy variable names
print(x, y)
# Good
# 1
tax = .125
price = 49.99
total_price = price * (price*tax)
PI = 3.14159265359
SPEED_OF_LIGHT = 3e8
# 2
def price_after_tax(price, tax):
"""
Returns the price of the item
after tax has been added.
"""
return price + (price*tax)
# 3
ex_list = [1,2,3,4,5]
for i, val in enumerate(ex_list):
print(i, val)
```
### Classes, Object/Class Methods, and Arguments
Class names should use the CapWords or CamelCase convention. There are exceptions to this, but for the most part you should follow this convention.
Object methods should always use `self` as the first argument. If another object will be called in the method, that object should be referred to as `other`.
Class methods should always use `cls` as the first argument.
```
# Bad -- this class is a deliberately wrong example for the lesson;
# the inline comments call out each convention violation.
class pythonVector(object): # Wrong naming convention
    def __init__(obj, coords): # Wrong object naming convention
        obj.coords = coords
    def __eq__(obj1, obj2): # Wrong other object naming convention
        return obj1.coords == obj2.coords
    def __notmagic__(obj): # Don't invent special name methods
        pass
    @classmethod
    def from_string(some_arg, string_coords): # Wrong cls naming convention
        try:
            as_float_list = [float(val) for val in string_coords.split()]
        except:
            pass # Don't pass exceptions silently! More on this later.
        return some_arg(as_float_list)
class some_weird_class(object): # Wrong naming convention
    pass
# Good
class Vector(object):
    """A vector defined by a list of float coordinates."""

    def __init__(self, coords):
        self.coords = coords

    def __eq__(self, other):
        """
        Compares two Vector objects for equality (the == operator).

        Args:
            self: One Vector object -> Vector
            other: Another Vector object -> Vector

        Return:
            Whether both Vectors hold equal coordinates -> bool
        """
        return self.coords == other.coords

    @classmethod
    def from_string(cls, string_coords):
        """
        Builds a Vector from a string of space-separated coordinates.

        Args:
            cls: The class constructor -> class
            string_coords: A string of coordinates, separated by spaces -> string
        """
        try:  # Attempt to parse coordinates from string
            parsed = [float(token) for token in string_coords.split()]
        except:
            raise ValueError('Could not parse coordinates from given string!')
        # Parsing succeeded: hand the float list to the constructor
        return cls(parsed)
```
<a name='documentation'></a>
## Documentation
**Clear and complete** documentation is essential when you are writing code that will be read and used by other programmers. You can think of code documentation as a **mini instruction manual**. Make sure to describe the function of your code, what it expects as input, and what the user should expect as output.
**Code commenting** is another great habit when you want to explain your thought process or what a certain piece of code is doing. However, be careful not to overcomment your code, as the syntax can quickly become cluttered and have the opposite effect.
### Docstring
Docstring is a long string used to describe what a function, method, or class does. Note that the docstring should not describe _how_ a function or class works, but rather **what it does and how to use it**. The long string should be the first line beneath the class, method, or function definition.
The example below shows docstring for both a class and the methods within the class. Docstring is written for a function in the same way as it is written for a method.
```
# Good
class Dog(object):
    """
    Models a pet dog. The Dog object has the following attributes:

        name: Name of the pet dog -> str
        breed: Breed of the pet dog -> str
        age: Age of the pet dog -> int

    And the following methods:

        __init__: Builds a new Dog object
        __str__: Gives the printable form of the Dog object
        walk_the_dog: Reports that the dog has been walked
        dog_years: Converts the dog's age to dog years
    """
    def __init__(self, name, breed, age):
        """
        Builds a Dog object from its name, breed, and age.

        Args:
            name: Name of the pet dog -> str
            breed: Breed of the pet dog -> str
            age: Age of the pet dog -> int

        Return:
            Object instance of type Dog -> Dog
        """
        self.name = name
        self.breed = breed
        self.age = age

    def __str__(self):
        """
        Gives the printable string form of the Dog object.

        Args:
            self: The Dog object the method is called on -> Dog

        Return:
            String representation of the Dog object -> str
        """
        return f'{self.name} the {self.breed}, age {self.age}'

    def walk_the_dog(self):
        """
        Reports (via print) that the dog has been walked -- intentionally simple.

        Args:
            self: The Dog object the method is called on -> Dog

        Return:
            None
        """
        print(f'You walked {self}!')

    def dog_years(self):
        """
        Converts the dog's age into dog years (age times seven).

        Args:
            self: The Dog object the method is called on -> Dog

        Return:
            Age in dog years -> int
        """
        return self.age*7
# Demonstration: build a Dog and exercise each method.
fido = Dog('Fido', 'Husky', 6)
print(fido)
fido.walk_the_dog()
fido.dog_years()
```
**Note**: We added docstring for the special name method \_\_str\_\_. This may be unnecessary if the functionality of the method is not different from what one would expect in any other class.
### Commenting
Use comments to **highlight portions of your code** that may be unclear to other programmers. The function below also includes docstring.
```
# Good
def factorial(num):
    """
    Returns the factorial of an integer.

    Args:
        num: A whole number -> int

    Return:
        The factorial of num -> int

    Raises:
        ValueError: If num is negative (factorial is undefined there).
    """
    if num < 0:
        raise ValueError('factorial() is undefined for negative numbers')
    # Base case: 0! == 1! == 1. (Using <= 1 also terminates the recursion
    # for num == 0, which the original `num == 1` base case missed --
    # factorial(0) recursed forever.)
    if num <= 1:
        return 1
    # Recursive call of the function, until the base case is reached
    return factorial(num-1)*num
```
**Note**: We can access the docstring of a function in Jupyter Notebook by writing the function name followed by a `?`. This can be very useful when trying to understand how a function should be used, which is the point of writing docstring in the first place!
```
sum?
```
<a name='beautiful'></a>
## Beautiful is Better than Ugly
Coding beautifully is one of the best ways you can make your code **more interpretable**. What does beautiful code look like? The syntax is **separated into logical sections**. There aren't **too many characters** on a single line (PEP8 suggests max 79 characters per line). **Indentation** is correct (this is actually necessary for Python). There is **correct and consistent spacing** between arguments in lists, dictionaries, and functions and between operators and operands. All of this can help the reader **parse your syntax** in a logical way which will help them to understand the function of the code and how to use it.
There are a few styles you can use to make your code more beautiful. As long as the style you choose is in the realm of acceptable convention, you can use whatever works best for you. However, it's very important to **be consistent with whatever style you use**. Don't swap between different styles throughout your code. **Always be consistent!**
### Spacing
It is important that you are cognizant of **how your code looks on the screen**. Code that has very few spaces and no lines separating code chunks can be very **hard to read** and, therefore, hard to follow. It is good practice to add empty lines between statements or chunks of code. Don't overdo this, but it can be helpful to **separate thoughts** or steps in your code.
```
# Bad
# Incorrect or inconsistent spacing
first_sentence= "This is a sentence with some words to count.\n"
second_sentence ="This is the second sentence, and it has a comma."
full_sentence=first_sentence+ second_sentence
def count_words (sentence): # Don't put spaces between function names and arguments!
punctuation_list=['.',',']
for punct in punctuation_list:
sentence=sentence.replace(punct,'')
word_dict={}
for word in sentence.split():
word_dict[word]=word_dict.get(word,0)+1
return word_dict
count_words(full_sentence)
# Good
# Use consistent and correct spacing
first_sentence = "This is a sentence with some words to count.\n"
second_sentence = "This is the second sentence, and it has a comma."
# Add lines between code chunks to separate ideas into logical segments
full_sentence = first_sentence + second_sentence
def count_words(sentence):
"""
Function that returns a dictionary containing the counts
of different words in a string. Removes punctuation.
Args:
sentence: A string -> str
Return:
A dictionary of word counts -> dict
"""
# Remove punctuation, just a few examples for simplicity
punctuation_list = ['.', ',']
for punct in punctuation_list:
sentence = sentence.replace(punct, '')
# Create an empty dictionary and store each word as a
# key in the dictionary, with the value being the count.
word_dict = {}
for word in sentence.split():
word_dict[word] = word_dict.get(word, 0) + 1
return word_dict
```
In the bad example above, notice how everything is very crowded which makes it **difficult to delineate the different steps** in the code. Keep your code chunks in logical segments to make reading the code easier.
### Maximum Characters on a Line
PEP8 suggests **no more than 79 characters** on a single line. However, if you don't want to count the characters of each of your lines, think about this: does your statement sound like a **run-on sentence** if you read the logic to yourself? If it does, split it up into separate lines.
There is discussion about why this number in particular was chosen, and there are a few reasons. There are psychological implications in terms of reading comprehension based on line length. Also, many devices can show only around 80 characters on a single line (with a standard font). Using this convention for maximum line length helps to keep statements on a single line on the screen, even when people are setting up windows side by side on a monitor.
```
# Bad
three_digit_prods = [number1 * number2 for number1 in range(100,1000) for number2 in range(100,1000)]
# Good
num_range1 = range(100,1000)
num_range2 = range(100,1000)
three_digit_prods = [number1 * number2
for number1 in num_range1
for number2 in num_range2]
```
When splitting up a statement that contains operators onto multiple lines, make sure the **operator precedes the operand**.
```
# Bad
total = (variable_one
+ variable_two -
variable_three *
variable_four)
# Good
total = (variable_one
+ variable_two
- variable_three
* variable_four)
```
When splitting up a function call and its arguments onto multiple lines, make sure that the **indentation is aligned**.
```
# Bad
def function(arg1, arg2,
arg3, arg4):
return some_stuff
# Good
def function(arg1, arg2,
arg3, arg4):
return some_stuff
```
### Using Keywords
Python includes **special keywords** that we can use to make our code easier to read. Some examples of these keywords are:
- `and`
- `or`
- `not`
- `is`
These keywords help us write code that makes sense when read from a **human language perspective**. It makes our "coding grammar" easier to digest.
```
# Bad
# 1
if condition1 & condition2 | (not condition3):
print('Not pretty.')
# 2
if is_correct(a) & b == 10 | s == 'include':
print('Could be better.')
# Good
# 1
if condition1 and condition2 or not condition3:
print('Looks good!')
# 2
if is_correct(a) and b == 10 or s == 'include':
print('Hurray!')
```
**Note**: If you are trying to use the truth value of some object to drive the flow of your code, know that most empty objects have a truth value of `False` and an object with any value has a truth value of `True`. Also, use the truth value itself instead of using the comparison operator to check if the value is `True` or `False`.
```
# Bad
condition1 = True
condition2 = False
if condition1 != False and condition2 is False:
print('Just use the truth value, don\'t use comparison operators!')
# Good
condition1 = True
condition2 = False
empty_list = [] # False
zero_int = 0 # False
empty_str = '' # False
non_empty_list = [1,2,3] # True
non_zero_int = 5 # True
non_empty_str = 'False' # This one is tricky! Try it out.
if empty_list or zero_int or empty_str:
print('This will not print!')
if non_empty_list and non_zero_int and non_empty_str:
print('This will print!')
if condition1 or condition2:
print('You used the truth value to control the flow of the code! Nice!')
```
### Helper Functions
We often write functions that involve multiple operations. Trying to understand the inner workings of a function that contains a lot of raw code can be overwhelming.
To help our code's readability, we can write **helper functions**. These functions contain smaller portions of the overall goal. These smaller functions are then called by the main function. This way, we can more **logically segment** the steps we take to get to our finished product.
```
# Could be better
# This code uses binary search to find a hidden value
# between two user-defined bounds.
def guess_num():
"""
Guesses a hidden value in a range specified by the user.
The final answer is printed.
Args:
No args
Return:
No return
"""
while True:
try:
lower_bound = int(input('Please enter a lower bound as an integer:'))
upper_bound = int(input('Please enter an upper bound as an integer:'))
except ValueError:
print('Please enter the bound in the form of an integer (e.g. 10 or 40).')
continue
if lower_bound >= upper_bound:
print('Please enter a lower bound that is strictly less than the upper bound.')
else:
break
while lower_bound != upper_bound:
midpoint = (upper_bound + lower_bound) // 2
print(f'Current range: [{lower_bound}, {upper_bound}]')
while True:
answer = input(f'Is {midpoint} greater than or equal to your hidden number? Please answer with Y/N.')
if answer.lower() in ['yes', 'y']:
answer = True
break
elif answer.lower() in ['no', 'n']:
answer = False
break
else:
print('Please answer with Yes/No or Y/N.')
if answer:
upper_bound = midpoint
else:
lower_bound = midpoint + 1
print(f'Your hidden number is {lower_bound}!')
# Good
# The different steps are now split up into helper functions.
# This solution goes beyond to implement recursion.
def get_check_bounds():
"""
Asks for an upper and lower bound and checks if they are valid.
Args:
No args
Return:
A tuple of the form (lower_bound, upper_bound) -> tuple (int, int)
"""
while True:
try:
lower = int(input('Please enter a lower bound as an integer:'))
upper = int(input('Please enter an upper bound as an integer:'))
except ValueError:
print('Please enter the bound in the form of an integer (e.g. 10 or 40).')
continue
if lower >= upper:
print('Please enter a lower bound that is strictly less than the upper bound.')
else:
return lower, upper
def ask_question(prompt):
"""
Asks the user a question and checks if the response
is valid (of the form 'Y' or 'N').
Args:
prompt: A prompt for the user -> str
Return:
A boolean to indicate response to the question -> bool
"""
while True:
answer = input(prompt)
if answer.lower() in ['yes', 'y']:
return True
elif answer.lower() in ['no', 'n']:
return False
else:
print('Please answer with Yes/No or Y/N.')
def guess_num(lower_bound, upper_bound):
"""
Given a lower bound and upper bound, guess a hidden value in the range.
Args:
lower_bound: Lower bound of the range -> int
upper_bound: Upper bound of the range -> int
Return:
No return
"""
if lower_bound == upper_bound:
print(f'Your hidden number is {lower_bound}!')
else:
midpoint = (upper_bound + lower_bound) // 2
print(f'Current range: [{lower_bound}, {upper_bound}]')
prompt = f'Is {midpoint} greater than or equal to your hidden number? Please answer with Y/N.'
# Here is one of our helper functions
if ask_question(prompt):
# This is a recursive call of the function
guess_num(lower_bound, midpoint)
else:
guess_num(midpoint + 1, upper_bound)
# Here is our other helper function
guess_num(*get_check_bounds())
```
These are just a few good ways to improve the formatting of your code to make it easier to read and digest. Try to adopt these habits!
# Part II
<a name='explicit'></a>
## Explicit is Better than Implicit
When in doubt, you should **write your code as explicitly as possible**. This means to be as literal as you can so there is no room for incorrect interpretation on the side of code reader.
### Naming Conventions Again
We have already discussed naming conventions, but those concepts also apply to this aphorism. Choose names that are **descriptive and leave no room for interpretation or ambiguity**.
```
# Bad
# 1
x = 'John Smith' # Not descriptive
y, z = x.split() # Not descriptive
# 2
def read(filename): # This is an implicit function
# Code for reading different file types
# depending on the file extension
pass
# Good
# 1
name = 'John Smith'
first_name, last_name = name.split()
# 2
def read_csv(filename): # This is an explicit function
# Code for reading a csv
pass
def read_json(filename): # This is an explicit function
# Code for reading a json
pass
```
### Calling Objects from Modules and Packages
Another opportunity for using explicit rather than implicit syntax is how you should refer to objects imported from modules and packages. You should try to refer to a module when calling a function or class from that module.
```
# Bad
# 1
from math import *
r = 5
area = pi * r**2
# 2
from requests import *
response = get('www.yelp.com')
# Good
# 1
import math
r = 5
# Explicitly calling pi from math
circle_area = math.pi * r**2
# 2
import requests
# Explicitly calling get from requests
response = requests.get('www.yelp.com')
```
**Note**: It is generally frowned upon to import all the objects within a module or package into your namespace like we did in the "bad" cell above. This can create conflicts with other objects you may have or will create in your namespace. When in doubt, it's better to explicitly call the objects from the correct module or package. For modules with very long names that you don't want to repeatedly write out, you can use an alias to shorten the module or package name. Two good examples are the `numpy` and `pandas` aliases, which are used universally. You can decide on your own aliases for other modules and packages.
```
# Alias example
import numpy as np
import pandas as pd
# Using the np alias for numpy
my_ary = np.array([1,2,3,4,5])
# Using the pd alias for pandas
my_df = pd.DataFrame([[1, 'a', 5.6],
[2, 'b', 10.9]],
columns = ['rank', 'name', 'return'])
```
<a name='simple'></a>
## Simple is Better Than Complex
Try to write **simple** rather than overly complicated code. Recognizing when you can simplify your code comes with practice and experience. There are many **built-in Python functions** that you can use to simplify your code. Become familiar with these functions so you don't need to reinvent the wheel all the time.
```
# Bad
# All of these examples try to perform these operations
# essentially from scratch. Look for more "Pythonic" ways
# to write your code. It will look more professional.
# 1
my_investments = [1000, 2000, 3500]
sum_of_investments = 0
for investment in my_investments:
sum_of_investments += investment
# 2
words = ['first', 'second', 'third']
index = 1
for word in words:
print(index, ": ", word)
index += 1
# 3
classes = ['Intro to Python', 'R Data Analysis', 'Python Machine Learning']
grades = [98, 96, 89]
grade_dict = {}
for idx in range(len(classes)):
grade_dict[classes[idx]] = grades[idx]
# 4
# This function requires use of the Counter class from
# the collections module. Although this can be a very useful
# class, consider whether we can use built-in Python tools.
from collections import Counter
def same_num_xo(xo_string):
xo_string = xo_string.lower()
counts = Counter(xo_string)
if counts['x'] == counts['o']:
return True
else:
return False
# Good
# All of these examples use built-in Python
# functions to help simplify the code.
# 1
my_investments = [1000, 2000, 3500]
sum_of_investments = sum(my_investments)
# 2
words = ['first', 'second', 'third']
for idx, word in enumerate(words, 1):
print(f'{idx}: {word}')
# 3
classes = ['Intro to Python', 'R Data Analysis', 'Python Machine Learning']
grades = [98, 96, 89]
grade_dict = dict(zip(classes, grades))
print(grade_dict)
# 4
def same_num_xo(xo_string):
"""
CodeWars challenge. Checks if the given string has the same
number of x's and o's.
Args:
xo_string: A string containing any characters -> str
Return:
True or False based on the count of x's and o's -> bool
"""
s = xo_string.lower()
return s.count('x') == s.count('o')
```
## Complex is Better Than Complicated
Although simple is often better than complex, sometimes problems require **complex solutions** to solve the problem efficiently. Don't try to oversimplify your solutions using base Python if a more complex approach gets the job done **better and in a more interpretable way**.
Don't always try to oversimplify your code by using base Python classes. It is often the case that there are other more advanced classes which are more optimized for certain operations. Don't reinvent the wheel, because your "wheel" will often be more like a square when compared with the existing tools!
```
# Bad
# 1
# Consider the operation you're trying to perform and
# whether there is a more appropriate data structure
# that can be used to accomplish the task.
list1 = range(1000)
list2 = range(1000)
total_list = []
for idx in range(len(list1)):
total_list.append(list1[idx] + list2[idx])
# 2
# Finding the transpose of a matrix
matrix_list = [[1,2,3],[4,5,6]]
matrix_trans = list(zip(*matrix_list))
# 3
def pig_it(text):
text = text.split()
for i in range(len(text)):
if text[i].isalnum():
text[i] = text[i][1:] + text[i][0] + 'ay'
return ' '.join(text)
# Good
# Use other Python tools that may be more complex but
# increase the efficiency and/or readability of your code.
# 1
# The numpy array is great for vector operations
import numpy as np
ary1 = np.arange(1000)
ary2 = np.arange(1000)
total_ary = ary1 + ary2
# 2
# Numpy also has a matrix data structure
mat = np.matrix([[1,2,3],[4,5,6]])
mat_trans = mat.T
# 3
def pig_it(text):
"""
CodeWars challenge. This function takes a sentence
and converts each word into pig Latin.
Args:
text: A sentence -> str
Return:
The sentence with each word converted to pig Latin
"""
lst = text.split()
# Use list comprehensions in place of for loops
# when it's appropriate.
return ' '.join(word[1:] + word[:1] + 'ay'
if word.isalpha()
else word
for word in lst)
```
A great place to gain more exposure to complex coding approaches is on coding challenge websites like [HackerRank](https://www.hackerrank.com/) and [CodeWars](https://www.codewars.com/). There will often be different test cases to gauge the correctness of your solution. The last few cases generally test the efficiency of your code and will not pass if your code takes too long to run. When you work on these problems, try to solve them on your own first. If after a few tries you can't come up with a solution or your solution only passes the first few test cases, check the **discussion board** for that problem. You can find solutions from other coders, and users on the site can upvote more popular answers. Reading these popular solutions helps immensely to **increase your coding maturity**. Make sure to really dig into the code to see how these solutions differ from your own, and try to identify areas for optimization.
<a name='flat'></a>
## Flat is Better Than Nested
When we **nest operations** within each other, the logic becomes more difficult to follow. There are some situations where we can't avoid this, but if it can be avoided you should try to **keep your code as flat as possible**.
```
# Bad
# 1
condition1, condition2, condition3 = False, True, False
if condition1:
print('This prints if only condition1 is True.')
else:
if condition2:
print('Use elif instead of nested if/else.')
else:
if condition3:
print('Using elif will be easier to read.')
else:
print('Avoid these nested if/else statements.')
# 2
num_range = range(1,20)
odd_numbers = []
for num in num_range:
if num % 2 == 1:
odd_numbers.append(num)
# Good
# 1
# Multiple control flow conditions
condition1, condition2, condition3 = False, False, False
if condition1:
print('This prints if only condition1 is True.')
elif condition2:
print('This prints if condition1 is False and condition2 is True.')
elif condition3:
print('This prints if both condition1 and condition2 are False and condition3 is True.')
else:
print('This prints if condition1, condition2, and condition3 are False.')
# 2
# Use list comprehension instead of for loops
# when possible.
num_range = range(1,20)
odd_numbers = [num for num in num_range if num % 2 == 1]
```
### Modules and Packages
When writing code for modules and packages, try not to have too many **levels of nested objects**. Design your code so you don't need to nest classes within classes or sub sub modules within sub modules. Prefer **shallow, rather than deep**, nesting. If importing functionality from your package requires a statement such as `import package.module.submodule.subsubmod.function`, reconsider how you are organizing your code.
<a name='sparse'></a>
## Sparse is Better than Dense
Although we should try to write concise code, don't do so at the expense of readability. Don't try to write code all on one line when you can write the same code on multiple lines in a more interpretable way. This will infuriate your coworkers when they try to decipher your super dense lines of code. There's a **delicate balance** here that you should try to reach.
Don't confuse striving for sparsity with reducing the efficiency of your code. Your goal should still be efficiency, but try to do so in a way where the code is still easy to read.
```
# Bad
# 1
print('\n'.join("%i bytes = %i bits which has %i possible values." % (j, j*8, 256**j-1) for j in (1 << i for i in range(8))))
# 2
is_raining = True
have_umbrella = True
umbrella_has_holes = False
wet_level = ('dry' if have_umbrella and not umbrella_has_holes
else 'damp' if is_raining and have_umbrella and umbrella_has_holes
else 'drenched' if is_raining and not have_umbrella
else 'dry')
# 3
list(filter(lambda val: val % 2 == 0, map(lambda num: num**2, range(10))))
# Good
# 1
byte_value_gen = (1 << num for num in range(8))
fmt_string = "{0} bytes = {1} bits which has {2} possible values."
print('\n'.join(fmt_string.format(byte, byte*8, 256**byte-1) for byte in byte_value_gen))
# 2
is_raining = True
have_umbrella = True
umbrella_has_holes = True
if have_umbrella and not umbrella_has_holes:
wet_level = 'dry'
elif is_raining and have_umbrella and umbrella_has_holes:
wet_level = 'damp'
elif is_raining and not have_umbrella:
wet_level = 'drenched'
else:
wet_level = 'dry'
# 3a
num_range = range(10)
sqr_nums = map(lambda num: num**2, num_range)
even_sqr_nums = list(filter(lambda val: val % 2 == 0, sqr_nums))
print(even_sqr_nums)
# 3b, even better
even_sqr_nums = [num**2 for num in range(10) if num**2 % 2 == 0]
print(even_sqr_nums)
```
**Note**: If/else one-liners are generally OK if the conditions aren't too complicated. For example:
`is_even = True if num % 2 == 0 else False`
<a name='errors'></a>
## Errors Should Never Pass Silently (Unless Explicitly Silenced)
When handling errors in Python, make sure you are **aware** of what exceptions are being raised. It is often necessary to handle exceptions differently based on what error occurred. For example, let's say we want to find the reciprocal of each element in a list. Make sure that you **handle different exception types correctly**.
```
# Bad
value_list = ['word', 0, 50]
for val in value_list:
try:
reciprocal = 1/int(val)
except:
pass
else:
print(f'The reciprocal of {val} is: {reciprocal}')
# Good
value_list = ['word', 0, 50]
for val in value_list:
try:
print(f'The value is: {val}')
reciprocal = 1/int(val)
except Exception as e:
print(type(e), e)
else:
print(f'No error occurred. The reciprocal is: {reciprocal}')
```
Notice in the bad example above, **we aren't notified** in any way when we get an error while trying to calculate the reciprocal of two of the three elements. In this simple example, we can see that if we had three valid values in our `value_list`, then we should end up with three reciprocals. However, in a more complicated example when we're applying some operation to many elements, the errors can be accidentally ignored which could cause serious problems with our result (namely we would have a bunch of missing values). The key idea here is that **these issues would be unknown to us because of the silent error handling**.
The good example **catches every exception type** and prints out the type and the error message so we know when any kind of error is occurring.
Let's consider another example. In the example below, we have a list of dates as strings. We want to convert these strings into true `datetime` objects.
```
# Bad
from datetime import datetime
string_dates = ['July 5, 2018', 'Oct 16, 2017', 'September 13, 2020',
'01/10/2016', 'May 27, 2015', 'April 34, 2019']
datetime_dates = []
for date in string_dates:
try:
parsed_date = datetime.strptime(date, "%B %d, %Y")
datetime_dates.append(parsed_date)
except:
datetime_dates.append(None)
```
Because we passed the exceptions silently, we have **no evidence** that there were a few issues while parsing the date strings. When we look closer at our list of date strings, we can see that 'Oct' is abbreviated, the January date is in a different format, and the April date has the day 34.
Let's run this again with **better exception handling**.
```
# Good
from datetime import datetime
string_dates = ['July 5, 2018', 'Oct 16, 2017', 'September 13, 2020',
'01/10/2016', 'May 27, 2015', 'April 34, 2019']
datetime_dates = []
for idx, date in enumerate(string_dates):
try:
parsed_date = datetime.strptime(date, "%B %d, %Y")
datetime_dates.append(parsed_date)
except Exception as e:
print(f'{type(e)} occurred for element at index: {idx}')
print(e)
datetime_dates.append(None)
```
Now we have output that **alerts us** that we are encountering errors in our date parsing operation. For a list of thousands of dates, this can be very helpful so that we're not blindly adding `None` to our data when we can potentially avoid it.
For these formatting issues, we can try the most common formats and only replace with `None` if the date is in a difficult or invalid format. Next, we'll write a function that checks if our date can be parsed by some common date formats. We can pass the errors silently within this function because our main operation will alert us if a date cannot be parsed (since the function will return the most common format if a working format can't be found, which will raise an exception in the main operation).
```
# Better
from datetime import datetime
# Helper function!
def date_fmt_checker(date_string, formats):
"""
Returns the correct format (from a list) for parsing a date string.
Args:
date_string: A date in string format -> str
formats: A list of strings which represent datetime parsing formats -> list
Return:
The format to use for parsing or the default format if none are correct -> str
"""
from datetime import datetime
for fmt in formats:
try:
date = datetime.strptime(date_string, fmt)
return fmt
except:
pass
return '%B %d, %Y'
string_dates = ['July 5, 2018', 'Oct 16, 2017', 'September 13, 2020',
'01/10/2016', 'May 27, 2015', 'April 34, 2019']
datetime_dates = []
formats = ['%B %d, %Y', '%b %d, %Y', '%m/%d/%Y']
for idx, date in enumerate(string_dates):
fmt = date_fmt_checker(date, formats)
try:
parsed_date = datetime.strptime(date, fmt)
datetime_dates.append(parsed_date)
except Exception as e:
print(f'{type(e)} occurred for element at index: {idx}')
print(e)
datetime_dates.append(None)
datetime_dates
```
Now we can see that most of our dates are parsed successfully. Unfortunately, the April date is still a problem because the day value is invalid. This can be handled separately by doing some **data cleaning**, although the true date is most likely lost because of the ambiguity. Is it April 30th? April 3rd? April 4th? Without additional information we may never know...
### Raising Errors
Sometimes we may need to raise errors if an operation is legal in Python but doesn't make sense in the context of our use case. For example, let's consider a function that calculates volume:
```
def calc_volume(x, y, z):
"""Takes three dimensions and calculates volume.
Args:
x: Value of the first dimension -> int/float
y: Value of the second dimension -> int/float
z: Value of the third dimension -> int/float
Return:
The volume of the shape -> int/float
"""
return x*y*z
```
This seems simple enough. However, what happens if we pass a zero or negative value to our function?
```
calc_volume(-2, 5, 6)
```
This operation is completely legal from a coding perspective, but in our case it wouldn't make sense to have a zero or negative volume. We can add a check to see if any of the dimensions are equal to or less than zero and then alert the user that there's an issue if this is the case.
```
def calc_volume(x, y, z):
"""Takes three dimensions and calculates volume.
Args:
x: Value of the first dimension -> int/float
y: Value of the second dimension -> int/float
z: Value of the third dimension -> int/float
Return:
The volume of the shape -> int/float
"""
if x <= 0 or y <= 0 or z <= 0:
raise ValueError('Dimensions must have a value greater than 0!')
return x*y*z
```
Now when we try to pass inappropriate values for our dimensions, our code won't run.
```
calc_volume(-2, 5, 6)
```
Try to think about what might go wrong when your code is used. Don't be afraid to raise your own errors where appropriate; errors are here to help!
<a name='ambiguity'></a>
## In the Face of Ambiguity, Refuse the Temptation to Guess
This idea is related to the "Explicit" section. Ambiguity is a scary concept for programmers. Why? Because **ambiguity leaves room for interpretation**. The solution or approach to solving a problem should be clear and should not need any explanation beyond what is given.
This can be difficult to implement with larger chunks of code (we can use commenting and documentation to help!), but a good way to avoid situations with ambiguity is to use tools and data structures that are appropriate to the situation.
```
# Bad
# 1
condition1 = True
condition2 = True
if not condition1 or condition2: # What is the order of operations here?
print('Hello World')
else:
print('Bye World')
# 2
import numpy as np
ary = np.arange(10)
print(ary < 6)
# Why does this raise an exception?
if ary < 6:
print('There are values less than 6 in the array.')
```
In the two examples above, there is ambiguity in the code. The ambiguity may be clear to more advanced programmers, but **you should not assume** that whoever is reading your code will be able to navigate through the confusion.
For the first example, we are running into an issue with **operator precedence**. A full table of operator precedence can be found [here](https://docs.python.org/3/reference/expressions.html#operator-precedence) (the table starts with least priority and goes to highest priority). As we can see in the table, the `not` operator is **higher priority** than the `or` operator. If our intention in this code is to evaluate the expression `condition1 or condition2` first, we should **wrap that expression in parentheses**.
For the second example, we create a numpy array of 10 values. If we evaluate the expression `ary < 6`, a **boolean array** is returned where each value indicates whether that element in the original array is less than 6. This is called **broadcasting**, where we apply an operation to each individual array element, and this is a very useful property of numpy arrays! However, if we try to evaluate the absolute truth value of the boolean array we get an error. This is because the array as a whole cannot be determined to be `True` or `False` without some additional work. The array class in numpy has some methods we can use to **aggregate the values** to a single truth value, namely the `any()` and `all()` methods.
```
# Good
# 1
condition1 = True
condition2 = True
if not (condition1 or condition2): # Order is clear
print('Hello World')
else:
print('Bye World')
# 2
import numpy as np
ary = np.arange(10)
less_than_six = ary < 6
less_than_eleven = ary < 11
print(less_than_six.any())
if less_than_six.any(): # This method applies or to each element
print('There are values less than 6 in the array.')
if less_than_eleven.all(): # This method applies and to each element
print('All the elements in the array are less than 11.')
```
These are just a few tools we can use to reduce the ambiguity of our code. Sometimes, the code may not even run until we eliminate the ambiguity (like in the array example), but the more **dangerous cases are when the ambiguous code runs without error**. Put effort into making sure that your code is **devoid of ambiguity**, it will make a big difference in the long run!
<a name='hardtoexplain'></a>
## If the Implementation is Hard to Explain, It’s a Bad Idea. If the Implementation is Easy to Explain, It May Be a Good Idea.
These two aphorisms are related to the **logic of your code**. If you have difficulty explaining your code logic to a separate party, chances are that the code will be even more difficult to understand when you're not there to walk them through it. Your code should be **self-explanatory** because you will not always be available to help others. Even if you have the most efficient, robust, and portable solution possible, **the code will be useless if it is too difficult to understand**.
That being said, don't confuse code that is easy to explain with the best possible solution. The idea here is that it _may_ be a good solution. Always **consider different ways to code up the same logic** and make a judgment call to balance efficiency and robustness with ease of understanding.
<a name='now'></a>
## Now is Better Than Never. Although Never is Often Better Than *Right* Now.
When you are writing functions, classes, and even modules, it's a good idea to **think to the future**. How will this code be used? How will the code be updated to have additional functionality? How can we write the code so it is easier to update and improve? These are all important points to consider when formatting and structuring our code.
However, although we should keep these points in mind, **don't go overboard** trying to make your code unreasonably robust. Implement the tools you know you will need, and try to limit the functionality that _may_ be used in the future. There are a number of reasons for doing this:
1. The additional functionality may never be used, which means you wasted time creating tools that don't have value.
2. The tools you add may need to be used in a way different from how you first intended.
3. Additional functionality generally means higher code complexity. This means you're requiring the user to parse more complicated code. Make sure the functionality you add is relevant to the user.
Try to get something working that **solves the immediate issues**. You can always go back and update the code to implement additional functionality. Remember to use **version control** (GitHub) so you can revert to previous versions if the updates break your code!
## Conclusion
Congratulations! You've taken your first step towards writing more interpretable code. As you mature as a programmer, remember these aphorisms, and look for places where you can incorporate these concepts. It will take directed effort at first, but as you continue to use these good practices they will become second nature. Whatever formatting practice you end up using, make sure to **always be consistent**! Let's work together to adopt these conventions so that we can easily share code and collaborate!
## References
- [PEP 8 -- Style Guide for Python Code](https://www.python.org/dev/peps/pep-0008/)
- [PEP 20 -- The Zen of Python](https://www.python.org/dev/peps/pep-0020/)
- [How to Write Beautiful Python Code With PEP 8 - Real Python](https://realpython.com/python-pep8/)
- [Python Pro Tips: Understanding Explicit Is Better Than Implicit - Miguel González-Fierro](https://miguelgfierro.com/blog/2018/python-pro-tips-understanding-explicit-is-better-than-implicit/#:~:text=In%20the%20explicit%20behavior%2C%20it,the%20code%20is%20called%20get.&text=An%20implicit%20behavior%20would%20have,manage%20internally%20the%20different%20inputs.)
- [How to Make Your Python Code More Elegant - Hannibal Liang](https://medium.com/better-programming/how-to-make-python-programming-more-elegant-and-decent-4b5962695aa9)
- [Contemplating the Zen of Python - Chaitanya Baweja](https://medium.com/better-programming/contemplating-the-zen-of-python-186722b833e5)
- [Some examples from CodeWars.com](https://www.codewars.com/)
| github_jupyter |
# Search analysis
Here I wanted to answer the following questions, answers that I found are beneath each one
Worth mentioning that the search data consists of 506,992 individual searches, all on the 10th/11th Nov 2020 - so might not be representative of wider trends but should be enough to make some high level inferences
#### What percentage of searches have nouns or verbs or both?
Nouns and verbs: 111059 (24%)
Just nouns: 305836 (68%)
Just verbs: 33486 (7%)
I suspect the true number of verbs is lower, as spacy can mislabel some words — e.g. "fund" can be a verb or a noun; in the context of government it's a noun, and spacy could have mislabelled words like that.
Thus, though, we can see that almost exactly 2/3 of searches consist entirely of nouns. This is interesting and makes intuitive sense.
"Here be opinions" -> the 'bad thing' though is that it could mean that users are making non-specific, vague searches - from which it is hard to tell (even as a human) what they want. E.g. if someone wants to sign in to their universal credit account and searches for "universal credit" then they might well get the right page but no search engine could correctly infer from the search term why that person wanted that page - other people searching for the same term could well want to apply or just get more information about it. Thus, to my mind, if we can increase the number of people making more specific searches then they would (likely) get better results and it would also be easier for us to improve search (as we'd have a better idea of user intent).
#### What percentage of searches have constrictively detected SVO triples?
I've used a _very_ constrictive method of detecting SVO triples here (ie that it must follow good grammar rules etc.) which we can be pretty sure is a bad assumption. I deliberately shied away from assuming that a search like "apply passport" means that passport is the object and apply is the verb - even though it's a pretty reasonable assumption. This is because I wanted to see how well the approach of being sure that two words are related is. Thus, 16% of 1000 searches had a triple, in theory it could be as high as 24% (the percentage of searches with a verb and noun). This is higher than I initially thought
#### What percentage of searches have entities in them?
39%. I suspect that the true value is higher as govNER has been trained on GOV.UK content which has correct casing and thus it doesn't always detect entities in strings like "universal credit" or misspellings. A simple way to get around this is to try getting a list of all unique entities from the Knowledge Graph, lowercasing them and doing a simple string match or Levenshtein distance (to account for typos).
```
import pandas as pd
import spacy
import os
from py2neo import Graph
import sys
import os  # NOTE(review): duplicate of the earlier `import os` — harmless but redundant
# Point govNER at its model files and make the language-model repo importable.
os.environ['MODEL_FILE_PATH'] = '../../govuk-knowledge-graph/data'
sys.path.append("../../govuk-language-model")
from sagemaker.container.govner.govner import GovNER
# Named-entity recogniser trained on GOV.UK content.
ner = GovNER()
# Small English spaCy pipeline used for POS tagging / dependency parsing.
nlp = spacy.load("en_core_web_sm")
search_queries_df = pd.read_csv("../data/raw/search_queries.csv")
# Keep one row per (search_term, session_id) pair so repeated identical
# searches within a session are not double counted.
search_queries_df = search_queries_df.drop_duplicates(subset=['search_term', 'session_id'])
search_queries_df.head()
class SOV:
    """Container for a subject–verb–object triple of spaCy tokens.

    Each slot holds None, a single spaCy token, or a list of tokens (e.g. a
    compound object such as "fishing licence"). The cypher_* helpers render a
    slot as a lower-cased, quote-stripped string that is safe to interpolate
    into a Cypher query built by string concatenation.
    """

    def __init__(self):
        # Slots are populated by the triple-extraction code after construction.
        self.subject = None
        self.object = None
        self.verb = None

    def cypher_subject(self):
        """Return the subject rendered as Cypher-safe text ('' if unset)."""
        return self._cypher_safe(self.subject)

    def cypher_object(self):
        """Return the object rendered as Cypher-safe text ('' if unset)."""
        return self._cypher_safe(self.object)

    def cypher_verb(self):
        """Return the verb rendered as Cypher-safe text ('' if unset)."""
        return self._cypher_safe(self.verb)

    def _cypher_safe(self, token):
        """Render a token (or list of tokens) as lower-cased text without quotes.

        Single quotes are removed because callers splice the result into a
        Cypher query string, where a stray quote would break (or inject into)
        the query.
        """
        if token is None:
            return ""
        if isinstance(token, list):
            # text_with_ws preserves the original spacing between compound parts.
            text = ''.join(t.text_with_ws for t in token)
        else:
            text = token.text
        return text.lower().strip().replace("'", "")
class Title:
    """Extracts subject/verb/object (SOV) triples from a search term or page
    title using a spaCy dependency parse.

    Triples are found for direct objects ("Get a passport") and for objects of
    prepositional phrases ("Apply for a UK passport"), with compound noun
    modifiers folded into the object and possessives treated as the subject.
    """

    def __init__(self, title, nlp):
        self.nlp = nlp       # spaCy pipeline used to parse the title
        self.title = title   # raw title / search-term string
        self.triples = []    # cache of extracted SOV triples

    def subject_object_triples(self):
        """Return the SOV triples for this title, parsing on first call only."""
        if any(self.triples):
            return self.triples
        self.triples = self._get_triples_for_title()
        return self.triples

    def _verbs(self):
        # AUX is included so auxiliaries like "be"/"have" count as verbs.
        return ["VERB", "AUX"]

    def _cypher_safe(self, words):
        # NOTE(review): appears unused within this class (SOV has its own
        # _cypher_safe) — candidate for removal.
        return [word.replace("'", "") for word in words]

    def _is_object_of_prepositional_phrase(self, token):
        """Find objects of prepositional phrases,
        e.g. "Apply online for a UK passport", "Apply for this licence".
        """
        if token.dep_ == "pobj" and token.head.dep_ == "prep" and token.head.head.pos_ in self._verbs():
            triple = SOV()
            # For "Apply for X": token=X, token.head="for", token.head.head="Apply".
            triple.verb = token.head.head
            triple.object = [token]
            # Possessive modifiers to the left of the object ("your passport")
            # are treated as the subject of the triple.
            triple.subject = []
            lefts = list(token.lefts)
            lefts.reverse()
            for left in lefts:
                if left.dep_ == "poss":
                    triple.subject.append(left)
            # Prepend compound modifiers so "fishing licence" reads naturally.
            compound_lefts = self._compound_left_compounds(token)
            if any(compound_lefts):
                compound_lefts.reverse()
                triple.object = compound_lefts + triple.object
            return [triple]

    def _is_object(self, token):
        """Find simple direct objects, e.g. "Get a passport for your child".

        TODO: extract the "for your child" part as a modifier of some kind.
        """
        if token.dep_ == "dobj" and token.head.pos_ in self._verbs():
            triple = SOV()
            # Fix: the governing verb of a direct object is token.head; the
            # previous token.head.head was only correct when that verb was the
            # sentence root (where head == head.head).
            triple.verb = token.head
            triple.object = [token]
            compound_lefts = self._compound_left_compounds(token)
            if any(compound_lefts):
                compound_lefts.reverse()
                # NOTE(review): compounds are appended here but prepended in
                # _is_object_of_prepositional_phrase; kept as-is to match the
                # triples already inserted into the knowledge graph — confirm
                # intended order before unifying.
                triple.object += compound_lefts
            return [triple]

    def _compound_left_compounds(self, token):
        """Collect the chain of `compound` modifiers directly left of `token`,
        nearest first, recursing through stacked compounds."""
        compounded_lefts = []
        lefts = list(token.lefts)
        lefts.reverse()
        for left in lefts:
            if left.dep_ == "compound":
                compounded_lefts.append(left)
                compounded_lefts += self._compound_left_compounds(left)
            else:
                # Stop at the first non-compound modifier (e.g. a determiner).
                break
        return compounded_lefts

    def _find_triples(self, token, debug=False):
        """Return the triples anchored at `token`, or None if it anchors none."""
        is_object_of_prepositional_phrase = self._is_object_of_prepositional_phrase(token)
        if is_object_of_prepositional_phrase:
            if debug:
                print("is_object_of_prepositional_phrase")
            return is_object_of_prepositional_phrase
        is_object = self._is_object(token)
        if is_object:
            if debug:
                print("is_object")
            return is_object

    def _to_nltk_tree(self, node):
        # NOTE(review): relies on `Tree`, which is never imported in this
        # notebook (presumably nltk.Tree) — only reachable with debug=True.
        if node.n_lefts + node.n_rights > 0:
            return Tree(node.orth_, [self._to_nltk_tree(child) for child in node.children])
        else:
            return node.orth_

    def _get_triples_for_title(self, debug=False):
        """Parse the title and collect every triple found across its tokens."""
        doc = self.nlp(self.title)
        if debug:
            [self._to_nltk_tree(sent.root).pretty_print() for sent in doc.sents]
        triples = []
        for token in doc:
            if debug:
                print(f"text: {token.text}")
                print(f"dep: {token.dep_}")
                print(f"head dep: {token.head.dep_}")
                print(f"head head pos: {token.head.head.pos_}")
                print(f"lefts: {list(token.lefts)}")
                print()
            subject_object_triples = self._find_triples(token, debug)
            if subject_object_triples:
                triples += subject_object_triples
        return triples
```
### Find searches with SVOs and/or entities
This is ridiculously computationally expensive so I've limited it to 1000 searches
```
triples = []
searches_with_entities = []
# Parsing + NER is slow, so only the first 1000 searches are processed.
for _index, row in search_queries_df[0:1000].iterrows():
    try:
        title = Title(row['search_term'], nlp)
        if any(title.subject_object_triples()):
            triples.append(title)
        else:
            # Only run NER when no triple was found, to classify the search
            # as "has entity" instead.
            entities = ner.entities(row['search_term'])
            if any(entities):
                searches_with_entities.append([entities, row['search_term']])
    except Exception:
        # Skip unparseable rows (e.g. non-string search terms). The original
        # bare `except: next` merely evaluated the builtin `next` and fell
        # through; `continue` states the intent, and narrowing from a bare
        # except avoids swallowing KeyboardInterrupt/SystemExit.
        continue
print(f"number of searches (out of 1000) with triples in it: {len(triples)}")
print(f"number of searches (out of 1000) with an entity in it: {len(searches_with_entities)}")
```
### Can the KG return content that matches an SVO?
Yes is the answer! It actually has some really good results (requires SVO triples to be inserted into the graph with the extract_subject_verb_object_from_titles notebook)
```
# Connect to the remote Neo4j instance that holds the knowledge graph.
host = os.environ.get('REMOTE_NEO4J_URL')
user = os.environ.get('NEO4J_USER')  # NOTE(review): unused — the literal 'neo4j' is passed below
password = os.environ.get('NEO4J_PASSWORD')
graph = Graph(host=host, user='neo4j', password = password, secure=True)
has_result = []
# For each extracted triple, look for an Action node that shares the triple's
# verb and object, then follow TITLE_MENTIONS to the content (Cid) pages.
# NOTE(review): the Cypher is built by string concatenation and relies on
# SOV._cypher_safe stripping quotes; parameterised queries would be safer.
for triple in triples:
    result = graph.run('MATCH ({name: "' + triple.subject_object_triples()[0].cypher_verb() + '"})-[:HAS_VERB|HAS_OBJECT|HAS_SUBJECT]-(n:Action)-[:HAS_VERB|HAS_OBJECT|HAS_SUBJECT]-({name: "' + triple.subject_object_triples()[0].cypher_object() + '"}) WITH n MATCH (n)-[:TITLE_MENTIONS]-(c:Cid) return c.name').data()
    if any(result):
        has_result.append([result, triple])
len(has_result)
# Show each matched search term alongside the content pages it mapped to.
for result in has_result:
    print()
    print(result[1].title)
    print(result[0])
```
### What percentage of searches have nouns, verbs or both?
```
# Classify each search term as containing just nouns, just verbs, or both.
just_nouns = 0
just_verbs = 0
nouns_and_verbs = 0
just_noun_sents = []
just_verb_sents = []
nouns_and_verbs_sents = []
for _index, row in search_queries_df[0:100].iterrows():
    try:
        doc = nlp(row['search_term'])
    except TypeError:
        # Skip non-string search terms. The original `next` here was a no-op
        # (it just evaluated the builtin), so execution fell through: the
        # first failing row hit a NameError on `doc`, and later failing rows
        # silently re-counted the previous row's parse.
        continue
    has_verb = any(token.pos_ == "VERB" for token in doc)
    has_noun = any(token.pos_ == "NOUN" for token in doc)
    # The original used `next` as a stand-in for `continue` between these
    # branches; elif makes the mutual exclusivity explicit.
    if has_verb and has_noun:
        nouns_and_verbs += 1
        nouns_and_verbs_sents.append(row['search_term'])
    elif has_verb:
        just_verbs += 1
        just_verb_sents.append(row['search_term'])
    elif has_noun:
        just_nouns += 1
        just_noun_sents.append(row['search_term'])
print(nouns_and_verbs)
print(just_nouns)
print(just_verbs)
```
| github_jupyter |
# A Pypher Overview
This notebook is an introduction to Pypher, what it can do, and how it can help manage your Cypher from within Python.
## Install the Pypher package from pypi
```
!pip install python_cypher
```
## Import what you need
For these examples we will need the `Pypher` object, the Pypher factory object `__`, and the `Param` object
```
from pypher import Pypher, __
from pypher.builder import Param
def cprint(pypher):
    """Print a Pypher query's generated Cypher and, if any, its bound params.

    str() is called on the Pypher object exactly once and the result reused:
    the original computed `c = str(pypher)` but then discarded it and called
    str(pypher) a second time, and each stringification re-renders the query.
    """
    cypher = str(pypher)
    params = pypher.bound_params
    print('Cypher:')
    print(cypher)
    if params:
        print('\nBound Params:')
        print(dict(params))
```
## Sample Pypher Queries
Lets write a few simple Cypher queries, but using Pypher.
```
p = Pypher()
p.Match.node('a').relationship('r').node('b').RETURN('a', 'b', 'r')
cprint(p)
p = Pypher()
# Fixed: the node is bound as 'u', so the RETURN must reference `u`;
# `RETURN.user` emitted "RETURN user", an unbound variable in Cypher.
p.MATCH.node('u', labels='User').RETURN.u
cprint(p)
```
In this example a complex Cypher query is built. The Pypher automatically binds the parameters passed into the functions.
```
p.reset()
p.OPTIONAL.MATCH.node('user', 'User').rel('FRIENDS_WITH').node('friend', 'User')
# continue later
p.WHERE.user.__id__ == 1234
p.RETURN(__.user, __.count('friend').alias('number_of_friends'))
cprint(p)
```
We can also manually bind parameters using the `Param` object or by calling the `.bind_param` method on the `Pypher` instances.
```
p = Pypher()
name = Param('my_name', 'Mark')
p.CREATE.node(name=name).RETURN.node()
cprint(p)
```
As you can see in both the generated Cypher and the bound_params, `my_name` is used because it was defined.
```
p = Pypher()
name = p.bind_param('Mark', 'my_param')
p.CREATE.node(name=name).RETURN.node()
cprint(p)
```
These next few examples were taken from [https://github.com/Readify/Neo4jClient/wiki/cypher-examples](https://github.com/Readify/Neo4jClient/wiki/cypher-examples)
### Create a user, only if they don't already exist
```
p = Pypher()
p.MERGE.node('user', labels='User', Id=456).ON_CREATE.user.SET(__.user.__Name__ == 'Jim')
cprint(p)
```
### Create a user and relate them to an existing one
```
p = Pypher()
p.MATCH.node('invitee', labels='User').WHERE.invitee.__id__ == 123
# Fixed typo: `lables` was passed as an arbitrary kwarg, so it became a node
# property named "lables" instead of applying the User label.
p.CREATE.node('invitee').rel_out(labels='INVITED').node('invited', labels='User')
cprint(p)
```
### Relate two existing users
This example does a few notable things:
* It breaks up the query building across a few lines. This is useful for when you want to do things like conditionally build a query
* It runs Pypher Statement objects as functions. When done this way multiple arguments can be passed in.
* It uses the Pypher factory to create anonymous chains that are passed into the statement functions.
* It accesses properties via the double underscore syntax. `user.__id__` is equivalent to `user.property('id')`
```
p = Pypher()
p.MATCH(__.node('user1', labels='User'), __.node('user2', labels='User')) # line is getting long
p.WHERE(__.user1.__Id__ == 123, __.user2.__Id__ == 456)
p.CREATE.node('user1').relationship(direction='out', labels='FRIENDS_WITH').node('user2')
cprint(p)
```
### Update a single property on a user
This example shows how to access a property via the `property` method.
```
p = Pypher()
p.MATCH.node('user', 'User').WHERE.user.property('id') == 123
p.SET(__.user.property('Age') == 25)
cprint(p)
```
## Take a Look at Partial Objects
In Pypher, Partial objects allow complex queries to be handled in another object with its own interface. Pypher comes with one called `Case` that handles the Cypher switch-case syntax.
```
from pypher.partial import Case
c = Case('')
c.WHEN(__.n.__eyes__ == 'blue', 1).WHEN(__.n.__age__ < 40, 2).ELSE(3)
p = Pypher()
p.MATCH.node().RETURN(c).alias('Result')
cprint(p)
```
| github_jupyter |
# Project 3: Smart Beta Portfolio and Portfolio Optimization
## Overview
Smart beta has a broad meaning, but we can say in practice that when we use the universe of stocks from an index, and then apply some weighting scheme other than market cap weighting, it can be considered a type of smart beta fund. A Smart Beta portfolio generally gives investors exposure or "beta" to one or more types of market characteristics (or factors) that are believed to predict prices while giving investors a diversified broad exposure to a particular market. Smart Beta portfolios generally target momentum, earnings quality, low volatility, and dividends or some combination. Smart Beta Portfolios are generally rebalanced infrequently and follow relatively simple rules or algorithms that are passively managed. Model changes to these types of funds are also rare requiring prospectus filings with US Security and Exchange Commission in the case of US focused mutual funds or ETFs.. Smart Beta portfolios are generally long-only, they do not short stocks.
In contrast, a purely alpha-focused quantitative fund may use multiple models or algorithms to create a portfolio. The portfolio manager retains discretion in upgrading or changing the types of models and how often to rebalance the portfolio in attempt to maximize performance in comparison to a stock benchmark. Managers may have discretion to short stocks in portfolios.
Imagine you're a portfolio manager, and wish to try out some different portfolio weighting methods.
One way to design portfolio is to look at certain accounting measures (fundamentals) that, based on past trends, indicate stocks that produce better results.
For instance, you may start with a hypothesis that dividend-issuing stocks tend to perform better than stocks that do not. This may not always be true of all companies; for instance, Apple does not issue dividends, but has had good historical performance. The hypothesis about dividend-paying stocks may go something like this:
Companies that regularly issue dividends may also be more prudent in allocating their available cash, and may indicate that they are more conscious of prioritizing shareholder interests. For example, a CEO may decide to reinvest cash into pet projects that produce low returns. Or, the CEO may do some analysis, identify that reinvesting within the company produces lower returns compared to a diversified portfolio, and so decide that shareholders would be better served if they were given the cash (in the form of dividends). So according to this hypothesis, dividends may be both a proxy for how the company is doing (in terms of earnings and cash flow), but also a signal that the company acts in the best interest of its shareholders. Of course, it's important to test whether this works in practice.
You may also have another hypothesis, with which you wish to design a portfolio that can then be made into an ETF. You may find that investors may wish to invest in passive beta funds, but wish to have less risk exposure (less volatility) in their investments. The goal of having a low volatility fund that still produces returns similar to an index may be appealing to investors who have a shorter investment time horizon, and so are more risk averse.
So the objective of your proposed portfolio is to design a portfolio that closely tracks an index, while also minimizing the portfolio variance. Also, if this portfolio can match the returns of the index with less volatility, then it has a higher risk-adjusted return (same return, lower volatility).
Smart Beta ETFs can be designed with both of these two general methods (among others): alternative weighting and minimum volatility ETF.
## Instructions
Each problem consists of a function to implement and instructions on how to implement the function. The parts of the function that need to be implemented are marked with a `# TODO` comment. After implementing the function, run the cell to test it against the unit tests we've provided. For each problem, we provide one or more unit tests from our `project_tests` package. These unit tests won't tell you if your answer is correct, but will warn you of any major errors. Your code will be checked for the correct solution when you submit it to Udacity.
## Packages
When you implement the functions, you'll only need to you use the packages you've used in the classroom, like [Pandas](https://pandas.pydata.org/) and [Numpy](http://www.numpy.org/). These packages will be imported for you. We recommend you don't add any import statements, otherwise the grader might not be able to run your code.
The other packages that we're importing are `helper`, `project_helper`, and `project_tests`. These are custom packages built to help you solve the problems. The `helper` and `project_helper` module contains utility functions and graph functions. The `project_tests` contains the unit tests for all the problems.
### Install Packages
```
import sys
!{sys.executable} -m pip install -r requirements.txt
```
### Load Packages
```
import pandas as pd
import numpy as np
import helper
import project_helper
import project_tests
```
## Market Data
### Load Data
For this universe of stocks, we'll be selecting large dollar volume stocks. We're using this universe, since it is highly liquid.
```
df = pd.read_csv('../../data/project_3/eod-quotemedia.csv')
percent_top_dollar = 0.2
high_volume_symbols = project_helper.large_dollar_volume_stocks(df, 'adj_close', 'adj_volume', percent_top_dollar)
df = df[df['ticker'].isin(high_volume_symbols)]
close = df.reset_index().pivot(index='date', columns='ticker', values='adj_close')
volume = df.reset_index().pivot(index='date', columns='ticker', values='adj_volume')
dividends = df.reset_index().pivot(index='date', columns='ticker', values='dividends')
```
### View Data
To see what one of these 2-d matrices looks like, let's take a look at the closing prices matrix.
```
project_helper.print_dataframe(close)
```
# Part 1: Smart Beta Portfolio
In Part 1 of this project, you'll build a portfolio using dividend yield to choose the portfolio weights. A portfolio such as this could be incorporated into a smart beta ETF. You'll compare this portfolio to a market cap weighted index to see how well it performs.
Note that in practice, you'll probably get the index weights from a data vendor (such as companies that create indices, like MSCI, FTSE, Standard and Poor's), but for this exercise we will simulate a market cap weighted index.
## Index Weights
The index we'll be using is based on large dollar volume stocks. Implement `generate_dollar_volume_weights` to generate the weights for this index. For each date, generate the weights based on dollar volume traded for that date. For example, assume the following is close prices and volume data:
```
Prices
A B ...
2013-07-08 2 2 ...
2013-07-09 5 6 ...
2013-07-10 1 2 ...
2013-07-11 6 5 ...
... ... ... ...
Volume
A B ...
2013-07-08 100 340 ...
2013-07-09 240 220 ...
2013-07-10 120 500 ...
2013-07-11 10 100 ...
... ... ... ...
```
The weights created from the function `generate_dollar_volume_weights` should be the following:
```
A B ...
2013-07-08 0.126.. 0.194.. ...
2013-07-09 0.759.. 0.377.. ...
2013-07-10 0.075.. 0.285.. ...
2013-07-11 0.037.. 0.142.. ...
... ... ... ...
```
```
def generate_dollar_volume_weights(close, volume):
    """
    Generate dollar volume weights.
    Parameters
    ----------
    close : DataFrame
        Close price for each ticker and date
    volume : DataFrame
        Volume for each ticker and date
    Returns
    -------
    dollar_volume_weights : DataFrame
        The dollar volume weights for each ticker and date
    """
    # Both frames must be aligned on dates (index) and tickers (columns)
    # for the elementwise product below to be meaningful.
    assert close.index.equals(volume.index)
    assert close.columns.equals(volume.columns)
    # Dollar volume traded per ticker/date, normalised so that each date
    # (row) sums to 1.
    total_vol = close * volume
    return total_vol.div(total_vol.sum(axis=1), axis=0)
project_tests.test_generate_dollar_volume_weights(generate_dollar_volume_weights)
```
### View Data
Let's generate the index weights using `generate_dollar_volume_weights` and view them using a heatmap.
```
index_weights = generate_dollar_volume_weights(close, volume)
project_helper.plot_weights(index_weights, 'Index Weights')
```
## Portfolio Weights
Now that we have the index weights, let's choose the portfolio weights based on dividend. You would normally calculate the weights based on trailing dividend yield, but we'll simplify this by just calculating the total dividend yield over time.
Implement `calculate_dividend_weights` to return the weights for each stock based on its total dividend yield over time. This is similar to generating the weight for the index, but it's using dividend data instead.
For example, assume the following is `dividends` data:
```
Prices
A B
2013-07-08 0 0
2013-07-09 0 1
2013-07-10 0.5 0
2013-07-11 0 0
2013-07-12 2 0
... ... ...
```
The weights created from the function `calculate_dividend_weights` should be the following:
```
A B
2013-07-08 NaN NaN
2013-07-09 0 1
2013-07-10 0.333.. 0.666..
2013-07-11 0.333.. 0.666..
2013-07-12 0.714.. 0.285..
... ... ...
```
```
def calculate_dividend_weights(dividends):
    """
    Calculate dividend weights.
    Parameters
    ----------
    dividends : DataFrame
        Dividend for each stock and date
    Returns
    -------
    dividend_weights : DataFrame
        Weights for each stock and date
    """
    # Running total of dividends paid by each stock up to each date.
    cumulative_dividends = dividends.cumsum()
    # Normalise each date (row) so the weights across stocks sum to 1.
    per_date_totals = cumulative_dividends.sum(axis=1)
    return cumulative_dividends.div(per_date_totals, axis=0)
project_tests.test_calculate_dividend_weights(calculate_dividend_weights)
```
### View Data
Just like the index weights, let's generate the ETF weights and view them using a heatmap.
```
etf_weights = calculate_dividend_weights(dividends)
project_helper.plot_weights(etf_weights, 'ETF Weights')
```
## Returns
Implement `generate_returns` to generate returns data for all the stocks and dates from price data. You might notice we're implementing returns and not log returns. Since we're not dealing with volatility, we don't have to use log returns.
```
def generate_returns(prices):
    """
    Generate returns for ticker and date.
    Parameters
    ----------
    prices : DataFrame
        Price for each ticker and date
    Returns
    -------
    returns : Dataframe
        The returns for each ticker and date
    """
    # Simple (not log) return: today's price relative to yesterday's, minus 1.
    # The first row has no prior price and therefore comes out as NaN.
    previous_prices = prices.shift(1)
    return prices.div(previous_prices) - 1
project_tests.test_generate_returns(generate_returns)
```
### View Data
Let's generate the closing returns using `generate_returns` and view them using a heatmap.
```
returns = generate_returns(close)
project_helper.plot_returns(returns, 'Close Returns')
```
## Weighted Returns
With the returns of each stock computed, we can use it to compute the returns for an index or ETF. Implement `generate_weighted_returns` to create weighted returns using the returns and weights.
```
def generate_weighted_returns(returns, weights):
    """
    Generate weighted returns.
    Parameters
    ----------
    returns : DataFrame
        Returns for each ticker and date
    weights : DataFrame
        Weights for each ticker and date
    Returns
    -------
    weighted_returns : DataFrame
        Weighted returns for each ticker and date
    """
    # Both frames must share dates (index) and tickers (columns).
    assert returns.index.equals(weights.index)
    assert returns.columns.equals(weights.columns)
    # Elementwise product: each ticker's return scaled by its weight.
    return returns.mul(weights)
project_tests.test_generate_weighted_returns(generate_weighted_returns)
```
### View Data
Let's generate the ETF and index returns using `generate_weighted_returns` and view them using a heatmap.
```
index_weighted_returns = generate_weighted_returns(returns, index_weights)
etf_weighted_returns = generate_weighted_returns(returns, etf_weights)
project_helper.plot_returns(index_weighted_returns, 'Index Returns')
project_helper.plot_returns(etf_weighted_returns, 'ETF Returns')
```
## Cumulative Returns
To compare performance between the ETF and Index, we're going to calculate the tracking error. Before we do that, we first need to calculate the index and ETF cumulative returns. Implement `calculate_cumulative_returns` to calculate the cumulative returns over time given the returns.
```
# Removed broken scratch cell: `ret` was None, so `ret.sum(axis=1)` raised
# AttributeError every time this cell ran. The working implementation is
# calculate_cumulative_returns below.
def calculate_cumulative_returns(returns):
    """
    Calculate cumulative returns.
    Parameters
    ----------
    returns : DataFrame
        Returns for each ticker and date
    Returns
    -------
    cumulative_returns : Pandas Series
        Cumulative returns for each date
    """
    # The portfolio return on each date is the sum of the (already weighted)
    # per-ticker returns; compounding is the running product of (1 + r).
    portfolio_returns = returns.sum(axis=1)
    return (1 + portfolio_returns).cumprod()
project_tests.test_calculate_cumulative_returns(calculate_cumulative_returns)
```
### View Data
Let's generate the ETF and index cumulative returns using `calculate_cumulative_returns` and compare the two.
```
index_weighted_cumulative_returns = calculate_cumulative_returns(index_weighted_returns)
etf_weighted_cumulative_returns = calculate_cumulative_returns(etf_weighted_returns)
project_helper.plot_benchmark_returns(index_weighted_cumulative_returns, etf_weighted_cumulative_returns, 'Smart Beta ETF vs Index')
```
## Tracking Error
In order to check the performance of the smart beta portfolio, we can calculate the annualized tracking error against the index. Implement `tracking_error` to return the tracking error between the ETF and benchmark.
For reference, we'll be using the following annualized tracking error function:
$$ TE = \sqrt{252} * SampleStdev(r_p - r_b) $$
Where $ r_p $ is the portfolio/ETF returns and $ r_b $ is the benchmark returns.
_Note: When calculating the sample standard deviation, the delta degrees of freedom is 1, which is the also the default value._
```
def tracking_error(benchmark_returns_by_date, etf_returns_by_date):
    """
    Calculate the tracking error.
    Parameters
    ----------
    benchmark_returns_by_date : Pandas Series
        The benchmark returns for each date
    etf_returns_by_date : Pandas Series
        The ETF returns for each date
    Returns
    -------
    tracking_error : float
        The tracking error
    """
    # Both series must cover exactly the same dates.
    assert benchmark_returns_by_date.index.equals(etf_returns_by_date.index)
    # Annualised (sqrt of 252 trading days) sample standard deviation
    # (ddof=1) of the active returns.
    active_returns = benchmark_returns_by_date - etf_returns_by_date
    return np.sqrt(252) * np.std(active_returns, ddof=1)
project_tests.test_tracking_error(tracking_error)
```
### View Data
Let's generate the tracking error using `tracking_error`.
```
smart_beta_tracking_error = tracking_error(np.sum(index_weighted_returns, 1), np.sum(etf_weighted_returns, 1))
print('Smart Beta Tracking Error: {}'.format(smart_beta_tracking_error))
```
# Part 2: Portfolio Optimization
Now, let's create a second portfolio. We'll still reuse the market cap weighted index, but this will be independent of the dividend-weighted portfolio that we created in part 1.
We want to both minimize the portfolio variance and also want to closely track a market cap weighted index. In other words, we're trying to minimize the distance between the weights of our portfolio and the weights of the index.
$Minimize \left [ \sigma^2_p + \lambda \sqrt{\sum_{1}^{m}(weight_i - indexWeight_i)^2} \right ]$ where $m$ is the number of stocks in the portfolio, and $\lambda$ is a scaling factor that you can choose.
Why are we doing this? One way that investors evaluate a fund is by how well it tracks its index. The fund is still expected to deviate from the index within a certain range in order to improve fund performance. A way for a fund to track the performance of its benchmark is by keeping its asset weights similar to the weights of the index. We’d expect that if the fund has the same stocks as the benchmark, and also the same weights for each stock as the benchmark, the fund would yield about the same returns as the benchmark. By minimizing a linear combination of both the portfolio risk and distance between portfolio and benchmark weights, we attempt to balance the desire to minimize portfolio variance with the goal of tracking the index.
## Covariance
Implement `get_covariance_returns` to calculate the covariance of the `returns`. We'll use this to calculate the portfolio variance.
If we have $m$ stock series, the covariance matrix is an $m \times m$ matrix containing the covariance between each pair of stocks. We can use [`Numpy.cov`](https://docs.scipy.org/doc/numpy/reference/generated/numpy.cov.html) to get the covariance. We give it a 2D array in which each row is a stock series, and each column is an observation at the same period of time. For any `NaN` values, you can replace them with zeros using the [`DataFrame.fillna`](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.fillna.html) function.
The covariance matrix $\mathbf{P} =
\begin{bmatrix}
\sigma^2_{1,1} & ... & \sigma^2_{1,m} \\
... & ... & ...\\
\sigma_{m,1} & ... & \sigma^2_{m,m} \\
\end{bmatrix}$
```
def get_covariance_returns(returns):
    """
    Calculate covariance matrices.

    Parameters
    ----------
    returns : DataFrame
        Returns for each ticker and date

    Returns
    -------
    returns_covariance : 2 dimensional Ndarray
        The covariance of the returns
    """
    # np.cov expects one row per variable (ticker), so transpose the
    # (dates x tickers) frame; missing returns are treated as zero.
    filled_returns = returns.fillna(value=0)
    return np.cov(filled_returns.T)
project_tests.test_get_covariance_returns(get_covariance_returns)
```
### View Data
Let's look at the covariance generated from `get_covariance_returns`.
```
# Compute the full-history covariance matrix and label it with tickers.
covariance_returns = get_covariance_returns(returns)
covariance_returns = pd.DataFrame(covariance_returns, returns.columns, returns.columns)
# Convert covariance to correlation: corr = D^-1 . cov . D^-1,
# where D = diag(standard deviations) taken from the covariance diagonal.
covariance_returns_correlation = np.linalg.inv(np.diag(np.sqrt(np.diag(covariance_returns))))
covariance_returns_correlation = pd.DataFrame(
    covariance_returns_correlation.dot(covariance_returns).dot(covariance_returns_correlation),
    covariance_returns.index,
    covariance_returns.columns)
# Visualize the resulting correlation matrix.
project_helper.plot_covariance_returns_correlation(
    covariance_returns_correlation,
    'Covariance Returns Correlation Matrix')
```
### portfolio variance
We can write the portfolio variance $\sigma^2_p = \mathbf{x^T} \mathbf{P} \mathbf{x}$
Recall that the $\mathbf{x^T} \mathbf{P} \mathbf{x}$ is called the quadratic form.
We can use the cvxpy function `quad_form(x,P)` to get the quadratic form.
### Distance from index weights
We want portfolio weights that track the index closely. So we want to minimize the distance between them.
Recall from the Pythagorean theorem that you can get the distance between two points in an x,y plane by adding the square of the x and y distances and taking the square root. Extending this to any number of dimensions is called the L2 norm. So: $\sqrt{\sum_{1}^{n}(weight_i - indexWeight_i)^2}$ Can also be written as $\left \| \mathbf{x} - \mathbf{index} \right \|_2$. There's a cvxpy function called [norm()](https://www.cvxpy.org/api_reference/cvxpy.atoms.other_atoms.html#norm)
`norm(x, p=2, axis=None)`. The default is already set to find an L2 norm, so you would pass in one argument, which is the difference between your portfolio weights and the index weights.
### objective function
We want to minimize both the portfolio variance and the distance of the portfolio weights from the index weights.
We also want to choose a `scale` constant, which is $\lambda$ in the expression.
$\mathbf{x^T} \mathbf{P} \mathbf{x} + \lambda \left \| \mathbf{x} - \mathbf{index} \right \|_2$
This lets us choose how much priority we give to minimizing the difference from the index, relative to minimizing the variance of the portfolio. If you choose a higher value for `scale` ($\lambda$), the optimization puts more emphasis on tracking the index closely, at the expense of minimizing portfolio variance.
We can find the objective function using cvxpy `objective = cvx.Minimize()`. Can you guess what to pass into this function?
### constraints
We can also define our constraints in a list. For example, you'd want the weights to sum to one. So $\sum_{1}^{n}x = 1$. You may also need to go long only, which means no shorting, so no negative weights. So $x_i \geq 0$ for all $i$. you could save a variable as `[x >= 0, sum(x) == 1]`, where x was created using `cvx.Variable()`.
### optimization
So now that we have our objective function and constraints, we can solve for the values of $\mathbf{x}$.
cvxpy has the constructor `Problem(objective, constraints)`, which returns a `Problem` object.
The `Problem` object has a function solve(), which returns the minimum of the solution. In this case, this is the minimum variance of the portfolio.
It also updates the vector $\mathbf{x}$.
We can check out the values of $x_A$ and $x_B$ that gave the minimum portfolio variance by using `x.value`
```
import cvxpy as cvx
def get_optimal_weights(covariance_returns, index_weights, scale=2.0):
    """
    Find the optimal weights.

    Minimizes the portfolio variance plus a scaled L2 penalty on the
    distance between the portfolio weights and the index weights.

    Parameters
    ----------
    covariance_returns : 2 dimensional Ndarray
        The covariance of the returns
    index_weights : Pandas Series
        Index weights for all tickers at a period in time
    scale : int
        The penalty factor for weights that deviate from the index

    Returns
    -------
    x : 1 dimensional Ndarray
        The solution for x
    """
    assert len(covariance_returns.shape) == 2
    assert len(index_weights.shape) == 1
    assert covariance_returns.shape[0] == covariance_returns.shape[1] == index_weights.shape[0]

    # One optimization variable per stock.
    n_stocks = covariance_returns.shape[1]
    weights = cvx.Variable(n_stocks)

    # Objective: portfolio variance (quadratic form x^T P x) plus the
    # scaled L2 distance between portfolio and index weights.
    variance_term = cvx.quad_form(weights, covariance_returns)
    tracking_term = cvx.norm(weights - index_weights, p=2, axis=0)
    objective = cvx.Minimize(variance_term + scale * tracking_term)

    # Long-only portfolio whose weights sum to one.
    constraints = [weights >= 0, sum(weights) == 1]

    # Solving updates `weights.value` with the optimal solution.
    cvx.Problem(objective=objective, constraints=constraints).solve()
    return weights.value
project_tests.test_get_optimal_weights(get_optimal_weights)
```
## Optimized Portfolio
Using the `get_optimal_weights` function, let's generate the optimal ETF weights without rebalancing. We can do this by feeding in the covariance of the entire history of data. We also need to feed in a set of index weights. We'll go with the most recent set of index weights.
```
raw_optimal_single_rebalance_etf_weights = get_optimal_weights(covariance_returns.values, index_weights.iloc[-1])
optimal_single_rebalance_etf_weights = pd.DataFrame(
np.tile(raw_optimal_single_rebalance_etf_weights, (len(returns.index), 1)),
returns.index,
returns.columns)
```
With our ETF weights built, let's compare it to the index. Run the next cell to calculate the ETF returns and compare it to the index returns.
```
optim_etf_returns = generate_weighted_returns(returns, optimal_single_rebalance_etf_weights)
optim_etf_cumulative_returns = calculate_cumulative_returns(optim_etf_returns)
project_helper.plot_benchmark_returns(index_weighted_cumulative_returns, optim_etf_cumulative_returns, 'Optimized ETF vs Index')
optim_etf_tracking_error = tracking_error(np.sum(index_weighted_returns, 1), np.sum(optim_etf_returns, 1))
print('Optimized ETF Tracking Error: {}'.format(optim_etf_tracking_error))
```
## Rebalance Portfolio Over Time
The single optimized ETF portfolio used the same weights for the entire history. This might not be the optimal weights for the entire period. Let's rebalance the portfolio over the same period instead of using the same weights. Implement `rebalance_portfolio` to rebalance a portfolio.
Rebalance the portfolio every n number of days, which is given as `shift_size`. When rebalancing, you should look back a certain number of days of data in the past, denoted as `chunk_size`. Using this data, compute the optimal weights using `get_optimal_weights` and `get_covariance_returns`.
```
def rebalance_portfolio(returns, index_weights, shift_size, chunk_size):
    """
    Get weights for each rebalancing of the portfolio.

    Parameters
    ----------
    returns : DataFrame
        Returns for each ticker and date
    index_weights : DataFrame
        Index weight for each ticker and date
    shift_size : int
        The number of days between each rebalance
    chunk_size : int
        The number of days to look in the past for rebalancing

    Returns
    -------
    all_rebalance_weights : list of Ndarrays
        The ETF weights for each point they are rebalanced
    """
    assert returns.index.equals(index_weights.index)
    assert returns.columns.equals(index_weights.columns)
    assert shift_size > 0
    assert chunk_size >= 0

    all_rebalance_weights = []
    # Step forward `shift_size` days at a time, starting at the first day
    # where a full `chunk_size` lookback window of returns is available.
    for end in range(chunk_size, len(returns), shift_size):
        start = end - chunk_size
        # NOTE: the previous guard (`if shape[0] - chunk_size < 0: continue`)
        # was removed — the slice [start:end] always contains exactly
        # `chunk_size` rows here, so it could never trigger.
        chunk_covariance = get_covariance_returns(returns.iloc[start:end])
        # Optimize against the most recent index weights in the window.
        optimal_weights = get_optimal_weights(chunk_covariance, index_weights.iloc[end - 1])
        all_rebalance_weights.append(optimal_weights)
    return all_rebalance_weights
project_tests.test_rebalance_portfolio(rebalance_portfolio)
```
Run the following cell to rebalance the portfolio using `rebalance_portfolio`.
```
chunk_size = 250
shift_size = 5
all_rebalance_weights = rebalance_portfolio(returns, index_weights, shift_size, chunk_size)
```
## Portfolio Turnover
With the portfolio rebalanced, we need to use a metric to measure the cost of rebalancing the portfolio. Implement `get_portfolio_turnover` to calculate the annual portfolio turnover. We'll be using the formulas used in the classroom:
$ AnnualizedTurnover =\frac{SumTotalTurnover}{NumberOfRebalanceEvents} * NumberofRebalanceEventsPerYear $
$ SumTotalTurnover =\sum_{t,n}{\left | x_{t,n} - x_{t+1,n} \right |} $ Where $ x_{t,n} $ are the weights at time $ t $ for equity $ n $.
$ SumTotalTurnover $ is just a different way of writing $ \sum \left | x_{t_1,n} - x_{t_2,n} \right | $
```
def get_portfolio_turnover(all_rebalance_weights, shift_size, rebalance_count, n_trading_days_in_year=252):
    """
    Calculate the annualized portfolio turnover.

    AnnualizedTurnover = (SumTotalTurnover / NumberOfRebalanceEvents)
                         * NumberOfRebalanceEventsPerYear
    where SumTotalTurnover is the sum of absolute weight changes between
    consecutive rebalance events, summed over all tickers.

    Parameters
    ----------
    all_rebalance_weights : list of Ndarrays
        The ETF weights for each point they are rebalanced
    shift_size : int
        The number of days between each rebalance
    rebalance_count : int
        Number of times the portfolio was rebalanced
    n_trading_days_in_year: int
        Number of trading days in a year

    Returns
    -------
    portfolio_turnover : float
        The portfolio turnover
    """
    assert shift_size > 0
    assert rebalance_count > 0

    # SumTotalTurnover: |x_t - x_{t+1}| summed over all consecutive
    # pairs of rebalance weights and all tickers.
    sum_total_turnover = sum(
        np.abs(np.asarray(next_weights) - np.asarray(prev_weights)).sum()
        for prev_weights, next_weights in zip(all_rebalance_weights[:-1], all_rebalance_weights[1:]))

    # Rebalancing every `shift_size` trading days implies this many
    # rebalance events per year.
    rebalance_events_per_year = n_trading_days_in_year / shift_size

    return sum_total_turnover / rebalance_count * rebalance_events_per_year
project_tests.test_get_portfolio_turnover(get_portfolio_turnover)
```
Run the following cell to get the portfolio turnover from `get_portfolio_turnover`.
```
print(get_portfolio_turnover(all_rebalance_weights, shift_size, len(all_rebalance_weights) - 1))
```
That's it! You've built a smart beta portfolio in part 1 and did portfolio optimization in part 2. You can now submit your project.
## Submission
Now that you're done with the project, it's time to submit it. Click the submit button in the bottom right. One of our reviewers will give you feedback on your project with a pass or not passed grade. You can continue to the next section while you wait for feedback.
| github_jupyter |
```
import imp
imp.reload(lamb.parsing)
reload_lamb()
```
# Continuations and scope
## Notebook author: Kyle Rawlins
Based on Chris Barker, "[Continuations and the Nature of Quantification](http://www.semanticsarchive.net/Archive/902ad5f7/barker.continuations.pdf)", Natural Language Semantics, 2002.
This notebook implements a version of Barker's account of scope using continuations. Barker develops a semantic account of quantifier scope that does not rely at all on LF manipulations etc., but rather complicates the types of lexical items and composition rules in a systematic way to make them more general. This notebook develops the analysis in roughly the sequence found in Barker's paper; first I show how to continuize ordinary meanings, and how to write a quantifier in this framework. I then turn to scope ambiguities, scope freezing, and finally inverse linking / QDP-within-QDP cases.
Initial note: because I do not use rule-to-rule translation, and instead use general composition rules, the predictions of the system in terms of possible composition paths are somewhat closer to those developed in Yusuke Kubota and Wataru Uegaki, "[Continuation-based semantics for Conventional Implicatures: The case of Japanese benefactives](http://elanguage.net/journals/salt/article/view/19.18)", Proceedings of SALT 19, 2009. Basically, a system with general rules tends to overgenerate possible composition paths that converge on the same meaning; as far as I am aware the resultant S-level meanings (filtered to be the appropriate type) are the same as Barker's system. I will note several places where this is relevant.
Continuations are notoriously hard to explain, and I will do so here mainly by example. I recommend using the notebook to inspect objects that are mysterious, check on the details of derivations, etc.
An ordinary meaning can be continuized by turning it into a function from its continuation to itself applied as an argument to the continuation. The continuation represents something like the "next state" of the computation. The continuation of X is always a function from X to type t; the output type is because all derivations (by assumption) end in ordinary type t.
The effect is perhaps easiest to see when continuizing type t, exemplified by `raining1` (the proposition that it is raining) and `raining2` (its continuized form) below. The continuation of something of ordinary type `t` is a function that maps that type to the output type, namely `t`. Crucially, if the identity function for `<t,t>` is combined via function application with a continuized denotation of this type, we always get back its ordinary denotation. This is always true of the sentence-level meanings, which I assume to be of type `<<t,t>,t>` following Barker. The identity function can be thought of as an empty context of sorts.
Two more examples for type e, and type `<e,t>`, are given as well.
```
%%lamb
||raining1|| = Raining_t # reminder: hit shift-enter to run
||raining2|| = L f_<t,t> : f(Raining_t)
||john1|| = John_e
||john2|| = L f_<e,t> : f(John_e)
||cat1|| = L x_e : Cat(x)
||cat2|| = L f_<<e,t>,t> : f(L x_e : Cat(x))
```
Mapping any ordinary denotation to its continuation is relatively straightforward at a mechanical level, at least. `econt` below illustrates a combinator that does this for type e.
```
%%lamb
econt = L x_e : L f_<e,t> : f(x)
econt(john1.content).reduce_all() # we get back the content of john2
```
The following function generalizes this; given any type it constructs the continuization combinator for that type. This is illustrated using `cat1`.
```
%%lamb
continuize = L x_X : L f_<X,t> : f(x)
```
This function could be used for e.g. constructing a typeshift or unary composition operation (as in the Kubota and Uegaki system). Here I will use it slightly differently, to construct a 'lexical transform' that can be applied in metalanguage definitions. This is indicated by the =<> notation below.
```
def continuize_lex(te):
    """Lexical transform: continuize a metalanguage expression and fully reduce it."""
    return continuize(te).reduce_all()

#continuize_lex(cat1.content)
# Register the transform under the name `cont` so it is usable as `=<cont>`.
lamb.parsing.eq_transforms["cont"] = continuize_lex
%%lamb
||cat2|| =<cont> L x_e : Cat(x)
||dance2|| =<cont> L x_e : Dance(x)
||john2|| =<cont> John_e
||the|| =<cont> L f_<e,t> : Iota x_e : f(x)
```
While in some cases standard FA can actually work and even produce effectively the right result, this isn't really what we want to do. Note for example that given the above types, and regular function application, there would be no way to compose `the` with `cat`.
A continuized apply is somewhat complicated and easiest to see through working examples. It needs to, effectively, sequence continuations. The following example is a combinator that continuizes just the composition of a property (ordinary type `<e,t>`) with its argument. `b` is roughly the decontinuized version of the function, and `c` the decontinuized version of the argument; `abar` is the continuation argument for the whole expression. Below this I illustrate this with `dance` and `john`, with some of the internal steps of the derivation revealed.
```
%%lamb
cfaet = L f_<<<e,t>,t>,t> : L arg_<<e,t>,t> : L abar_<t,t> : f(L b_<e,t> : arg(L c_e : abar(b(c))))
(cfaet(dance2.content)(john2.content)).reduce_all().derivation
```
To build a compositional rule along these lines we can generate a combinator like this at arbitrary values of function type and argument type. The following combinator looks quite complicated, but the heart of it is `b(c)`. `b` is the decontinuized version of the function `f`, and `c` is the decontinuized version of the argument `arg`.
*Note*: Barker implements application as a set of rule-to-rule (i.e. category specific) operations. I'm not personally a fan of this style (and in any case the lambda notebook doesn't currently have the infrastructure) so I will implement things via general composition rules. This is important to keep in mind, since the implementation here overgenerates quite a bit in consequence. This is very similar to the Kubota and Uegaki system mentioned in the intro.
```
%%lamb
contapply = L f_Z : L arg_Z1 : L abar_Z2 : f(L b_<X,X1> : arg(L c_X : abar(b_<X,X1>(c_X))))
```
Here are a few examples of generated combinators in action.
```
contapply(cat2.content)(john2.content)
contapply(cat2.content)(john2.content).reduce_all()
contapply(continuize(cat1.content))(continuize(john1.content)).reduce_all()
contapply(the.content)(cat2.content)
contapply(the.content)(cat2.content).reduce_all()
```
Add this operation as a composition operation named `CA`. Below this are a few examples of the rule in action.
```
system = lang.td_system.copy()
system.remove_rule("FA")
system.remove_rule("PA")
system.remove_rule("PM")
#system.add_rule(ca_op)
system.add_binary_rule(contapply, "CA")
lang.set_system(system)
system
(john2 * dance2).tree()
%%lamb
||saw|| =<cont> L y_e : L x_e : Saw(x,y)
||mary|| =<cont> Mary_e
john2 * (saw * mary)
(john2 * (saw * mary)).tree()
```
### Quantification
At this point it is time to turn to quantifiers. Items like `everyone` have continuized types, but are not generated by continuizing an ordinary meaning. Rather, they are written as continuized types that manipulate their continuations. In fact, their standard GQ entry is their continuized entry. For comparison, a continuized version of ordinary "everyone" is given as `everyone0`. (All of these ignore animacy.) While these two entries for `everyone` get the same result in subject position (shown below), they do it in different ways.
```
%%lamb
||someone|| = L xbar_<e,t> : Exists x_e : xbar(x)
||everyone|| = L xbar_<e,t> : Forall x_e : xbar(x)
||everyone0|| =<cont> L f_<e,t> : Forall x_e : f(x)
everyone * (saw * mary)
everyone0 * (saw * mary)
```
`everyone0` will not work in object position (as in standard approaches cf. Heim and Kratzer), but the Barker's versions will, effectively for "free" (once the infrastructure is accepted). The first example shows what happens with the continuized ordinary generalized quantifer; the resulting errors are generated inside the two possible continuized apply combinators. The other examples demonstrate Barker's quantifiers in object position.
```
(saw * everyone0)
mary * (saw * everyone)
everyone * (saw * someone)
```
Multiple quantifiers work as well, shown above. This generates inverse scope; we will attend to surface scope shortly. I started with inverse scope because Barker does, but I'm not aware of any real significance to this choice.
To get surface scope, we need a second version of application which prioritizes continuations differently.
```
%%lamb
contapply2 = L f_Z : L arg_Z1 : L abar_Z2 : arg(L c_X : f(L b_<X,X1>: abar(b_<X,X1>(c_X))))
system = lang.td_system.copy()
system.remove_rule("FA")
system.remove_rule("PA")
system.remove_rule("PM")
system.add_binary_rule(contapply, "CA")
system.add_binary_rule(contapply2, "CA2")
lang.set_system(system)
system
```
And on to the interesting stuff. In general each of these rules can apply to any continuized type, so now we do overgenerate the same result; the lambda notebook will collapse these because they are equivalent. (Barker overgenerates less, because of his use of rule-to-rule translation.) All the results are right, so this isn't really a problem. When there are multiple quantifiers, we generate both readings.
```
everyone * (saw * mary)
everyone * (saw * someone)
(someone * (saw * everyone))
```
If you're wondering about the overgeneration, this can be inspected in a number of ways, but the easiest here is with a tree. The tree will show alternative composition operations that lead to the same result. For the inverse scope reading, either CA or CA2 can apply to compose the verb with the object:
```
(someone * (saw * everyone))[1].tree()
```
What about quantificational determiners? Barker presents two treatments. In the text, regular FA is allowed to combine a determiner with its complement (in a rule-to-rule fashion). In the appendix, a different general treatment not requiring FA, but using choice functions, is presented. For the moment I stick to the text version, and allow FA as a general possibility (so I will overgenerate more than Barker did, a la Kubota and Uegaki). Later we'll need the choice function version.
```
%%lamb
||every|| = L pbar_<<<e,t>,t>,t> : L xbar_<e,t> : pbar(L f_<e,t> : Forall x_e : (f(x) >> xbar(x)))
system = lang.td_system.copy()
#system.remove_rule("FA")
system.remove_rule("PA")
system.remove_rule("PM")
system.add_binary_rule(contapply, "CA")
system.add_binary_rule(contapply2, "CA2")
lang.set_system(system)
def tfilter_fun(i):
    # Keep only derivations of the continuized sentence type <<t,t>,t>.
    return i.type == lang.tp("<<t,t>,t>")

tfilter = lang.CRFilter("S-filter", tfilter_fun)
```
The latter part of the above box prunes down on a bit of overgeneration; any derivation that does not result in a sentence type (e.g. `<<t,t>,t>` in a continuized setting) is eliminated.
Another form of overgeneration is that we can see that the CA rule can also apply in D-N combination, though the derivation resulting from this choice won't converge so it doesn't especially matter.
```
(every * cat2).tree()
r = (every * cat2) * (saw * someone)
tfilter(r)
r[1].tree()
```
### Scope bounding
In order to make tensed S nodes a scope island, Barker provides a different composition rule for S$\rightarrow$NP VP nodes that blocks continuation passing. In a setting with generalized composition rules, this needs to be done a bit differently. One can define an operator that performs the same function as this rule, and this operator might be lexically instantiated by e.g. `that`. I've named this operator `Disrupt`:
```
%%lamb
||Disrupt|| = L s_<<t,t>,t> : L abar_<t,t> : abar(s(L p_t : p))
```
It is actually somewhat hard to find examples that really use this in an extensional setting with no binding/traces; Barker provides conjunction. I'm not (currently) treating conjunction in this fragment so we must search for something a little different. One relevant case is the simplest analysis of embedding predicates like `it is false that` as negation. Empirically, quantifiers cannot scope over this, though they can scope over ordinary negation.
1. It is not true that some student left. ($\neg >> \exists$)
2. Some student didn't leave. ($\exists >> \neg$, or marginally $\neg >> \exists$)
```
%%lamb
||iift|| =<cont> L p_t : ~p
```
We can first see that Disrupt has a non-substantive but visible effect on simple quantificational sentences. In one case, the quantifier scopes under the continuation, in the other case, over.
```
tfilter(Disrupt * (someone * dance2))
tfilter(someone * dance2)
```
This now does something interesting when `iift` (_it is false that_) composes with or without Disrupt:
```
tfilter(iift * (someone * dance2))
r2 = tfilter(iift * (Disrupt * (someone * dance2)))
r2
```
Negation in these derivations is scopally inert, and quantifiers scope over it. In fact this reveals an interesting property of this system: whatever items are scopally potent must scope over whatever material is scopally inert, up to operators like Disrupt or other scopal elements.
If you think about scope as a form of '_projective meaning_', then this means that in a framework like the present with only _at-issue_ meaning as the other choice, scopal elements project maximally. This fact was exploited by Kubota and Uegaki in using continuations for Potts-style Conventional Implicature (CI) content, which should project maximally. It is worth noting, however, that as far as I can see scope is _not_ CI content as such, and we would certainly not want scopal ambiguities to interact with projective meanings in the way that combining the two uses of continuations would involve. (At the least, tensed finite clauses are not generally a scope island for CI content.) In summary, it is currently unclear (to me) that continuations can be used as a _general_ mechanism for projective meaning displacement in a compositional semantics, because there are different empirical properties for different types of projective meaning; I don't yet see how it could account for both quantifier scope and CI projection at the same time.
Moving on, one can generate a non-scopally-inert negation by using a similar trick to what was performed on the quantifiers, i.e. applying the content of the item after the continuation. This is shown below:
```
%%lamb
||sneg|| = L f_<<t,t>,t> : ~ f(L p_t : p)
```
Now negation of this form must take widest form w.r.t. scopally inert elements, but will scope ambiguously w.r.t. scopal elements.
```
tfilter(sneg * (someone * dance2))
```
### Recursive DPs and inverse linking
The LF analysis of scope has in general had a hard time with the scope of DPs within DPs. First, in many cases the judgments aren't very clear. But to the extent that they are clear, one must block some of the readings. Barker's proposal is that the possible readings just fall out of the architecture of continuation-based scope. Let's see how this works.
```
%%lamb
||no|| = L pbar_<<<e,t>,t>,t> : L xbar_<e,t> : pbar(L f_<e,t> : (~ (Exists x_e : (f(x) & xbar(x)))))
||a|| = L pbar_<<<e,t>,t>,t> : L xbar_<e,t> : pbar(L f_<e,t> : (Exists x_e : (f(x) & xbar(x))))
||fromP|| =<cont> L x_e : L f_<e,t> : L y_e : f(y) & From(y,x)
||france|| =<cont> France_e
||fcountry|| =<cont> L x_e : ForeignCountry(x)
```
Just as a sanity check, something like "no cat from france danced" should compose as is, and generate one scoping. This works!
```
tfilter((no * (cat2 * (fromP * france))) * dance2)
```
Now the real test: what happens with
1. No cat from a foreign country danced.
2. No cat from a foreign country saw someone.
```
r = tfilter((no * (cat2 * (fromP * (a * fcountry)))) * dance2)
r
```
Failure! Only one scoping is generated. What's going on?
It turns out that the hack/presentational strategy used in the body of Barker's paper to combine NPs with Ds via regular function application doesn't allow for inverse scoping. This is unsurprising when you think about it as this operation's effect on scope is a bit like what happens with `Disrupt`: continuations are effectively trapped.
We'll have to move to the version in Barker's appendix. Before doing that, you may want to inspect what composition paths are being found, by looking at `r`.
```
r[0].tree()
```
In the appendix, Barker treats quantificational determiners as quantifiers over choice functions of type `<<e,t>,e>`, that is, functions that map properties into individuals. It turns out, somewhat magically at first glance, that in consequence quantificational determiners are the right type to compose with property-denoting sisters via `CA` or `CA2` and generate a GQ type. This requires staring at the derivation for a while but the basic idea is that the continuation of a GQ should be type `<e,t>`, and so its input needs to be type `e`, and a choice function for property types bridges this need with the continuized-property-denoting sister.
Strictly speaking, one should do this for `everyone` and `someone` as well, but it is pedagogically simpler not to, so I'll leave them as is.
```
%%lamb
||a|| = L dbar_<<<e,t>,e>,t> : Exists f_<<e,t>,e> : dbar(f)
||no|| = L dbar_<<<e,t>,e>,t> : ~(Exists f_<<e,t>,e> : dbar(f))
||every|| = L dbar_<<<e,t>,e>,t> : (Forall f_<<e,t>,e> : dbar(f))
every * cat2
(every * cat2)[0].tree(derivations=True)
```
It is worth pausing for a moment to contemplate the way this works in the above derivation.
With this framework we can return to our set of inverse linking examples. Note that some of these can be extremely slow to render and we are getting an exponential explosion because CA and CA2 can both apply at any stage; this can be filtered if necessary using the `eliminate_dups` function on `CompositionResult`s, but it is instructive to see all composition paths so it is not done by default.
```
tfilter((no * (cat2 * (fromP * france))) * dance2)
r = tfilter((no * (cat2 * (fromP * (a * fcountry)))) * dance2)
r
```
Three quantifiers:
```
r[0].tree()
r2 = tfilter((no * (cat2 * (fromP * (a * fcountry)))) * (saw * everyone))
r2
```
We have:
* 0: everyone >> no cat >> a foreign country
* 1: no cat >> a foreign country >> everyone
* 2: everyone >> a foreign country >> no cat
* 3: a foreign country >> no cat >> everyone
What is missing?
* no cat >> everyone >> a foreign country
* a foreign country >> everyone >> no cat
These are exactly the cases where something would split the scope of the two quantifiers in the subject DP, and empirically, these readings are supposed to be absent in general (this observation is due to May). This demonstrates what Barker calls the _Integrity Constraint_, which is that when scopal elements form a constituent together, they can't scope independently w.r.t. scopal elements outside that constituent. Intuitively, their scope is compositionally determined inside the smallest constituent they are a member of, and can't be changed or interrupted after that.
Arguably, the derivation of this constraint from extremely general principles is the most important feature of this analysis.
This concludes this fragment; I have left out discussion of conjunction, and of the mathematical parts of Barker's paper.
| github_jupyter |
```
import xshinnosuke as xs
```
# Simple Add Multiply, etc
```
# Demo: build a small computation graph and inspect intermediate gradients.
xs.seed(0)  # fix the RNG so the printed values are reproducible
a = xs.randn(3, 4, requires_grad=True)
b = xs.ones(4, 2, requires_grad=True)
c = xs.randn(1, 3, requires_grad=True)
y = xs.tensor(10, requires_grad=True)
# Per the shape annotations below, `*` appears to act as matrix multiply
# for 2-D operands here — NOTE(review): confirm against xshinnosuke's
# operator semantics.
# (3, 2) = (3, 4) * (4, 2)
x1 = a * b
# (2, 3) = (2, 3) * (1, 3)
x2 = x1.t() * c
# (2, )
x3 = x2.mean(axis=1)
x4 = x3.sum()
loss = (x4 - y) ** 2
print(loss)
# retain_grad() keeps gradients on these non-leaf tensors so they can be
# inspected after backward().
x4.retain_grad()
x3.retain_grad()
x2.retain_grad()
x1.retain_grad()
loss.backward()
print(x4.grad)
print(x3.grad)
print(x2.grad)
print(x1.grad)
```
# Fully Connected Demo
```
from xshinnosuke.layers import Dense
# Demo: a Dense layer and its manual equivalent produce the same loss
# and the same input gradients.
xs.seed(0)  # fix the RNG so both passes see identical data
inputs = xs.randn(1, 5, requires_grad=True)
fc_layer = Dense(out_features=2)
pred1 = fc_layer(inputs)
loss1 = pred1.sum()
print('loss1: ', loss1)
loss1.backward()
print('inputs grad: ', inputs.grad)
# Clear the gradient accumulated on `inputs` before the second pass.
inputs.zero_grad()
# Fetch the same weight and bias parameters the Dense layer used.
weight, bias = fc_layer.variables
# Manually apply the fully-connected operation: inputs @ weight + bias.
pred2 = inputs.dot(weight) + bias
loss2 = pred2.sum()
print('loss2: ', loss2)
loss2.backward()
print('inputs grad: ', inputs.grad)
```
# Convolutional Demo
```
from xshinnosuke.layers import Conv2D
# Demo: a Conv2D layer and a hand-rolled convolution loop produce the
# same loss and the same input gradients.
# (batch, channels, height, width)
inputs = xs.randn(1, 1, 5, 5, requires_grad=True)
kernel_size = 3
conv_layer = Conv2D(out_channels=1, kernel_size=kernel_size, use_bias=False)
pred1 = conv_layer(inputs)
loss1 = pred1.mean()
print('loss1: ', loss1)
loss1.backward()
print('inputs grad: ', inputs.grad)
# Clear the gradient accumulated on `inputs` before the manual pass.
inputs.zero_grad()
weight, _ = conv_layer.variables
# Output is 3x3: (5 - kernel_size + 1) with no padding and stride 1.
pred2 = xs.zeros(3, 3, requires_grad=True)
for i in range(3):
    for j in range(3):
        # Each output element is the elementwise product of a
        # kernel-sized input window with the weight, summed.
        pred2[i, j] = (inputs[:, :, i: i + kernel_size, j: j + kernel_size] * weight).sum()
loss2 = pred2.mean()
print('loss2: ', loss2)
loss2.backward()
print('inputs grad: ', inputs.grad)
```
# Pooling Demo
```
from xshinnosuke.layers import MaxPooling2D
# Demo: a MaxPooling2D layer and a hand-rolled max-pooling loop produce
# the same loss and the same input gradients.
# (batch, channels, height, width)
inputs = xs.randn(1, 1, 4, 4, requires_grad=True)
kernel_size = 2
stride = 2
pool_layer = MaxPooling2D(kernel_size=kernel_size, stride=stride)
pred1 = pool_layer(inputs)
loss1 = pred1.mean()
print('loss1: ', loss1)
loss1.backward()
print('inputs grad: ', inputs.grad)
# Clear the gradient accumulated on `inputs` before the manual pass.
inputs.zero_grad()
# Output is 2x2: a 4x4 input pooled with a 2x2 window at stride 2.
pred2 = xs.zeros(2, 2, requires_grad=True)
for i in range(2):
    for j in range(2):
        # Each output element is the max over one non-overlapping window.
        h_start = i * stride
        h_end = h_start + kernel_size
        w_start = j * stride
        w_end = w_start + kernel_size
        pred2[i, j] = inputs[:, :, h_start: h_end, w_start: w_end].max()
loss2 = pred2.mean()
print('loss2: ', loss2)
loss2.backward()
print('inputs grad: ', inputs.grad)
```
| github_jupyter |
# Deep Convolutional Generative Adversarial Network
**Learning Objectives**
- Build a GAN architecture (consisting of a generator and discriminator) in Keras
- Define the loss for the generator and discriminator
- Define a training step for the GAN using `tf.GradientTape()` and `@tf.function`
- Train the GAN on the MNIST dataset
## Introduction
This notebook demonstrates how to build and train a [Generative Adversarial Network](https://arxiv.org/abs/1406.2661) (GAN) to generate images of handwritten digits using a [Deep Convolutional Generative Adversarial Network](https://arxiv.org/pdf/1511.06434.pdf) (DCGAN).
GANs consist of two models which are trained simultaneously through an adversarial process. A *generator* ("the artist") learns to create images that look real, while a *discriminator* ("the art critic") learns to tell real images apart from fakes.

During training, the *generator* progressively becomes better at creating images that look real, while the *discriminator* becomes better at recognizing fake images. The process reaches equilibrium when the *discriminator* can no longer distinguish real images from fakes.

In this notebook we'll build a GAN to generate MNIST digits. This notebook demonstrates this process on the MNIST dataset. The following animation shows a series of images produced by the *generator* as it was trained for 50 epochs. The images begin as random noise, and increasingly resemble hand written digits over time.

## Import TensorFlow and other libraries
```
from __future__ import absolute_import, division, print_function, unicode_literals
try:
%tensorflow_version 2.x
except Exception:
pass
import tensorflow as tf
tf.__version__
# To generate GIFs
!python3 -m pip install -q imageio
import glob
import imageio
import matplotlib.pyplot as plt
import numpy as np
import os
import PIL
from tensorflow.keras import layers
import time
from IPython import display
```
## Load and prepare the dataset
For this notebook, we will use the MNIST dataset to train the generator and the discriminator. The generator will generate handwritten digits resembling the MNIST data.
```
# Load MNIST digits (labels are unused for GAN training); reshape to NHWC and
# rescale to [-1, 1] to match the generator's tanh output range.
(train_images, train_labels), (_, _) = tf.keras.datasets.mnist.load_data()
train_images = train_images.reshape(train_images.shape[0], 28, 28, 1).astype('float32')
train_images = (train_images - 127.5) / 127.5 # Normalize the images to [-1, 1]
BUFFER_SIZE = 60000
BATCH_SIZE = 256
```
Next, we define our input pipeline using `tf.data`. The pipeline below reads in `train_images` as tensor slices and then shuffles and batches the examples for training.
```
# Batch and shuffle the data
train_dataset = tf.data.Dataset.from_tensor_slices(train_images)
train_dataset = train_dataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE)
```
## Create the generator and discriminator models
Both our generator and discriminator models will be defined using the [Keras Sequential API](https://www.tensorflow.org/guide/keras#sequential_model).
### The Generator
The generator uses [tf.keras.layers.Conv2DTranspose](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Conv2DTranspose) (upsampling) layers to produce an image from a seed (random noise). We will start with a `Dense` layer that takes this seed as input, then upsample several times until we reach the desired image size of 28x28x1.
**Exercise.** Complete the code below to create the generator model. Start with a dense layer that takes as input random noise. We will create random noise using `tf.random.normal([1, 100])`. Use `tf.keras.layers.Conv2DTranspose` over multiple layers to upsample the random noise from dimension 100 to ultimately dimension 28x28x1 (the shape of our original MNIST digits).
Hint: Experiment with using `BatchNormalization` or different activation functions like `LeakyReLU`.
```
#TODO 1
def make_generator_model():
    """Build the DCGAN generator: 100-dim noise vector -> 28x28x1 image in [-1, 1]."""
    # (filters, strides, activation) for each transposed-conv upsampling stage.
    deconv_specs = [
        (128, (1, 1), None),   # 7x7x256 -> 7x7x128
        (64, (2, 2), None),    # 7x7x128 -> 14x14x64
        (1, (2, 2), 'tanh'),   # 14x14x64 -> 28x28x1; tanh maps into [-1, 1]
    ]
    model = tf.keras.Sequential()
    # Project the noise vector so it can be reshaped into a 7x7x256 tensor.
    model.add(layers.Dense(7*7*256, use_bias=False, input_shape=(100,)))
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())
    model.add(layers.Reshape((7, 7, 256)))
    for filters, strides, activation in deconv_specs:
        model.add(layers.Conv2DTranspose(
            filters, (5, 5), strides=strides, padding='same',
            use_bias=False, activation=activation))
        # Intermediate stages get batchnorm + LeakyReLU; the tanh output stage does not.
        if activation is None:
            model.add(layers.BatchNormalization())
            model.add(layers.LeakyReLU())
    assert model.output_shape == (None, 28, 28, 1)
    return model
```
Let's use the (as yet untrained) generator to create an image.
```
generator = make_generator_model()
noise = tf.random.normal([1, 100])
generated_image = generator(noise, training=False)
plt.imshow(generated_image[0, :, :, 0], cmap='gray')
```
### The Discriminator
Next, we will build the discriminator. The discriminator is a CNN-based image classifier. It should take in an image of shape 28x28x1 and return a single classification indicating if that image is real or not.
**Exercise.** Complete the code below to create the CNN-based discriminator model. Your model should be binary classifier which takes as input a tensor of shape 28x28x1. Experiment with different stacks of convolutions, activation functions, and/or dropout.
```
#TODO 1.
def make_discriminator_model():
    """Build the DCGAN discriminator: 28x28x1 image -> one real/fake logit."""
    conv_filters = (64, 128)
    model = tf.keras.Sequential()
    for stage, n_filters in enumerate(conv_filters):
        conv_kwargs = dict(strides=(2, 2), padding='same')
        # Only the first layer needs the explicit input shape.
        if stage == 0:
            conv_kwargs['input_shape'] = [28, 28, 1]
        model.add(layers.Conv2D(n_filters, (5, 5), **conv_kwargs))
        model.add(layers.LeakyReLU())
        model.add(layers.Dropout(0.3))
    model.add(layers.Flatten())
    # Single logit: positive means "real", negative means "fake".
    model.add(layers.Dense(1))
    assert model.output_shape == (None, 1)
    return model
```
Using `.summary()` we can have a high-level summary of the generator and discriminator models.
```
make_generator_model().summary()
make_discriminator_model().summary()
```
Let's use the (as yet untrained) discriminator to classify the generated images as real or fake. The model will be trained to output positive values for real images, and negative values for fake images.
```
discriminator = make_discriminator_model()
decision = discriminator(generated_image)
print(decision)
```
## Define the loss and optimizers
Next, we will define the loss functions and optimizers for both the generator and discriminator models. Both the generator and discriminator will use the `BinaryCrossentropy` loss.
```
# This method returns a helper function to compute cross entropy loss
cross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)
```
### Discriminator loss
The method below quantifies how well the discriminator is able to distinguish real images from fakes.
Recall, when training the discriminator (i.e. holding the generator fixed) the loss function has two parts: the loss when sampling from the real data and the loss when sampling from the fake data. The function below compares the discriminator's predictions on real images to an array of 1s, and the discriminator's predictions on fake (generated) images to an array of 0s.
**Exercise.**
Complete the code in the method below. The `real_loss` should return the cross-entropy for the discriminator's predictions on real images and the `fake_loss` should return the cross-entropy for the discriminator's predictions on fake images.
```
#TODO 2
def discriminator_loss(real_output, fake_output):
    """Cross-entropy loss for the discriminator.

    Predictions on real images are compared against an all-ones target,
    predictions on generated images against all zeros; the two terms are summed.
    """
    loss_on_real = cross_entropy(tf.ones_like(real_output), real_output)
    loss_on_fake = cross_entropy(tf.zeros_like(fake_output), fake_output)
    return loss_on_real + loss_on_fake
```
### Generator loss
The generator's loss quantifies how well it was able to trick the discriminator. Intuitively, if the generator is performing well, the discriminator will classify the fake images as real (or 1). Here, we will compare the discriminators decisions on the generated images to an array of 1s.
**Exercise.**
Complete the code to return the cross-entropy loss of the generator's output.
```
#TODO 2
def generator_loss(fake_output):
    """Cross-entropy loss for the generator: it is rewarded when the
    discriminator labels its fakes as real (all-ones target)."""
    target = tf.ones_like(fake_output)
    return cross_entropy(target, fake_output)
```
### Optimizers for the generator and discriminator
Note that we must define two separate optimizers for the discriminator and the generator optimizers since we will train two networks separately.
```
generator_optimizer = tf.keras.optimizers.Adam(1e-4)
discriminator_optimizer = tf.keras.optimizers.Adam(1e-4)
```
### Save checkpoints
This notebook also demonstrates how to save and restore models, which can be helpful in case a long running training task is interrupted.
```
# Checkpoint both models and both optimizer states so an interrupted
# training run can be resumed.
checkpoint_dir = "./gan_training_checkpoints"
checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")
checkpoint = tf.train.Checkpoint(generator_optimizer=generator_optimizer,
                                 discriminator_optimizer=discriminator_optimizer,
                                 generator=generator,
                                 discriminator=discriminator)
```
## Define the training loop
Next, we define the training loop for training our GAN. Below we set up global variables for training.
```
# Training hyperparameters.
EPOCHS = 50
noise_dim = 100                 # dimensionality of the generator's input noise
num_examples_to_generate = 16   # 4x4 grid of sample images per epoch
# We will reuse this fixed seed over time so it's easier to visualize
# the generator's progress in the animated GIF.
seed = tf.random.normal([num_examples_to_generate, noise_dim])
```
The training loop begins with generator receiving a random seed as input. That seed is used to produce an image. The discriminator is then used to classify real images (drawn from the training set) and fakes images (produced by the generator). The loss is calculated for each of these models, and the gradients are used to update the generator and discriminator.
**Exercise.**
Complete the code below to define the training loop for our GAN. Notice the use of `tf.function` below. This annotation causes the function `train_step` to be "compiled". The `train_step` function takes as input a batch of images. In the rest of the function,
- `generated_images` is created using the `generator` function with `noise` as input
- apply the discriminator model to the `images` and `generated_images` to create the `real_output` and `fake_output` (resp.)
- define the `gen_loss` and `disc_loss` using the methods you defined above.
- compute the gradients of the generator and the discriminator using `gen_tape` and `disc_tape` (resp.)
Lastly, we use the `.apply_gradients` method to make a gradient step for the `generator_optimizer` and `discriminator_optimizer`
```
# TODO 3
@tf.function
def train_step(images):
    """Run one optimization step for both the generator and the discriminator.

    `images` is one batch of real training images. Each model's loss is
    recorded under its own GradientTape so the two sets of gradients are
    computed independently, then each optimizer applies its update.
    """
    noise = tf.random.normal([BATCH_SIZE, noise_dim])
    with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
        generated_images = generator(noise, training=True)
        real_output = discriminator(images, training=True)
        fake_output = discriminator(generated_images, training=True)
        gen_loss = generator_loss(fake_output)
        disc_loss = discriminator_loss(real_output, fake_output)
    # Gradients are pulled outside the tape context, once per model.
    gradients_of_generator = gen_tape.gradient(
        gen_loss, generator.trainable_variables)
    gradients_of_discriminator = disc_tape.gradient(
        disc_loss, discriminator.trainable_variables)
    generator_optimizer.apply_gradients(
        zip(gradients_of_generator, generator.trainable_variables))
    discriminator_optimizer.apply_gradients(
        zip(gradients_of_discriminator, discriminator.trainable_variables))
```
We use the `train_step` function above to define training of our GAN. Note here, the `train` function takes as argument the `tf.data` dataset and the number of epochs for training.
```
def train(dataset, epochs):
    """Train the GAN for `epochs` passes over `dataset`.

    After every epoch a 4x4 sample grid is rendered from the fixed global
    `seed`, and a checkpoint is written every 15 epochs.
    """
    for epoch in range(epochs):
        start = time.time()
        for image_batch in dataset:
            train_step(image_batch)
        # Produce images for the GIF as we go
        display.clear_output(wait=True)
        generate_and_save_images(generator,
                                 epoch + 1,
                                 seed)
        # Save the model every 15 epochs
        if (epoch + 1) % 15 == 0:
            checkpoint.save(file_prefix=checkpoint_prefix)
        print ('Time for epoch {} is {} sec'.format(
            epoch + 1, time.time()-start))
    # Generate after the final epoch
    display.clear_output(wait=True)
    generate_and_save_images(generator,
                             epochs,
                             seed)
```
**Generate and save images.**
We'll use a small helper function to generate images and save them.
```
def generate_and_save_images(model, epoch, test_input):
    """Run `model` on `test_input`, plot a 4x4 grid, and save it as a PNG."""
    # `training=False` so layers such as batchnorm run in inference mode.
    images = model(test_input, training=False)
    fig = plt.figure(figsize=(4, 4))
    for idx in range(images.shape[0]):
        plt.subplot(4, 4, idx + 1)
        # Undo the [-1, 1] normalization back to [0, 255] grayscale.
        plt.imshow(images[idx, :, :, 0] * 127.5 + 127.5, cmap='gray')
        plt.axis('off')
    plt.savefig('./gan_images/image_at_epoch_{:04d}.png'.format(epoch))
    plt.show()
```
## Train the model
Call the `train()` method defined above to train the generator and discriminator simultaneously. Note, training GANs can be tricky. It's important that the generator and discriminator do not overpower each other (e.g., that they train at a similar rate).
At the beginning of the training, the generated images look like random noise. As training progresses, the generated digits will look increasingly real. After about 50 epochs, they resemble MNIST digits. This may take about one or two minutes per epoch.
```
!test -d ./gan_images || mkdir ./gan_images/
# TODO 4
train(train_dataset, EPOCHS)
```
Restore the latest checkpoint.
```
checkpoint.restore(tf.train.latest_checkpoint(checkpoint_dir))
```
## Create a GIF
Lastly, we'll create a gif that shows the progression of our produced images through training.
```
# Display a single image using the epoch number
def display_image(epoch_no):
    """Open the saved generator-output PNG for the given epoch number."""
    path = './gan_images/image_at_epoch_{:04d}.png'.format(epoch_no)
    return PIL.Image.open(path)
display_image(EPOCHS)
```
Use `imageio` to create an animated gif using the images saved during training.
```
# Assemble the per-epoch PNGs into an animated GIF.
anim_file = 'dcgan.gif'
with imageio.get_writer(anim_file, mode='I') as writer:
    filenames = glob.glob('./gan_images/image*.png')
    filenames = sorted(filenames)
    last = -1
    for i,filename in enumerate(filenames):
        # Sub-sample frames on a square-root schedule so early (fast-changing)
        # epochs contribute more frames than later ones.
        frame = 2*(i**0.5)
        if round(frame) > round(last):
            last = frame
        else:
            continue
        image = imageio.imread(filename)
        writer.append_data(image)
    # Append the last file once more so the GIF lingers on the final image.
    image = imageio.imread(filename)
    writer.append_data(image)
import IPython
if IPython.version_info > (6,2,0,''):
    display.Image(filename=anim_file)
```
## Next steps
This tutorial has shown the complete code necessary to write and train a GAN. As a next step, you might like to experiment with a different dataset, for example the Large-scale Celeb Faces Attributes (CelebA) dataset [available on Kaggle](https://www.kaggle.com/jessicali9530/celeba-dataset). To learn more about GANs we recommend the [NIPS 2016 Tutorial: Generative Adversarial Networks](https://arxiv.org/abs/1701.00160).
Copyright 2020 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License
| github_jupyter |
```
import pandas as pd
import re
from orv_cleanup_utils import *
import numpy as np
# Show full frames when inspecting the raw report texts.
pd.set_option('display.max_rows', None)
pd.set_option('display.max_colwidth', None)
data_dir = '/home/common/regulation_data/parsed_reports/'
main_df = pd.read_csv(data_dir + 'main_df.csv')
# One-to-many companion tables live next to main_df.csv; every file that does
# not start with 'main' is such a table, keyed here by its file stem.
# NOTE(review): `os` is presumably re-exported by orv_cleanup_utils — confirm.
otm_tables_fnames = sorted([fn for fn in os.listdir(data_dir) if not fn.startswith('main')])
otm_tables = {fn[:-4]: pd.read_csv(data_dir + fn) for fn in otm_tables_fnames}
otm_tables.keys()
```
## Очистка и нормализация заполнений
```
def is_empty(text):
    """True if `text` is blank, the literal string 'nan', or only whitespace."""
    without_newlines = text.replace('\r', '').replace('\n', '').strip()
    return without_newlines == '' or text in ('', 'nan')
def is_hyphens(text):
    """True if `text` contains nothing but hyphens and whitespace."""
    return text.replace('-', '').strip() == ''
def is_underscores(text):
    """True if `text` contains nothing but underscores and whitespace."""
    return text.replace('_', '').strip() == ''
def is_junk(text):
    """True if the cell is effectively unfilled (blank, hyphens, or underscores)."""
    placeholder_checks = (is_empty, is_hyphens, is_underscores)
    return any(check(text) for check in placeholder_checks)
def clean_up(text):
    """Normalize one raw cell: lowercase, strip boilerplate and odd quoting.

    Returns 0 for junk fillings (blank / hyphen / underscore placeholders)
    so they can be told apart from real answers downstream.
    """
    text = str(text).strip().lower()
    text = text.replace('(место для текстового описания)', '')
    if is_junk(text):
        return 0
    text = text.rstrip('.')
    text = text.replace('«', '"').replace('»', '"')
    return text.replace('\t', '').replace('\r\n', '')
from rnnmorph.predictor import RNNMorphPredictor
predictor = RNNMorphPredictor(language="ru")
def use_yandexspeller(bad_str):
    """Fix spelling via the Yandex.Speller web service.

    Strips everything except word characters from the lowercased input,
    queries the service over HTTP, and returns the first suggested
    correction (or the input unchanged when no correction is offered).
    Returns None for empty input or on a non-200 HTTP status.

    NOTE(review): `requests` and `json` are presumably brought into scope by
    the star import from orv_cleanup_utils — confirm.
    """
    post_str = re.sub(r'[^\w|\d]', '', bad_str.lower())
    if not post_str:
        return None
    with requests.Session() as s:
        url = 'https://speller.yandex.net/services/spellservice.json/checkText?text='
        r = s.get(url + post_str)
        if r.status_code != 200:
            print(f'GET error: {r.status_code}')
            return None
        # The service answers JSON containing unicode escapes; decode first.
        jsn = r.content.decode('unicode-escape')
        jsn_dict = json.loads(jsn)
        if not jsn_dict:
            return bad_str
        ya_answer = jsn_dict[0]['s']
        if ya_answer:
            return ya_answer[0]
        else:
            return bad_str
def tokenize(sentence):
    """Split a sentence into whitespace-separated tokens.

    Opening parentheses are turned into commas and closing parentheses are
    dropped before splitting, mirroring how the morphological tagger expects
    its input.
    """
    sentence = re.sub(r'\(', ',', sentence)
    sentence = re.sub(r'\)', '', sentence)
    # The original code also ran
    #   re.sub('(?<! )(?=[.,!?()])|(?<=[.,!?()])(?! )', r'', sentence)
    # — a zero-width lookaround pattern with an EMPTY replacement, which
    # replaces empty matches with nothing and is therefore a no-op.
    # It has been removed as dead code; behavior is unchanged.
    return sentence.split()
def tag(sentence):
    """Morphologically tag every token of `sentence` with the RNNMorph predictor."""
    return predictor.predict(tokenize(sentence))
def preparse(sentence):
    """Tag a sentence and serialize each word as 'lemma pos=POS TAG...' tokens.

    The Degree feature is stripped from every tag; for adjectives the Gender
    feature is stripped as well. Punctuation contributes no POS marker.
    """
    tagged_words = tag(sentence)
    degree_re = re.compile('(.*)\|Degree=\w+(.*)')
    gender_re = re.compile('(.*)\|Gender=\w+(.*)')
    res = ''
    for word in tagged_words:
        word_tag = word.tag
        # Drop the Degree=... feature wherever it occurs in the tag string.
        if 'Degree' in word_tag:
            word_tag = degree_re.sub(r"\1\2", word_tag)
        word_pos = 'pos=' + word.pos#.lower().capitalize()
        if 'PUNCT' in word_pos:
            word_pos = ''
        if 'ADJ' in word_pos:
            # Gender is not informative for adjectives here; drop it too.
            if 'Gender' in word_tag:
                word_tag = gender_re.sub(r"\1\2", word_tag)
        word_tags = [
            word.normal_form,
            word_pos,
            word_tag.replace('|', ' ').replace('_', ''),
        ]
        res += ' ' + ' '.join([tag for tag in word_tags if tag])
    res = res.strip().replace(' ', ' ')
    return res
def norm(sentence):
    """Lemmatize: join the normal form of every tagged word with spaces.

    Returns None when `sentence` is None.
    """
    if sentence is None:
        return
    return ' '.join(word.normal_form for word in tag(sentence))
# Clean, spell-correct, and lemmatize the two free-text deadline columns:
# the supposed enforcement date and the goal-achievement deadlines.
sup_date = main_df['supposed_date: Предполагаемая дата вступления в силу проекта акта']
sup_date = sup_date.apply(clean_up)
sup_date_norm = [norm(use_yandexspeller(str(i))) for i in sup_date]
goals = otm_tables['goals']
goals_timing = goals['Установленные сроки достижения целей предлагаемого регулирования']
goals_timing = goals_timing.apply(clean_up)
goals_timing_norm = [norm(use_yandexspeller(str(i))) for i in goals_timing]
```
## Вычленение дат
Как в числовом формате ('01.01.2021', '1 января 2021', '1 квартал 2021', '2021', и тд), так и в виде периода ('в течение 180 дней со дня принятия нпа').
Для этого убираем из заполнений все прочие цифры, в том числе даты, не относящиеся к срокам достижения целей регулирования.
```
def let_to_num(s):
    """Rewrite spelled-out Russian numerals ('шесть месяц', 'девяносто день',
    ...) as digits, and normalize a few quarter / roman-numeral spellings."""
    s = s.replace('1 один', '1').replace('6 шесть', '6')
    # (trigger regex, spelled-out form, replacement), applied in this order.
    numeral_rules = (
        (r'девяносто\s+день', 'девяносто', '90'),
        (r'сто\s+восемьдесят\s+день', 'сто восемьдесят', '180'),
        (r'шесть\s+месяц', 'шесть', '6'),
        (r'6\s+месячный', 'месячный', 'месяц'),
        (r'пять\s+месяц', 'пять', '5'),
        (r'триста\s+день', 'триста', '300'),
        (r'тридцать\s+день', 'тридцать', '30'),
        (r'пять\s+месяц', 'пять', '5'),
        (r'десять\s+день', 'десять', '10'),
        (r'(три\s+день)|(три\s+месяц)|(три\s+год)', 'три', '3'),
        (r'(два\s+месяц)|(два\s+год)', 'два', '2'),
        (r'(один\s+месяц)|(один\s+год)', 'один', '1'),
    )
    for trigger, spelled, digits in numeral_rules:
        if re.search(trigger, s):
            s = s.replace(spelled, digits)
    s = s.replace('но не ранее чем по истечение один месяц', '')
    s = s.replace('1v', 'iv').replace('четвёртый', 'четвертый').replace('1 й квартал', '1 квартал')
    return s
def insert_dot(s):
    """Turn a compact 6-digit run like '151220' into a dotted date '15.12.2020'.

    NOTE: the year is hard-coded to 2020 regardless of the trailing digits.
    """
    return re.sub(r'(\d{2})(\d{2})(20)', r'\1.\2.2020', s)

def insert_dots(s):
    """Apply `insert_dot` only to a known whitelist of compact 2020 dates."""
    known_compact_dates = r'(151220)|(210220)|(010920)|(151220)|(301220)|(010820)|(311220)|(010320)|(010420)'
    if re.search(known_compact_dates, s):
        s = insert_dot(s)
    return s
def drop_trash (s):
    """Remove digit-bearing fragments that are not deadline dates: law
    numbers, percentages, technology names, and specific document ids seen
    in the data, so the digits that remain can be parsed as dates."""
    s = s.replace('70 лет', '').replace('70 год', '')
    s = s.replace('5g', '')
    # Generic patterns: "N times", "N-FZ" law refs, percentages, decree numbers.
    s = re.sub(r'в\s+\d*\s+раз', '', s)
    s = re.sub(r'\d*\s+фз', '', s)
    s = re.sub(r'\d*\s+%', '', s)
    s = re.sub(r'на\s+100', '', s)
    s = re.sub(r'приказ\s+\d*', '', s)
    s = re.sub(r'г\s+\d*', '', s)
    s = re.sub(r'\d*\s+диметилгидразин', '', s)
    s = re.sub(r'\d*\s+млрд', '', s)
    s = re.sub(r'\d\s+с\s+', '',s )
    s = re.sub(r'\d*\.\s+в', '', s)
    # One-off artifacts observed in the corpus (Wi-Fi standard, emergency
    # phone number, specific orders and clause lists).
    s = s.replace('802.11ax','')
    s = s.replace('"112"', '')
    s = s.replace('календарный день со', '')
    s = s.replace('24.01.2019 № 903п-п9', '').replace('24.01.2019 № вм-п9-8992', '')
    s = re.sub(r'(ммв-\d*-\d*/\d*@/\d*)|(ммв-\d*-\d*/\d*@)', '', s)
    s = s.replace('пункт 1, 7.1.1.- 7.1.4., 7.1.13 - 7.1.14, 7.1.31, 7.1.37 - 7.1.38., 7.3.3, 7.4.4, 7.19.1', '')
    s = s.replace('2-й поколение', '').replace('на 3 и более поколение', '')
    s = s.replace('2387-р', '')
    s = s.replace('мм-3-09/536@', '')
    return (s)
def norm_before_strip (s):
    """Normalize period wording before whitespace is stripped: split glued
    year ranges, translate Russian period units into the 'day' / 'month' /
    'year' markers used downstream, and unify date separators."""
    # '12 год' / '35 год' are glued '1-2' / '3-5' year ranges.
    s = s.replace('12 год', '1-2 год').replace('35 год', '3-5 год')
    s = s.replace('поздний 20 март года','').replace('не поздний 20 май года','').replace('в течение 5 день со день', '')
    if re.search('(ii\sкв)|(iii\sкв)|(iv\sкв)|(\d\sкв)', s):
        s = s.replace('кв','квартал')
    # Day / month / year unit markers (only when preceded by a number).
    if re.search('\d\sдень', s):
        s = s.replace('день','day')
    if re.search('\d\sдней', s):
        s = s.replace('дней','day')
    if re.search('\d\sрабочий\sдень', s):
        s = s.replace('день','day')
    if re.search('календарный\sдень\sс', s):
        s = s.replace('день','day')
    if re.search('\d\s+месяц', s):
        s = s.replace('месяц','month')
    if re.search(r'(\s+\d{2}\s+год)', s):
        s = s.replace('год','year')
    if re.search(r'(\s+\d{1}\s+год)', s):
        s = s.replace('год','year')
    if re.search(r'(\d{1}-\d{1}\s+год)', s):
        s = s.replace('год','year')
    if re.search(r'(\d{1}\s+лет)', s):
        s = s.replace('лет','year')
    # "within a year" with no explicit count means one year.
    if re.search(r'(течение\s+год)|(истечение\s+год)', s):
        s = s.replace('год','1 year')
    s = s.replace('10 год', '10 year')
    # Unify '/'-separated dates to '.'-separated.
    if re.search(r'(\d{2}).(\d{2}).(\d{4})', s):
        s = s.replace('/','.')
    return (s)
def strip_spaces(s):
    """Delete all whitespace (spaces, tabs, newlines) from `s`."""
    return ''.join(s.split())
def months_num (s):
    """Replace Russian month names, quarters, and half-years with the numeric
    '.MM.' marker of their characteristic (usually last) month.

    Input is expected to already have its whitespace stripped, hence the
    glued forms like 'ivквартал'.
    """
    s = s.replace('январь', '.01.')
    s = s.replace('февраль', '.02.')
    s = s.replace('март', '.03.')
    s = s.replace('апрель', '.04.')
    s = s.replace('май', '.05.')
    s = s.replace('июнь', '.06.')
    s = s.replace('июль', '.07.')
    s = s.replace('август', '.08.')
    s = s.replace('сентябрь', '.09.')
    s = s.replace('октябрь', '.10.').replace('октябярить','.10.')
    s = s.replace('ноябрь', '.11.')
    s = s.replace('декабрь', '.12.')
    s = s.replace('|', '')
    s = s.replace('2-й', '2')
    # Quarters and half-years map to their final month.
    s = re.sub(r'(ivквартал)|(4квартал)|(четвертыйквартал)|(4йквартал)', '.12.', s)
    s = re.sub(r'(iiiквартал)|(3квартал)|(третийквартал)', '.09.', s)
    s = re.sub(r'(iiквартал)|(2квартал)|(2ойквартал)|(второйквартал)', '.06.', s)
    s = re.sub(r'(iквартал)|(1квартал)|(первыйквартал)', '.03.', s)
    s = re.sub(r'(iiполугодие)|(2полугодие)|(второйполугодие)', '.12.', s)
    s = re.sub(r'(iполугодие)|(1полугодие)|(первыйполугодие)', '.06.', s)
    s = s.replace('1бытьполугодие','.01.')
    return (s)
def drop_law_dates(s):
    """Remove dates that belong to cited legal acts ('отDD.MM.YYYY' and
    'DD.MM.YYYYг.№N' forms), leaving only deadline dates behind."""
    citation_patterns = (
        r'от(\d{1}).(\d{2}).(\d{4})',
        r'от(\d{2}).(\d{2}).(\d{4})',
        r'(\d{2}).(\d{2}).(\d{4}г\.\№\d+)',
    )
    for pattern in citation_patterns:
        s = re.sub(pattern, '', s)
    return s
def remove_law_attr (s):
    """Strip references to laws and their attributes (article / clause /
    chapter numbers, registration ids, known document codes) so the digits
    that remain refer to deadlines only."""
    s = s.lower()
    s = s.replace('1-йчисло','')
    s = s.replace('1гочисло','')
    s = re.sub(r'закон(\d*).(\d{2}).(\d{4})', '', s)
    s = re.sub(r'\№\d*-\d*', '',s)
    s = re.sub(r'\№\d+', '', s)
    s = re.sub(r'\d+\№', '', s)
    s = s.replace('ст.','статья')
    s = re.sub(r'статья\d*\.\d*', '', s)
    s = re.sub(r'(статья\d*)|(\d*статья)', '', s)
    s = re.sub(r'(стать\d*\.\d*)|(стать\d*)','', s)
    # BUG FIX: this alternation previously contained an empty branch ('||')
    # which matches the empty string at every position, so the trailing
    # (п\.\d*) alternative was unreachable and 'п.N' clause references were
    # never removed. Collapsed to a single '|'.
    s = re.sub(r'(пункт\d\.\d*)|(пункт\d*)|(п\.\d*)', '', s)
    s = re.sub(r'(часть\d*\.\d*)|(часть\d*)', '', s)
    s = re.sub(r'глава\d*', '', s)
    s = re.sub(r'ставка\d*', '', s)
    s = re.sub(r'\d*перечень', '', s)
    s = re.sub(r'законопроект\d*', '', s)
    s = re.sub(r'постановление\d*', '', s)
    s = re.sub(r'\dприложение*', '', s)
    s = re.sub(r'\d*кодекс', '', s)
    s = re.sub(r'\d*федеральный', '', s)
    s = re.sub(r'\d*"земельный','', s)
    s = re.sub(r'\d*нк', '', s)
    s = re.sub(r'пр-\d*', '', s)
    s = re.sub(r'(\d*).(\d{2}).(\d{4})г\.\,регистрационный', '', s)
    # Specific sanitary-rule and safety-regulation document ids.
    s = s.replace('гн2.2.5.18', '').replace('пб05-580-03', '').replace('рд-05-328-99', '').replace('рд-05-350-00', '')
    s = s.replace('пб05-619-03', '').replace('гн 2.2.5.3532-18', '').replace('01/136009-ак','').replace('гн 2.2.5.3532-18','')
    return (s)
def drop_rus(s):
    """Keep only the non-Cyrillic characters of `s` (Cyrillic range А-я)."""
    non_cyrillic_runs = re.findall(r'[^А-я]+', s)
    return ''.join(non_cyrillic_runs)
def drop_eng(s):
    """Remove digit-plus-Latin-letter runs (e.g. '3year', '12month') from `s`."""
    return re.sub(r'\d+[A-Za-z]+', '', s)
def extract_num(s):
    """Join every digit run in `s` with '-': 'a1b22' -> '1-22'."""
    digit_runs = re.findall(r'\d+', s)
    return '-'.join(digit_runs)
def extract_eng(s):
    """Concatenate every digits-followed-by-one-letter fragment: '3y 4z' -> '3y4z'."""
    fragments = re.findall('\d+[A-Za-z]', s)
    return ''.join(fragments)
def extract_period(s):
    """Concatenate every '<N>year' / '<N>month' / '<N>day' marker found in `s`."""
    matches = re.findall(r'(\d+year)|(\d+month)|(\d+day)', s)
    # findall yields a 3-tuple per match with '' for non-participating groups.
    return ''.join(year + month + day for year, month, day in matches)
# Run the full normalization pipeline over the goal-deadline texts:
# numerals -> digits, junk removal, unit markers, glued-date separation.
goals_t_num = pd.Series(goals_timing_norm)
goals_t_num = goals_t_num.apply(let_to_num)
goals_t_num = goals_t_num.apply(insert_dots)
goals_t_num = goals_t_num.apply(drop_trash)
goals_t_num = goals_t_num.apply(norm_before_strip)
goals_t_num = goals_t_num.apply(strip_spaces)
goals_t_num = goals_t_num.apply(months_num)
goals_t_num = goals_t_num.apply(drop_law_dates)
goals_t_num = goals_t_num.apply(remove_law_attr)
# Second pass keeps the day/month/year period markers separately.
goals_t_num2 = goals_t_num.apply(remove_law_attr)
goals_t_num2 = goals_t_num2.apply(extract_period)
goals_t_num = goals_t_num.apply(drop_rus)
goals_t_num = goals_t_num.apply(drop_eng)
goals_t_num = goals_t_num.apply(extract_num)
# Same pipeline for the supposed-enforcement dates.
sup_date = pd.Series(sup_date_norm)
sup_date = sup_date.apply(let_to_num)
sup_date = sup_date.apply(insert_dots)
sup_date = sup_date.apply(drop_trash)
sup_date = sup_date.apply(norm_before_strip)
sup_date = sup_date.apply(strip_spaces)
sup_date = sup_date.apply(months_num)
sup_date = sup_date.apply(drop_law_dates)
sup_date = sup_date.apply(remove_law_attr)
sup_date = sup_date.apply(drop_rus)
sup_date = sup_date.apply(drop_eng)
sup_date = sup_date.apply(extract_num)
sup_df = pd.DataFrame({'id': main_df['header: id'], 'sup_date_raw': main_df['supposed_date: Предполагаемая дата вступления в силу проекта акта'], 'sup_date': sup_date})
# Manual corrections for three rows the automatic pipeline got wrong.
sup_df.iloc[3997, sup_df.columns.get_loc('sup_date')] = '2019'
sup_df.iloc[457, sup_df.columns.get_loc('sup_date')] = '30-06-2016'
sup_df.iloc[6048, sup_df.columns.get_loc('sup_date')] = '30-06-2019'
```
## Разделение дат и приведение к единому формату (гггг-мм-дд)
```
def clean_sup_date(s):
    """Blank out values shorter than 3 characters — too short to be a date."""
    return '' if len(str(s)) < 3 else s
def drop_trash(s):
    """Blank out extracted digit strings known to be ids rather than dates."""
    # Substrings removed anywhere they appear (known non-date ids).
    junk_substrings = ('38399', '79616', '95319', '398',
                       '02-04-03-19-00089665', '76953', '91271')
    for junk in junk_substrings:
        s = s.replace(junk, '')
    s = re.sub(r'^06-2$', '', s)
    s = re.sub(r'^1-09$', '', s)
    s = s.replace('02-08-04-20-00101035', '')
    # Whole values that are truncated/garbled and cannot be repaired.
    for whole_value in (r'^01-07-201$', r'^1-01-20126$', r'^06-2$'):
        s = re.sub(whole_value, '', s)
    s = s.replace('7-1-1-7-1-4-7-1-13-7-1-14-7-1-31-7-1-37-7-1-38-7-3-3-7-4-4-7-19-1-01-2017', '1-01-2017')
    return s
def transform_unique (s):
    """Fix one-off typos observed in the extracted date strings (swapped
    digits, glued years, truncated months) and expand bare quarter numbers
    into a day-month prefix."""
    s = s.replace('09-2107','09-2017').replace('08-1018','08-2018').replace('08-2919','08-2019')
    s = re.sub(r'^1-01$','1-01-2018', s)
    s = re.sub(r'^02017$','2017', s)
    s = re.sub(r'^22015$','31-12-2015', s)
    s = re.sub(r'^01-122017$','01-12-2017', s)
    s = re.sub(r'^22017$','31-12-2017', s)
    s = re.sub(r'^42017$','31-12-2017', s)
    s = re.sub(r'^12021$','1-01-2021', s)
    s = re.sub(r'^12-218$','31-12-2018', s)
    s = re.sub(r'^1-01-219$','1-01-2019', s)
    s = re.sub(r'^05-201$','1-05-2020', s)
    s = re.sub(r'^01201$','01-01-2020', s)
    s = re.sub(r'^02019$','2019', s)
    s = re.sub(r'^1-01-2018206$', '1-01-2018', s)
    s = re.sub(r'^07-218$', '07-2018', s)
    # Quarters: 'N-YYYY' becomes the first day of the quarter's last month.
    if re.search(r'^1-20\d{2}$', s):
        s = re.sub('^1', '01-03', s)
    if re.search(r'^2-20\d{2}$', s):
        s = re.sub('^2', '01-06', s)
    if re.search(r'^3-20\d{2}$', s):
        s = re.sub('^3', '01-09', s)
    if re.search(r'^4-20\d{2}$', s):
        s = re.sub('^4', '01-12', s)
    return (s)
def split_multiple (s):
    """When several dates were glued into one string, keep only the first one.

    Each branch matches a specific observed glued shape (example in the
    trailing comment) and strips everything after the first date. Branch
    order matters: later patterns can match intermediate results.
    """
    if re.search(r'^\d{2}-\d{5}-\d*-20\d{2}$', s): # 09-20181-01-2020
        s = re.sub('\d{1}-\d*-20\d{2}$', '', s)
    if re.search(r'^\d{2}-\d{4}-\d{1}-\d{2}-\d{4}-\d{1}-\d{2}-\d{4}$', s): # 06-2021-1-07-2021-1-07-2021
        s = re.sub('-\d{1}-\d{2}-\d{4}-\d{1}-\d{2}-\d{4}$', '', s)
    if re.search(r'^\d{2}-20\d{2}-\d*-\d*-20\d{2}-\d*-20\d{2}$', s): # 06-2021-1-07-2021-07-2021
        s = re.sub('-\d*-\d*-20\d{2}-\d*-20\d{2}$', '', s)
    if re.search(r'^\d*-\d*-\d{4}-\d*-\d*-\d{4}-\d*-\d*-\d{4}-\d*-\d*-\d{4}$', s):
        s = re.sub('-\d*-\d*-\d{4}-\d*-\d*-\d{4}-\d*-\d*-\d{4}$', '', s)
    if re.search(r'^\d*-\d*-\d{4}-\d*-\d*-\d{4}$', s):
        s = re.sub('-\d*-\d*-\d{4}$', '', s)
    if re.search(r'^\d*-\d{4}-\d*-\d{4}$', s):
        s = re.sub('-\d*-\d{4}$', '', s)
    if re.search(r'^\d*-\d{4}-\d*-\d{4}$', s):
        s = re.sub('^\d*-\d{4}-', '', s)
    if re.search(r'^\d*-\d*-\d{6}-\d*-\d*$', s): # 1-01-201821-01-2019
        s = re.sub('\d{2}-\d*-\d{4}$', '', s)
    if re.search(r'^\d*-\d*-\d{5}-\d*-\d*$', s):
        s = re.sub('\d{1}-\d*-\d{4}$', '', s)
    if re.search(r'^20\d{2}20\d{2}', s):
        s = re.sub('\d{4}$', '', s)
    if re.search(r'^\d{1}-\d*-20\d{2}-\d*$', s): # 1-01-2018-2018
        s = re.sub('-\d*$', '', s)
    if re.search(r'^\d*-\d*-\d*-20\d{2}$', s): # 4-01-07-2020
        s = re.sub('^\d*-', '', s)
    if re.search(r'^\d{1}-\d*-20\d{2}20\d{2}$', s): # 1-01-20202025
        s = re.sub('\d{4}$', '', s)
    if re.search(r'^\d{4}-\d*-\d*-20\d{2}$', s): # 2021-1-01-2025
        s = re.sub('-\d*-\d*-\d{4}$', '', s)
    if re.search(r'^\d{2}-20\d{2}-\d*-\d*-20\d{2}$', s): # 12-2018-1-12-2019
        s = re.sub('-\d*-\d*-\d{4}$', '', s)
    if re.search(r'^\d{2}-20\d{2}20\d{2}$', s): # 12-20182019
        s = re.sub('\d{4}$', '', s)
    if re.search(r'^20\d{2}-20\d{2}$', s): # 2017-2018
        s = re.sub('-20\d{2}$', '', s)
    if re.search(r'^\d{2}-\d{2}-20\d{2}-\d{2}-20\d{2}$', s): # 30-04-2021-07-2021
        s = re.sub('-\d{2}-20\d{2}$', '', s)
    return (s)
def add_sep(s):
    """Insert '-' separators into compact digit dates.

    Handles 'ddmmyyyy', 'mmyyyy', 'ddmmyy', 'mmyy' forms, two-digit years in
    'dd-mm-yy', and the stray 'dd-mm-y-yyy' typo. Checks run in sequence on
    the (possibly already transformed) value.
    """
    if re.search(r'^\d{4}20\d{2}$', s):
        # 01022016 -> 01-02-2016
        s = '-'.join((s[:2], s[2:4], s[4:]))
    if re.search(r'^\d{2}20\d{2}$', s):
        # 012025 -> 01-01-2025 (day defaults to 01)
        s = '01-' + s[:2] + '-' + s[2:]
    if re.search(r'^\d{4}2\d{1}$', s):
        # 010220 -> 01-02-2020 (expand two-digit year)
        s = s[:2] + '-' + s[2:4] + '-20' + s[4:]
    if re.search(r'^0\d{1}2\d{1}$', s):
        # 0120 -> 01-01-2020
        s = '01-' + s[:2] + '-' + s[2:4] + '20'
    if re.search(r'^\d{2}-\d{2}-\d{2}$', s):
        # 01-04-18 -> 01-04-2018
        s = s[:6] + '20' + s[6:]
    if re.search(r'^\d{2}-\d{2}-\d{1}-\d{3}$', s):
        # 30-11-2-017 -> 30-11-2017
        s = s[:7] + s[8:]
    return s
def correct_wrong_dm(s):
    """Clamp impossible day-of-month prefixes (30 Feb; 31 Jun/Sep/Nov)."""
    clamp = {'30-02': '28-02', '31-06': '30-06', '31-09': '30-09', '31-11': '30-11'}
    prefix_fix = clamp.get(s[:5])
    return (prefix_fix + s[5:]) if prefix_fix else s
def add_start_day(s):
    """Complete partial dates with their period START:
    'mm-yyyy' -> '01-mm-yyyy' and 'yyyy' -> '01-01-yyyy'."""
    if re.search(r'^\d{2}-20\d{2}$', s):
        return '01-' + s
    if re.search(r'^20\d{2}$', s):
        return '01-01-' + s
    return s
# Apply the whole cleaning cascade to the supposed-enforcement dates, then
# parse the surviving strings into real datetimes (dd-mm-yyyy format).
sup_df['sup_date'] = sup_df['sup_date'].apply(clean_sup_date)
sup_df['sup_date'] = sup_df['sup_date'].apply(drop_trash)
sup_df['sup_date'] = sup_df['sup_date'].apply(transform_unique)
sup_df['sup_date'] = sup_df['sup_date'].apply(split_multiple)
sup_df['sup_date'] = sup_df['sup_date'].apply(add_sep)
sup_df['sup_date'] = sup_df['sup_date'].apply(correct_wrong_dm)
sup_df['sup_date'] = sup_df['sup_date'].apply(add_start_day)
# Empty strings become NaT after conversion.
sup_df['sup_date'] = sup_df['sup_date'].replace('', np.nan)
sup_df['sup_date'] = pd.to_datetime(sup_df['sup_date'], format='%d-%m-%Y')
def add_end_day(s):
    """Complete partial dates with their period END: February -> day 28,
    any other 'mm-yyyy' -> day 30, bare year -> 31 December."""
    if re.search(r'^02-20\d{2}$', s):
        return '28-' + s
    if re.search(r'^\d{2}-20\d{2}$', s):
        return '30-' + s
    if re.search(r'^20\d{2}$', s):
        return '31-12-' + s
    return s
def clean_goalstim(s):
    """Blank out values shorter than 4 characters — too short to be a deadline."""
    return '' if len(str(s)) < 4 else s
def remove_complex (s):
    """Drop or repair whole extracted values that are known to be ids,
    garbled dates, or unparseable composites (each branch matches one
    specific observed value)."""
    s = re.sub(r'^136009$', '', s)
    s = re.sub(r'^2-2-5-3532-18$', '', s)
    s = re.sub(r'^2387$', '', s)
    s = re.sub(r'^31-12$', '', s)
    s = re.sub(r'^7-15-319-1009$', '', s)
    s = re.sub(r'^3-09-536$', '', s)
    s = re.sub(r'^28-06$', '', s)
    s = re.sub(r'^30-30$', '', s)
    s = re.sub(r'^9-8992$', '', s)
    s = re.sub(r'^7-6-777$', '', s)
    s = re.sub(r'^1232$', '', s)
    s = re.sub(r'^24-01-2019-9-02-2019-03-2019$', '', s)
    # Manually decoded repairs for specific garbled values.
    s = re.sub(r'^1-07$','1-07-2020', s)
    s = re.sub(r'^31-12-202019$','31-12-2019',s)
    s = re.sub(r'^1-01-20121$','1-01-2021',s)
    s = re.sub(r'^2018-2017-2018$','01-06-2017',s)
    s = re.sub(r'^01-20-2025$','01-01-2020',s)
    s = re.sub(r'^3861-11-2019$','1-11-2019',s)
    s = re.sub(r'^06-2016-2016$','30-06-2016',s)
    s = re.sub(r'^12-1017$','31-12-2017',s)
    s = re.sub(r'^03-2019-4$','31-03-2019', s)
    s = re.sub(r'^2018-2$','31-12-2018',s)
    s = re.sub(r'^20-16-2017$','31-12-2018', s)
    return (s)
def split_dates (s):
    """Split strings that hold several concatenated dates into space-separated ones.

    The first group of substitutions handles one-off malformed values seen in
    the data (exact, anchored matches, some collapsing to a single intended
    date). The generic patterns afterwards break apart common concatenation
    shapes; the example comment on each line shows the input shape it targets.
    Order matters: literal fixes must run before the generic rules.
    """
    s = re.sub(r'^201620172018$', '2016 2017 2018', s)
    s = re.sub(r'^01-2021-01-09-20218$', '01-2021 01-09-2021', s)
    s = re.sub(r'^1-01-2011-01-2019$', '01-01-2019', s)
    s = re.sub(r'^09-20181-01-2020$', '09-2018 1-01-2020', s)
    s = re.sub(r'^31-12-2023-2018-20192024-2025$', '31-12-2023 2019 2024 2025', s)
    s = re.sub(r'^03-2019-03-2019-03-2019-03-2019-03-2019$', '31-03-2019', s)
    s = re.sub(r'^05-2018-11-2018-11-2018-02-2018$', '05-2018 11-2018 11-2018 02-2018 ', s)
    s = re.sub(r'^12-2019-12-20192020$', '12-2019 2020', s)
    s = re.sub(r'^1-01-20192025$', '1-01-2019 2025', s)
    s = re.sub(r'^12-2019-12-20192020$', '12-2019 2020', s)
    s = re.sub(r'^1-01-2018-20192020$', '1-01-2018 2019 2020', s)
    s = re.sub(r'^01-01-20162017-2017$', '01-01-2016 2017 2017', s)
    s = re.sub(r'^2019-2019-2020-2020-2025$', '2019 2020 2025', s)
    s = re.sub(r'^1-30-12-2017-2-1-05-2018$', '30-12-2017 1-05-2018', s)
    s = re.sub(r'^1-01-2016-07-20161-07-2017$', '1-01-2016 1-07-2016 1-07-2017', s)
    s = re.sub(r'^1-1-01-20202-1-01-2022$', '1-01-2020 1-01-2022', s)
    s = re.sub(r'^1-01-20191-01-20201-01-2021$', '1-01-2019 1-01-2020 1-01-2021', s)
    s = re.sub(r'^201920202021-1-03-1-01-2019-2019-1-03-2019$', '1-03-2019', s)
    s = re.sub(r'^1-01-2020-7-21-271-7-21-575-1-01-2020$', '1-01-2020', s)
    s = re.sub(r'^01-2021-01-09-2021$', '01-2021 01-09-2021', s)
    s = re.sub(r'^1-01-2019-1-01-20201-01-2021$', '1-01-2019 1-01-2020 1-01-2021', s)
    # Generic shapes below; the trailing comment shows a representative input.
    if re.search(r'^20\d{2}-\d{2}$', s): # 2017-18
        s = s.replace('-', ' 20')
    if re.search(r'^20-\d{2}-20\d{2}$', s): # 20-24-2025
        s = s.replace('20-', '20').replace('-', ' ')
    if re.search(r'^20\d{2}-20\d{2}$', s): # 2025-2028
        s = re.sub(r'-', ' ', s)
    if re.search(r'^\d{1}-\d{2}-20\d{4}-\d{2}-20\d{2}$', s): # 1-07-201931-12-2019
        s = s[:9] + ' ' + s[9:]
    if re.search(r'^\d{2}-\d{2}-20\d{4}-\d{2}-20\d{2}$', s): # 30-08-202030-12-2020
        s = s[:10] + ' ' + s[10:]
    if re.search(r'^\d{1}-\d{2}-20\d{3}-\d{2}-20\d{2}$', s): # 1-01-20191-01-2019
        s = s[:9] + ' ' + s[9:]
    if re.search(r'^\d{2}-20\d{2}-\d{2}-20\d{2}$', s): # 12-2017-03-2017
        s = re.sub(r'(\d{2}-20\d{2})-(\d{2}-20\d{2})', r'\1 \2', s)
    if re.search(r'^\d*-\d{2}-\d*-\d{2}-20\d{2}$', s): # 20-05-31-08-2019
        s = re.sub(r'^\d*-\d{2}-', '', s)
    if re.search(r'^\d*-\d{2}-20\d{2}-\d*-\d{2}-20\d{2}$', s): # 01-01-2020-31-12-2024
        s = re.sub(r'(\d*-\d{2}-20\d{2})-(\d*-\d{2}-20\d{2})', r'\1 \2', s)
    if re.search(r'^20\d{2}20\d{2}$', s): # 20202021
        s = re.sub(r'(\d{4})(\d{4})', r'\1 \2', s)
    return (s)
# Build the goals-timing table, attach the (deduplicated) entry-into-force
# dates, then run the 'output' column through the same cleaning pipeline
# as sup_date — but padded to the *end* of each period via add_end_day.
goalstim_df = pd.DataFrame({'id': goals['id'], 'goals_timing': goals['Установленные сроки достижения целей предлагаемого регулирования'], 'output': goals_t_num, 'period': goals_t_num2})
sup_df = sup_df[['id','sup_date']]
sup_df = sup_df.drop_duplicates(subset = ['id'])
goalstim_df = pd.merge(goalstim_df, sup_df, on = ["id"], how = 'left')
goalstim_df['output'] = goalstim_df['output'].replace('1-01', '')
goalstim_df['output'] = goalstim_df['output'].apply(clean_sup_date)
goalstim_df['output'] = goalstim_df['output'].apply(drop_trash)
goalstim_df['output'] = goalstim_df['output'].apply(transform_unique)
goalstim_df['output'] = goalstim_df['output'].apply(add_sep)
goalstim_df['output'] = goalstim_df['output'].apply(correct_wrong_dm)
goalstim_df['output'] = goalstim_df['output'].apply(add_end_day)
goalstim_df['output'] = goalstim_df['output'].apply(clean_goalstim)
goalstim_df['output'] = goalstim_df['output'].apply(remove_complex)
goalstim_df['output'] = goalstim_df['output'].apply(split_dates)
### one-to-many table: one row per individual date in 'output'
goalstim_df['all_dates'] = goalstim_df['output']
goalstim_df_otm = goalstim_df.assign(output = goalstim_df['output'].astype(str).str.split(' ')).explode('output')
goalstim_df_otm = goalstim_df_otm.drop(['period', 'sup_date', 'all_dates','goals_timing'], axis = 1)
goalstim_df_otm['output'] = goalstim_df_otm['output'].replace('', np.nan)
goalstim_df_otm['output'] = goalstim_df_otm['output'].astype(str).apply(add_end_day)
goalstim_df_otm['output'] = pd.to_datetime(goalstim_df_otm['output'], format ='%d-%m-%Y')
goalstim_df_otm = goalstim_df_otm.rename(columns = {'output': 'Установленные сроки достижения целей предлагаемого регулирования (дата)'})
goalstim_df_otm.head(15)
len(goalstim_df_otm)
### one-to-one table (for computing time spans): keep only the last listed date
goalstim_df['output'] = goalstim_df['output'].str.split().str[-1]
goalstim_df['output'] = goalstim_df['output'].astype(str).apply(add_end_day)
goalstim_df['output'] = pd.to_datetime(goalstim_df['output'], format='%d-%m-%Y')
len(goalstim_df)
```
### Расчет периода (в днях) между двумя датами
Если заполнение — дата
```
# Time-to-goal in whole days: goal date minus entry-into-force date,
# stored as a nullable integer (Int64 keeps NaT-derived gaps as <NA>).
goalstim_df['term'] = goalstim_df['output'] - goalstim_df['sup_date']
goalstim_df['term'] = (goalstim_df['term'] / np.timedelta64(1,'D')).astype('Int64')
goalstim_df['end_date'] = goalstim_df['output']
```
### Расчет конечной даты относительно указанного периода
Если заполнение — период
```
def split_period (s):
    """Insert a space between a number and its unit, e.g. '2year' -> '2 year'."""
    return re.sub(r'(\d)([a-z])', r'\1 \2', s)
# Convert period-style answers ('2year', '6month', ...) to a day count and
# an end date relative to the entry-into-force date.
period_calc = goalstim_df.copy()
# Collapse a handful of malformed multi-period strings to a single period.
period_calc['period'] = period_calc['period'].replace('1month1month', '1month').replace('180day2year', '2year').replace('3day', '')
period_calc['period'] = period_calc['period'].replace('6month2year', '2year').replace('1year25year20182022year', '5year').replace('70year70year', '')
period_calc['period'] = period_calc['period'].apply(split_period)
period_calc[['term', 'date_type']] = period_calc['period'].str.split(" ", expand=True)
period_calc = period_calc[(period_calc['term'] != '')]
period_calc['term'] = period_calc['term'].astype('float').astype('Int64')
# Approximate conversion: 1 year = 365 days, 1 month = 30 days.
period_calc['term'] *= np.where(period_calc['date_type'] =='year', 365, 1)
period_calc['term'] *= np.where(period_calc['date_type'] =='month', 30, 1)
period_calc['term'] = period_calc['term'].astype('Int64')
period_calc['end_date'] = period_calc['sup_date'] + period_calc['term'].apply(pd.offsets.Day)
period_calc = period_calc[['id', 'period', 'term', 'end_date']]
def insert_rus (s):
    """Render an English period like '2 year' in Russian with the right plural.

    Russian number agreement: 1 -> 'год'/'месяц', 2-4 -> 'года'/'месяца',
    anything else -> 'лет'/'месяцев'; 'day' is always rendered 'дней' here.
    Non-string values (e.g. NaN from the merge) are stringified first and
    normally pass through unchanged.
    """
    s = str(s)
    s = re.sub(r'1 year', '1 год', s)
    s = re.sub(r'1 month', '1 месяц', s)
    # Bug fix: the original patterns '(2year)|(3year)(4year)' lacked both the
    # space that split_period inserts and the '|' before the last alternative,
    # so the 2-4 plural branch could never match.
    if re.search(r'^[234] year', s):
        s = re.sub(r'year', 'года', s)
    else:
        s = re.sub(r'year', 'лет', s)
    if re.search(r'^[234] month', s):
        s = re.sub(r'month', 'месяца', s)
    else:
        s = re.sub(r'month', 'месяцев', s)
    if re.search(r'day', s):
        s = re.sub(r'day', 'дней', s)
    return (s)
# Combine date-based and period-based results: where the explicit goal date
# (or term) is missing, fall back to the value computed from the period.
goalstim_df = goalstim_df.drop(['period'], axis = 1)
period_calc = period_calc.drop_duplicates(subset = ['id'])
full_df = pd.merge(goalstim_df, period_calc, on = ["id"], how ='left')
# Bug fix: `full_df.end_date_x.fillna(..., inplace=True)` fills a column
# retrieved by attribute access; that chained in-place fill is deprecated
# and silently does nothing under pandas copy-on-write. Assign instead.
full_df['end_date_x'] = full_df['end_date_x'].fillna(full_df['end_date_y'])
full_df['term_x'] = full_df['term_x'].fillna(full_df['term_y'])
full_df['period'] = full_df['period'].apply(insert_rus)
full_df = full_df.drop(columns = ['all_dates', 'goals_timing', 'end_date_y', 'term_y'])
full_df = full_df.rename(columns = {'output': 'Установленные сроки достижения целей предлагаемого регулирования (дата)',
                        'period': 'Установленные сроки достижения целей предлагаемого регулирования (период)',
                        'sup_date': 'Предполагаемая дата вступления в силу проекта акта',
                        'term_x': 'Срок достижения целей (дней)', 'end_date_x': 'Дата достижения'})
full_df = full_df[['Предполагаемая дата вступления в силу проекта акта',
         'Установленные сроки достижения целей предлагаемого регулирования (дата)',
         'Установленные сроки достижения целей предлагаемого регулирования (период)',
         'Срок достижения целей (дней)', 'Дата достижения']]
full_df.tail(15)
full_df['Срок достижения целей (дней)'].describe()
```
### Отрицательные значения периодов
```
# Sanity check: rows where the computed time-to-goal is negative, i.e.
# the goal date precedes the act's entry-into-force date.
minus = full_df[full_df['Срок достижения целей (дней)'] < 0]
len(minus)
```
| github_jupyter |
## Authors: Benjamin Barinka, Snigdhayan Mahanta, Matthias Semmelmann (in alphabetical order of last name)
In this notebook we use tensorflow-quantum to train a quantum neural network (QNN) model for breast cancer analysis. We use the freely available breast cancer dataset (with labels) for this purpose. To reduce complexity we have restricted our attention to the top 4 most relevant features. This was ascertained separately via 'feature selection'. After standard preprocessing we prepared two training datasets as follows:
1. 'Amplitude Encoding' approach - each feature is modeled by one qubit. After rescaling all input values are between 0 and 1. So we apply the 'rotation along y-axis' proportional to the input value to each qubit state.
2. 'Binary Encoding' approach - each feature is modeled by one qubit. After rescaling all input values are between 0 and 1. We define an intermediate threshold value and map each input value below the threshold to 0 and each value at or above it to 1. In this manner we obtain a binary dataset, and we then apply the X-gate to those qubits whose corresponding input value is one.
Finally, we adapt the QNN construction in this tutorial https://www.tensorflow.org/quantum/tutorials/mnist to our setting and compare the results of the two approaches.
```
import pandas as pd
import tensorflow as tf
import tensorflow_quantum as tfq
import time
import cirq
import sympy
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from cirq.contrib.svg import SVGCircuit
# The breast cancer dataset can be obtained from here - https://github.com/snigdhayan/NeuralNetworks/blob/master/Ludwig/breast_cancer_dataset.csv
df = pd.read_csv('./breast_cancer_dataset.csv')
df.head()
# Min-max scale every column to [0, 1]; remember the bounds per column.
scaling_table = {}
for column in df:
    column_max = df[column].max()
    column_min = df[column].min()
    df[column] = (df[column] - column_min) / (column_max - column_min)
    scaling_table[column] = {'min': column_min, 'max': column_max}
df.head()
y_columns = df['label'].values
# Threshold used to binarise features for the Binary Encoding approach.
THRESHOLD = 0.5
worst_concave_points = df['worst_concave_points']
worst_perimeter = df['worst_perimeter']
mean_concave_points = df['mean_concave_points']
worst_radius = df['worst_radius']
worst_concave_points_bin = df['worst_concave_points'].apply(lambda x : 0 if x < THRESHOLD else 1)
worst_perimeter_bin = df['worst_perimeter'].apply(lambda x : 0 if x < THRESHOLD else 1)
mean_concave_points_bin = df['mean_concave_points'].apply(lambda x : 0 if x < THRESHOLD else 1)
worst_radius_bin = df['worst_radius'].apply(lambda x : 0 if x < THRESHOLD else 1)
sel_df = pd.concat([worst_concave_points, worst_perimeter, mean_concave_points, worst_radius], axis=1)
# NOTE(review): the concatenated columns keep their original names, so this
# integer-keyed rename appears to be a no-op — confirm intent.
sel_df.rename(columns={0: 'wcp', 1: 'wp', 2: 'mcp', 3: 'wr'}, inplace=True)
sel_df_bin = pd.concat([worst_concave_points_bin, worst_perimeter_bin, mean_concave_points_bin, worst_radius_bin], axis=1)#pd.concat([mean,error,worst], axis=1)
sel_df_bin.rename(columns={0: 'wcp', 1: 'wp', 2: 'mcp', 3: 'wr'}, inplace=True)
# Simple positional split: first 400 rows train, the rest test.
train_test_split = 400
y_train = y_columns.reshape(1,-1)[0]
y_test = y_train[train_test_split:]
y_train = y_train[:train_test_split]
x_train = sel_df.values
x_test = x_train[train_test_split:]
x_train = x_train[:train_test_split]
x_train_bin = sel_df_bin.values
x_test_bin = x_train_bin[train_test_split:]
x_train_bin = x_train_bin[:train_test_split]
def convert_to_circuit(data):
    """Encode prepared data into quantum datapoint - Amplitude Encoding."""
    qubits = cirq.GridQubit.rect(4, 1)
    circuit = cirq.Circuit()
    # One qubit per feature: rotate about the y-axis by value * pi, mapping
    # the [0, 1] feature range onto the qubit amplitude.
    for qubit, value in zip(qubits, np.ndarray.flatten(data)):
        circuit.append(cirq.ry(value * np.pi)(qubit))
    return circuit
def convert_to_circuit_bin(data):
    """Encode prepared data into quantum datapoint - Binary Encoding."""
    qubits = cirq.GridQubit.rect(4, 1)
    circuit = cirq.Circuit()
    # One qubit per binarised feature: apply an X gate when the feature is 1,
    # leaving the qubit in |0> otherwise.
    for qubit, value in zip(qubits, np.ndarray.flatten(data)):
        if value == 1:
            circuit.append(cirq.X(qubit))
    return circuit
# Encode every sample as a circuit for both approaches, preview one of each,
# and convert the circuit lists to TensorFlow-Quantum string tensors.
x_train_circ = [convert_to_circuit(x) for x in x_train]
x_test_circ = [convert_to_circuit(x) for x in x_test]
SVGCircuit(x_train_circ[1])
x_train_circ_bin = [convert_to_circuit_bin(x) for x in x_train_bin]
x_test_circ_bin = [convert_to_circuit_bin(x) for x in x_test_bin]
SVGCircuit(x_train_circ_bin[1])
x_train_tfcirc = tfq.convert_to_tensor(x_train_circ)
x_test_tfcirc = tfq.convert_to_tensor(x_test_circ)
x_train_tfcirc_bin = tfq.convert_to_tensor(x_train_circ_bin)
x_test_tfcirc_bin = tfq.convert_to_tensor(x_test_circ_bin)
class CircuitLayerBuilder():
    """Appends parameterised two-qubit layers coupling data qubits to a readout qubit."""
    def __init__(self, data_qubits, readout):
        self.data_qubits = data_qubits
        self.readout = readout
    def add_layer(self, circuit, gate, prefix):
        # One symbolic exponent per data qubit, named '<prefix>-<index>' so
        # the PQC layer can train each coupling independently.
        for index, qubit in enumerate(self.data_qubits):
            weight = sympy.Symbol('{}-{}'.format(prefix, index))
            circuit.append(gate(qubit, self.readout) ** weight)
def create_quantum_model():
    """Create a QNN model circuit and readout operation to go along with it."""
    data_qubits = cirq.GridQubit.rect(4, 1)  # a 4x1 column of data qubits (one per feature)
    readout = cirq.GridQubit(-1, -1)         # a single qubit at [-1,-1]
    circuit = cirq.Circuit()

    # Prepare the readout qubit in the |-> state (X then H).
    circuit.append(cirq.X(readout))
    circuit.append(cirq.H(readout))

    builder = CircuitLayerBuilder(
        data_qubits = data_qubits,
        readout=readout)

    # Then add layers (experiment by adding more).
    builder.add_layer(circuit, cirq.XX, "xx1")
    builder.add_layer(circuit, cirq.ZZ, "zz1")

    # Finally, prepare the readout qubit.
    circuit.append(cirq.H(readout))

    return circuit, cirq.Z(readout)
# Build the shared model circuit and the two (identical-architecture)
# Keras models — one trained on amplitude-encoded, one on binary-encoded data.
model_circuit, model_readout = create_quantum_model()
SVGCircuit(model_circuit)
# Build the Keras model.
qnn_model = tf.keras.Sequential([
    # The input is the data-circuit, encoded as a tf.string
    tf.keras.layers.Input(shape=(), dtype=tf.string),
    # The PQC layer returns the expected value of the readout gate, range [-1,1].
    tfq.layers.PQC(model_circuit, model_readout),
])
# Build the Keras model.
qnn_model_bin = tf.keras.Sequential([
    # The input is the data-circuit, encoded as a tf.string
    tf.keras.layers.Input(shape=(), dtype=tf.string),
    # The PQC layer returns the expected value of the readout gate, range [-1,1].
    tfq.layers.PQC(model_circuit, model_readout),
])
# Map {0,1} labels to {-1,+1} for the hinge loss.
y_train_hinge = 2.0*y_train-1.0
y_test_hinge = 2.0*y_test-1.0
def hinge_accuracy(y_true, y_pred):
    """Fraction of examples whose hinge labels agree in sign with the predictions."""
    labels_positive = tf.squeeze(y_true) > 0.0
    preds_positive = tf.squeeze(y_pred) > 0.0
    return tf.reduce_mean(tf.cast(labels_positive == preds_positive, tf.float32))
# Compile both models identically: hinge loss on {-1,+1} labels with Adam.
qnn_model.compile(
    loss=tf.keras.losses.Hinge(),
    optimizer=tf.keras.optimizers.Adam(),
    metrics=[hinge_accuracy])
qnn_model.summary()
qnn_model_bin.compile(
    loss=tf.keras.losses.Hinge(),
    optimizer=tf.keras.optimizers.Adam(),
    metrics=[hinge_accuracy])
qnn_model_bin.summary()
# Training hyperparameters; NUM_EXAMPLES currently keeps the full train set.
EPOCHS = 5
BATCH_SIZE = 8
NUM_EXAMPLES = len(x_train_tfcirc)
x_train_tfcirc_sub = x_train_tfcirc[:NUM_EXAMPLES]
y_train_hinge_sub = y_train_hinge[:NUM_EXAMPLES]
x_train_tfcirc_sub_bin = x_train_tfcirc_bin[:NUM_EXAMPLES]
y_train_hinge_sub_bin = y_train_hinge[:NUM_EXAMPLES]
# Train and evaluate the amplitude-encoding model, timing the whole run.
start_time = time.time()
qnn_history = qnn_model.fit(
      x_train_tfcirc_sub, y_train_hinge_sub,
      batch_size=BATCH_SIZE,
      epochs=EPOCHS,
      verbose=1,
      validation_data=(x_test_tfcirc, y_test_hinge))
qnn_results = qnn_model.evaluate(x_test_tfcirc, y_test)
qnn_training_time = time.time() - start_time
# Train and evaluate the binary-encoding model.
start_time = time.time()
qnn_history_bin = qnn_model_bin.fit(
      x_train_tfcirc_sub_bin, y_train_hinge_sub_bin,
      batch_size=BATCH_SIZE,
      epochs=EPOCHS,
      verbose=1,
      validation_data=(x_test_tfcirc_bin, y_test_hinge))
# Bug fix: evaluate the *binary* model here. The original called
# qnn_model.evaluate(...), so the reported binary-encoding accuracy
# actually came from the amplitude-encoded model.
qnn_results_bin = qnn_model_bin.evaluate(x_test_tfcirc_bin, y_test)
qnn_training_time_bin = time.time() - start_time
# Compare the two encodings: final hinge accuracy, wall-clock training time,
# and per-epoch validation loss / accuracy curves.
qnn_accuracy = qnn_results[1]
qnn_accuracy_bin = qnn_results_bin[1]
sns.barplot(x=['Amplitude Encoding','Binary Encoding'],y=[qnn_accuracy,qnn_accuracy_bin])
plt.title('Hinge Accuracy Score')
amplitude_training_time = round(qnn_training_time,2)
binary_training_time = round(qnn_training_time_bin,2)
sns.barplot(x=['Amplitude Encoding','Binary Encoding'],y=[amplitude_training_time,binary_training_time])
plt.title('Training Time (in seconds)')
plt.plot(qnn_history.history['val_loss'], label='Amplitude Encoding')
plt.plot(qnn_history_bin.history['val_loss'], label='Binary Encoding')
plt.title('Validation Loss')
plt.legend(('Amplitude Encoding', 'Binary Encoding'))
plt.plot(qnn_history.history['val_hinge_accuracy'])
plt.plot(qnn_history_bin.history['val_hinge_accuracy'])
plt.title('Validation Hinge Accuracy')
plt.legend(('Amplitude Encoding', 'Binary Encoding'))
```
| github_jupyter |
Copyright (c) Microsoft Corporation. All rights reserved.
Licensed under the MIT License.
# 02. Distributed PyTorch with Horovod
In this tutorial, you will train a PyTorch model on the [MNIST](http://yann.lecun.com/exdb/mnist/) dataset using distributed training via [Horovod](https://github.com/uber/horovod).
## Prerequisites
* Understand the [architecture and terms](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture) introduced by Azure Machine Learning (AML)
* Go through the [00.configuration.ipynb](https://github.com/Azure/MachineLearningNotebooks/blob/master/00.configuration.ipynb) notebook to:
* install the AML SDK
* create a workspace and its configuration file (`config.json`)
* Review the [tutorial](https://aka.ms/aml-notebook-pytorch) on single-node PyTorch training using the SDK
```
# Check the installed Azure ML core SDK version number.
import azureml.core
print("SDK version:", azureml.core.VERSION)
```
## Diagnostics
Opt-in diagnostics for better experience, quality, and security of future releases.
```
# Opt in to diagnostics collection (telemetry) for future SDK releases.
from azureml.telemetry import set_diagnostics_collection
set_diagnostics_collection(send_diagnostics = True)
```
## Initialize workspace
Initialize a [Workspace](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#workspace) object from the existing workspace you created in the Prerequisites step. `Workspace.from_config()` creates a workspace object from the details stored in `config.json`.
```
# Load the workspace from the config.json created in the prerequisite
# notebook and echo its basic properties.
from azureml.core.workspace import Workspace
ws = Workspace.from_config()
print('Workspace name: ' + ws.name,
      'Azure region: ' + ws.location,
      'Subscription id: ' + ws.subscription_id,
      'Resource group: ' + ws.resource_group, sep = '\n')
```
## Create a remote compute target
You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) to execute your training script on. In this tutorial, you create an [Azure Batch AI](https://docs.microsoft.com/azure/batch-ai/overview) cluster as your training compute resource. This code creates a cluster for you if it does not already exist in your workspace.
**Creation of the cluster takes approximately 5 minutes.** If the cluster is already in your workspace this code will skip the cluster creation process.
```
from azureml.core.compute import ComputeTarget, BatchAiCompute
from azureml.core.compute_target import ComputeTargetException
# choose a name for your cluster
cluster_name = "gpucluster"
# Reuse the cluster when it already exists; otherwise provision an
# autoscaling (0-4 node) STANDARD_NC6 GPU Batch AI cluster.
try:
    compute_target = ComputeTarget(workspace=ws, name=cluster_name)
    print('Found existing compute target.')
except ComputeTargetException:
    print('Creating a new compute target...')
    compute_config = BatchAiCompute.provisioning_configuration(vm_size='STANDARD_NC6',
                                                                autoscale_enabled=True,
                                                                cluster_min_nodes=0,
                                                                cluster_max_nodes=4)
    # create the cluster
    compute_target = ComputeTarget.create(ws, cluster_name, compute_config)
    compute_target.wait_for_completion(show_output=True)
# Use the 'status' property to get a detailed status for the current cluster.
print(compute_target.status.serialize())
```
The above code creates a GPU cluster. If you instead want to create a CPU cluster, provide a different VM size to the `vm_size` parameter, such as `STANDARD_D2_V2`.
## Train model on the remote compute
Now that we have the cluster ready to go, let's run our distributed training job.
### Create a project directory
Create a directory that will contain all the necessary code from your local machine that you will need access to on the remote resource. This includes the training script and any additional files your training script depends on.
```
import os
# Local staging directory for everything shipped to the remote target.
project_folder = './pytorch-distr-hvd'
os.makedirs(project_folder, exist_ok=True)
```
Copy the training script `pytorch_horovod_mnist.py` into this project directory.
```
import shutil
# Stage the training script in the project directory for upload.
shutil.copy('pytorch_horovod_mnist.py', project_folder)
```
### Create an experiment
Create an [Experiment](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#experiment) to track all the runs in your workspace for this distributed PyTorch tutorial.
```
from azureml.core import Experiment
# Experiment groups all runs of this tutorial within the workspace.
experiment_name = 'pytorch-distr-hvd'
experiment = Experiment(ws, name=experiment_name)
```
### Create a PyTorch estimator
The AML SDK's PyTorch estimator enables you to easily submit PyTorch training jobs for both single-node and distributed runs. For more information on the PyTorch estimator, refer [here](https://docs.microsoft.com/azure/machine-learning/service/how-to-train-pytorch).
```
from azureml.train.dnn import PyTorch

# Distributed run: 2 nodes, 1 worker per node, launched via MPI/Horovod
# on GPU VMs. The estimator installs PyTorch/Horovod dependencies itself.
estimator = PyTorch(source_directory=project_folder,
                    compute_target=compute_target,
                    entry_script='pytorch_horovod_mnist.py',
                    node_count=2,
                    process_count_per_node=1,
                    distributed_backend='mpi',
                    use_gpu=True)
```
The above code specifies that we will run our training script on `2` nodes, with one worker per node. In order to execute a distributed run using MPI/Horovod, you must provide the argument `distributed_backend='mpi'`. Using this estimator with these settings, PyTorch, Horovod and their dependencies will be installed for you. However, if your script also uses other packages, make sure to install them via the `PyTorch` constructor's `pip_packages` or `conda_packages` parameters.
### Submit job
Run your experiment by submitting your estimator object. Note that this call is asynchronous.
```
# Submit the estimator; this call returns immediately (asynchronous).
run = experiment.submit(estimator)
print(run.get_details())
```
### Monitor your run
You can monitor the progress of the run with a Jupyter widget. Like the run submission, the widget is asynchronous and provides live updates every 10-15 seconds until the job completes.
```
from azureml.train.widgets import RunDetails
# Live-updating Jupyter widget tracking the run's progress.
RunDetails(run).show()
```
Alternatively, you can block until the script has completed training before running more code.
```
# Block until training finishes, streaming the log to the notebook.
run.wait_for_completion(show_output=True) # this provides a verbose log
```
| github_jupyter |
```
# Read the raw DNA sequence and strip line endings.
inputfile = 'dna.txt'
f = open(inputfile, 'r')
seq = f.read()
# Strings are immutable objects, so each replace builds a new string.
seq = seq.replace('\n', '')
seq = seq.replace('\r', '')
# Codon table: each 3-nucleotide codon maps to a 1-letter amino acid;
# '_' marks a stop codon.
table = {
    'ATA':'I', 'ATC':'I', 'ATT':'I', 'ATG':'M',
    'ACA':'T', 'ACC':'T', 'ACG':'T', 'ACT':'T',
    'AAC':'N', 'AAT':'N', 'AAA':'K', 'AAG':'K',
    'AGC':'S', 'AGT':'S', 'AGA':'R', 'AGG':'R',
    'CTA':'L', 'CTC':'L', 'CTG':'L', 'CTT':'L',
    'CCA':'P', 'CCC':'P', 'CCG':'P', 'CCT':'P',
    'CAC':'H', 'CAT':'H', 'CAA':'Q', 'CAG':'Q',
    'CGA':'R', 'CGC':'R', 'CGG':'R', 'CGT':'R',
    'GTA':'V', 'GTC':'V', 'GTG':'V', 'GTT':'V',
    'GCA':'A', 'GCC':'A', 'GCG':'A', 'GCT':'A',
    'GAC':'D', 'GAT':'D', 'GAA':'E', 'GAG':'E',
    'GGA':'G', 'GGC':'G', 'GGG':'G', 'GGT':'G',
    'TCA':'S', 'TCC':'S', 'TCG':'S', 'TCT':'S',
    'TTC':'F', 'TTT':'F', 'TTA':'L', 'TTG':'L',
    'TAC':'Y', 'TAT':'Y', 'TAA':'_', 'TAG':'_',
    'TGC':'C', 'TGT':'C', 'TGA':'_', 'TGG':'W',
}
table['CAA']
table["GCC"]
table['CCT']
#create a translate function
#check that the sequence length is divisible by 3
# loop over the sequence
#extract a single codon
#look up the codon and store the result
#modulo example, to check if the sequence length is divisible by 3
7/3
6/3
7%3
6%3
len(seq)
len(seq)%3
#check for s in seq
for s in seq:
    print(s)
seq[0:3]
seq[3:6]
seq[6:9]
# use the sequence 0->3, 3->6, 6->9 use range
range(0,11,3)
list(range(0,11,3))
# Inline translation: only attempt it when the length divides into codons.
if len(seq) % 3 == 0:
    protein = ''
    for i in range(0, len(seq), 3):
        codon = seq[i:i+3]
        protein += table[codon]
#create a translate function
def translate(seq):
    ''' Translate a string containing a nucleotide sequence
    into a string containing the corresponding sequence of
    amino acids. Nucleotides are translated in triplets
    using the table dictionary; each amino acid is encoded
    with a string of length 1 ('_' marks a stop codon).

    Returns '' when len(seq) is not a multiple of 3 (the
    original raised UnboundLocalError in that case). '''
    table = {
        'ATA':'I', 'ATC':'I', 'ATT':'I', 'ATG':'M',
        'ACA':'T', 'ACC':'T', 'ACG':'T', 'ACT':'T',
        'AAC':'N', 'AAT':'N', 'AAA':'K', 'AAG':'K',
        'AGC':'S', 'AGT':'S', 'AGA':'R', 'AGG':'R',
        'CTA':'L', 'CTC':'L', 'CTG':'L', 'CTT':'L',
        'CCA':'P', 'CCC':'P', 'CCG':'P', 'CCT':'P',
        'CAC':'H', 'CAT':'H', 'CAA':'Q', 'CAG':'Q',
        'CGA':'R', 'CGC':'R', 'CGG':'R', 'CGT':'R',
        'GTA':'V', 'GTC':'V', 'GTG':'V', 'GTT':'V',
        'GCA':'A', 'GCC':'A', 'GCG':'A', 'GCT':'A',
        'GAC':'D', 'GAT':'D', 'GAA':'E', 'GAG':'E',
        'GGA':'G', 'GGC':'G', 'GGG':'G', 'GGT':'G',
        'TCA':'S', 'TCC':'S', 'TCG':'S', 'TCT':'S',
        'TTC':'F', 'TTT':'F', 'TTA':'L', 'TTG':'L',
        'TAC':'Y', 'TAT':'Y', 'TAA':'_', 'TAG':'_',
        'TGC':'C', 'TGT':'C', 'TGA':'_', 'TGG':'W',
    }
    # Bug fix: initialise before the divisibility check so a sequence whose
    # length is not a multiple of 3 returns '' instead of crashing with
    # UnboundLocalError at the return statement.
    protein = ''
    #check that the sequence length is divisible by 3
    if len(seq) % 3 == 0:
        # loop over the sequence one codon (3 nucleotides) at a time
        for i in range(0, len(seq), 3):
            #extract a single codon
            codon = seq[i:i+3]
            #look up the codon and store the result
            protein += table[codon]
    return protein
# Quick checks of the function, then load and clean a second sequence file.
translate('ATA')
translate('AAA')
help(translate)
inputfile = 'dna_2.txt'
f = open(inputfile, 'r')
seq = f.read()
# Strings are immutable objects, so each replace builds a new string.
seq = seq.replace('\n', '')
seq = seq.replace('\r', '')
```
#### Translating the DNA Sequence: Question 1
```table``` is defined as in Video 3.1.4. The file ```table.py``` is available for download below the video. What is ```table["GCC"]```?
- **A**
- C
- L
- Y
#### Translating the DNA Sequence: Question 2
What is 138 % 13 ?
- 138
- 18
- 13
- **8**
- 3
#### Translating the DNA Sequence: Question 3
Open a session of Python and follow the instructions in Video 3.1.2 to read in the NCBI DNA sequence with the accession number NM_207618.2 and store as seq. What does seq[40:50] return?
```
# Inspect the loaded sequence and the slice asked about in Question 3.
seq
seq[40:50]
```
#### Translating the DNA Sequence: Question 4
What is a docstring?
- A string containing documentation for core Python, available at python.org
- **A string that describes details about a module, function, class, or method accessed by help()**
- An object type from the docstring library that contains useful functions for annotating strings
| github_jupyter |
## Bayesian Analysis of RCT Results
### C.V. Cosgriff
The [ANDROMEDA-SHOCK trial](https://jamanetwork.com/journals/jama/fullarticle/2724361) was recently published and has been a point of controversy. Despite results suggesting that capillary refill is safe, if not beneficial, when guiding treatment as compared to lactate level, the trial was deemed _negative_ based on the null-hypothesis testing framework that is standard across the current research literature.
A rich discussion ensued online and Dan Lane demonstrated a Bayesian analysis of the result [here](https://discourse.datamethods.org/t/andromeda-shock-or-how-to-intepret-hr-0-76-95-ci-0-55-1-02-p-0-06/1349/20) in `R` based on methods from [this paper](https://doi.org/10.1016/j.jclinepi.2008.07.006).
This notebook provides a `Python` implementation of Dan's code, examining the [ANDROMEDA-SHOCK trial](https://jamanetwork.com/journals/jama/fullarticle/2724361), and then applies the same approach to the [EOLIA trial](https://www.nejm.org/doi/full/10.1056/NEJMoa1800385). The latter has a more thorough Bayesian analysis published [here](https://www.ncbi.nlm.nih.gov/pubmed/30347031). The goal of this notebook is to provide clinicians keen on using this approach to interpret clinical trial results with the requisite code.
```
import numpy as np
import pandas as pd
from scipy.stats import norm
import matplotlib.pyplot as plt
import seaborn as sns
# Seaborn styling plus inline, high-resolution figures for the notebook.
sns.set()
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
```
Using [Dan's post](https://discourse.datamethods.org/t/andromeda-shock-or-how-to-intepret-hr-0-76-95-ci-0-55-1-02-p-0-06/1349/20) and the appendix of [this paper](https://doi.org/10.1016/j.jclinepi.2008.07.006) as a guide, we implement functions for (1) representing a dichotomous outcome as normally distributed likelihood, (2) for representing the time-to-event outcome as a normally distributed likelihood, and (3) for calculating the posterior distribution from the prior and likelihood. The primary limitation of this method is the assumption of normal distribution of the parameters.
The definitions for the priors are also taken from [that paper](https://doi.org/10.1016/j.jclinepi.2008.07.006). Briefly, the skeptical prior has a mean of 0 and boundaries such that there is a 5% probability of exceeding the treatment effect that was assumed by the study investigators during sample size estimation, and the enthusiastic prior has a mean equal to the treatment effect assumed by the study investigators during sample size estimation and boundaries such that there is a 5% probability of no benefit.
_The code in function_ `get_prior` _may be modified to include other priors._
```
def odds_ratio(n, a, b):
    '''
    Compute an odds ratio with a 0.5 continuity correction in every cell.

    Input: n, sample size per arm; float64
           a, the number of those receiving the intervention
           and experiencing the outcome; float64
           b, the number of those not receiving the intervention
           (control) and experiencing the outcome; float64
    Returns: An odds ratio; float64
    '''
    non_events_intervention = n - a
    non_events_control = n - b
    numerator = (a + 0.5) * (non_events_control + 0.5)
    denominator = (b + 0.5) * (non_events_intervention + 0.5)
    return numerator / denominator
def get_prior(MCID, prior_type='skeptical'):
    '''
    Build a normal prior on the log-odds scale from the MCID.

    Definitions follow Journal of Clinical Epidemiology 62 (2009) 13-21:
    the skeptical prior is centred on no effect with a 5% probability of
    exceeding the MCID; the enthusiastic prior is centred on the MCID
    with a 5% probability of no benefit.

    Input: MCID, an odds ratio representing the minimally clinically
           interesting difference; float64
           prior_type, 'skeptical' or 'enthusiastic'; str, default 'skeptical'
           (the docstring previously advertised an unimplemented
           'uninformative' option).
    Returns: (theta, sd) representing the prior; tuple of float64
    Raises: ValueError for an unrecognised prior_type — the original
            silently returned None, which would crash later in get_posterior.
    '''
    if (prior_type == 'skeptical'):
        return (0, np.log(MCID)/norm.ppf(0.05))
    if (prior_type == 'enthusiastic'):
        return (np.log(MCID), (np.log(1.05) - np.log(MCID))/norm.ppf(0.975))
    raise ValueError("prior_type must be 'skeptical' or 'enthusiastic', got %r" % (prior_type,))
def tte_to_likelihood(hazard_ratio, uc):
    '''
    Express a reported hazard ratio (HR) and the upper bound of its 95% CI
    as a normal likelihood on the log-hazard scale.

    Inputs: hazard ratio, upper 95% CI; both float64
    Returns: (theta, sd); a tuple of float64
    '''
    log_hr = np.log(hazard_ratio)
    log_upper = np.log(uc)
    # Back out the sd from the distance between the point estimate and the
    # upper CI bound (1.96 sd on the log scale).
    return (log_hr, (log_upper - log_hr) / norm.ppf(0.975))
def get_posterior(prior, likelihood):
    '''
    Conjugate normal update: combine a normal prior and normal likelihood
    into the normal posterior via precision weighting.

    Inputs: prior and likelihood; both a tuple of (theta, sd) of type float64
    Returns: tuple(theta, sd); a tuple of float64
    '''
    prior_theta, prior_sd = prior
    L_theta, L_sd = likelihood
    prior_precision = 1 / prior_sd**2
    L_precision = 1 / L_sd**2
    total_precision = prior_precision + L_precision
    post_theta = (prior_theta * prior_precision + L_theta * L_precision) / total_precision
    post_sd = np.sqrt(1 / total_precision)
    return (post_theta, post_sd)
```
We'll also write a function for summarizing the result and generating plots for the prior, likelihood, and posterior distributions.
```
def summarize_distribution(distribution):
    '''
    Takes a posterior distribution and provides the median and 95% certainty
    interval, plus the probability of any benefit (HR < 1.0).

    Input: (theta, sd) representing the distribution; tuple of float64
    Return: None
    '''
    theta, sd = distribution
    quantiles = {' 2.5%': 0.025, '50.0%': 0.5, '97.5%': 0.975}
    dist_summary = pd.DataFrame(
        {label: [np.exp(norm.ppf(q, theta, sd))] for label, q in quantiles.items()},
        index=['Summaries'])
    display(dist_summary)
    # P(HR < 1) equals P(log HR < 0) under the normal model.
    prob_benefit = norm.cdf(0, theta, sd)
    print('The probability of any benefit (HR < 1.0) is: {0}'.format(round(prob_benefit, 3)))
def plot_bayes(prior, likelihood, posterior, title=None, save_file=False):
    '''
    Plots the prior, likelihood, and posterior distributions.
    Inputs: prior, likelihood, and posterior distribution; tuples of (theta, sd) both type float64
    title; string of text for title of plot
    save_file; boolean. if True saves the file as plot_bayes.svg
    Returns: None
    '''
    (prior_theta, prior_sd) = prior
    (L_theta, L_sd) = likelihood
    (post_theta, post_sd) = posterior
    # Evaluate each density on a shared grid of log hazard ratios.
    # (The original assigned mu_plot twice with identical values; the
    # duplicate has been removed.)
    mu_plot = np.arange(-2, 2, 0.001)
    prior_dist = norm.pdf(mu_plot, prior_theta, prior_sd)
    likelihood_dist = norm.pdf(mu_plot, L_theta, L_sd)
    posterior_dist = norm.pdf(mu_plot, post_theta, post_sd)
    figure = plt.figure(figsize=(8, 8))
    # NOTE(review): the y-values are exponentiated along with the x-values,
    # matching the original behavior; confirm this display transform is
    # intended rather than a change-of-variables density.
    sns.lineplot(x=np.exp(mu_plot), y=np.exp(prior_dist), label='Prior')
    sns.lineplot(x=np.exp(mu_plot), y=np.exp(likelihood_dist), label='Likelihood')
    sns.lineplot(x=np.exp(mu_plot), y=np.exp(posterior_dist), label='Posterior')
    plt.xlim((0,2))
    plt.xlabel('Hazard Ratio')
    plt.ylabel('Probability Density')
    if title is not None:
        plt.title(title)
    if save_file:
        plt.savefig('./plot_bayes.svg')
```
### Example 1: ANDROMEDA-SHOCK
In the ANDROMEDA-SHOCK trial the hazard ratio was 0.75 and the upper bound of the 95% confidence interval was 1.02; we use these to construct a likelihood.
```
L = tte_to_likelihood(0.75, 1.02)
```
We then use the numbers provided by the authors for the power calculation to express the minimally clinically interesting difference (MCID) as an odds ratio and use it to construct a skeptical prior.
```
n = 420 # Sample size
a = 0.3 * n # Intervention and Outcome
b = 0.45 * n # Control and Outcome
MCID = odds_ratio(n, a, b)
prior = get_prior(MCID, prior_type='skeptical')
```
We then use the `get_posterior` function to combine the prior and likelihood and visualize the result.
```
posterior = get_posterior(prior, L)
plot_bayes(prior, L, posterior, title='ANDROMEDA-SHOCK, Skeptical')
```
We then print the median and 95% certainty interval.
```
summarize_distribution(posterior)
```
We carry out the same calculations using an enthusiastic prior.
```
prior = get_prior(MCID, prior_type='enthusiastic')
posterior = get_posterior(prior, L)
plot_bayes(prior, L, posterior, title='ANDROMEDA-SHOCK, Enthusiastic')
summarize_distribution(posterior)
```
### Example 2: EOLIA
The EOLIA trial examined ECMO vs. ventilation alone and the result was determined to be a $HR = 0.76 [0.55, 1.04]$ for the outcome of death at 60 days from randomization.
```
L = tte_to_likelihood(0.76, 1.04)
```
Again, we use the provided numbers from the power calculation to derive the minimally clinically interesting difference (MCID) expressed as an odds ratio for constructing the priors; we first use a skeptical prior.
```
n = 331 # Sample size
a = 0.4 * n # Intervention and Outcome
b = 0.6 * n # Control and Outcome
MCID = odds_ratio(n, a, b)
prior = get_prior(MCID, prior_type='skeptical')
```
Then, as above, we determine the posterior and visualize the result.
```
posterior = get_posterior(prior, L)
plot_bayes(prior, L, posterior, title='EOLIA, Skeptical')
summarize_distribution(posterior)
```
And now with an enthusiastic prior.
```
prior = get_prior(MCID, prior_type='enthusiastic')
posterior = get_posterior(prior, L)
plot_bayes(prior, L, posterior, title='EOLIA, Enthusiastic')
summarize_distribution(posterior)
```
These examples may serve as a template for analyzing other RCTs with the approach suggested by [Wijeysundera et al.](https://doi.org/10.1016/j.jclinepi.2008.07.006).
| github_jupyter |
```
# We don't technically need this but it avoids a warning when importing pysis
import os
os.environ['ISISROOT'] = '/usgs/cpkgs/anaconda3_linux/envs/isis3.9.0'
```
<a id='toc'></a>
# AutoCNet Intro
As mentioned earlier AutoCNet is a method for storing control networks and has outlier detection functionality. AutoCNet also contains a suite of functions that parallelize network generation, leveraging and complementing ISIS processing. The advantage of AutoCNet network generation is it takes advantage of elementwise cluster processing (these elements can be images, points, measures, etc.) and postgresql for data storage and quick relational querying.
In this notebook we are going to step through the network generation process in AutoCNet!
For Quick Access:
- [Load and apply configuration file](#configuration)
- [Ingest images and calculate overlaps](#ingest)
- [Distribute points in overlaps](#distribute)
- [Subpixel register points](#registration)
### Grab the Image Data
We are going to process Kaguya Terrain Camera (TC) images surrounding the Reiner Gamma Lunar Swirl (4.9° - 9.9° N Planetocentric Latitude and 61.3° - 56.3° W Longitude). The data is located in '/scratch/ladoramkershner/moon/kaguya/workshop/original/', please use the cell below to copy the data into a directory of your choosing.
```
import getpass
uid = getpass.getuser()
output_directory = f'/scratch/ladoramkershner/FY21_autocnet_workshop/workshop_scratch/{uid}' # put output directory path as string here
print(output_directory)
# copy over the data to the 'lvl1' subdirectory
!mkdir -p $output_directory/lvl1/
!cp -p /scratch/ladoramkershner/moon/kaguya/workshop/original/*cub $output_directory/lvl1/
```
We need to create a list of the cubes, to feed into AutoCNet. It is important that the cube list handed to AutoCNet contain **absolute** paths, as they will serve as an accessor for loading information from the cubes later.
```
!ls $output_directory/lvl1/*cub > $output_directory/cubes.lis
!head $output_directory/cubes.lis
```
<a id='configuration'></a>
# Parse the Configuration File
[Return To Top](#toc)
The configuration parameters are typically held in a configuration yaml file. A configuration file has been compiled for use internal to the USGS ASC facilities leveraging a shared cluster and database. Use AutoCNet's function 'parse_config' to read in the yaml file and output a dictionary variable.
```
from autocnet.config_parser import parse_config
config_path = '/scratch/ladoramkershner/FY21_autocnet_workshop/config_moon.yml'
config = parse_config(config_path)
```
The config is a nested dictionary, meaning it has a larger dictionary structure defining sections for the services above and then each service section is a dictionary defining the particular configuration parameters.
```
import numpy as np
print('configuration dictionary keys: ')
print(np.vstack(list(config.keys())), '\n')
print('cluster configuration dictionary keys: ')
print(np.vstack(list(config['cluster'].keys())))
```
Although the configuration file is set up for internal use, some fields need to be altered to point to user specific areas or unique strings.
```
config['cluster']['cluster_log_dir'] = f'/scratch/ladoramkershner/FY21_autocnet_workshop/workshop_scratch/{uid}/logs'
config['database']['name'] = f'workshop_{uid}_kaguyatc_reinergamma'
config['redis']['basename'] = f'{uid}_queue'
config['redis']['completed_queue'] = f'{uid}_queue:comp'
config['redis']['processing_queue'] = f'{uid}_queue:proc'
config['redis']['working_queue'] = f'{uid}_queue:work'
default_log = config['cluster']['cluster_log_dir']
print(f'your log directory: {default_log}')
print('your database name:', config['database']['name'])
```
### Create the NetworkCandidateGraph
The NetworkCandidateGraph (NCG) class can be instantiated to an object without any arguments. However, this NCG object requires configuration before it can be used for any meaningful work, so we have to run 'config_from_dict'.
```
from autocnet.graph.network import NetworkCandidateGraph
ncg = NetworkCandidateGraph()
ncg.config_from_dict(config)
ncg.from_database()
```
<a id="ingest"></a>
# Ingest Image Data and Calculate Overlaps
[Return To Top](#toc)
At this point our ncg variable is empty, so if we try to plot the contents we will get an empty plot.
```
ncg.plot()
```
We need to load the images into the ncg using 'add_from_filelist', which loads the images from a passed in list and then calculates the overlaps.
```
filelist = f'{output_directory}/cubes.lis' # this should contain absolute paths
ncg.add_from_filelist(filelist)
```
Now when we plot the ncg, we see the undirected graph, where the circles are the nodes/images and the lines are the edges/overlaps. The Kaguya TC data has a very regular overlap pattern in this area, seen by the large number of edges shared between nodes.
```
ncg.plot()
```
We have access to the image data through the ncg, but the ncg does not persist after the notebook is shut down. To persist the network, AutoCNet leverages a database for the storage of the networks images, points, and measures. The ncg has access to this database through the ncg's 'session_scope'. Through the session_scope you can interact and execute queries on your database in pure SQL.
```
# Count images and overlaps in the database via raw SQL through the
# session scope.
with ncg.session_scope() as session:
    img_count = session.execute("SELECT COUNT(*) FROM images").fetchall()
    overlap_count = session.execute("SELECT COUNT(*) FROM overlay").fetchall()
    print('   Number of images in database: ', img_count[0][0])
    # fetchall() returns a list of rows; index it like img_count above so a
    # bare count is printed rather than the raw result list.
    print('Number of overlaps in database: ', overlap_count[0][0])
```
session.execute() is convenient if you are already familiar with pure sql commands, however, the return values are messy. The ncg.session_scope() leverages a python module called sqlalchemy which allows pythonic calls to your database with clean output.
```
from autocnet.io.db.model import Images, Overlay
with ncg.session_scope() as session:
img_count = session.query(Images).count()
overlap_count = session.query(Overlay).count()
print(' Number of images in database: ', img_count)
print('Number of overlaps in database: ', overlap_count)
```
Additionally, session.execute() can be inconvenient if working with the actual data contained within the tables. For example, to access certain information you need to know the index where that information exists.
```
with ncg.session_scope() as session:
img = session.execute("SELECT * FROM images LIMIT 1").fetchall()
print('image index: ', img[0][0])
print('product id: ', img[0][1])
print('image path: ', img[0][2])
print('image serial number: ', img[0][3])
print('image ignore flag: ', img[0][4])
print('image geom: ', img[0][5]) # only uncomment after looking at other output
print('image camera type: ', img[0][7])
```
However, if the structure of the database changes (order of the columns or a column is added/removed) or you cannot remember the order of the columns, working with the database data in this way can be very inconvenient. So AutoCNet built models for each database table to help interface with them.
```
from autocnet.io.db.model import Measures, Points
with ncg.session_scope() as session:
img = session.query(Images).first()
print('image index: ', img.id)
print('product id: ', img.name)
print('image path: ', img.path)
print('image serial number: ', img.serial)
print('image ignore flag: ', img.ignore)
print('image geometry: ', img.geom) # only uncomment after looking at other output
print('image camera type: ', img.cam_type)
print(dir(Images))
```
Accessing the information off of the img object is more intuitive as it is property based instead of index based. Additionally, if you uncommented the geom prints (in the two previous cells) you see that the raw database geometry is stored as a binary string while the Images.geom property is a shapely Multipolygon, which has more directly accessible latitude, longitude information. For example, to plot the geometry all we have to do is...
```
import matplotlib.pyplot as plt
n = 25
with ncg.session_scope() as session:
imgs = session.query(Images).limit(n)
fig, axs = plt.subplots(1, 1, figsize=(5,10))
axs.set_title(f'Footprints of First {n} Images in Database')
for img in imgs:
x,y = img.geom.envelope.boundary.xy # this call!
axs.plot(x,y)
```
<a id="distribute"></a>
# Place Points in Overlap
[Return To Top](#toc)
The next step in the network generation process is to lay down points in the image overlaps. Before dispatching the function to the cluster, we need to make the log directory from our configuration file. If a SLURM job is submitted with a log directory argument that does not exist, the job will fail.
```
ppio_log_dir = default_log.replace('logs', 'ppio_logs')
print('creating directory: ', ppio_log_dir)
if not os.path.exists(ppio_log_dir):
os.mkdir(ppio_log_dir)
```
We are going to use the 'place_points_in_overlap' function to lay the points down. For now we will use the default size and distribution arguments, but we need to change our camera type from the default 'csm' to 'isis'.
```
from autocnet.spatial.overlap import place_points_in_overlap
njobs = ncg.apply('spatial.overlap.place_points_in_overlap',
on='overlaps', # start of function kwargs
cam_type='isis',
walltime='00:30:00', # start of apply kwargs
log_dir=ppio_log_dir,
arraychunk=50)
print(njobs)
!squeue -u $uid | head # helpful to grab job array id
```
This function first evenly distributes points spatially into a given overlap, then it back-projects the points into the 'top' image. Once in image space, the function searches the area surrounding the measures to find interesting features to shift the measures to (this increases the chance of subpixel registration passing). The shifted measures are projected back to the ground and these updated longitudes and latitudes are used to propagate the points into all images associated with the overlap. So, this function requires:
- An overlap (to evenly distribute points into)
- Distribution kwargs (to decide how points are distributed into the overlap)
- Size of the area around the measure (to search for the interesting feature)
- Camera type (so it knows what to expect as inputs/output for the camera model)
Since this function operates independently on each overlap, it is ideal for parallelization with the cluster. Notice that we are not passing in a single overlap to the apply call, instead we pass "on = 'overlaps'". The 'on' argument indicates which element (image, overlap, point, measure) to apply the function.
```
with ncg.session_scope() as session:
noverlay = session.query(Overlay).count()
print(noverlay)
```
### Multiple Ways to Check Job Array Process
#### Log Files
As jobs are put on the cluster, their corresponding log files are created. You can check how many jobs have been/ are being processed on the cluster by looking in the log directory.
```
!ls $ppio_log_dir | head -5
```
As more logs are placed in the log directory, you will have to specify which array job's logs you are checking on. The naming convention of the log files generated by AutoCNet are 'path.to.function.function_name-jobid.arrayid_taskid.out'
```
jobid = '29521163' # put jobid int here
!ls $ppio_log_dir/*${jobid}_*.out | wc -l
```
#### Slurm Account
Using 'sacct' allows you to check the exit status of the tasks from your job array.
```
!sacct -j $jobid -s 'completed' | wc -l
!sacct -j $jobid -s 'failed' | wc -l
!sacct -j $jobid -s 'timeout' | wc -l
!sacct -j $jobid -s 'cancelled' | wc -l
```
The return of '2' from the word count on the 'failed', 'timeout', and 'cancelled' job accounts are the header lines
```
!sacct -j $jobid -s 'failed' | head
```
#### NCG Queue Length
The queue holds the job packages in json files called 'queue messages' until the cluster is ready for the job. You can view how many messages are left on the queue with the 'queue_length' NCG property.
```
print("jobs left on the queue: ", ncg.queue_length)
```
### When your squeue is empty
Sometimes jobs fail to submit to the cluster, it is prudent to check the ncg.queue_length AFTER your squeue is empty.
```
!squeue -u $uid
print("jobs left on the queue: ", ncg.queue_length)
```
When reapplying a function to the cluster, you do not need to resubmit the function arguments, because those were already serialized into the queue message. However, the cluster submission arguments can be reformatted and the 'reapply' argument should be set to 'True'.
```
# njobs = ncg.apply('spatial.overlap.place_points_in_overlap',
# chunksize=redis_orphans,
# arraychunk=None,
# walltime='00:20:00',
# log_dir=ppio_log_dir,
# reapply=True)
# print(njobs)
```
One advantage of using of a database for data storage is that it allows for storage of and therefore quick access of geometries and how those geometries relate with other elements' geometries.
```
from autocnet.io.db.model import Overlay, Points, Measures
from geoalchemy2 import functions
from geoalchemy2.shape import to_shape
# Join each overlap polygon to the points it contains (ST_Contains),
# restricted to the first few overlaps, then plot overlap envelopes in
# black with their contained points in grey.
with ncg.session_scope() as session:
    results = (
        session.query(
            Overlay.id,
            Overlay.geom.label('ogeom'),
            Points.geom.label('pgeom')
        )
        .join(Points, functions.ST_Contains(Overlay.geom, Points.geom)=='True')
        .filter(Overlay.id < 10)
        .all()
    )
# One row per (overlap, point) pair, so the length is the point count.
print('number of points: ', len(results))
fig, axs = plt.subplots(1, 1, figsize=(10,10))
axs.grid()
oid = []  # overlap ids already drawn, so each envelope is plotted only once
for res in results:
    if res.id not in oid:
        oid.append(res.id)
        # Convert WKB geometry to a shapely object before extracting coords.
        ogeom = to_shape(res.ogeom)
        ox, oy = ogeom.envelope.boundary.xy
        axs.plot(ox, oy, c='k')
    pgeom = to_shape(res.pgeom)
    px, py = pgeom.xy
    axs.scatter(px, py, c='grey')
```
Notice that the points are not in straight lines, this is because of the shifting place_points_in_overlap does to find interesting measure locations.
However, the default distribution of points in the overlaps looks sparse, so let’s rerun place_points_in_overlap with new distribution kwargs. Before rerunning place_point_in_overlap, the points and measures tables need to be cleared using ncg's 'clear_db' method.
```
from autocnet.io.db.model import Measures
with ncg.session_scope() as session:
npoints = session.query(Points).count()
print('number of points: ', npoints)
nmeas = session.query(Measures).count()
print('number of measures: ', nmeas)
ncg.clear_db(tables=['points', 'measures'])
with ncg.session_scope() as session:
npoints = session.query(Points).count()
print('number of points: ', npoints)
nmeas = session.query(Measures).count()
print('number of measures: ', nmeas)
```
The distribution argument for place_points_in_overlap requires two **function** inputs. Since overlaps are variable shapes and sizes, integers are not sufficient to determine proper gridding of all overlaps. Instead, the distribution of points along the N to S edge of the overlap and the E to W edge of the overlap are determined based on the edge's length and a grid is built from these edge distributions. This way a shorter edge will receive less points and a longer side will receive more points.
The default distribution functions are: <br />
nspts_func=lambda x: ceil(round(x,1)\*10) <br />
ewpts_func=lambda x: ceil(round(x,1)\*5) <br />
** NOTICE THE NS ACTUALLY GETS USED ON THE LONGER SIDE OF THE OVERLAP, NOT NECESSARILY THE NS SIDE**
```
from autocnet.cg.cg import distribute_points_in_geom
import matplotlib.pyplot as plt
def ns(x):
from math import ceil
return ceil(round(x,1)*15)
def ew(x):
from math import ceil
return ceil(round(x,1)*10)
total=0
with ncg.session_scope() as session:
srid = config['spatial']['latitudinal_srid']
overlaps = session.query(Overlay).filter(Overlay.geom.intersects(functions.ST_GeomFromText('LINESTRING(301.2 7.4, 303.7 7.4, 303.7 9.9, 301.2 9.9, 301.2 7.4)', srid))).all()
print('overlaps in selected area: ', len(overlaps))
for overlap in overlaps:
ox, oy = overlap.geom.exterior.xy
plt.plot(ox,oy)
valid = distribute_points_in_geom(overlap.geom, method='classic', nspts_func=ns, ewpts_func=ew, Session=session)
if valid:
total += len(valid)
px, py = list(zip(*valid))
plt.scatter(px, py, s=1)
print(' points in selected area: ', total)
distribute_points_kwargs = {'nspts_func':ns, 'ewpts_func':ew, 'method':'classic'}
njobs = ncg.apply('spatial.overlap.place_points_in_overlap',
on='overlaps', # start of function kwargs
distribute_points_kwargs=distribute_points_kwargs, # NEW LINE
cam_type='isis',
size=71,
walltime='00:30:00', # start of apply kwargs
log_dir=ppio_log_dir,
arraychunk=100)
print(njobs)
from autocnet.matcher.cpu_extractor import extract_most_interesting
extract_most_interesting?
```
Check the progress of your jobs
```
!squeue -u $uid | wc -l
!squeue -u $uid | head
```
Count number of jobs started by looking for generated logs
```
jobid = '29524102' # put jobid int here
! ls $ppio_log_dir/*$jobid* | wc -l
!sacct -j $jobid -s 'completed' | wc -l
!sacct -j $jobid -s 'failed' | wc -l
```
Check to see if the ncg redis queue is clear
```
redis_orphans = ncg.queue_length
print("jobs left on the queue: ", redis_orphans)
```
Reapply cluster job if there are still jobs left on the queue
```
# njobs = ncg.apply('spatial.overlap.place_points_in_overlap',
# chunksize=redis_orphans,
# arraychunk=None,
# walltime='00:20:00',
# log_dir=log_dir,
# reapply=True)
# print(njobs)
```
Visualize the new distribution
```
from autocnet.io.db.model import Overlay, Points, Measures
from geoalchemy2 import functions
from geoalchemy2.shape import to_shape
with ncg.session_scope() as session:
results = (
session.query(
Overlay.id,
Overlay.geom.label('ogeom'),
Points.geom.label('pgeom')
)
.join(Points, functions.ST_Contains(Overlay.geom, Points.geom)=='True')
.filter(Overlay.id < 10)
.all()
)
print('number of points: ', len(results))
fig, axs = plt.subplots(1, 1, figsize=(10,10))
axs.grid()
oid = []
for res in results:
if res.id not in oid:
oid.append(res.id)
ogeom = to_shape(res.ogeom)
ox, oy = ogeom.envelope.boundary.xy
axs.plot(ox, oy, c='k')
pgeom = to_shape(res.pgeom)
px, py = pgeom.xy
axs.scatter(px, py, c='grey')
```
<a id="registration"></a>
# Subpixel Registration
[Return To Top](#toc)
After laying down points, the next step is to subpixel register the measures on those points, to do this we are going to use the 'subpixel_register_point' function. As the name suggests, 'subpixel_register_point' registers the measures on a single point, which makes it parallelizable. Before we fire off the cluster jobs, let's create a new subpixel registration log directory.
```
subpix_log_dir = default_log.replace('logs', 'subpix_logs')
print('creating directory: ', subpix_log_dir)
if not os.path.exists(subpix_log_dir):
os.mkdir(subpix_log_dir)
```
## First Run
```
from autocnet.matcher.subpixel import subpixel_register_point
?subpixel_register_point
# ncg.apply?
subpixel_template_kwargs = {'image_size':(81,81), 'template_size':(51,51)}
njobs = ncg.apply('matcher.subpixel.subpixel_register_point',
on='points', # start of function kwargs
match_kwargs=subpixel_template_kwargs,
geom_func='simple',
match_func='classic',
cost_func=lambda x,y:y,
threshold=0.6,
verbose=False,
walltime="00:30:00", # start of apply kwargs
log_dir=subpix_log_dir,
arraychunk=200,
chunksize=20000) # maximum chunksize = 20,000
print(njobs)
```
Check the progress of your jobs
```
!squeue -u $uid | head
```
This function chooses a reference measure, affinely transforms the other images to the reference image, and clips an 'image' chip out of the reference image and a 'template' chip out of the transformed images. The template chips are marched across the image chip and the maximum correlation value and location is saved.
The solution is then evaluated to see if the maximum correlation solution is acceptable. The evaluation is done using the 'cost_func' and 'threshold' arguments. The cost_func is dependent on two independent variables, the first is the distance that a point has shifted from the starting location and the second is the correlation coefficient coming out of the template matcher. The __order__ that these variables are passed in __matters__. We are not going to consider the distance the measures were moved and just look at the maximum correlation value returned by the matcher. So our function is simply: $y$.
If the cost_func solution is greater than the threshold value, the registration is successful and the point is updated. If not, the registration is unsuccessful, the point is not updated and is set to ignore.
So, 'subpixel_register_point' requires the following arguments:
- pointid
- match_kwargs (image size, template size)
- cost_func
- threshold
```
from autocnet.matcher.subpixel import subpixel_register_point
subpixel_register_point?
```
Count number of jobs started by looking for generated logs
```
jobid = '29525915' # put jobid int here
! ls $subpix_log_dir/*$jobid* | wc -l
!sacct -j $jobid -s 'completed' | wc -l
!sacct -j $jobid -s 'failed' | wc -l
```
Check to see if the ncg redis queue is clear
```
redis_orphans = ncg.queue_length
print("jobs left on the queue: ", redis_orphans)
```
Reapply cluster job if there are still jobs left on the queue
```
# job_array = ncg.apply('matcher.subpixel.subpixel_register_point',
# reapply=True,
# chunksize=redis_orphans,
# arraychunk=None,
# walltime="00:30:00",
# log_dir=subpix1_log_dir)
# print(job_array)
```
### Visualize Point Registration
```
from autocnet.io.db.model import Images
from plio.io.io_gdal import GeoDataset
from autocnet.transformation import roi
from autocnet.utils.utils import bytescale
with ncg.session_scope() as session:
measures = session.query(Measures).filter(Measures.template_metric < 0.8, Measures.template_metric!=1).limit(15)
for meas in measures:
pid = meas.pointid
source = session.query(Measures, Images).join(Images, Measures.imageid==Images.id).filter(Measures.pointid==pid, Measures.template_metric==1).all()
s_img = GeoDataset(source[0][1].path)
sx = source[0][0].sample
sy = source[0][0].line
destination = session.query(Measures, Images).join(Images, Measures.imageid==Images.id).filter(Measures.pointid==pid, Measures.template_metric!=1).limit(1).all()
d_img = GeoDataset(destination[0][1].path)
dx = destination[0][0].sample
dy = destination[0][0].line
image_size = (121,121)
template_size = (61,61)
s_roi = roi.Roi(s_img, sx, sy, size_x=image_size[0], size_y=image_size[1])
s_image = bytescale(s_roi.clip())
d_roi = roi.Roi(d_img, dx, dy, size_x=image_size[0], size_y=image_size[1])
d_template = bytescale(d_roi.clip())
fig, axs = plt.subplots(1, 2, figsize=(20,10));
axs[0].imshow(s_image, cmap='Greys');
axs[0].scatter(image_size[0], image_size[1], c='r')
axs[0].set_title('Reference');
axs[1].imshow(d_template, cmap='Greys');
axs[1].scatter(image_size[0], image_size[1], c='r')
axs[1].set_title('Template');
```
## Second run
We are going to rerun the subpixel registration with larger chips to attempt to register the measures that failed first run. 'subpixel_register_point' is set up so subsequent runs can use filters which only runs the function on points with a certain property value (e.g.: points where ignore=true). It can also be rerun on all points, if this is done AutoCNet checks for a previous subpixel registration result, if the new result is better the point is updated, if the previous result is better the point is left alone.
```
ncg.apply?
subpixel_template_kwargs = {'image_size':(221,221), 'template_size':(81,81)}
filters = {'ignore': 'true'}
# query = "SELECT UNIQUE(measures.pointid) FROM measures WHERE template_metric < 0.65"
njobs = ncg.apply('matcher.subpixel.subpixel_register_point',
on='points', # start of function kwargs
# filters=filters, ##### NEW LINE
query_string=query,
match_kwargs=subpixel_template_kwargs,
geom_func='simple',
match_func='classic',
cost_func=lambda x,y:y,
threshold=0.6,
verbose=False,
walltime="00:30:00", # start of apply kwargs
log_dir=subpix_log_dir,
arraychunk=50,
chunksize=20000) # maximum chunksize = 20,000
print(njobs)
```
Check the progress of your jobs
```
! squeue -u $uid | wc -l
! squeue -u $uid | head
```
Count number of jobs started by looking for generated logs
```
jobid = '' # put jobid int here
! ls $log_dir/*$jobid* | wc -l
```
Check to see if the ncg redis queue is clear
```
redis_orphans = ncg.queue_length
print("jobs left on the queue: ", redis_orphans)
```
Reapply cluster job if there are still jobs left on the queue
```
# njobs = ncg.apply('matcher.subpixel.subpixel_register_point',
# reapply = True,
# walltime="00:30:00",
# log_dir='/scratch/ladoramkershner/mars_quads/oxia_palus/subpix2_logs/',
# arraychunk=50,
# chunksize=20000) # maximum chunksize = 20,000
# print(njobs)
```
### subpix2: Write out Network
At this point you write out the network to begin work bundling the network!
```
cnet = 'reiner_gamma_morning_ns7_ew5_t121x61_t221x81.net'
ncg.to_isis(os.path.join(output_directory,cnet))
```
| github_jupyter |
# Using PowerShell with Intersight
If you are familiar or proficient with PowerShell or PowerShell Core, you can take advantage of Intersight modules available for download from the [PowerShell Gallery](https://www.powershellgallery.com/). Enter the word `Intersight` in `Search PowerShell packages:` and click the resulting link produced by the search named `Intersight.PowerShell`.

Notice the `Install Module -Name Intersight.PowerShell` command. That's the command used to install the module in your PowerShell environment.
> Be sure you are in PowerShell itself before attempting to install the `Intersight.PowerShell` module. In PowerShell Core, the command to start PowerShell is `pwsh`
```
pwsh
Install-Module -Name Intersight.PowerShell
```
## Working with the PowerShell Intersight module
Now that Intersight PowerShell module is installed, let's use it to connect to our Intersight environment. But first, you'll need credentials, such as your private key, from your Intersight account.
### Authenticate the user
The private key is available in your Intersight account. Copy/paste the values and add them to a blank file named `SecretKey.txt` and make it available in the same directory as this PowerShell file. Also, get a hold of your API key and copy/paste the value into the `api_key_id` variable below.
> The values below are fictitious and only serve as an example so they won't work in the real world. Be sure to enter the values for your account in the `ApiKeyId` variable below as well as the secret key in the `SecretKey.txt` otherwise the following steps will not work.
```
$onprem = @{
    BasePath = "https://intersight.com"
    ApiKeyId = "omitted on purpose add yours here"
    ApiKeyFilePath = Resolve-Path('/Users/delgadm/Documents/intersight/intersight-powershell/SecretKey.txt')
    # The Set-IntersightConfiguration parameter is HttpSigningHeader; the
    # original key was misspelled "HttpSingingHeader", so splatting this
    # hash would fail with an unknown-parameter error.
    HttpSigningHeader = @("(request-target)", "Host", "Date", "Digest")
}
```
### See available commands
Now that you are authenticated, let's explore the list of available commands with the command `Get-Command -Module Intersight.Powershell`. There are many commands and to see what each one does along with any dependencies, visit [CiscoDevNet/intersight-powershell in the docs directory](https://github.com/CiscoDevNet/intersight-powershell/tree/master/docs) on GitHub.
```
Get-Command -Module Intersight.Powershell
```
### Get a list of physical compute inventory
One simple yet useful example is that of retrieving physical inventory claimed in Intersight. Earlier we assigned a hash with key, value pairs containing the authentication credentials required for each PowerShell session.
#### Set Intersight configuration
To set the Intersight configuration, use the command `Set-IntersightConfiguration @onprem` where `@onprem` contains the hash set 2 steps earlier.
```
Set-IntersightConfiguration @onprem
```
#### Get Intersight configuration
You can inspect the configuration settings after they are set with the `Set-IntersightConfiguration` command with `Get-IntersightConfiguration`.
#### Getting inventory of physical compute devices claimed in Intersight
The command `Get-IntersightComputePhysicalSummary` returns all the physical compute devices claimed in Intersight along with details about each one.
```
Get-IntersightComputePhysicalSummary
Get-IntersightComputePhysicalSummary
```
#### Using help to learn more about a command
There are a number of parameters you can pass to the command but for simplicity we run the command without parameters and inspect the output. As mentioned earlier, the documentation available on GitHub provides a description of each command but you can also get information with the `help Get-IntersightComputePhysicalSummary` command.
```
help Get-IntersightComputePhysicalSummary
```
#### Getting the number of physical compute devices claimed in Intersight
One way to get a list of physical devices claimed in Intersight is to add `-InlineCount allpages` as a parameter to the `Get-IntersightComputePhysicalSummary` command.
```
Get-IntersightComputePhysicalSummary -InlineCount allpages
```
### Final result - get a list of physical compute devices claimed in Intersight
Alas, we reach the goal of this exercise which is to display each physical device claimed in Intersight along with details such as the available memory, serial number, and its managed mode.
> Intersight Managed Mode (IMM) is a new architecture that manages the UCS Fabric Interconnected systems through a Redfish-based standard model. If you are familiar with the UCS blades, it means the Fabric Interconnect is fully managed by Intersight. Instead of having the familiar UCSM (UCS Manager) interface available directly from the Fabric Interconnect, the interface and all of the Fabric Interconnect operations are managed by Intersight.
```
Get-IntersightComputePhysicalSummary | select ChassisId,Serial,ManagementMode,Model,AvailableMemory,CpuCapacity
```
| github_jupyter |
# Best Options to Invest
## 1. Exploratory Data Analysis
### 1.1. Location Data
```
import pandas as pd
import numpy as np
from scipy.stats import pearsonr
from scipy.spatial.distance import cdist
from sklearn import preprocessing
from sklearn.cluster import KMeans
import folium
import requests
import matplotlib.cm as cm
import matplotlib.colors as colors
from matplotlib import pyplot as plt
import seaborn as sns
# downloading the data in csv format
! wget -q -O 'ca_counties_location.csv' https://data.edd.ca.gov/api/views/bpwh-bcb3/rows.csv?accessType=DOWNLOAD
ca_counties_df = pd.read_csv('ca_counties_location.csv')
ca_counties_df.head()
# dropping the unnecessary column
ca_counties = ca_counties_df.drop('the_geom', axis=1)
# renaming a column
ca_counties.rename(columns={'Name': 'County'}, inplace=True)
ca_counties.head()
ca_counties.shape
```
As expected, there are 58 rows in the counties' location dataframe.
A Map of the counties of California.
```
# generating a map of California
map_ca = folium.Map(location=[34.36159, -118.21698], zoom_start=6)
# add markers to map
for lat, lng, county in zip(ca_counties['Latitude'], ca_counties['Longitude'], ca_counties['County']):
label = '{}'.format(county)
label = folium.Popup(label, parse_html=True)
folium.CircleMarker(
[lat, lng],
radius=5,
popup=label,
color='#350091',
fill=True,
fill_color='#5e03fc',
fill_opacity=0.7,
parse_html=False).add_to(map_ca)
map_ca
```
### 1.2. Population Data
```
! wget -q -O 'ca_counties_population.csv' https://www.dropbox.com/s/rdi7kebqo9dxq5r/ca_population.csv?dl=1
population_df = pd.read_csv('ca_counties_population.csv')
population_df.head()
population_df['County'] = population_df['County'].apply(lambda x: x.replace(" County, California", ""))
population_df['County'] = population_df['County'].apply(lambda x: x.replace(".", ""))
population_df.dtypes
population_df.head()
```
A graph of Population vs. The Counties of California
```
figure, ax = plt.subplots(figsize=(20,10))
pop_bplot = sns.barplot(x='County', y='Population', ax=ax, data=population_df)
pop_bplot.set_xticklabels(population_df['County'], rotation=90)
pop_bplot.set_title('Population of Counties')
pop_bplot
```
We can see, there is a high variance in data, and we need to be aware of this.
```
var_in_pop = np.var(population_df['Population'])
var_in_pop
```
### 1.3. Real GDP Data
#### 1.3.1. Obtaining Data
```
! wget -q -O 'ca_counties_gdp.csv' https://www.dropbox.com/s/hlbrcmoksxshc6k/ca_real_gdp.csv?dl=1
gdp_df = pd.read_csv('ca_counties_gdp.csv')
gdp_df.rename(columns={'county': 'County', 'real_gdp': 'Real_GDP'}, inplace=True)
gdp_df.head()
print(gdp_df.shape)
```
Getting a sense of GDP of different counties by plotting graph.
```
figure, ax = plt.subplots(figsize=(20,10))
gdp_bplot = sns.barplot(x='County', y='Real_GDP', ax=ax, data=gdp_df)
gdp_bplot.set_xticklabels(gdp_df['County'], rotation=90)
gdp_bplot.set_title('GDP of Counties')
gdp_bplot
```
#### 1.3.2 Forming a unified dataframe with Location Data, GDP Data and Population Data
```
ca_counties_gdp_pop = population_df.merge(gdp_df, on='County')
print(ca_counties_gdp_pop.shape)
ca_counties_gdp_pop.head()
ca_counties_df = ca_counties_gdp_pop.merge(ca_counties, on='County')
ca_counties_df.head()
ca_counties_df.shape
```
#### 1.3.3. Correlation Between GDP and Economy of a County
I would like to argue that a county with higher economical prosperity is very likely to be a county with high population. Furthermore, a county with high population **or** high GDP is a good investment destination for a new eatery.
I will use the [Pearson Coefficient](https://en.wikipedia.org/wiki/Pearson_correlation_coefficient) for calculating the correlation.
```
# using the pearsonr method from SciPy Stats to calculate correlation index and the p-value
pearson_coef, pval = pearsonr(ca_counties_df['Population'], ca_counties_df['Real_GDP'])
print(f'Pearson Coefficient: {pearson_coef}')
print(f'p-value: {pval}')
```
The Pearson Coefficient is about $0.952$ which is very close to $+1$ and suggests a very strong correlation between counties' GDPs and Populations.
The p-value is $1.32 \times 10^{-30}$ which is extremely low and much much lesser than $0.05$ ( $<<< 0.05$).
So it is safe to conclude that counties with higher population tend to have a higher GDP and are better destinations for new business.
Visualizing the correlation between GDP and Population.
```
plt.figure(figsize=(20, 10))
plt.scatter(ca_counties_df['Population'], ca_counties_df['Real_GDP'])
plt.xlabel('Population')
plt.ylabel('Real GDP')
plt.title('Real GDP vs. Population')
plt.show()
# forming a dataframe consisting of gdp data and population data
plot_ca_df = ca_counties_df[['Real_GDP', 'Population']]
# scaling data
min_max_scaler = preprocessing.MinMaxScaler()
x = plot_ca_df.values
x_scaled = min_max_scaler.fit_transform(x)
plot_ca_df_scaled = pd.DataFrame(x_scaled)
plot_ca_df_scaled.columns = ['scaled_gdp', 'scaled_population']
# plotting data
plt.figure(figsize=(20,10))
gdp_plot = plt.plot(ca_counties_df['County'], plot_ca_df_scaled['scaled_gdp'], 'd-')
population_plot = plt.plot(ca_counties_df['County'], plot_ca_df_scaled['scaled_population'], 'd-')
plt.legend(['Scaled GDP', 'Scaled Population'])
plt.xlabel('County')
plt.ylabel('GDP and Population (both scaled)')
plt.title('GDP (scaled) and Population (scaled) of Counties')
plt.xticks(rotation=90)
plt.show()
```
### 1.4. Foursquare Data
Client ID and Client Secret have been stored in two variables `client_id` and `client_secret`. The code cell has been deleted for obvious security reasons.
```
CLIENT_ID = client_id
CLIENT_SECRET = client_secret
VERSION = '20200101'
```
Now, I shall call the Foursquare API to provide an outlook of the data.
```
# choosing the Los Angeles county of California and showing its venues
ca_counties.loc[52, 'County']
# retrieving information about the county from dataframe
county_name = ca_counties.loc[52, 'County']
county_lat = ca_counties.loc[52, 'Latitude']
county_long = ca_counties.loc[52, 'Longitude']
print(f'The {county_name} county\'s latitude is {county_lat} and longitude is {county_long}')
```
Let's see the eateries listed in Foursquare in Los Angeles.
Now let's retrieve the list of venues which are in the vicinity of the center of the county. A list of venues within 10 km will be retrieved.
```
# getting venues from Foursquare API
RADIUS = 10000
category_id = '4d4b7105d754a06374d81259' # category id for food as provided in Foursquare API documentation
url = 'https://api.foursquare.com/v2/venues/search?&client_id={}&client_secret={}&v={}&ll={},{}&categoryId={}&radius={}'.format(
CLIENT_ID,
CLIENT_SECRET,
VERSION,
county_lat,
county_long,
category_id,
RADIUS,
)
url
results = requests.get(url).json()
results
venues = results['response']['venues']
venues
nearby_venues = pd.json_normalize(venues)
nearby_venues.head(3)
# generating a map of Los Angeles
map_la = folium.Map(location=[34.36159, -118.21698], zoom_start=10)
# add markers to map
for lat, lng, county in zip(nearby_venues['location.lat'], nearby_venues['location.lng'], nearby_venues['name']):
label = '{}'.format(county)
label = folium.Popup(label, parse_html=True)
folium.CircleMarker(
[lat, lng],
radius=5,
popup=label,
color='#350091',
fill=True,
fill_color='#5e03fc',
fill_opacity=0.7,
parse_html=False).add_to(map_la)
map_la
nearby_venues.shape
# defining function to extract category from the row 'categories' in the nearby_venues dataframe
def get_category(entry):
    """Return the name of the primary (first-listed) category from a
    Foursquare `categories` list-of-dicts."""
    primary = entry[0]
    return primary['name']
# forming a clean dataframe
# keeping only relevant columns and truncating the rest
filtered_columns = ['name', 'categories', 'location.lat', 'location.lng']
nearby_venues = nearby_venues.loc[:, filtered_columns]
# getting relevant information from categories
nearby_venues['categories'] = nearby_venues['categories'].apply(get_category)
# renaming columns
nearby_venues.rename(columns={
'categories': 'category',
'location.lat': 'latitude',
'location.lng': 'longitude'
}, inplace=True)
nearby_venues.head()
```
Now, a function will be built for a repeating the same process for all the counties in California.
```
def get_nearby_venues(names, latitudes, longitudes, gdps, populations, radius=10000):
    """Query the Foursquare venue-search API for food venues near each county centre.

    Parameters
    ----------
    names, latitudes, longitudes, gdps, populations : iterables
        Parallel per-county sequences: county name, centre coordinates,
        Real GDP and population.
    radius : int, default 10000
        Search radius in metres around each county centre.  (Bug fix: the
        original hard-coded 10000 in the URL and silently ignored this
        parameter.)

    Returns
    -------
    pd.DataFrame
        One row per venue with county metadata attached.  A county for which
        the API returns no venues contributes a single placeholder row of
        zeros in the venue columns.
    """
    category_id = '4d4b7105d754a06374d81259'  # Foursquare category id for "Food"
    venues_list = []
    for name, lat, lng, gdp, pop in zip(names, latitudes, longitudes, gdps, populations):
        # build the API request for this county
        url = ('https://api.foursquare.com/v2/venues/search?'
               '&client_id={}&client_secret={}&v={}&ll={},{}'
               '&categoryId={}&radius={}&limit=50').format(
            CLIENT_ID,
            CLIENT_SECRET,
            VERSION,
            lat,
            lng,
            category_id,
            radius)
        results = requests.get(url).json()['response']['venues']
        if results:
            for row in results:
                venues_list.append((name,
                                    lat,
                                    lng,
                                    gdp,
                                    pop,
                                    row['name'],
                                    row['categories'][0]['name'],
                                    row['location']['lat'],
                                    row['location']['lng']))
        else:
            # Bug fix: the original emptiness check sat *inside*
            # `for row in results`, so it could never be False and this
            # placeholder row for venue-less counties was dead code.
            venues_list.append((name, lat, lng, gdp, pop, 0, 0, 0, 0))
    nearby_venues = pd.DataFrame(venues_list,
                                 columns=['county',
                                          'county_latitude',
                                          'county_longitude',
                                          'county_gdp',
                                          'county_pop',
                                          'name',
                                          'category',
                                          'venue_latitude',
                                          'venue_longitude'])
    return nearby_venues
california_venues = get_nearby_venues(ca_counties_df['County'], latitudes=ca_counties_df['Latitude'], longitudes=ca_counties_df['Longitude'], gdps=ca_counties_df['Real_GDP'], populations=ca_counties_df['Population'])
california_venues['county_gdp'] = california_venues['county_gdp'].astype('float')
california_venues['county_pop'] = california_venues['county_pop'].astype('float')
california_venues.dtypes
california_venues.head()
california_venues.shape
```
There are $1329$ unique venues in California.
## 2. Analysing California's Eateries
### 2.1 Number of Eateries in Each County
```
california_venues.groupby('county').count()
california_venues.groupby('county').count().shape
```
There are 45 rows for which the Foursquare API has returned venues.
```
# putting county name and total number of eateries in one dataframe
county_no_df = pd.DataFrame(california_venues.groupby('county').count())
# county_no_df = county_no_df[['county', 'name']]
# county_no_df.columns = ['county'] + county_no_df.columns
county_no_df.head()
venues_in_each_county = (california_venues.groupby('county').agg({'name': 'count'})).reset_index()
venues_in_each_county.rename(columns={'name': 'no_of_venues'}, inplace=True)
venues_in_each_county.head()
```
Plot of number of venues in each county of California
```
figure, ax = plt.subplots(figsize=(20,10))
eatery_bplot = sns.barplot(x='county', y='no_of_venues', ax=ax, data=venues_in_each_county)
eatery_bplot.set_xticklabels(gdp_df['County'], rotation=90)
eatery_bplot.set_title('Number of Eateries in Each County')
eatery_bplot
print('There are {} uniques categories.'.format(len(california_venues['category'].unique())))
```
### 2.2. Analysing the Data
```
# using one hot encoding
california_onehot = pd.get_dummies(california_venues[['category']], prefix="", prefix_sep="")
# adding the columns- county name, real gdp and population to the dataframe with the dummy indices
california_onehot['county'] = california_venues['county']
california_onehot['county_gdp'] = california_venues['county_gdp']
california_onehot['county_pop'] = california_venues['county_pop']
# moving the 'county', 'county_gdp', 'county_pop' columns to the front of the dataframe
for i in range(3):
fixed_columns = [california_onehot.columns[-1]] + list(california_onehot.columns[:-1])
california_onehot = california_onehot[fixed_columns]
california_onehot.head()
california_onehot.shape
```
Creating a dataframe with the mean of frequencies of each categories for every county.
```
california_grouped = california_onehot.groupby('county').mean().reset_index()
california_grouped
```
We see that this dataframe has only 45 rows. We can say that, there are no listed eateries in the other 5 counties.
Let's see the top five categories of eateries for each county.
```
num_top_venues = 5
for county in california_grouped['county']:
print("----"+county+"----")
temp = california_grouped[california_grouped['county'] == county].T.reset_index()
temp.columns = ['venue','freq']
temp = temp.iloc[3:]
temp['freq'] = temp['freq'].astype(float)
temp = temp.round({'freq': 4})
print(temp.sort_values('freq', ascending=False).reset_index(drop=True).head(num_top_venues))
print('\n')
```
We will form a dataframe from this data.
```
def get_common_venues(row, num_top_venues):
    """Return the labels of the `num_top_venues` highest-frequency venue
    categories in `row`.

    The first three entries of `row` are county metadata (county, GDP,
    population) and are skipped before ranking.
    """
    category_freqs = row.iloc[3:]
    ranked = category_freqs.sort_values(ascending=False)
    return ranked.index.values[:num_top_venues]
california_grouped.shape
num_top_venues = 10
indicators = ['st', 'nd', 'rd']
# create columns according to number of top venues
columns = ['county']
for ind in np.arange(num_top_venues):
try:
columns.append('{}{} Most Common Venue'.format(ind+1, indicators[ind]))
except:
columns.append('{}th Most Common Venue'.format(ind+1))
# create a new dataframe
counties_venues_sorted = pd.DataFrame(columns=columns)
counties_venues_sorted['county'] = california_grouped['county']
for ind in np.arange(california_grouped.shape[0]):
counties_venues_sorted.iloc[ind, 1:] = get_common_venues(california_grouped.iloc[ind, :], num_top_venues)
counties_venues_sorted.head()
```
## 3. Applying Machine Learning
### 3.1. Choosing Features
Forming a dataframe with number of eateries, population, and Real GDP of each county.
```
gdp_df.rename(columns={'County': 'county', 'Real_GDP': 'real_gdp'}, inplace=True)
ca_eateries_gdp = venues_in_each_county.merge(gdp_df, on='county')
ca_eateries_gdp_pop = ca_eateries_gdp.merge(population_df, left_on='county', right_on='County')
ca_eateries_gdp_pop.drop(columns='County', inplace=True)
ca_eateries_gdp_pop.rename(columns={'Population': 'population'}, inplace=True)
ca_eateries_gdp_pop.head()
# ca_counties_df.drop(columns=['Latitude', 'Longitude'], inplace=True)
ca_counties_df.head()
```
As there are data which range differently, we need to normalize the data.
```
min_max_scaler = preprocessing.MinMaxScaler()
ca_counties_df_clustering = ca_counties_df
needed_columns = ['Real_GDP', 'Population']
x = ca_counties_df_clustering[needed_columns].values
x_scaled = min_max_scaler.fit_transform(x)
ca_counties_df_clustering_scaled = pd.DataFrame(x_scaled)
ca_counties_df_clustering_scaled.head()
```
### 3.2. Determining the Best $k$
As I want to apply the KMeans Clustering algorithm to this data, I have to determine the best $k$ for this task. And I will use the *elbow method* for this. I will plot the inertia and distortion for the dataset for different values of $k$, and will accept the $k$ for which inertia and distortion are the lowest.
```
california_grouped_scaled_clustering = ca_counties_df_clustering_scaled
distortions = []
inertias = []
mapping1 = {}
mapping2 = {}
k_values = range(2,9)
for k in k_values:
kmeans_model_test = KMeans(n_clusters=k).fit(california_grouped_scaled_clustering)
kmeans_model_test.fit(california_grouped_scaled_clustering)
distortions.append(sum(np.min(cdist(california_grouped_scaled_clustering, kmeans_model_test.cluster_centers_,
'euclidean'),axis=1)) / california_grouped_scaled_clustering.shape[0])
inertias.append(kmeans_model_test.inertia_)
mapping1[k] = sum(np.min(cdist(california_grouped_scaled_clustering, kmeans_model_test.cluster_centers_,
'euclidean'),axis=1)) / california_grouped_scaled_clustering.shape[0]
mapping2[k] = kmeans_model_test.inertia_
```
Plot of distortion vs. different k values
```
plt.plot(k_values, distortions, 'bd-')
plt.xlabel('Values of K')
plt.ylabel('Distortion')
plt.title('Distortion vs. Values of K')
plt.show()
```
Plot of inertia vs. different k values
```
plt.plot(k_values, inertias, 'bd-')
plt.xlabel('Values of K')
plt.ylabel('Inertia')
plt.title('Inertia vs. Values of K')
plt.show()
```
We can conclude that $4$ clusters are best suited for this dataset. Above that, data will be overfit.
### 3.3. Applying kMeans Clustering Algorithm
```
from sklearn.cluster import KMeans
# set number of clusters
kclusters = 4
# california_grouped_scaled_clustering = ca_counties_df_clustering_scaled
# run k-means clustering
kmeans = KMeans(n_clusters=kclusters, random_state=0).fit(ca_counties_df_clustering_scaled)
# check cluster labels generated for each row in the dataframe
kmeans.labels_[0:10]
ca_counties_df.rename(columns={'County': 'county',
'Population': 'population',
'Real_GDP': 'real_gdp',
'Latitude': 'latitude',
'Longitude': 'longitude'}, inplace=True)
ca_counties_df.head()
print(ca_counties_df.shape)
print(counties_venues_sorted.shape)
print(len(kmeans.labels_))
# relevant_counties = [item for item in counties_venues_sorted['county'].values]
# ca_counties_df_relevant = ca_counties_df[relevant_counties]
# ca_counties_df_relevant
# relevant_counties
counties_venues_sorted.shape
ca_counties_df.insert(0, 'Cluster Labels', kmeans.labels_)
california_merged = ca_counties_df
california_merged = california_merged.merge(counties_venues_sorted, left_on='county', right_on='county', how='right')
california_merged.head() # check the last columns!
california_merged = california_merged.dropna()
california_merged.shape
california_merged.head()
# create map
map_clusters = folium.Map(location=[36.7783, -119.4179], zoom_start=5.9)
# set color scheme for the clusters
x = np.arange(kclusters)
ys = [i + x + (i*x)**2 for i in range(kclusters)]
colors_array = cm.rainbow(np.linspace(0, 1, len(ys)))
rainbow = [colors.rgb2hex(i) for i in colors_array]
# add markers to the map
markers_colors = []
for lat, lon, poi, cluster in zip(california_merged['latitude'], california_merged['longitude'], california_merged['county'], california_merged['Cluster Labels']):
label = folium.Popup(str(poi) + ' Cluster ' + str(cluster), parse_html=True)
folium.CircleMarker(
[lat, lon],
radius=5,
popup=label,
color=rainbow[int(cluster)-1],
fill=True,
fill_color=rainbow[int(cluster)-1],
fill_opacity=0.9).add_to(map_clusters)
map_clusters
```
### 3.4. Analyzing the Clusters
#### Cluster 1:
```
cluster_1 = pd.DataFrame(california_merged.loc[california_merged['Cluster Labels'] == 0, california_merged.columns[[0, 1, 2, 3] + list(range(6, california_merged.shape[1]))]])
cluster_1
```
#### Cluster 2:
```
cluster_2 = pd.DataFrame(california_merged.loc[california_merged['Cluster Labels'] == 1, california_merged.columns[[0, 1, 2, 3] + list(range(6, california_merged.shape[1]))]])
cluster_2
```
#### Cluster 3:
```
cluster_3 = pd.DataFrame(california_merged.loc[california_merged['Cluster Labels'] == 2, california_merged.columns[[0, 1, 2, 3] + list(range(6, california_merged.shape[1]))]])
cluster_3
```
#### Cluster 4:
```
# california_merged.loc[california_merged['Cluster Labels'] == 3, california_merged.columns[[0, 1, 2, 3] + list(range(6, california_merged.shape[1]))]]
cluster_4 = pd.DataFrame(california_merged.loc[california_merged['Cluster Labels'] == 3, california_merged.columns[[0, 1, 2, 3] + list(range(6, california_merged.shape[1]))]])
cluster_4
```
## 4. Recommendations Outline
In clusters 2 and 3, we have counties with high population and high GDP. In these counties, it will be profitable to invest in any eatery, while it is advisable to invest in an eatery which is not in the top 3 venues.
In cluster 4, population and GDP of counties are higher than those of the counties in cluster 1, but lower than those of counties in 2 or 3. Investment in these counties is preferred after county in cluster 2 and cluster 3, in that order. Investment should be done in uncommon eateries so that they face lesser competition.
Cluster 1 is dominated by lower population counties. Investment in these counties, should be preferred after investments in counties in clusters 2 or 3 or cluster 4. Investment in most common eateries is not advised at all. Investment in these counties is least advised.
### 4.1. Cluster 1:
```
cluster_1.loc[:, cluster_1.columns[[0, 1] + list(range(6, cluster_1.shape[1]))]]
```
### 4.2. Cluster 2:
```
cluster_2.loc[:, cluster_2.columns[[0, 1] + list(range(6, cluster_2.shape[1]))]]
```
### 4.3. Cluster 3:
```
cluster_3.loc[:, cluster_3.columns[[0, 1] + list(range(6, cluster_3.shape[1]))]]
```
### 4.4. Cluster 4:
```
cluster_4.loc[:, cluster_4.columns[[0, 1] + list(range(6, cluster_4.shape[1]))]]
```
| github_jupyter |
**This notebook is an exercise in the [Introduction to Machine Learning](https://www.kaggle.com/learn/intro-to-machine-learning) course. You can reference the tutorial at [this link](https://www.kaggle.com/dansbecker/model-validation).**
---
## Recap
You've built a model. In this exercise you will test how good your model is.
Run the cell below to set up your coding environment where the previous exercise left off.
```
# Code you have previously used to load data
import pandas as pd
from sklearn.tree import DecisionTreeRegressor
# Path of the file to read
iowa_file_path = '../input/home-data-for-ml-course/train.csv'
home_data = pd.read_csv(iowa_file_path)
y = home_data.SalePrice
feature_columns = ['LotArea', 'YearBuilt', '1stFlrSF', '2ndFlrSF', 'FullBath', 'BedroomAbvGr', 'TotRmsAbvGrd']
X = home_data[feature_columns]
# Specify Model
iowa_model = DecisionTreeRegressor()
# Fit Model
iowa_model.fit(X, y)
print("First in-sample predictions:", iowa_model.predict(X.head()))
print("Actual target values for those homes:", y.head().tolist())
# Set up code checking
from learntools.core import binder
binder.bind(globals())
from learntools.machine_learning.ex4 import *
print("Setup Complete")
```
# Exercises
## Step 1: Split Your Data
Use the `train_test_split` function to split up your data.
Give it the argument `random_state=1` so the `check` functions know what to expect when verifying your code.
Recall, your features are loaded in the DataFrame **X** and your target is loaded in **y**.
```
# Import the train_test_split function and uncomment
from sklearn.model_selection import train_test_split
# fill in and uncomment
train_X, val_X, train_y, val_y = train_test_split(X, y, random_state=1)
# Check your answer
step_1.check()
# The lines below will show you a hint or the solution.
# step_1.hint()
# step_1.solution()
```
## Step 2: Specify and Fit the Model
Create a `DecisionTreeRegressor` model and fit it to the relevant data.
Set `random_state` to 1 again when creating the model.
```
# You imported DecisionTreeRegressor in your last exercise
# and that code has been copied to the setup code above. So, no need to
# import it again
# Specify the model
iowa_model = DecisionTreeRegressor(random_state=1)
# Fit iowa_model with the training data.
iowa_model.fit(train_X, train_y)
# Check your answer
step_2.check()
# step_2.hint()
# step_2.solution()
```
## Step 3: Make Predictions with Validation data
```
# Predict with all validation observations
val_predictions = iowa_model.predict(val_X)
# Check your answer
step_3.check()
# step_3.hint()
# step_3.solution()
```
Inspect your predictions and actual values from validation data.
```
# print the top few validation predictions
print(val_predictions)
# print the top few actual prices from validation data
print(val_X)
```
What do you notice that is different from what you saw with in-sample predictions (which are printed after the top code cell in this page).
Do you remember why validation predictions differ from in-sample (or training) predictions? This is an important idea from the last lesson.
## Step 4: Calculate the Mean Absolute Error in Validation Data
```
from sklearn.metrics import mean_absolute_error
val_mae = mean_absolute_error(val_predictions, val_y)
# uncomment following line to see the validation_mae
print(val_mae)
# Check your answer
step_4.check()
# step_4.hint()
# step_4.solution()
```
Is that MAE good? There isn't a general rule for what values are good that applies across applications. But you'll see how to use (and improve) this number in the next step.
# Keep Going
You are ready for **[Underfitting and Overfitting](https://www.kaggle.com/dansbecker/underfitting-and-overfitting).**
---
*Have questions or comments? Visit the [Learn Discussion forum](https://www.kaggle.com/learn-forum/161285) to chat with other Learners.*
| github_jupyter |
```
!ls data/*/*.csv
import pandas as pd
df = pd.read_csv('data/McGill-Billboard/billboard-2.0-index.csv')
df.head()
# check date range of songs
years = pd.DatetimeIndex(df.chart_date).year
print(f'{years.min()}-{years.max()}')
# for our application we're only interested in ID, title, and artist
df_songs = df[df.title.notnull() & df.artist.notnull()][['id', 'title', 'artist']]
df_songs.head()
# check duplicates
dups = df_songs[df_songs.duplicated(subset=['title','artist'], keep=False)]
dups.head()
```
#### Check available features
```
base_dir = 'data/McGill-Billboard'
sample_dir = f'{base_dir}/0003'
!ls {sample_dir} -l
!cat {sample_dir}/tuning.csv
!cat {sample_dir}/majmin7.lab | head -n 5
!cat {sample_dir}/bothchroma.csv | head -n 5
# check if duplicates are exact match
NUM_CHECKS = 5
for ix in dups.index[:NUM_CHECKS]:
row = dups.loc[ix]
ids = df_songs[(df_songs.title == row.title)
& (df_songs.artist == row.artist)].id.values
ref_id = ids[0]
dirname = f'{base_dir}/{ref_id:04d}'
with open(f'{dirname}/tuning.csv', 'r') as f:
ref_tuning = f.read()
with open(f'{dirname}/majmin7.lab', 'r') as f:
ref_chords = f.read()
with open(f'{dirname}/bothchroma.csv', 'r') as f:
ref_chroma = f.read()
for _id in ids[1:]:
dirname = f'{base_dir}/{_id:04d}'
with open(f'{dirname}/tuning.csv', 'r') as f:
tuning = f.read()
with open(f'{dirname}/majmin7.lab', 'r') as f:
chords = f.read()
with open(f'{dirname}/bothchroma.csv', 'r') as f:
chroma = f.read()
print(f'{_id:04d} to {ref_id:04d} [tuning]: {ref_tuning == tuning}')
print(f'{_id:04d} to {ref_id:04d} [chords]: {ref_chords == chords}')
print(f'{_id:04d} to {ref_id:04d} [chroma]: {ref_chroma == chroma}')
print()
```
This builds the confidence that we can just remove duplicates.
```
# we remove duplicated songs (except first occurrence)
unique_filter = ~df_songs.duplicated(subset=['title','artist'], keep='first')
df_songs_unique = df_songs[unique_filter]
print(len(df_songs_unique))
df_songs_unique.head()
# check again
any(df_songs_unique.duplicated(subset=['title', 'artist']))
df_songs_unique.to_csv(f'{base_dir}/billboard-2.0-unique.csv', index=False)
```
#### Pick songs for feature analysis
```
import pandas as pd
base_dir = 'data/McGill-Billboard'
data_index = 'billboard-2.0-unique.csv'
df_songs_unique = pd.read_csv(f'{base_dir}/{data_index}')
df_songs_unique.head()
df_songs_unique.tail()
df_songs_unique[df_songs_unique.title.str.contains("kiss", case=False)]
df_songs_unique.artist.sample(n=10)
# after going around the list, picked the following:
# - 1289 - There She Goes - The La's
# - 736 - Do I Do - Stevie Wonder
# - 637 - Human Nature - Michael Jackson
# - 270 - In My Room - The Beach Boys
# - 18 - Kiss On My List - Daryl Hall & John Oates
picked_ids = [1289, 736, 637, 270, 18]
```
We download these songs to analyse if we can extract chroma features closely similar to those provided by dataset.
```
# check duration
for _id in picked_ids:
tuning = f'{base_dir}/{_id:04d}/tuning.csv'
title = df_songs_unique[df_songs_unique.id == _id].iloc[0].title
contents = pd.read_csv(tuning, header=None)
duration = contents[2].iloc[0]
print(f'{title}: {duration:.2f}s, {int(duration/60)}:{int(duration%60.0):02d}')
# convert to wav
from glob import glob
import codecs
for _id in picked_ids:
mp3_pattern = f'data/audio/{_id:04d}/*.mp3'
mp3_list = glob(mp3_pattern) + glob(mp3_pattern.replace('mp3','m4a'))
if len(mp3_list) == 0:
print(f'{_id}: Missing MP3')
continue
mp3_fn = f'"{mp3_list[0]}"'
audio_fn = f'data/audio/{_id:04d}/audio.wav'
print(mp3_fn, audio_fn)
!ffmpeg -y -i {mp3_fn} {audio_fn}
```
#### Chroma feature analysis
```
def load_billboard_chroma(_id):
    """Load the combined bass/treble chroma matrix for one Billboard song.

    Reads `<base_dir>/<id, zero-padded to 4>/bothchroma.csv`; columns 0
    (empty) and 1 (frame time tick) are discarded so only the chroma
    features themselves are returned, as a 2-D array of shape
    (frames, features).
    """
    csv_path = f'{base_dir}/{_id:04d}/bothchroma.csv'
    frame = pd.read_csv(csv_path, header=None)
    return frame.iloc[:, 2:].values
# chroma_samp = load_billboard_chroma(1289)
# chroma_samp.shape
from scipy.signal import resample
import librosa
import vamp
import torch
from torchaudio.transforms import Resample
_SAMPLE_RATE = 44100
def generate_chroma(_id, resampler='scipy', params=None):
    """Generate an NNLS "bothchroma" matrix from the raw audio of song `_id`.

    Parameters
    ----------
    _id : int
        Song id; audio is read from `data/audio/<id, zero-padded>/audio.wav`.
    resampler : {'scipy', 'torch'}
        Backend used to resample to 44.1 kHz when the file's rate differs.
    params : dict, optional
        Extra parameters forwarded to the vamp NNLS-chroma plugin.
        (Bug fix: the original used a mutable default `params={}`, which is
        shared across calls; `None` with an in-function default avoids that.)

    Returns
    -------
    The bass+treble chroma matrix produced by the plugin (frames x features).

    Raises
    ------
    ValueError
        If `resampler` is not one of the supported backends.
    """
    if params is None:
        params = {}
    audio_fn = f'data/audio/{_id:04d}/audio.wav'
    # sr=None preserves the file's native sample rate so we can decide
    # whether resampling is needed
    X, fs = librosa.load(audio_fn, sr=None, mono=True)
    if fs != _SAMPLE_RATE:
        if resampler == 'scipy':
            X = resample(X, num=int(len(X) * _SAMPLE_RATE / fs))
        elif resampler == 'torch':
            resampling_transform = Resample(orig_freq=fs,
                                            new_freq=_SAMPLE_RATE)
            X = resampling_transform(torch.Tensor([X])).squeeze().numpy()
        else:
            # ValueError is the conventional exception for a bad argument value
            raise ValueError(f'unsupported resampler: {resampler!r}')
    out = vamp.collect(X, _SAMPLE_RATE, 'nnls-chroma:nnls-chroma',
                       output='bothchroma', parameters=params)
    chroma = out['matrix'][1]
    return chroma
# chroma_gen = generate_chroma(1289)
# chroma_gen.shape
# chroma_gen = generate_chroma(1289, resampler='torch')
# chroma_gen.shape
picked_ids.sort()
picked_ids
def normalize_chroma(chroma):
    """L1-normalize each chroma frame (row) so its components sum to one.

    A tiny epsilon in the denominator keeps all-zero frames from
    dividing by zero (they normalize to zeros).
    """
    row_sums = chroma.sum(axis=1, keepdims=True) + np.finfo(float).eps
    return chroma / row_sums
from librosa.sequence import dtw
def compare_chroma(picked_ids, normalize=True):
    """Cross-compare dataset-provided chroma against chroma generated from raw audio.

    For every song id in `picked_ids`, loads the Billboard bothchroma matrix
    and scores it against chroma generated from the raw audio of *every*
    picked song, printing one row of scores per song.  The score is the
    subsequence-DTW accumulated cost divided by the warping-path length
    (length-independent; lower means more similar), so matching ids should
    score lowest along the diagonal of the printed table.

    Parameters:
        picked_ids: iterable of song ids to cross-compare.
        normalize: if True, L1-normalize each chroma frame before comparison.
    """
    print('**Feature comparison scores: (lower, better)**')
    print(picked_ids)
    for ix, _id in enumerate(picked_ids):
        #_, fs = librosa.load(audio_fn, sr=None, mono=True)
        #print(f'{df_songs_unique[df_songs_unique.id == _id].iloc[0].title} ({fs} Hz)')
        # reference features shipped with the Billboard dataset
        chroma_samp = load_billboard_chroma(_id)
        if normalize:
            chroma_samp = normalize_chroma(chroma_samp)
        # if ix == 0:
        #     ref_id = picked_ids[-1]
        # else:
        #     ref_id = picked_ids[ix-1]
        print(f'{_id} : ', end='')
        for ref_id in picked_ids:
            # chroma computed from our own audio via the NNLS plugin
            # (rollon=1.0 gave the best agreement in earlier experiments)
            chroma_gen = generate_chroma(ref_id, params={'rollon': 1.0})
            if normalize:
                chroma_gen = normalize_chroma(chroma_gen)
            # subsequence DTW: D[-1,-1] is the accumulated cost, wp the path
            D, wp = dtw(chroma_samp.T, chroma_gen.T, subseq=True)
            #print(f'- baseline {_id} vs. {ref_id}: {D[-1,-1]}, ({(D[-1,-1]/len(wp)):.4f})')
            #print(f'{D[-1,-1]:.2f}/{len(wp)} ({(D[-1,-1]/len(wp)):.4f}) | ', end='')
            print(f'{(D[-1,-1]/len(wp)):.4f} | ', end='')
        # chroma_gen = generate_chroma(_id)
        # D, wp = dtw(chroma_samp.T, chroma_gen.T, subseq=True)
        # print(f'- default params: {D[-1,-1]}, ({(D[-1,-1]/len(wp)):.4f})')
        # chroma_gen = generate_chroma(_id, params={'rollon': 1.0})
        # D, wp = dtw(chroma_samp.T, chroma_samp.T, subseq=True)
        # print(f'- rollon=1.0: {D[-1,-1]}, ({(D[-1,-1]/len(wp)):.4f})')
        # chroma_gen = generate_chroma(_id, resampler='torch')
        # D, wp = dtw(chroma_samp.T, chroma_gen.T, subseq=True)
        # print(f'- (torch) default params: {D[-1,-1]}, ({(D[-1,-1]/len(wp)):.4f})')
        # chroma_gen = generate_chroma(_id, resampler='torch', params={'rollon': 1.0})
        # D, wp = dtw(chroma_samp.T, chroma_gen.T, subseq=True)
        # print(f'- (torch) rollon=1.0: {D[-1,-1]}, ({(D[-1,-1]/len(wp)):.4f})')
        # terminate this song's score row
        print('')
print('------------')
compare_chroma(picked_ids, normalize=True)
print('------------')
compare_chroma(picked_ids, normalize=False)
```
#### Prep for loading the data
```
import pandas as pd
base_dir = 'data/McGill-Billboard'
data_index = 'billboard-2.0-unique.csv'
df_songs = pd.read_csv(f'{base_dir}/{data_index}')
df_songs.head()
len(df_songs)
!ls {base_dir}/1289
!tail {base_dir}/1289/bothchroma.csv
def get_chroma_matrix(_id, return_timestamps=False):
    """Load the bothchroma (bass+treble) matrix for one Billboard song.

    Parameters:
        _id: song id; read from `<base_dir>/<id, zero-padded to 4>/bothchroma.csv`.
        return_timestamps: if True, also return per-frame [start, end] times.

    Returns:
        bothchroma (frames x features), or `(timestamps, bothchroma)` where
        `timestamps` has shape (frames, 2) with start/end time per frame.
    """
    fn = f'{base_dir}/{_id:04d}/bothchroma.csv'
    contents = pd.read_csv(fn, header=None)
    # we only get 3rd column onwards
    # (first column empty, 2nd column time tick)
    bothchroma = contents[contents.columns[2:]].values
    if not return_timestamps:
        return bothchroma
    start_times = contents[contents.columns[1]].values
    # NOTE(review): taking start_times[1] as the hop size assumes the first
    # frame starts at t=0 and frames are evenly spaced — TODO confirm against
    # the bothchroma.csv format
    step_size = start_times[1]
    # each frame ends where the next one begins; the last frame is assumed to
    # span one full hop
    end_times = np.append(start_times[1:], [start_times[-1]+step_size], axis=0)
    timestamps = np.vstack((start_times, end_times)).T
    return timestamps, bothchroma
get_chroma_matrix(1289).shape # num frames, chroma feature size
ts, cmatrix = get_chroma_matrix(1289, return_timestamps=True)
print(ts.shape, cmatrix.shape)
```
#### Feature description
From reading bothchroma and chroma feature comparison:
- sample rate: **44100 Hz**
- step size for **each** chroma vector: 0.046439909s or **~50ms**
- equivalent to 0.046439909 * 44100 = **~2048 samples**
- 64 chroma vectors ~= 3s
- 128 chroma vectors ~= 6s
```
lab_fn = f'{base_dir}/1289/majmin.lab'
!head {lab_fn}
import mir_eval
def get_chord_labels(_id, label_type='majmin'):
    """Read chord annotations (.lab) for one Billboard song.

    label_type: one of majmin, majmin7, majmininv, majmin7inv, full.
    Returns (intervals, labels) as produced by mir_eval.
    """
    lab_path = f'{base_dir}/{_id:04d}/{label_type}.lab'
    # with comment='\n', any line starting with a newline (i.e. blank
    # lines) is skipped by mir_eval's parser
    return mir_eval.io.load_labeled_intervals(lab_path, comment='\n')
timestamps, chord_labels = get_chord_labels(1289)
for ix, (ts, lab) in enumerate(zip(timestamps, chord_labels)):
print(ts, lab)
if ix == 5:
break
```
Now we have to map EACH chroma vector to these labels.
Considerations:
* *Time steps of vectors do not match time steps in labels* - for the first iteration, we can **ignore** chroma vectors spanning multiple labels / occurring at chord transitions
* *Numerical representation of chords* - `mir_eval.chords.encode()` converts chord labels to (semitone, quality (maj,min,etc.); for first iteration, we **limit to majmin and "squash" both semitone and quality in a single label**. We also **add 1** to the representation to shift -1 encoding for N (no chord) to 0
Before we perform actual encoding of labels, let's perform some checks first.
```
from tqdm import tqdm
majmin_chordset = set()
majmin7_chordset = set()
for _id in tqdm(df_songs.id):
_, chord_labels = get_chord_labels(_id, label_type='majmin')
majmin_chordset.update(chord_labels)
_, chord_labels = get_chord_labels(_id, label_type='majmin7')
majmin7_chordset.update(chord_labels)
temp = list(majmin_chordset)
temp.sort()
print(temp)
temp = list(majmin7_chordset)
temp.sort()
print(temp)
```
Looks good. We can interpret X and N as same class.
```
for _id in tqdm(df_songs.id):
ts, _ = get_chord_labels(_id)
# check if intervals are contiguous
assert(sum(ts[:-1, 1] - ts[1:, 0]) < 1e-9)
# check if time values are all positive
assert((ts >= 0.0).all())
```
Looks good. We now proceed to encoding the labels.
```
import numpy as np
# bitmaps of chord qualities
_MAJ_BITMAP = mir_eval.chord.quality_to_bitmap('maj')
_MIN_BITMAP = mir_eval.chord.quality_to_bitmap('min')
_MAJ7_BITMAP = mir_eval.chord.quality_to_bitmap('maj7')
_MIN7_BITMAP = mir_eval.chord.quality_to_bitmap('min7')
_NUM_SEMITONE = 12
def encode_chords_single_label(chord_labels):
    """Collapse chord label strings into one integer class per chord.

    The root is shifted by +1 so No Chord (-1) maps to 0, then offset by a
    multiple of 12 (_NUM_SEMITONE) according to the chord quality, giving a
    single root+quality class index.
    """
    # encode_many also yields bass numbers, which we do not use here
    roots, qualities, _ = mir_eval.chord.encode_many(chord_labels)
    roots = roots + 1  # shift No Chord from -1 to 0
    # each quality gets its own block of 12 classes above the maj block
    for block, bitmap in ((1, _MIN_BITMAP), (2, _MAJ7_BITMAP), (3, _MIN7_BITMAP)):
        matches = np.all(qualities == bitmap, axis=1)
        roots[matches] += _NUM_SEMITONE * block
    return roots
chord_classes = encode_chords_single_label(get_chord_labels(1289)[1])
chord_classes[:5]
# similar check from last time; this time the encodings are checked
from tqdm import tqdm
majmin_chordset = set()
majmin7_chordset = set()
for _id in tqdm(df_songs.id):
_, chord_labels = get_chord_labels(_id, label_type='majmin')
majmin_chordset.update(encode_chords_single_label(chord_labels))
_, chord_labels = get_chord_labels(_id, label_type='majmin7')
majmin7_chordset.update(encode_chords_single_label(chord_labels))
temp = list(majmin_chordset)
temp.sort()
print(temp)
temp = list(majmin7_chordset)
temp.sort()
print(temp)
```
Looks great! We can proceed to matching the chroma vectors and these labels for model training.
```
import numpy as np
def get_chord_features_and_labels(_id):
    """Align chroma frames with chord labels for one Billboard song.

    Returns (chroma_vectors, labels) keeping only chroma frames that lie
    entirely inside a single chord interval; frames spanning a chord
    transition (ambiguous frames) are dropped.
    """
    chroma_timestamps, chroma_vectors = get_chroma_matrix(_id, return_timestamps=True)
    chord_timestamps, chord_labels_str = get_chord_labels(_id)
    chord_labels = encode_chords_single_label(chord_labels_str)
    assert(len(chroma_timestamps) == len(chroma_vectors))
    assert(len(chord_timestamps) == len(chord_labels))
    # label for each chroma vector; -1 marks "unassigned / ambiguous"
    # BUGFIX: np.int was removed in NumPy 1.24 -- use the builtin int
    chromavec_labels = np.zeros(len(chroma_vectors)).astype(int)-1 # all -1's
    st_ix = 0 # lower bound for updating labels
    for ts, chord_label in zip(chord_timestamps, chord_labels):
        # chroma frames fully contained in the current chord interval
        in_cur_chord = (chroma_timestamps[st_ix:, 0] >= ts[0]) \
                        & (chroma_timestamps[st_ix:, 1] <= ts[1])
        chromavec_labels[st_ix:][in_cur_chord] = chord_label
        # advance the lower bound past the frames just labelled
        in_cur_chord = in_cur_chord.astype(int)
        transitions = in_cur_chord[1:] - in_cur_chord[:-1]
        # after differencing, a True->False transition shows up as -1;
        # the first such index marks the end of the current chord's frames
        TtoF_ixs = np.where(transitions==-1)[0]
        if len(TtoF_ixs) > 0:
            st_ix += (TtoF_ixs[0] + 1) # +1 due to offset by diffing to get transitions
    remove_ambiguous_mask = (chromavec_labels != -1)
    return chroma_vectors[remove_ambiguous_mask], chromavec_labels[remove_ambiguous_mask]
chroma_vectors, chord_labels = get_chord_features_and_labels(1289)
chroma_vectors.shape, chord_labels.shape
chord_labels[:100]
```
Now we use the feature & label extraction function on all data. (Try if we can fit all of it into memory i.e. a single NumPy array).
```
len(df_songs), chroma_vectors.nbytes, chord_labels.nbytes
print(f'estimate memory size: {(len(df_songs)*(chroma_vectors.nbytes+chord_labels.nbytes)/(1024*1024)):.2f} MB')
from tqdm import tqdm
all_chroma_vectors = []
all_chord_labels = []
for _id in tqdm(df_songs.id):
chroma_vectors, chord_labels = get_chord_features_and_labels(_id)
if len(all_chroma_vectors) == 0:
all_chroma_vectors = chroma_vectors
all_chord_labels = chord_labels
else:
all_chroma_vectors = np.concatenate((all_chroma_vectors, chroma_vectors))
all_chord_labels = np.concatenate((all_chord_labels, chord_labels))
# check 569?
print(f'actual memory size: {((all_chroma_vectors.nbytes+all_chord_labels.nbytes)/(1024*1024)):.2f} MB')
np.save('data/01_all_chroma_vectors.npy', all_chroma_vectors)
np.save('data/01_all_chord_labels.npy', all_chord_labels)
all_chroma_vectors.shape, all_chord_labels.shape
```
With this data, we can proceed to first training iteration.
#### Feature check
```
import numpy as np
from scipy.signal import resample
import librosa
import vamp
_SAMPLE_RATE = 44100
def generate_chroma_from_wav(audio_fn, params=None):
    """Compute Chordino bothchroma features from an audio file.

    audio_fn: path of the audio file to analyse.
    params: optional dict of NNLS-chroma plugin parameters; defaults to
        {'rollon': 1.0}. (A None sentinel replaces the previous mutable
        dict default, which is shared across calls and a classic bug.)
    Returns the bass+treble chroma matrix.
    """
    if params is None:
        params = {'rollon': 1.0}
    X, fs = librosa.load(audio_fn, sr=None, mono=True)
    if fs != _SAMPLE_RATE:
        # resample to the rate the Billboard features use
        X = resample(X, num=int(len(X)*_SAMPLE_RATE/fs))
    out = vamp.collect(X, _SAMPLE_RATE, 'nnls-chroma:nnls-chroma',
                       output='bothchroma', parameters=params)
    # vamp returns (timestamps, matrix); keep only the feature matrix
    chroma = out['matrix'][1]
    return chroma
import matplotlib.pyplot as plt
%matplotlib inline
#notes = ['C','Db','D','Eb','E','F','Gb','G','Ab','A','Bb','B'] # this is WRONG!!!!
notes = ['A','Bb','B', 'C','Db','D','Eb','E','F','Gb','G','Ab']
cmx = generate_chroma_from_wav('data/freesound/c_major.wav')
print(cmx.shape)
plt.barh(range(24), np.sum(cmx,axis=0))
plt.yticks(range(24), notes+notes)
plt.tight_layout()
cmx = generate_chroma_from_wav('data/freesound/a_major.wav')
plt.barh(range(24), np.sum(cmx,axis=0))
plt.yticks(range(24), notes+notes)
plt.tight_layout()
cmx = generate_chroma_from_wav('data/freesound/e4.wav')
plt.barh(range(24), np.sum(cmx,axis=0))
plt.yticks(range(24), notes+notes)
plt.tight_layout()
cmx = generate_chroma_from_wav('data/freesound/c.m4a')
plt.barh(range(24), np.sum(cmx,axis=0))
plt.yticks(range(24), notes+notes)
plt.tight_layout()
cmx = generate_chroma_from_wav('data/freesound/b.m4a')
plt.barh(range(24), np.sum(cmx,axis=0))
plt.yticks(range(24), notes+notes)
plt.tight_layout()
cmx = generate_chroma_from_wav('data/freesound/a_minor.wav')
plt.barh(range(24), np.sum(cmx,axis=0))
plt.yticks(range(24), notes+notes)
plt.tight_layout()
```
Realizations from here:
- Chordino bothchroma is interpreted as: [0:12], [12:24] -> [A up to Ab], bass, treble
- Chordino output can have "incorrect" chroma values (perhaps due to tuning issues...) e.g. C maj interpreted as Dbmaj
- BUT, the quality of the chord is retained
- Also try to visualize each individual vector (instead of only mean/sum) to confirm
```
# try on a larger scale: mean chroma vector per chord class
all_chroma_vectors = np.load('data/01_all_chroma_vectors.npy')
all_chord_labels = np.load('data/01_all_chord_labels.npy')
all_chroma_vectors.shape, all_chord_labels.shape
# BUGFIX: use the boolean mask directly instead of wrapping it in a list;
# indexing with a list containing a mask is deprecated/invalid in modern NumPy
filt = (all_chord_labels == 0) # no chord
plt.barh(range(24), np.mean(all_chroma_vectors[filt], axis=0))
plt.yticks(range(24), notes+notes)
plt.tight_layout()
filt = (all_chord_labels == 1) # c maj
plt.barh(range(24), np.mean(all_chroma_vectors[filt], axis=0))
plt.yticks(range(24), notes+notes)
plt.tight_layout()
filt = (all_chord_labels == 3) # d maj
plt.barh(range(24), np.mean(all_chroma_vectors[filt], axis=0))
plt.yticks(range(24), notes+notes)
plt.tight_layout()
filt = (all_chord_labels == 13) # c min
plt.barh(range(24), np.mean(all_chroma_vectors[filt], axis=0))
plt.yticks(range(24), notes+notes)
plt.tight_layout()
```
Here we can see that the SUM of the vectors for each class "activate" appropriately at the chord class's pitch classes.
```
# automated check
import mir_eval
def encode_to_chordino_chroma(label):
    """Encode a chord label into Chordino's A-based pitch-class layout.

    Returns (root, bitmap): the rotated root index and the chord quality
    bitmap rotated to that root.
    """
    root, quality, _ = mir_eval.chord.encode(label)
    # mir_eval roots are C-based; Chordino chroma starts at A, hence +3
    rotated_root = (root + 3) % 12
    bitmap = mir_eval.chord.rotate_bitmap_to_root(quality, rotated_root)
    return rotated_root, bitmap
encode_to_chordino_chroma('A:maj')
# from importlib import reload
# import dataloader
# reload(dataloader)
from dataloader import _CHROMA_FEAT_NAMES, _MAJMIN_CLASSES
for class_index, chord_name in enumerate(_MAJMIN_CLASSES):
if class_index == 0:
continue
root, chord_bitmap = encode_to_chordino_chroma(chord_name)
filt = (all_chord_labels == class_index)
vector_mean = np.mean(all_chroma_vectors[filt], axis=0)
vector_topk = set(np.argpartition(vector_mean[12:], -3)[-3:])
bitmap_topk = set(np.argpartition(chord_bitmap, -3)[-3:])
bass_match = (root == np.argmax(vector_mean[:12]))
chord_match = (vector_topk == bitmap_topk)
if (not bass_match) or (not chord_match):
print(f'{chord_name} - Bass match: {bass_match} - '
f'Chord match: {chord_match}')
# now check individual vectors
# N.B. add to lazycats
import numpy as np
def top_k_indices(arr, k, axis=1):
    """Return, for each row of a 2-D array, the column indices of its k
    largest values, ordered from largest to smallest.

    arr: 2-D array-like.
    k: number of indices per row; must not exceed the row length.
    axis: only axis=1 (per-row) is implemented.
    Returns an (n_rows, k) integer array of column indices.
    """
    if axis != 1:
        raise NotImplementedError('only axis=1 is supported')
    # BUGFIX: cast the working copy to float so -inf can be written into it;
    # an integer input array would reject -inf and break the masking step
    temp = np.array(arr, dtype=float)  # copy
    # BUGFIX: an oversized k previously produced silently wrong indices
    if k > temp.shape[1]:
        raise ValueError(f'k={k} exceeds row length {temp.shape[1]}')
    rows = range(len(temp))
    top_k_ixs = []
    for _ in range(k):
        max_ixs = np.argmax(temp, axis=axis)
        top_k_ixs.append(max_ixs)
        temp[(rows, max_ixs)] = -np.inf  # exclude from the next pass
    return np.hstack([np.array([ixs]).T for ixs in top_k_ixs])
from collections import Counter
for class_index, chord_name in enumerate(_MAJMIN_CLASSES):
if class_index == 0:
continue
root, chord_bitmap = encode_to_chordino_chroma(chord_name)
filt = (all_chord_labels == class_index)
vecs = all_chroma_vectors[filt]
bass_vecs = vecs[:, :12]
treble_vecs = vecs[:, 12:]
# remove all zeros for comparison
bass_vecs = bass_vecs[~np.all(bass_vecs <= 0.01, axis=1)]
treble_vecs = treble_vecs[~np.all(treble_vecs <= 0.01, axis=1)]
basses = np.argmax(bass_vecs, axis=1)
bass_match = np.mean(basses == root)*100
ext_bass_match = bass_match + np.mean(basses == (root-1)%12)*100
ext_bass_match += np.mean(basses == (root+1)%12)*100
# get only on treble
vec_ixs = top_k_indices(treble_vecs, 4, axis=1)
label_ixs = np.where(chord_bitmap)[0]
chord_match = np.mean(np.all(np.isin(vec_ixs[:, :3], label_ixs), axis=1))
ext_match = np.mean((np.sum(np.isin(vec_ixs, label_ixs), axis=1))==3) # ext chord
print(f'{chord_name} - Bass match: {bass_match:.2f} - Ext bass match: {ext_bass_match:.2f} - '
f'Chord match: {chord_match:.2f} - Ext match: {ext_match:.2f}')
#print(Counter(basses))
```
Either there is a lot of mislabels or our features aren't very rich.
#### Exploring tuning
```
import pandas as pd
base_dir = 'data/McGill-Billboard'
data_index = 'billboard-2.0-unique.csv'
df_songs = pd.read_csv(f'{base_dir}/{data_index}')
df_songs.head()
def get_tuning(_id):
    """Read the tuning CSV for one Billboard song as a DataFrame."""
    tuning_path = f'{base_dir}/{_id:04d}/tuning.csv'
    return pd.read_csv(tuning_path, header=None)
get_tuning(1289)
from tqdm import tqdm
# count how many values each song's tuning file holds
tunings_len = []
for _id in tqdm(df_songs.id):
    tuning_data = get_tuning(_id)
    tunings_len.append(len(tuning_data.loc[0]))
tunings_len  # BUGFIX: was `tuning_vals`, an undefined name (NameError)
from collections import Counter
Counter(tunings_len)
# ALL tunings file are 1 line, with 5 values
tuning_data
from scipy.signal import resample
import librosa
import vamp
_SAMPLE_RATE = 44100
def generate_tuning(_id, params=None):
    """Estimate tuning for one Billboard song via the NNLS-chroma plugin.

    (Docstring fixed: this returns the plugin's 'tuning' output, not chroma.)
    params: optional dict of plugin parameters; a None sentinel replaces
        the previous mutable dict default, which is shared across calls.
    Returns the raw vamp.collect() result.
    """
    if params is None:
        params = {}
    audio_fn = f'data/audio/{_id:04d}/audio.wav'
    X, fs = librosa.load(audio_fn, sr=None, mono=True)
    if fs != _SAMPLE_RATE:
        X = resample(X, num=int(len(X)*_SAMPLE_RATE/fs))
    out = vamp.collect(X, _SAMPLE_RATE, 'nnls-chroma:tuning',
                       output='tuning', parameters=params)
    return out
generate_tuning(1289, {'rollon':1.0})
# https://github.com/librosa/librosa/blob/main/examples/adjust_tuning.py
def adjust_tuning(y, sr):
    """Estimate the global tuning deviation of `y` and pitch-shift the
    signal back to concert pitch; returns the corrected signal.

    (Docstring fixed: the previous one, copied from a librosa example,
    claimed to load and save audio, which this function does not do.)
    """
    # estimate tuning from the harmonic component, which is more stable
    y_harm = librosa.effects.harmonic(y)
    print('Estimating tuning ... ')
    tuning = librosa.estimate_tuning(y=y_harm, sr=sr)
    print('{:+0.2f} cents'.format(100 * tuning))
    print('Applying pitch-correction of {:+0.2f} cents'.format(-100 * tuning))
    # NOTE(review): librosa >= 0.10 requires keyword args here
    # (pitch_shift(y, sr=sr, n_steps=-tuning)) -- confirm installed version
    y_tuned = librosa.effects.pitch_shift(y, sr, -tuning)
    return y_tuned
def generate_tuning_from_wav(audio_fn, params=None, correct_tuning=False):
    """Estimate tuning from an audio file via the NNLS-chroma plugin.

    (Docstring fixed: this returns the plugin's 'tuning' output, not chroma.)
    params: optional dict of plugin parameters; a None sentinel replaces
        the previous mutable dict default, which is shared across calls.
    correct_tuning: when True, pitch-correct the audio before analysis.
    """
    if params is None:
        params = {}
    X, fs = librosa.load(audio_fn, sr=None, mono=True)
    if fs != _SAMPLE_RATE:
        X = resample(X, num=int(len(X)*_SAMPLE_RATE/fs))
    if correct_tuning:
        X = adjust_tuning(X, _SAMPLE_RATE)
    out = vamp.collect(X, _SAMPLE_RATE, 'nnls-chroma:tuning',
                       output='tuning', parameters=params)
    return out
generate_tuning_from_wav('data/freesound/c_major.wav', params={'rollon':1.0}, correct_tuning=True)
def generate_chroma_from_wav(audio_fn, params=None, correct_tuning=False):
    """Compute Chordino bothchroma features from an audio file.

    params: optional dict of NNLS-chroma plugin parameters; defaults to
        {'rollon': 1.0}. (A None sentinel replaces the previous mutable
        dict default, which is shared across calls.)
    correct_tuning: when True, pitch-correct the audio before analysis.
    Returns the bass+treble chroma matrix.
    """
    if params is None:
        params = {'rollon': 1.0}
    X, fs = librosa.load(audio_fn, sr=None, mono=True)
    if fs != _SAMPLE_RATE:
        X = resample(X, num=int(len(X)*_SAMPLE_RATE/fs))
    if correct_tuning:
        X = adjust_tuning(X, _SAMPLE_RATE)
    out = vamp.collect(X, _SAMPLE_RATE, 'nnls-chroma:nnls-chroma',
                       output='bothchroma', parameters=params)
    # vamp returns (timestamps, matrix); keep only the feature matrix
    chroma = out['matrix'][1]
    return chroma
import matplotlib.pyplot as plt
%matplotlib inline
notes = ['A','Bb','B', 'C','Db','D','Eb','E','F','Gb','G','Ab']
cmx = generate_chroma_from_wav('data/freesound/c_major.wav', correct_tuning=False)
print(cmx.shape)
plt.barh(range(24), np.sum(cmx,axis=0))
plt.yticks(range(24), notes+notes)
plt.tight_layout()
cmx = generate_chroma_from_wav('data/freesound/c_major.wav', correct_tuning=True)
print(cmx.shape)
plt.barh(range(24), np.sum(cmx,axis=0))
plt.yticks(range(24), notes+notes)
plt.tight_layout()
# tuning doesn't seem to do much
```
#### Exploring temporal-based features
```
import dataloader
#from importlib import reload
#reload(dataloader)
chroma_vectors, chord_labels = dataloader.get_chord_features_and_labels(1289, label_type='majmin')
chroma_vectors.shape, chord_labels.shape
# how often do chords change?
arr = chord_labels[:100]
arr
import numpy as np
# N.B. lazycats entry # 2
def contiguous_lengths(arr):
    """Return the run lengths of consecutive equal values in a 1-D array.

    E.g. [1,1,1,2,3,3] -> [3, 1, 2]. An empty input yields an empty array.
    """
    arr = np.asarray(arr)
    if arr.ndim != 1:
        # raise instead of assert so the check survives `python -O`
        raise ValueError('contiguous_lengths expects a 1-D array')
    # BUGFIX: the original crashed on empty input (indexed change_points[0])
    if len(arr) == 0:
        return np.array([], dtype=int)
    # indices where the value differs from its predecessor (run starts)
    change_points = np.where(np.diff(arr))[0] + 1
    # append a sentinel at len(arr) so the final run is counted too.
    # BUGFIX: the original guarded this with `if len(arr) not in
    # change_points`, which is always True (max change point is len-1),
    # i.e. dead code -- the append is unconditional.
    change_points = np.append(change_points, len(arr))
    # difference between consecutive run starts == run length
    return np.concatenate(([change_points[0]], np.diff(change_points)))
contiguous_lengths(arr)
# compute for all songs
import pandas as pd
base_dir = 'data/McGill-Billboard'
data_index = 'billboard-2.0-unique.csv'
df_songs = pd.read_csv(f'{base_dir}/{data_index}')
df_songs.head()
# before we proceed, let's check first if there are songs that are predominantly no-chords / X-chords
df_songs['no_chord_percent'] = 0
for ix in tqdm(df_songs.index):
_id = df_songs.loc[ix].id
chroma_vectors, chord_labels = dataloader.get_chord_features_and_labels(_id, label_type='majmin')
df_songs.loc[ix, 'no_chord_percent'] = np.mean(chord_labels == 0)
threshold = 0.5
print(sum(df_songs.no_chord_percent >= threshold))
df_songs[df_songs.no_chord_percent >= threshold][['id','title','artist','no_chord_percent']].head()
```
For now, let's ignore songs with 50% or more no-chord/X labels.
```
df_songs = df_songs[~(df_songs.no_chord_percent >= threshold)]
df_songs.to_csv(f'{base_dir}/billboard-2.0-manychords.csv', index=False)
len(df_songs)
# lazycats #3
def squash_consecutive_duplicates(arr):
    """Collapse each run of repeated values to a single occurrence.

    E.g. [1,1,2,2,3,3,3] -> [1, 2, 3].
    """
    assert(len(arr.shape) == 1)
    # positions where a new run starts, i.e. the element differs from
    # its predecessor (the very first element always opens a run)
    run_starts = np.where(arr[1:] - arr[:-1])[0] + 1
    return np.concatenate((arr[:1], arr[run_starts]))
assert(len(squash_consecutive_duplicates(chord_labels))==len(contiguous_lengths(chord_labels)))
squash_consecutive_duplicates(chord_labels)
from tqdm import tqdm
classes = dataloader._MAJMIN_CLASSES
df_songs['first_chord'] = ''
df_songs['min_chord'] = ''
df_songs['min_chord_length'] = 0
df_songs['min_chord_length_index'] = -1
for ix in tqdm(df_songs.index):
_id = df_songs.loc[ix].id
chroma_vectors, chord_labels = dataloader.get_chord_features_and_labels(_id, label_type='majmin')
# get contiguous chord lengths; ignore start & end chords as they might be truncated
chord_lengths = contiguous_lengths(chord_labels)[1:-1]
chord_sequence = squash_consecutive_duplicates(chord_labels)
if len(chord_lengths) == 0:
print(f'{_id}: empty chord length')
continue
df_songs.loc[ix, 'first_chord'] = classes[chord_labels[0]]
min_index = np.argmin(chord_lengths)
df_songs.loc[ix, 'min_chord_length_index'] = 1+min_index # offset from ignoring start chord
df_songs.loc[ix, 'min_chord'] = classes[chord_sequence[1+min_index]]
df_songs.loc[ix, 'min_chord_length'] = chord_lengths[min_index]
df_songs.first_chord.value_counts()
```
Here we know most songs start with "No chord".
```
df_songs.min_chord_length_index.value_counts(bins=[0, 1, 5, 20, 100, 500])
df_songs.min_chord_length.value_counts(bins=[0, 1, 5, 20, 100, 500])
df_songs[df_songs.min_chord_length <= 5]
```
Here we observe, most chords will last 5-20 chroma vectors (~250ms-1s). We'll probably use 4 chroma vectors as the base unit for identifying a chord.
#### Temporal features
Next, let's observe how good our chroma features are with time component. It will be good to start with those with most ambiguity.
```
import pandas as pd
base_dir = 'data/McGill-Billboard'
data_index = 'billboard-2.0-manychords.csv'
df_songs = pd.read_csv(f'{base_dir}/{data_index}')
import dataloader
import numpy as np
import lazycats.np as catnp
# per song feature analysis
chroma_vectors, chord_labels = dataloader.get_chord_features_and_labels(1289, label_type='majmin')
for chord_label in set(chord_labels):
if chord_label == 0:
continue
chord_name = dataloader._MAJMIN_CLASSES[chord_label]
root, chord_bitmap = dataloader.encode_to_chordino_chroma(chord_name)
filt = (chord_labels == chord_label)
vecs = chroma_vectors[filt]
bass_vecs = vecs[:, :12]
treble_vecs = vecs[:, 12:]
# remove all zeros for comparison
bass_vecs = bass_vecs[~np.all(bass_vecs <= 0.01, axis=1)]
treble_vecs = treble_vecs[~np.all(treble_vecs <= 0.01, axis=1)]
basses = np.argmax(bass_vecs, axis=1)
bass_match = np.mean(basses == root)*100
ext_bass_match = bass_match + np.mean(basses == (root-1)%12)*100
ext_bass_match += np.mean(basses == (root+1)%12)*100
# get only on treble
vec_ixs = catnp.top_k_indices(treble_vecs, 4, axis=1)
label_ixs = np.where(chord_bitmap)[0]
chord_match = np.mean(np.all(np.isin(vec_ixs[:, :3], label_ixs), axis=1))
ext_match = np.mean((np.sum(np.isin(vec_ixs, label_ixs), axis=1))==3) # ext chord
print(f'{chord_name} (B:{len(bass_vecs)}, T: {len(treble_vecs)}) - '
f'Bass match: {bass_match:.2f} - Ext bass match: {ext_bass_match:.2f} - '
f'Chord match: {chord_match:.2f} - Ext match: {ext_match:.2f}')
# bit of visualization
from librosa.display import specshow
from dataloader import _CHROMA_NOTES_CHORDINO
import matplotlib.pyplot as plt
%matplotlib inline
chromagram = chroma_vectors[chord_labels==dataloader._MAJMIN_CLASS_INDEX_MAP['D:maj']][:500, :12].T
fig, ax = plt.subplots(figsize=(15, 5))
specshow(chromagram, x_axis='time', sr=44100)
_ = ax.set_yticks(range(len(chromagram)))
notes = _CHROMA_NOTES_CHORDINO
if len(chromagram) == 24:
notes += _CHROMA_NOTES_CHORDINO
_ = ax.set_yticklabels(notes)
filt = (chord_labels==dataloader._MAJMIN_CLASS_INDEX_MAP['E:min'])
print(catnp.contiguous_lengths(filt.astype(int)))
print(catnp.squash_consecutive_duplicates(filt.astype(int)))
chromagram = chroma_vectors[chord_labels==dataloader._MAJMIN_CLASS_INDEX_MAP['E:min']].T
fig, ax = plt.subplots(figsize=(15, 5))
specshow(chromagram, x_axis='time', sr=44100)
_ = ax.set_yticks(range(len(chromagram)))
notes = _CHROMA_NOTES_CHORDINO
if len(chromagram) == 24:
notes += _CHROMA_NOTES_CHORDINO
_ = ax.set_yticklabels(notes)
step_size, chroma_timestamps, chroma_vectors = dataloader.get_chroma_matrix(308,
return_timestamps=True,return_step_size=True)
chroma_timestamps[2417:2420]
chord_timestamps, chord_labels_str = dataloader.get_chord_labels(_id, label_type='majmin')
chord_labels = dataloader.encode_chords_single_label(chord_labels_str)
chord_timestamps[-5:]
from importlib import reload
reload(dataloader)
import lazycats.np as catnp
chroma_vectors, chord_labels = dataloader.get_chord_features_and_labels(308, label_type='majmin',
remove_ambiguous=False)
len(chroma_vectors), len(chord_labels)
# now check ff.
# histogram of chroma count before chord changes (i.e. chord duration)
# min & max
# where min and max occurs
# what transition occurs at min/max
df_songs
from tqdm import tqdm
per_song_chord_dur = []
all_chord_durs = []
for ix in tqdm(df_songs.index):
_id = df_songs.loc[ix].id
try:
chroma_vectors, chord_labels = dataloader.get_chord_features_and_labels(_id, label_type='majmin',
remove_ambiguous=False)
except Exception as e:
print(f'Error {e} at {_id}')
break
chord_durs = catnp.contiguous_lengths(chord_labels)
squashed_labels = catnp.squash_consecutive_duplicates(chord_labels)
n_chords = len(chord_durs)
assert(n_chords==len(squashed_labels))
per_song_chord_dur.append(chord_durs)
all_chord_durs.extend(chord_durs)
min_ix = np.argmin(chord_durs)
min_dur = chord_durs[min_ix]
max_ix = np.argmax(chord_durs)
max_dur = chord_durs[max_ix]
min_trans = (squashed_labels[min_ix], squashed_labels[min_ix+1] if min_ix < (n_chords-1) else -1)
max_trans = (squashed_labels[max_ix], squashed_labels[max_ix+1] if max_ix < (n_chords-1) else -1)
df_songs.loc[ix, 'n_chords'] = n_chords
df_songs.loc[ix, 'min_ix'] = min_ix
df_songs.loc[ix, 'min_dur'] = min_dur
df_songs.loc[ix, 'max_ix'] = max_ix
df_songs.loc[ix, 'max_dur'] = max_dur
df_songs.loc[ix, 'min_trans'] = str(min_trans)
df_songs.loc[ix, 'max_trans'] = str(max_trans)
#break
df_songs.to_csv(f'{base_dir}/billboard-2.0-manychords-chorddurations.csv', index=False)
all_chord_durs = np.array(all_chord_durs)
from collections import Counter
dur_counts = Counter(all_chord_durs)
dur_counts.most_common()
np.mean(all_chord_durs)
```
Maybe a sequence length of 64/128 makes sense
```
df_songs.head(n=3)
df_songs.min_dur.mean(), df_songs.max_dur.mean()
df_songs.min_trans.value_counts()[:10] # most minimum duration start from no-chord
df_songs.max_trans.value_counts()[:10] # most maximum duration is from no-chord to ending
```
Now, proceed to training a bi-LSTM-CRF w/ sequence length of 64. To do that, we need to prep the data in sequences.
```
# prepend zeros in chord labels; need to shift labels by 1; support pre-pad/post-pad
# need to add zeros??? or mean??? in features
# possible lazycats #4
import dataloader
import numpy as np
import lazycats.np as catnp
# per song feature analysis
chroma_vectors, chord_labels = dataloader.get_chord_features_and_labels(1289, label_type='majmin')
# to know what value to pad, it might be worth looking at the feats/labels
all_chroma_vectors = np.load('data/01_all_chroma_vectors.npy')
all_chord_labels = np.load('data/01_all_chord_labels.npy')
all_chroma_vectors.shape, all_chord_labels.shape
mean_no_chord_vec = np.mean(all_chroma_vectors[all_chord_labels == 0], axis=0)
mean_no_chord_vec, np.mean(mean_no_chord_vec)
from collections import Counter
weak_feat_labels = all_chord_labels[np.all(all_chroma_vectors <= 0.0, axis=1)]
print(len(weak_feat_labels))
Counter(weak_feat_labels)
```
Based on info above, we pad zero vectors for feature subsequences, and zeros for label subsequences (no need to shift the label).
```
(1,2,*np.array([1]).shape[1:])
def divide_to_subsequences(seq, sub_len, pad=0, pre_pad=True):
    """
    Divide a sequence array into fixed-length subsequences along the
    outermost axis, padding the final subsequence as needed.

    seq: array-like of shape (n, ...).
    sub_len: length of each subsequence.
    pad: padding element; its shape must match one element of `seq`.
    pre_pad: pad the front of the last subsequence if True, else the back.
    Returns an array of shape (n_subsequences, sub_len, ...).
    Raises ValueError on a pad-shape mismatch.
    """
    seq = np.array(seq)
    pad = np.array(pad)
    # raise instead of assert so the check survives `python -O`
    if pad.shape != seq.shape[1:]:
        raise ValueError(
            f'pad shape {pad.shape} does not match element shape {seq.shape[1:]}')
    pad_len = 0
    rem = len(seq) % sub_len
    if rem > 0:
        pad_len = sub_len - rem
    n_full = len(seq) // sub_len  # num. of subseq. that need no pads
    n_nopads = sub_len * n_full
    subseq = seq[:n_nopads].reshape((n_full, sub_len, *seq.shape[1:]))
    if pad_len > 0:
        pad_block = np.array([pad] * pad_len)
        if pre_pad:
            padded_subseq = np.append(pad_block, seq[n_nopads:], axis=0)
        else:
            padded_subseq = np.append(seq[n_nopads:], pad_block, axis=0)
        subseq = np.append(subseq, padded_subseq[np.newaxis, :], axis=0)
    return subseq
print(divide_to_subsequences(chord_labels, sub_len=64, pad=0).shape)
print(divide_to_subsequences(chroma_vectors, sub_len=64, pad=[0]*24).shape)
print(divide_to_subsequences(np.array([1,1,1,1,2,2,2,2,3,3,3]), sub_len=8, pad=0))
print(divide_to_subsequences(np.array([1,1,1,1,2,2,2,2,3,3,3]), sub_len=8, pad=0, pre_pad=False))
# create dataset
import pandas as pd
base_dir = 'data/McGill-Billboard'
data_index = 'billboard-2.0-manychords.csv'
df_songs = pd.read_csv(f'{base_dir}/{data_index}')
df_songs.set_index('id', inplace=True)
len(df_songs)
df_songs.head(n=3)
# pre-generate sequences
import os
import pickle
import joblib
from tqdm import tqdm
import lazycats.np as catnp
import dataloader
_DUMP_DIR = 'data/chordseq'
_LABEL_TYPE = 'majmin'
_SEQ_LEN = 128 #64 #32
_CHROMA_NUM_FEATS = 24
os.makedirs(_DUMP_DIR, exist_ok=True)
chordseq_dict = {}
for _id in tqdm(df_songs.index):
chroma_vectors, chord_labels = dataloader.get_chord_features_and_labels(_id, label_type=_LABEL_TYPE)
assert(chroma_vectors.shape[-1] == _CHROMA_NUM_FEATS)
chordseq_vectors = catnp.divide_to_subsequences(chroma_vectors, sub_len=_SEQ_LEN)
chordseq_labels = catnp.divide_to_subsequences(chord_labels, sub_len=_SEQ_LEN)
chordseq_dict[_id] = {'feats': chordseq_vectors, 'labels': chordseq_labels}
#break
#joblib.dump(chordseq_dict, f'{_DUMP_DIR}/{_LABEL_TYPE}_{_SEQ_LEN}.pkl')
with open(f'{_DUMP_DIR}/{_LABEL_TYPE}_{_SEQ_LEN}.pkl', 'wb') as f:
pickle.dump(chordseq_dict, f)
len(chordseq_dict)
```
#### Slicing (not used)
```
from itertools import islice
dict(islice(b.items(), 1, 3))
import joblib # lazycats.py
from itertools import islice
dump_dict = {}
dump_ix = 0
_DUMP_SIZE = 100
st = 0
ed = _DUMP_SIZE
while st < len(chordseq_dict):
dump_dict = dict(islice(chordseq_dict.items(), st, ed))
joblib.dump(dump_dict, f'{_DUMP_DIR}/{_LABEL_TYPE}_{_SEQ_LEN}_{dump_ix}.pkl')
dump_ix += 1
st = dump_ix*_DUMP_SIZE
ed = st+_DUMP_SIZE
# read
load_ix = 0
loaded_dict = {}
while True:
fn = f'{_DUMP_DIR}/{_LABEL_TYPE}_{_SEQ_LEN}_{load_ix}.pkl'
if not os.path.exists(fn):
break
loaded_dict.update(joblib.load(fn))
load_ix += 1
for k1, k2 in zip(chordseq_dict, loaded_dict):
assert(k1==k2)
assert(chordseq_dict[k1]==chordseq_dict[k2])
break
# with open(f'{_DUMP_DIR}/{_LABEL_TYPE}_{_SEQ_LEN}.pkl', 'rb') as f:
# c = pickle.load(f)
```
#### Onset/Tempo exploration
```
# visualize song first
chromagram = chroma_vectors[:200].T
#print(chord_labels[:200])
fig, ax = plt.subplots(figsize=(15, 5))
# xcoords = [0.22058956, 0.33088437, 2.20589566]
# for xc in xcoords:
# plt.axvline(x=xc, color='black')
specshow(chromagram, x_axis='time', sr=21, hop_length=1, cmap='coolwarm') # wrong SR
_ = ax.set_yticks(range(len(chromagram)))
notes = _CHROMA_NOTES_CHORDINO
if len(chromagram) == 24:
notes += _CHROMA_NOTES_CHORDINO
_ = ax.set_yticklabels(notes)
help(specshow)
# check if echonest might help
import json
def get_echonest_features(_id):
    """Load the precomputed Echonest analysis for one Billboard song.

    (Docstring fixed: the previous one was copy-pasted from
    `get_chord_labels` and described .lab chord files.)
    Returns the parsed echonest.json dict (keys include bars, beats,
    tatums, sections, segments, meta, track).
    """
    feat_fn = f'{base_dir}/{_id:04d}/echonest.json'
    with open(feat_fn, 'r') as f:
        # json.load reads the file object directly; no intermediate string
        feats = json.load(f)
    return feats
feats = get_echonest_features(1289)
feats.keys()
for k in feats:
print(len(feats[k]), type(feats[k]))
print(feats['bars'][0])
print(feats['beats'][0])
print(feats['tatums'][0])
print(feats['sections'][0])
print(feats['segments'][0])
print(feats['meta'])
print({k:feats['track'][k] for k in feats['track'] if 'string' not in k})
```
We can use tempo and time signature here. However, the Echonest API is no longer available; hence we use these values simply as a reference for the tempo estimation method we'll find or implement.
```
import librosa
def estimate_tempo(_id):
    """Estimate the tempo (BPM) of one Billboard song from its audio file."""
    audio_fn = f'data/audio/{_id:04d}/audio.wav'
    signal, sr = librosa.load(audio_fn, sr=None, mono=True)
    strength = librosa.onset.onset_strength(signal, sr=sr)
    return librosa.beat.tempo(onset_envelope=strength, sr=sr)
estimate_tempo(1289)
picked_ids = [1289, 736, 637, 270, 18] # those w/ audio
for _id in picked_ids:
feats = get_echonest_features(_id)
print(f"Echonest: {feats['track']['tempo']:.2f} ({feats['track']['tempo_confidence']:.2f})"
f", Librosa: {estimate_tempo(_id)[0]:.2f}")
audio_fn = f'data/audio/1289/audio.wav'
X, fs = librosa.load(audio_fn, sr=None, mono=True)
onset_env = librosa.onset.onset_strength(X, sr=fs)
chroma_vectors, chord_labels = dataloader.get_chord_features_and_labels(637, label_type='majmin')
onset_env = librosa.onset.onset_strength(np.mean(chroma_vectors, axis=1), sr=2048)
librosa.beat.tempo(onset_envelope=onset_env, sr=2048)
np.mean(chroma_vectors, axis=1).shape
```
#### Scratch
```
!head {dirname}/bothchroma.csv
dirname
```
| github_jupyter |
> **Note:** In most sessions you will be solving exercises posed in a Jupyter notebook that looks like this one. Because you are cloning a Github repository that only we can push to, you should **NEVER EDIT** any of the files you pull from Github. Instead, what you should do, is either make a new notebook and write your solutions in there, or **make a copy of this notebook and save it somewhere else** on your computer, not inside the `sds` folder that you cloned, so you can write your answers in there. If you edit the notebook you pulled from Github, those edits (possible your solutions to the exercises) may be overwritten and lost the next time you pull from Github. This is important, so don't hesitate to ask if it is unclear.
# Exercise Set 12: Linear regression models.
*Afternoon, August 20, 2018*
In this Exercise Set 12 we will work with regression models.
We import our standard stuff. Notice that we are not interested in seeing the convergence warning in scikit-learn so we suppress them for now.
```
import warnings
from sklearn.exceptions import ConvergenceWarning
warnings.filterwarnings(action='ignore', category=ConvergenceWarning)
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
%matplotlib inline
```
## Exercise Section 12.1: Implementing OLS solved with gradient descent
In this exercise we will try to implement the OLS estimator from scratch and solve it by numerical optimization using the gradient descent algorithm. Then we will fit it to some data, and compare our own solution to the standard solution from `sklearn`
> **Ex. 12.1.1**: Import the dataset `tips` from the `seaborn`.
*Hint*: use the `load_dataset` method in seaborn
```
# [Answer to Ex. 12.1.1]
# Load the example tips dataset
tips = sns.load_dataset("tips")
```
> **Ex. 12.1.2**: Convert non-numeric variables to dummy variables for each category (remember to leave one column out for each categorical variable, so you have a reference). Restructure the data so we get a dataset `y` containing the variable tip, and a dataset `X` containing the
features.
>> *Hint*: You might want to use the `get_dummies` method in pandas, with the `drop_first = True` parameter.
```
# [Answer to Ex. 12.1.2]
tips_num = pd.get_dummies(tips, drop_first=True)
X = tips_num.drop('tip', axis = 1)
y = tips_num['tip']
```
> **Ex. 12.1.3**: Divide the features and target into test and train data. Make the split 50 pct. of each. The split data should be called `X_train`, `X_test`, `y_train`, `y_test`.
>> *Hint*: You may use `train_test_split` in `sklearn.model_selection`.
```
# [Answer to Ex. 12.1.3]
from sklearn.model_selection import train_test_split
# 50/50 split; no random_state is set, so the split differs between runs
X_train, X_test, y_train, y_test = train_test_split(X,y, test_size=.5)
```
> **Ex. 12.1.4**: Normalize your features by converting to zero mean and one std. deviation.
>> *Hint 1*: Take a look at `StandardScaler` in `sklearn.preprocessing`.
>> *Hint 2*: If in doubt about which distribution to scale, you may read [this post](https://stats.stackexchange.com/questions/174823/how-to-apply-standardization-normalization-to-train-and-testset-if-prediction-i).
```
# [Answer to Ex. 12.1.4]
from sklearn.preprocessing import StandardScaler, PolynomialFeatures
# Fit the scaler on the training data only, then apply it to both sets,
# so no information from the test set leaks into the preprocessing.
norm_scaler = StandardScaler().fit(X_train)
X_train = norm_scaler.transform(X_train)
X_test = norm_scaler.transform(X_test)
```
> **Ex. 12.1.5**: Make a function called `compute_error` to compute the prediction errors given input target `y_`, input features `X_` and input weights `w_`. You should use matrix multiplication.
>
>> *Hint 1:* You can use the net-input fct. from yesterday.
>>
>> *Hint 2:* If you run the following code,
>> ```python
y__ = np.array([1,1])
X__ = np.array([[1,0],[0,1]])
w__ = np.array([0,1,1])
compute_error(y__, X__, w__)
```
>> then you should get output:
```python
array([0,0])
```
```
# [Answer to Ex. 12.1.5]
def net_input(X_, w_):
    """Linear predictor X_ @ w_[1:] + w_[0].

    The bias/intercept lives in w_[0]; X_ is assumed NOT to carry a
    column of ones, so the constant term is added separately.  (We could
    equivalently prepend a column of ones to X_ and use all of w_.)
    """
    return X_.dot(w_[1:]) + w_[0]

def compute_error(y_, X_, w_):
    """Prediction residuals y_ - Xw for the current weights."""
    return y_ - net_input(X_, w_)
```
> **Ex. 12.1.6**: Make a function to update the weights given input target `y_`, input features `X_` and input weights `w_` as well as learning rate, $\eta$, i.e. greek `eta`. You should use matrix multiplication.
```
# [Answer to Ex. 12.1.6]
def update_weight(y_, X_, w_, eta):
    """One batch gradient-descent step on the squared-error loss, in place.

    The residuals are computed once with the current weights, then both
    the intercept w_[0] and the slopes w_[1:] move a step of size eta
    along the (negative) gradient.
    """
    residuals = compute_error(y_, X_, w_)
    w_[0] += eta * residuals.sum()       # intercept update
    w_[1:] += eta * X_.T.dot(residuals)  # slope updates
```
> **Ex. 12.1.7**: Use the code below to initialize weights `w` at zero given feature set `X`. Notice how we include an extra weight that includes the bias term. Set the learning rate `eta` to 0.001. Make a loop with 50 iterations where you iteratively apply your weight updating function.
>```python
w = np.zeros(1+X.shape[1])
```
```
# [Answer to Ex. 12.1.7]
w = np.zeros(1+X.shape[1])  # one weight per feature plus the bias term in w[0]
error_train, error_test = [], []  # NOTE(review): unused here; error tracking is done in 12.1.8
for i in range(50):
    update_weight(y_train, X_train, w, 10**-3)  # eta = 0.001 as required
```
> **Ex. 12.1.8**: Make a function to compute the mean squared error. Alter the loop so it makes 100 iterations and computes the MSE for test and train after each iteration, plot these in one figure.
>> Hint: You can use the following code to check that your model works:
>>```python
from sklearn.linear_model import LinearRegression
reg = LinearRegression()
reg.fit(X_train, y_train)
assert((w[1:] - reg.coef_).sum() < 0.01)
```
```
# [Answer to Ex. 12.1.8]
def MSE(y_, X_, w_):
    """Mean squared prediction error of weights w_ on data (X_, y_)."""
    residuals = compute_error(y_, X_, w_)
    return (residuals ** 2).sum() / len(y_)
w = np.zeros(X.shape[1]+1)
# Record the MSE at the zero-weight start, then again after every update
MSE_train = [MSE(y_train, X_train, w)]
MSE_test = [MSE(y_test, X_test, w)]
for i in range(100):
    update_weight(y_train, X_train, w, 10**-3)
    MSE_train.append(MSE(y_train, X_train, w))
    MSE_test.append(MSE(y_test, X_test, w))
pd.Series(MSE_train).plot()  # training curve
pd.Series(MSE_test).plot()   # test curve on the same axes
```
> **Ex. 12.1.9 (BONUS)**: Implement your linear regression model as a class.
> ANSWER: A solution is found on p. 320 in Python for Machine Learning.
> **Ex. 12.1.10 (BONUS)**: Is it possible to adjust our linear model to become a Lasso? Is there a simple fix?
> ANSWER: No, we cannot exactly solve for the Lasso with gradient descent. However, we can make an approximate solution which is pretty close and quite intuitive - see good explanation [here](https://stats.stackexchange.com/questions/177800/why-proximal-gradient-descent-instead-of-plain-subgradient-methods-for-lasso).
## Exercise Section 12.2: Houseprices
In this example we will try to predict house prices using many variables (or features, as they are called in Machine Learning). Below we use scikit-learn's California housing data; the setup is inspired by Kaggle's competition on house prices, see information [here](https://www.kaggle.com/c/house-prices-advanced-regression-techniques). Kaggle is an organization that hosts competitions in building predictive models.
> **Ex. 12.2.0:** Load the california housing data with scikit-learn using the code below. Inspect the data set.
```
from sklearn.datasets import fetch_california_housing
from sklearn.model_selection import train_test_split
cal_house = fetch_california_housing()
# NOTE(review): .iloc[:,:-2] drops the last two feature columns
# (Latitude/Longitude in this dataset) — confirm that is intended.
X = pd.DataFrame(data=cal_house['data'],
                 columns=cal_house['feature_names'])\
    .iloc[:,:-2]
y = cal_house['target']
# Fixed random_state makes the split reproducible across runs
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5, random_state=1)
print(X_train.head(3))
```
> **Ex.12.2.1**: Generate interactions between all features to third degree, make sure you **exclude** the bias/intercept term. How many variables are there? Will OLS fail?
> After making interactions rescale the features to have zero mean, unit std. deviation. Should you use the distribution of the training data to rescale the test data?
>> *Hint 1*: Try importing `PolynomialFeatures` from `sklearn.preprocessing`
>> *Hint 2*: If in doubt about which distribution to scale, you may read [this post](https://stats.stackexchange.com/questions/174823/how-to-apply-standardization-normalization-to-train-and-testset-if-prediction-i).
```
# [Answer to Ex. 12.2.1]
# This will be in assignment 2
```
> **Ex.12.2.2**: Estimate the Lasso model on the train data set, using values of $\lambda$ in the range from $10^{-4}$ to $10^4$. For each $\lambda$ calculate and save the Root Mean Squared Error (RMSE) for the test and train data.
> *Hint*: use `logspace` in numpy to create the range.
```
# [Answer to Ex. 12.2.2]
# This will be in assignment 2
```
> **Ex.12.2.3**: Make a plot with $\lambda$ on the x-axis and the RMSE measures on the y-axis. What happens to RMSE for train and test data as $\lambda$ increases? The x-axis should be log scaled. Which one are we interested in minimizing?
> Bonus: Can you find the lambda that gives the lowest MSE-test score?
```
# [Answer to Ex. 12.2.3]
# This will be in assignment 2
```
| github_jupyter |
# Intro: from Categorical Features to Text Data
- Computers are good with numbers, but not that much with textual data.
- Text Analysis is a major application field for machine learning algorithms.
- However the raw data, a sequence of symbols cannot be fed directly to the algorithms themselves
- Most of them expect numerical feature vectors with a fixed size rather than the raw text documents with variable length.
- Most automatic mining of social media data relies on some form of encoding the text as numbers.
```
import sklearn as sk
import pandas as pd
import numpy as np
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns; sns.set()
```
- How do we approach this problem?
- One common type of non-numerical data is *categorical* data.
- For example, imagine you are exploring some data on housing prices,
and along with numerical features like "price" and "rooms", you also have "neighborhood" information.
- For example, your data might look something like this:
```
# Toy housing records: two numeric features plus one categorical feature
data = [
    {'price': 850000, 'rooms': 4, 'neighborhood': 'Queen Anne'},
    {'price': 700000, 'rooms': 3, 'neighborhood': 'Fremont'},
    {'price': 650000, 'rooms': 3, 'neighborhood': 'Wallingford'},
    {'price': 600000, 'rooms': 2, 'neighborhood': 'Fremont'}
]
```
You might be tempted to encode this data with a straightforward numerical mapping:
```
{'Queen Anne': 1, 'Fremont': 2, 'Wallingford': 3};
```
- One inconvenience: models make the fundamental assumption that numerical features reflect algebraic quantities.
- Thus such a mapping would imply, for example, **order** i.e. that *Queen Anne < Fremont < Wallingford*, or
even that *Wallingford - Queen Anne = Fremont*, which does not make much sense.
- Use *one-hot encoding*,
- Which effectively creates extra columns
indicating the presence or absence of a category with a value of 1 or 0, respectively.
<img src="figures/hotencoding.png" width="30%">
When your data comes as a list of dictionaries, Scikit-Learn's ``DictVectorizer`` will do this for you:
```
from sklearn.feature_extraction import DictVectorizer
# sparse=False returns a dense ndarray; dtype=int keeps the 0/1 indicators integral
vec = DictVectorizer(sparse=False, dtype=int)
vec.fit_transform(data)
```
- The 'neighborhood' column is expanded into three separate columns representing the three neighborhood labels
- Each row has a 1 in the column associated with its neighborhood.
- With these categorical features thus encoded, you can proceed as normal with fitting a Scikit-Learn model.
To see the meaning of each column, you can inspect the feature names:
```
# NOTE(review): get_feature_names() is removed in newer scikit-learn
# releases; use get_feature_names_out() there.
vec.get_feature_names()
```
- There is one clear disadvantage of this approach: if your category has many possible values, this can *greatly* increase the size of your dataset.
- However, because the encoded data contains mostly zeros, a sparse output can be a very efficient solution:
```
# sparse=True returns a scipy sparse matrix — efficient when most entries are zero
vec = DictVectorizer(sparse=True, dtype=int)
vec.fit_transform(data)
```
- Many (though not yet all) of the Scikit-Learn estimators accept such sparse inputs when fitting and evaluating models.
- ``sklearn.preprocessing.OneHotEncoder`` and ``sklearn.feature_extraction.FeatureHasher`` are two additional tools that Scikit-Learn includes to support this type of encoding.
## Text Feature Extraction
- Then, how do we convert text to a set of representative numerical values?
- One of the simplest methods of encoding data is by *word counts*:
- You take each snippet of text...
- Then count the occurrences of each word within it, and put the results in a table.
For example, consider the following set of three phrases:
```
# Three tiny "documents" used to illustrate word-count vectorization
sample = ['problem of evil',
          'evil queen',
          'horizon problem']
```
- For a vectorization of this data based on word count, we could construct a column representing the word "problem," the word "evil," the word "horizon," and so on.
- While doing this by hand would be possible, the **tedium** can be avoided by using Scikit-Learn's ``CountVectorizer``:
```
from sklearn.feature_extraction.text import CountVectorizer
vec = CountVectorizer()
X = vec.fit_transform(sample)  # sparse document-term count matrix
X
print(X)  # Prints only the nonzero entries, e.g. in doc 0 only words 0, 2 and 3 appear (count 1)
```
The result is a sparse matrix recording the number of times each word appears; it is easier to inspect if we convert this to a ``DataFrame`` with labeled columns:
```
import pandas as pd
# Dense view of the counts with one labelled column per vocabulary word
pd.DataFrame(X.toarray(), columns=vec.get_feature_names())
```
- Main ISSUE with this approach, however: **the raw word counts lead to features which put
too much weight on words that appear very frequently**
- and this can be sub-optimal in some classification algorithms.
- One approach to fix this is known as *term frequency-inverse document frequency* (*TF–IDF*)
- It weights the word counts by a measure of how often they appear in the documents.
| github_jupyter |
```
import numpy as np
from cmdstanpy import cmdstan_path, CmdStanModel
from pathlib import Path
import matplotlib.pyplot as plt
import json
import pandas as pd
```
## Nile example
Let us consider here the following system fitted to the Nile river dataset often used to illustrate time series modeling. Check <url>https://mjlaine.github.io/dlm/dlmtut.html</url> for some more details about the example.
<img src="https://latex.codecogs.com/svg.image?A&space;=&space;\begin{bmatrix}1&space;&&space;1&space;\\0&space;&&space;1\end{bmatrix},&space;\&space;C&space;=&space;&space;\begin{bmatrix}1&space;&&space;0&space;\end{bmatrix},&space;\&space;R&space;=&space;\theta_1^2,&space;\&space;Q&space;=&space;&space;\begin{bmatrix}\theta_2^2&space;&&space;0&space;\\0&space;&&space;\theta_3^2&space;\end{bmatrix}" title="A = \begin{bmatrix}1 & 1 \\0 & 1\end{bmatrix}, \ C = \begin{bmatrix}1 & 0 \end{bmatrix}, \ R = \theta_1^2, \ Q = \begin{bmatrix}\theta_2^2 & 0 \\0 & \theta_3^2 \end{bmatrix}" align="left"/>
### 1) Define the functions that build the needed matrices
Here, we write a Stan functions block that builds the matrices given the parameters. Note that the noise parameters are separated from the "other" parameters so that we can assign priors separately for them (e.g. enforce positivity). Note that here we don't have any model parameters, just some noise parameters. Note also that we don't have the $B$ matrix here, so we just define a dummy zero matrix so that we can apply the general DLM code.
```
# Stan "functions" block for the Nile local-level + trend DLM: builders for
# the system matrices A, B, C and the noise covariances Q, R.  B is a dummy
# (all zeros) because this model has no exogenous input; noise parameters
# enter squared so that only magnitudes matter.
nile_functions = """
functions {
matrix build_A(vector theta) {
matrix[2,2] A;
A[1,1] = 1;
A[1,2] = 1;
A[2,1] = 0;
A[2,2] = 1;
return A;
}
matrix build_B(vector theta) {
matrix[2,1] B;
B[1,1] = 0;
B[2,1] = 0;
return B;
}
matrix build_C(vector theta) {
matrix[1,2] C;
C[1,1] = 1;
C[1,2] = 0;
return C;
}
matrix build_Q(vector noise_theta) {
matrix[2,2] Q;
Q[1,1] = square(noise_theta[2]);
Q[2,2] = square(noise_theta[3]);
Q[1,2] = 0;
Q[2,1] = 0;
return Q;
}
matrix build_R(vector noise_theta) {
matrix[1,1] R;
R[1,1] = square(noise_theta[1]);
return R;
}
}
"""
```
### 2) Compile the code with `CmdStanPy`
First, we read the general DLM stan code and append the user-defined functions to the model file.
```
# Concatenate the model-specific functions with the generic DLM Stan code,
# write out the combined program, and compile it with CmdStanPy.
dlm_code = Path('../dlm.stan').read_text()
Path('nile.stan').write_text(nile_functions + dlm_code);
nile_model = CmdStanModel(stan_file='nile.stan')
```
### 3) Construct the data
The general DLM code always needs the same input data variables. Note that the vectors and matrices need to be given as python lists so that the serialization to JSON works.
```
# Annual Nile river flow observations (the classic Nile time series)
y = [1120, 1160, 963, 1210, 1160, 1160, 813, 1230, 1370, 1140, 995, 935,
     1110, 994, 1020, 960, 1180, 799, 958, 1140, 1100, 1210, 1150, 1250,
     1260, 1220, 1030, 1100, 774, 840, 874, 694, 940, 833, 701, 916,
     692, 1020, 1050, 969, 831, 726, 456, 824, 702, 1120, 1100, 832, 764,
     821, 768, 845, 864, 862, 698, 845, 744, 796, 1040, 759, 781, 865,
     845, 944, 984, 897, 822, 1010, 771, 676, 649, 846, 812, 742, 801,
     1040, 860, 874, 848, 890, 744, 749, 838, 1050, 918, 986, 797, 923,
     975, 815, 1020, 906, 901, 1170, 912, 746, 919, 718, 714, 740]
# Input data for the generic DLM Stan program; vectors/matrices are plain
# Python lists so they serialize to JSON.
nile_data = {
    'N_obs': len(y),
    'N_theta': 1, # dummy theta
    'N_noise_theta': 3,
    'state_dim': 2,
    'input_dim': 1, # dummy input
    'obs_dim': 1,
    'Y_obs': [[yi] for yi in y],
    'U_obs': len(y)*[[0.]], # dummy
    'm0': [1100.0, 0.0],
    'P0': [[200.**2, 0], [0, 1.**2]],
    'theta_mu': [0.], # dummy
    'theta_Sig': [[1.]], # dummy
    'noise_mu': [100., 10., 10.],
    'noise_Sig': [[10.**2, 0, 0], [0, 10**2, 0], [0,0,10**2]]
}
with open('nile_data.json', 'w') as f:
    json.dump(nile_data, f)
```
## 4) Fit the model with HMC
Save the output files to a separate output folder to keep things clean. Print the fit object to get some details about the files that were produced.
```
# Run HMC sampling; CmdStan's CSV output files go to ./output
nile_fit = nile_model.sample(data='nile_data.json', output_dir='output')
print(nile_fit)
```
## 5) Access the parameters and plot results
Let us draw the sampled states and parameters. Sampled parameters can be accessed with `CmdStanPy`:s helper `stan_variable`.
```
draws_noise = nile_fit.stan_variable(name='noise_theta')  # posterior draws of the noise parameters
draws_x = nile_fit.stan_variable(name='x_samples')        # posterior draws of the latent states
plt.figure(figsize=(7,4))
plt.plot(range(len(y)), draws_x[0::10,:,0].T, c='0.5', alpha=0.2)  # every 10th sampled state path
plt.plot(range(len(y)), np.quantile(draws_x[0:,:,0].T, [0.025, 0.975], axis=1).T, 'k--')  # 95% interval
plt.plot(range(len(y)), np.quantile(draws_x[0:,:,0].T, 0.5, axis=1), 'k-')  # posterior median
plt.plot(range(len(y)), y, 'r.')  # observations
plt.grid(True)
plt.show()
pd.plotting.scatter_matrix(pd.DataFrame(draws_noise));
```
| github_jupyter |
```
# Sanity check that the notebook kernel runs
print("hello world")
```
print("hello world")
#大字号
### 大字号
# 大
```
import matplotlib.pyplot as plt
# Basic line plot using the conventional plt alias
x=[1,2,3,4]
y=[10,20,30,40]
plt.plot(x,y)
plt.show()
from matplotlib import pyplot
# Same idea, importing the pyplot module under its full name
x=[1,2,3,4]
y=[10,40,30,40]
pyplot.plot(x,y)
pyplot.show()
# NOTE(review): x has 9 values but range(0,8) yields only 8 — this plot()
# call raises a dimension-mismatch error; range(0,9) was presumably intended.
x=[12,33,42,123,12,3,21,12,3]
y=(range(0,8))
pyplot.plot(x,y)
pyplot.show()
# NOTE(review): y is a one-element list holding a range object, not numbers;
# list(range(1,8)) was probably intended (and lengths still differ from x).
x=[12,33,42,123,12,3,21,12,3]
y=[range(1,8)]
pyplot.plot(x,y)
pyplot.show()
# NOTE(review): y has 7 values but x has 8 — another length mismatch.
y=[17,17,18,15,11,11,13]
x=list(range(0,8))
pyplot.plot(x,y)
pyplot.show()
# Control the figure size (in inches) and resolution (dots per inch)
pyplot.figure(figsize=(20,8),dpi=100)
y=[15,13,14.5,17,20,25,26,26,27,22,18,15]
x=range(2,26,2)
pyplot.plot(x,y)
pyplot.show()
# Same plot, saved to a file relative to the working directory
pyplot.figure(figsize=(20,8),dpi=100)
y=[15,13,14.5,17,20,25,26,26,27,22,18,15]
x=range(2,26,2)
pyplot.plot(x,y)
pyplot.savefig("./test2.png")
pyplot.show()
# Same plot, saved to an absolute path
pyplot.figure(figsize=(20,8),dpi=100)
y=[15,13,14.5,17,20,25,26,26,27,22,18,15]
x=range(2,26,2)
pyplot.plot(x,y)
pyplot.savefig("/Users/camelia-dev/Desktop/1111111111111.png")
pyplot.show()
from matplotlib import pyplot
# Thin out the x-axis ticks: keep only every 4th tick position
pyplot.figure(figsize=(20,8),dpi=100)
y=[15,13,14.5,17,20,25,26,26,27,22,18,15]
x=range(2,26,2)
pyplot.plot(x,y)
pyplot.xticks(x[::4])
pyplot.savefig("/Users/camelia-dev/Desktop/1111111111111.png")
pyplot.show()
from matplotlib import pyplot
# Place a y-axis tick at every observed y value
pyplot.figure(figsize=(20,8),dpi=100)
y=[15,13,14.5,17,20,25,26,26,27,22,18,15]
x=range(2,26,2)
pyplot.plot(x,y)
pyplot.yticks(y)
pyplot.savefig("/Users/camelia-dev/Desktop/1111111111111.png")
pyplot.show()
from matplotlib import pyplot
from matplotlib import font_manager
# Load a custom font via the font manager (needed to render CJK tick labels)
my_font=font_manager.FontProperties(fname="mmd.ttf")
pyplot.figure(figsize=(10,8),dpi=200)
import random
y=[random.randint(20,35) for i in range(120)]
x=range(120)
pyplot.plot(x,y)
# Convert the tick positions into string labels (minute-of-hour timestamps)
_xticks=["10点{}分".format(i) for i in range(120) if i<60]
_xticks+=["11点%s分"%i for i in range(60)]
print(_xticks)
pyplot.xticks(x[::4],_xticks[::4],rotation=45,font_properties=my_font)
pyplot.yticks(y)
pyplot.savefig("/Users/camelia-dev/Desktop/1111111111111.png")
pyplot.show()
from matplotlib import pyplot
from matplotlib import font_manager
# Same example with the custom font disabled; without it the CJK labels
# will typically render as empty boxes.
# my_font=font_manager.FontProperties(fname="mmd.ttf")
pyplot.figure(figsize=(10,8),dpi=200)
import random
y=[random.randint(20,35) for i in range(120)]
x=range(120)
pyplot.plot(x,y)
# Convert the tick positions into string labels (minute-of-hour timestamps)
_xticks=["10点{}分".format(i) for i in range(120) if i<60]
_xticks+=["11点%s分"%i for i in range(60)]
print(_xticks)
pyplot.xticks(x[::4],_xticks[::4],rotation=45) # font_properties=my_font
pyplot.yticks(y)
pyplot.savefig("/Users/camelia-dev/Desktop/1111111111111.png")
pyplot.show()
from matplotlib import pyplot
from matplotlib import font_manager
# Load the custom font via the font manager again
my_font=font_manager.FontProperties(fname="mmd.ttf")
pyplot.figure(figsize=(10,8),dpi=200)
import random
y=[random.randint(20,35) for i in range(120)]
x=range(120)
pyplot.plot(x,y)
# Convert the tick positions into string labels (minute-of-hour timestamps)
_xticks=["10点{}分".format(i) for i in range(120) if i<60]
_xticks+=["11点%s分"%i for i in range(60)]
print(_xticks)
pyplot.xticks(x[::4],_xticks[::4],rotation=45,font_properties=my_font)
pyplot.yticks(y)
pyplot.savefig("/Users/camelia-dev/Desktop/1111111111111.png")
pyplot.show()
```
| github_jupyter |
# Simulating Language, Lab 8, Convergence to the Prior
This simulation implements a simplified version of the language model from Kirby, Dowman & Griffiths (2007) using an explicit agent-based simulation.
```
import random
%matplotlib inline
import matplotlib.pyplot as plt
from IPython.display import set_matplotlib_formats
set_matplotlib_formats('svg', 'pdf')  # vector formats for crisp inline figures
from math import log, log1p, exp
from scipy.special import logsumexp
from numpy import mean # This is a handy function that calculates the average of a list
```
Following Kirby, Dowman & Griffiths (2007), we assume a language is made up of a set of *variables*, each of which can exist in a number of different *variant* forms. This is a rather general characterisation that actually applies well to a number of linguistic phenomena. For example, we can think of the variables as different syntactic categories, and the variants as word orders. Alternatively, the variables could be verb-meanings and the variants different realisations of the past tense, and so on. Agents will produce (and learn from) data which simply exemplifies which variant they have for a particular variable (with the possibility of noise on transmission). We will group languages into two classes: regular languages (where the same variant is used for all variables) and irregular languages (where more than one variant is used).
```
variables = 2 # The number of different variables in the language
variants = 2 # The number of different variants each variable can take
```
## Functions for dealing with log probabilities
Here are our standard functions for dealing with logs, as before.
```
def log_subtract(x, y):
    """Return log(exp(x) - exp(y)) computed stably; requires x > y."""
    complement = -exp(y - x)
    return x + log1p(complement)
def normalize_logprobs(logprobs):
    """Shift a list of log probabilities so they sum to 1 in the probability domain."""
    logtotal = logsumexp(logprobs)  # log of the summed probabilities
    # Subtracting the log of the total is dividing by the total in the
    # normal domain, so each entry becomes a proper log probability.
    return [lp - logtotal for lp in logprobs]
def log_roulette_wheel(normedlogs):
    """Sample an index i with probability exp(normedlogs[i]).

    Draws a uniform random number (in the log domain) and walks the
    cumulative distribution until it is exceeded.

    Fix: the original loop indexed normedlogs[i + 1] on the final
    iteration, raising IndexError whenever floating-point rounding left
    the accumulated total slightly below the random draw.  We now stop
    one short and fall back to the last index, which absorbs any
    rounding remainder.
    """
    r = log(random.random())  # log of a uniform draw in [0, 1)
    accumulator = normedlogs[0]
    for i in range(len(normedlogs) - 1):
        if r < accumulator:
            return i
        accumulator = logsumexp([accumulator, normedlogs[i + 1]])
    return len(normedlogs) - 1  # rounding guard: remainder goes to the last index
```
We're also going to have a function that works a little like the `log_roulette_wheel` but instead always picks the most probable index (instead of picking indices proportional to their probability). We sometimes call this a "winner take all" function. While `log_roulette_wheel` can be used to implement *sampling*, `wta` can be used to implement *MAP* hypothesis selection. If there is more than one winner, then we pick randomly among them.
```
def wta(probs):
    """Winner-take-all: index of the maximum value, ties broken uniformly at random.

    Works on raw probabilities or log probabilities alike, since only
    the ordering matters.
    """
    best = max(probs)
    winners = [i for i, p in enumerate(probs) if p == best]
    return random.choice(winners)
```
## Production of data
```
def produce(language, log_error_probability):
    """Produce one (variable, variant) utterance from the given language.

    A random variable is chosen uniformly; with probability
    exp(log_error_probability) a uniformly-chosen *wrong* variant is
    produced instead of the one the language specifies.
    """
    variable = random.randrange(len(language))
    intended = language[variable]
    if log(random.random()) > log_error_probability:
        # No production error: emit the variant the language specifies
        return variable, intended
    # Production error: choose uniformly among all other variants
    alternatives = [v for v in list(range(variants)) if v != intended]
    return variable, random.choice(alternatives)
```
The function produce takes a language, selects a random variable, and produces the relevant variant from the language, with a certain probability of error.
- By looking at this code, can you tell how languages are represented in the simulation?
- Can you see how errors on production work?
1. * The way I do this is to find the `language` variable, which is one of the input arguments on the first line here, and then see what the function does with it.
* It gets used straight away - in the second line - to pick a `variable`. The function in that line is `random.randrange()`: this function picks a random **integer** between `0` and some number, and the only *argument* it needs is an integer, which tells it how many integers to choose from (so if you want a number between `0` and `n`, you use `n+1` as the argument, because the upper endpoint is excluded).
* This means that `len(language)` must be an integer (which we know must be true because `len()` returns an integer), and that we're asking `random.randrange()` to pick from a number which is equal to the length of the list of `language`.
* So, this must mean that the number of possible `variable`s is the same as the length of `language`
* So it's a pretty good bet that every `language` is a list of `variable`s, where the *index* of each item in the list represents a *different* `variable`.
* So the index is the variant, but what do the values of the list mean?
* The next line tells us what values mean: `correct_variant` is picked using the index of `language` which we picked in the previous line.
* So, to summarise - we know that `language` is a list, and we know that the *index* of every item in the list represents a different `variable`, and that the *value* of every item in the list represents the `variant`.
2. * Stepping through the code, we first see an if statement which checks some log probabilities to make a decision. Let's ignore their loggyness as they're not important here.
* We check whether `random.random()` - a random number between 0 and 1 - is bigger than (log)`error_probability`, which is usually quite small, say 0.001 for example.
* If `error_probability` is quite small, then `random.random` will almost always be bigger, and in this case the function just returns the intended `variable`/`variant` pair and the function exits.
* In the rare case that it's actually smaller than `error_probability`, then... there's been an error! Then we go into the `else` condition, which will tell us what to do.
* We see that `possible_error_variants` creates a list with the same length as the number of possible `variant`s, and goes on to `remove` the `correct_variant`
* This means we will pick an `error_variant` which can be anything *except* the intended variant, which makes sense.
* `random.choice` now comes out to play, which simply picks a member of a list at random. In this case, it will pick an `error_variant`, which can be any of the variants except the intended one.
* Finally, the function exits and returns a pair with the correct `variable` but the *wrong* `variant`, and the sneaky error has entered the system. That's it!
## Classifying languages
In this language model, prior probability is determined by language class: regular languages differ from irregular languages in their prior probability, and ultimately we are interested in the proportion of our simulated population who use regular languages. We therefore need a function to take a language and classify it as regular or not - the function `regular` does this.
At this point, I should highlight a bit of a problematic bit of terminology that is used in the field. We've already seen the word "regular" in this course with our beta-binomial simulations, referring to whether or not something is given a variable label or a regular one. We're not using "regular" in that sense here. Instead we're using it in the same way linguists refer to "regular verbs" or "irregular verbs", for example. Regular verbs in English are ones that use a general strategy for forming the past tense (for example: *walk/walked*) whereas irregular verbs are ones that use an idiosyncratic strategy for forming the past tense (for example: *go/went*). Our language is "regular" in this sense if it uses the same strategy for each of its meanings.
```
def regular(language):
    """Return True iff every variable uses the same variant (a fully regular language).

    Robustness fix: the original indexed language[0] and so raised
    IndexError on an empty language; an empty language is now treated as
    (vacuously) regular.  A language is regular exactly when it contains
    at most one distinct variant.
    """
    return len(set(language)) <= 1
```
* The purpose of this function is to check whether a particular language is regular or not. The function uses the fact that we know that, in a completely regular language, every `variable` must be the same type of `variant`.
* So all we need to do here is check that every item in the list is the same type, i.e. just a list of the same value repeated every time.
* There are a lot of ways that we *could* do this, but the one used here is sensible:
* `first_variant` records the value of the first item in the `language` list by checking index `0`
* The for loop sets up a check for every `variant` in the `language` list
* If at any point the `variant` being checked is not the same as `first_variant`, that means that the language has more than one value in it.
* In code language, if `variant != first_variant`, then we immediately know that the language is not regular, and can exit with an authoritative `False`, as there is no need to check any more items.
* On the other hand, if we manage to get all the way to the end of the list without finding any non-conformist variants, we finish the for loop knowing that the language must be regular, and can exit with `True`, hurrah
## The Bayesian bits
```
def logprior(language, log_bias):
    """Log prior probability of one particular language.

    The total prior mass exp(log_bias) for regularity is split evenly
    among the regular languages; the remaining mass (1 - bias) is split
    evenly among the irregular ones.
    """
    if regular(language):
        # There is exactly one fully regular language per variant.
        n_regular = variants
        return log_bias - log(n_regular)  # subtracting logs = dividing
    # Total language count is variants ** variables (** = exponentiation),
    # so everything that is not regular is irregular.
    n_irregular = variants ** variables - variants
    # log(1) is 0, so log_subtract(0, log_bias) is (1 - bias) in the
    # non-log domain.
    return log_subtract(0, log_bias) - log(n_irregular)
```
* We know that a regular language is just a repeated list of the same value.
* This means there is only one possible regular language for each variant: if languages are represented by a list of 5 variants, and the only variants are `0` and `1`, then the possible regular languages would be `[0,0,0,0,0]` and `[1,1,1,1,1]`.
* So the number of possible regular languages is just the same as the number of variants, as we see in `number_of_regular_languages = variants`
* How many *irregular* languages are there? Well, first we can calculate the total number of possible languages overall...
* This is just like counting outcomes of a coin-flip or a dice-roll: the number of possibilities for the first variable (coin-flip/dice-roll), times the numer of possibilities for the second, times etc etc
* So say languages are represented by 5 `variable`s, and there are 3 `variant`s, the total number of possible languages is `3 * 3 * 3 * 3 * 3`
* A quicker way of writing that is $3^5$, or in Python `3**5`
* So the total number of possible langages is `variants ** variables`
* BUT: that includes all regular **and** irregular languages.
* But since we know that the number of *regular* languages is just the same as the number of `variants`, we can just subtract that from the total number, hence `number_of_irregular_languages = variants ** variables - variants`
The function `logprior` returns the prior probability (as a log probability) of a particular language. The strength of preference for regular languages is given as the second argument - if bias is over 0.5 (when converted back from a log probability), regular languages have higher prior probability.
- Why are we dividing the bias by the number of regular and irregular languages in this function? Check you understand how these numbers are calculated.
- How does this function differ from the prior from the Kirby, Dowman & Griffiths (2007) paper? (Hint: consider the case of more than two variables.)
1. * First, it's important to know that this function calculates the prior probability of a *particular* language
* Second, `log_bias` is the *overall* bias of a system towards regular languages *in general*, i.e. not a particular regular language but *all* of them lumped together.
* This means that all of the prior probability which is allocated towards regular languages, `log_bias`, needs to be split between them.
* So, to calculate the bias towards any *particular* regular language, we need to spread the total regularity bias equally between them.
* So, when we divide `log_bias` by the numer of regular langugages, we're just sharing the total *general* bias for regular languages between the actual *particular* ones, that's all.
* Oh no its a log! Don't worry, remember that logs are our friends, and subtracting in logworld is the same as dividing normal numbers.
* But what about irregular languages? Well, if the bias towards regular languages *overall*, *in general*, is $bias$, then the bias towards *irregular* languages must be $1 -bias$.
* Oh dear, it's the dreaded logs again - what do we do with `log_bias`? Don't worry, all `log_subtract(0, log_bias)` is doing is $1 - bias$ but off in logland (if you're worried about why it's using `0` and not `1`, it's because $log(1) = 0$, but just don't worry anyway).
* Finally, the `- log(number_of_irregular_languages)` bit is just doing the same thing as above, i.e. sharing out the general bias - but this time towards irregular, not regular languages, evenly across all the possible irregular languages.
2. The prior in the KDG paper is a lot fancier, but put simply, it measures *how* regular/irregular languages are, and the regularity bias then applies differently to specific languages depending on how regular they are. So, for example, if there is a strong bias for regularity, it will result in a strong bias for not just the completely regular languages, but also quite a strong preference for the *almost* regular languages; equally, it would have a very strong preference against maximally irregular languages (i.e. with equal numbers of each variant), and a little less strong for the slightly less irregular ones.
```
def loglikelihood(data, language, log_error_probability):
    """Log likelihood of the observed data under a given language.

    Each utterance in `data` is a (variable, variant) pair.  A speaker
    produces the variant their language specifies for that variable with
    probability 1 - error, and each of the remaining `variants - 1`
    variants with an equal share of the error probability.  Working in
    log space, summing log likelihoods = multiplying likelihoods.

    NOTE(review): relies on the notebook globals `variants`, `log` and
    `log_subtract` being defined elsewhere.
    """
    logp_correct = log_subtract(0, log_error_probability)  # log(1 - error)
    logp_incorrect = log_error_probability - log(variants - 1)  # logprob of each incorrect variant
    total = 0
    for utterance in data:
        produced = utterance[1]
        target = language[utterance[0]]
        total += logp_correct if produced == target else logp_incorrect
    return total
```
The function `loglikelihood` takes a list of data and a language and works out the (log) likelihood of the data given the language. We allow some small probability (given by the third argument) that a speaker will produce the ‘wrong’ variant, i.e. a variant other than that specified by their language.
## Learning
Bayesian learners calculate the posterior probability of each language based on some data, then select a language (‘learn’) based on those posterior probabilities. `learn` implements this. As discussed in the lecture, there are two ways you could select a language based on the posterior probability distribution:
- You could pick the best language - i.e. the language with the highest posterior probability. This is called MAP (“maximum a posteriori”) learning.
- Alternatively, you could pick a language probabilistically based on its posterior probability, without necessarily going for the best one every time (e.g. if language 0 has twice the posterior probability of language 1, you are twice as likely to pick it). This is called sampling (for “sampling from the posterior distribution”).
The next bits of code implement both these ways of learning, using the `wta` function to do MAP learning and using `log_roulette_wheel` to do sampling (from previous labs, which assumed learners sample from the posterior). `all_languages` enumerates all possible languages for expressing `variables` variables and `variants` variants using a cute recursive method.
```
def all_languages(variables, variants):
    """Enumerate every possible language over `variables` variables.

    A language is a list of length `variables` whose i-th entry is the
    variant (an int in 0 .. variants-1) used for variable i.  Built
    recursively: the languages over n variables are the languages over
    n - 1 variables, each extended by every possible variant.
    """
    if variables == 0:
        # Base case: exactly one language over zero variables - the empty one.
        return [[]]
    return [shorter + [variant]
            for shorter in all_languages(variables - 1, variants)
            for variant in range(variants)]
all_languages(2,3)
```
Don’t worry too much if you can’t figure out how it works, but you might get an idea if you figure out what steps it would take when called with different arguments, like `all_languages(0, 2)`, `all_languages(1, 2)`, `all_languages(2, 2)`, `all_languages(0, 3)`, `all_languages(1, 3)` and so on. Try that out in the cell below. You should also now be able to see if you were right with your answer to the question above as to how languages are represented in this code.
* Honestly, don't worry about how the recursive function works - they're cool and interesting in their own right, and recursion is a big topic in language, but this recursive function is just a fancy way of building the list of all possible languages for the model. If your interest has been piqued, look up 'recursive functions Python simple explanation' or something like that on Google!
Finally, `learn` implements hypothesis selection.
```
def learn(data, log_bias, log_error_probability, learning_type):
    """Bayesian hypothesis selection: pick a language given observed data.

    Computes the (unnormalised) log posterior of every possible language,
    then selects one either by MAP ('map': highest posterior, via `wta`)
    or by sampling ('sample': proportional to posterior, via
    `log_roulette_wheel`).

    Raises:
        ValueError: if `learning_type` is not 'map' or 'sample'
            (previously this case silently returned None).
    """
    list_of_all_languages = all_languages(variables, variants)  # uses the parameters we set above
    list_of_posteriors = []
    for language in list_of_all_languages:
        # log posterior (up to a normalising constant) = log likelihood + log prior
        this_language_posterior = loglikelihood(data, language, log_error_probability) + logprior(language, log_bias)
        list_of_posteriors.append(this_language_posterior)
    if learning_type == 'map':
        map_language_index = wta(list_of_posteriors)  # For MAP learning, we pick the best language
        return list_of_all_languages[map_language_index]
    if learning_type == 'sample':
        normalized_posteriors = normalize_logprobs(list_of_posteriors)
        sampled_language_index = log_roulette_wheel(normalized_posteriors)  # For sampling, we use the roulette wheel
        return list_of_all_languages[sampled_language_index]
    raise ValueError("learning_type must be 'map' or 'sample', got %r" % (learning_type,))
```
## Iterated learning
`iterate`, is the top-level function which actually runs the simulation. It starts with a random language from the set of possible languages, and then each generation a learner learns from data produced by the previous agent. This function returns a list indicating whether the language is regular or not each generation. For convenience we encode the regular languages as `1` in this list and `0` otherwise. It also returns a second list, showing what each language was per generation in case you want more details. (Generally, we're just going to be using the first list for plotting graphs etc.)
```
def iterate(generations, bottleneck, log_bias, log_error_probability, learning_type):
    """Run an iterated-learning chain and track language regularity.

    Starts from a random language; each generation a new learner acquires
    a language from `bottleneck` utterances produced by the previous
    agent.  Returns two parallel lists: 1/0 flags marking whether each
    generation's language is regular, and the languages themselves.
    """
    language = random.choice(all_languages(variables, variants))
    regularity_record = [1 if regular(language) else 0]
    language_record = [language]
    for _ in range(generations):
        data = [produce(language, log_error_probability) for _ in range(bottleneck)]
        language = learn(data, log_bias, log_error_probability, learning_type)
        regularity_record.append(1 if regular(language) else 0)
        language_record.append(language)
    return regularity_record, language_record
```
So, for example, to do an iterated learning simulation for 1000 generations, a bottleneck of 5, with a bias in favour of regularity of 0.6, an error probability of 0.05, with sampling as the learning type, you'd use:
```
iterate(1000, 5, log(0.6), log(0.05), 'sample')
```
## Questions
**Note:** To answer some of these questions, you're going to have to run quite long simulation runs (up to 100000 generations to get super accurate results). In general, you probably want to keep the bottleneck values between 1 and 10.
1. Using the default parameters suggested above, can you demonstrate convergence to the prior? You may want to start with a line graph of the results to get a sense of what's going on in the simulation. You could also plot a histogram to get the overall proportion of regular languages in the whole run, but notice that this is just the same as the average of your whole list so you don't really need a graph for this! You can get the average directly by using, for example, `print(mean(data[0]))` if `data` is the result of your simulation run (remember the `iterate` function returns whether the language at each generation is regular or not as the first part of the data it returns, which is why we use `data[0]`).
*Let's start out using a bottleneck of 5 and run a bunch of runs and plot them all on the same graph*
```
for i in range(10):
data = iterate(1000, 5, log(0.6), log(0.05), 'sample')
plt.plot(data[0])
plt.xlabel("generations")
plt.ylabel("regularity")
```
*Hmmm... that's not very useful, is it? Let's do a histogram of one run instead!*
```
data = iterate(1000, 5, log(0.6), log(0.05), 'sample')
plt.hist(data[0])
plt.xlabel("regularity")
```
*Ok, that seems a bit more informative. Notice that it does look like the prior bias is reflected here - it's 0.6 by default, so we'd expect approximately 600 of our 1000 generations should be regular. But it's a bit of a silly graph. It's really only reflecting a single number (the proportion of 1's in the list). So let's just find out the mean instead.*
```
print(mean(data[0]))
```
*This doesn't look quite right. Maybe we need to run the simulation for longer?*
```
data = iterate(10000, 5, log(0.6), log(0.05), 'sample')
print(mean(data[0]))
```
*OK, that's better! Now we can try some different values for the prior. Remember that the prior is given as a log.*
```
biases = [0.2, 0.4, 0.6, 0.8]
for bias in biases:
print(bias, mean(iterate(10000, 5, log(bias), log(0.05), 'sample')[0]))
```
*It looks like the average of a long run is the same as the prior.*
2. How does changing the error rate and bottleneck size affect the results for the sample learner? Make sure you run the simulation long enough to get repeatable results.
```
errors = [0.0005, 0.005, 0.05, 0.5]
for error in errors:
print(error, mean(iterate(10000, 5, log(0.6), log(error), 'sample')[0]))
```
*Our convergence to the prior result appears to be unaffected by errors. That's a bit odd. It seems like it really doesn't matter what the speaker is saying! Let's try that with different bottleneck sizes too...*
```
bottlenecks = range(1,11)
for bottleneck in bottlenecks:
print(bottleneck, mean(iterate(10000, bottleneck, log(0.6), log(0.05), 'sample')[0]))
```
*Just like with the error term, the size of the bottleneck seems to be irrelevant for the convergence result! Are they doing anything at all? Let's look at individual runs instead...*
```
plt.plot(iterate(10000, 10, log(0.6), log(0.0005), 'sample')[0], label='low error, large bottleneck')
plt.xlabel('generations')
plt.ylabel('regularity')
plt.legend()
plt.plot(iterate(10000, 10, log(0.6), log(0.05), 'sample')[0], label='medium error, large bottleneck')
plt.xlabel('generations')
plt.ylabel('regularity')
plt.legend()
plt.plot(iterate(10000, 7, log(0.6), log(0.005), 'sample')[0], label='low error, medium bottleneck')
plt.xlabel('generations')
plt.ylabel('regularity')
plt.legend()
plt.plot(iterate(10000, 7, log(0.6), log(0.05), 'sample')[0], label='medium error, medium bottleneck')
plt.xlabel('generations')
plt.ylabel('regularity')
plt.legend()
```
*What these results show is that although the iterated learning chain spends the same proportion of time in the set of regular languages across all these different parameter settings and this is determined only by the prior bias (which is what we mean by "convergence to the prior"), the other parameters change how quickly the language changes from one state to another.*
3. Switch the learning type to MAP learning, and rerun the simulation. Can you show the amplification of prior bias that is shown in the paper?
```
biases = [0.2, 0.4, 0.6, 0.8]
for bias in biases:
print(bias, mean(iterate(10000, 5, log(bias), log(0.05), 'map')[0]))
```
*This looks like amplification of the prior bias. For biases in favour of regular languages (i.e. over 0.5), we get **more** regular languages than we would expect. For biases favouring the non-regular languages, we get **fewer** regular languages than we would expect.*
4. How is bias amplification affected by the bottleneck size? Can you run a range of simulations for different bottleneck sizes, find the means, and plot these nicely in a graph?
*I'm going to try this with longer runs, because the numbers above look a bit messy to me, so maybe we need a longer run to get a more accurate reading of the proportions.*
```
results = []
bottlenecks = range(1,11)
for bottleneck in bottlenecks:
results.append(mean(iterate(100000, bottleneck, log(0.6), log(0.05), 'map')[0]))
```
*Notice that I've got a separate cell for running the simulation and plotting the results. That's because it's taking several minutes to run the simulations now, and I don't want to have to redo them every time I want to tweak my graph!*
```
plt.plot(range(1, 11), results, label="bias=0.6")
plt.xlabel("bottleneck size")
plt.ylabel("proportion regular")
plt.legend()
```
*We're clearly seeing bias amplification (all these numbers are higher than 0.6), and it's affected by the bottleneck (tighter bottlenecks lead to more amplification). Let's try different priors (this really does take a while to run).*
```
biases = [0.2, 0.4, 0.6, 0.8]
all_results = []
for bias in biases:
results = []
for bottleneck in range(1, 11):
results.append(mean(iterate(100000, bottleneck, log(bias), log(0.05), 'map')[0]))
all_results.append(results)
for i in range(len(all_results)):
plt.plot(range(1, 11), all_results[i],
label="bias=" + str(biases[i])) # trick to convert number to a string
plt.xlabel("bottleneck size")
plt.ylabel("proportion regular")
plt.legend()
```
*Well, look at that! This is almost the opposite of the result for samplers. Now, **the strength of the bias doesn't matter** - just which side of 0.5 it is, and, as we saw above, the size of the bottleneck **does** matter... Just for completeness, let's add the results for samplers just to make it super clear! I'm also going to take a bit more care with colours so the graph is a bit more readable.*
*N.B. This simulation is very slow, I assume because there's a lot more maths involved in doing the roulette wheel compared to just picking the best. To stop myself getting bored waiting, I put a print in the loop so I had something to watch...*
```
biases = [0.2, 0.4, 0.6, 0.8]
all_results2 = []
for b in biases:
results = []
bias = b
for bottleneck in range(1, 11):
print(b, bottleneck)
results.append(mean(iterate(100000, bottleneck, log(bias), log(0.05), 'sample')[0]))
all_results2.append(results)
colours = ['red', 'orange', 'purple', 'blue']
for i in range(len(all_results)):
plt.plot(range(1, 11), all_results[i], color = colours[i],
label="map, bias=" + str(biases[i]))
for i in range(len(all_results2)):
plt.plot(range(1, 11), all_results2[i], color = colours[i], linestyle='dashed',
label="sampler, bias=" + str(biases[i]))
plt.xlabel("bottleneck size")
plt.ylabel("proportion regular")
plt.legend()
```
| github_jupyter |
# Disambiguating Advection and KdV
Any single soliton travelling wave solution of the KdV equation (1) also solves the advection equation with $c$ depending on the speed of translation, which for KdV depends on the amplitude. However, if two waves have different amplitudes and speeds, they may both solve the KdV equation but will not solve the same advection equation.
\begin{align}
u_t &+ 6uu_x + u_{xxx} = 0 \hspace{2cm} (1)\\
u_t &+ cu_x = 0\hspace{3.5cm} (2)
\end{align}
Here we show that looking at a single travelling wave solution, we recover the advection equation (at least for the two amplitudes shown), but when we look at both we get the KdV equation. Observing that waves with different amplitudes travel at different rates indicates the need for nonlinearity in the evolution equation.
```
%pylab inline
pylab.rcParams['figure.figsize'] = (12, 8)
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
from PDE_FIND import *
```
# Generate a few solutions to each equation
For any $c$, the function below is a very simple closed form solution to the KdV and advection equations. However, it always solves the KdV equation regardless of the choice of $c$ while solutions with different values of $c$ will each solve an advection equation but they won't be the same one.
```
# Soliton solution to KdV/advection equations
def soliton(x, t, c, a):
    """Single-soliton travelling wave u(x,t) = (c/2) sech^2(sqrt(c)/2 (x - c t - a)).

    Solves the KdV equation for any speed c, and also solves the advection
    equation u_t + c u_x = 0 for that particular c.  `a` shifts the peak.
    """
    phase = np.sqrt(c) / 2 * (x - c * t - a)
    return (c / 2) / np.cosh(phase) ** 2
c1 = 5.0
c2 = 1.0
n = 256
m = 50
x = linspace(-10, 12, n)
dt = 0.025
dx = x[1]-x[0]
t = linspace(dt,m*dt,m)
U1 = np.zeros((n,m))
U2 = np.zeros((n,m))
for i in range(n):
for j in range(m):
U1[i,j] = soliton(x[i],t[j],c1,-3)
U2[i,j] = soliton(x[i],t[j],c2,-1)
X, T = np.meshgrid(x, t)
fig1 = figure()
ax = fig1.gca(projection='3d')
surf = ax.plot_surface(X, T, U1.T, rstride=1, cstride=1, cmap=cm.coolwarm,
linewidth=0, antialiased=False)
X, T = np.meshgrid(x, t)
fig1 = figure()
ax = fig1.gca(projection='3d')
surf = ax.plot_surface(X, T, U2.T, rstride=1, cstride=1, cmap=cm.coolwarm,
linewidth=0, antialiased=False)
```
## Look at the solutions individually
For each of the travelling waves, we get an advection equation.
```
# First look at the soliton with speed = 5
Ut1, R1, rhs_des1 = build_linear_system(U1, dt, dx, D=3, P=2, time_diff = 'FD', space_diff = 'FD')
lam = 10**-5
d_tol = 5
w = TrainSTRidge(R1,Ut1,lam,d_tol)
print "PDE derived using STRidge with L0 penalty"
print_pde(w, rhs_des1)
# Now with speed 1
Ut2, R2, _ = build_linear_system(U2, dt, dx, D=3, P=2, time_diff = 'FD', space_diff = 'FD')
w = TrainSTRidge(R2,Ut2,lam,d_tol)
print "PDE derived using STRidge with L0 penalty"
print_pde(w, rhs_des1)
```
## Both solutions $\rightarrow$ KdV
Taking the two linear systems and simply stacking them so that we're looking for a PDE that accurately reflects the dynamics of each yields the KdV equation.
```
# Now look for a solution for both linear systems -> kdv
R = np.vstack([R1, R2])
Ut = np.vstack([Ut1,Ut2])
w = TrainSTRidge(R,Ut,lam,d_tol)
print "PDE derived using STRidge with L0 penalty"
print_pde(w, rhs_des1)
```
| github_jupyter |
# GCB535 - Creating a pipeline for analysis ... from scratch!
## Instructions
In this adventure, you will create your own analysis pipeline to analyze the included set of data from scratch!
For this purpose, don't worry about making an Rscript pipeline. Use your notebooks (or save/utilized UNIX command) to do your work.
We have provided data for you to analyze:
* Median expression (summarized from n=226 subjects) for n=56,200 transcripts (TPM) from GTEx (v8) in Liver
30_Data_Pipelines-II/GTEx_Analysis_2017-06-05_v8_RNASeQCv1.1.9_gene_tpm_liveronly_medonly.gct
* ATAC-Seq Peak calls (PMID: 30890710) from HEPG2 cells:
30_Data_Pipelines-II/HEPG2_encode.bed
We'd like to do the following:
1. Obtain the set of genes that are in the top 10% of genes expressed in Liver from GTEx v8 as measured by the median TPM expression. (Refer to this as the set of "highly expressed" liver genes.)
2. Obtain the gene boundaries (i.e., `start_position` to `end_position`) for this list of highly expressed liver genes. Remove any entries that have strange chromosome labels (e.g. are not "1", "2", ..., "Y"; but may be "CHR_XXX").
3. Identify the set of regions of accessible chromatin ascertained in the HEPG2 cell line that overlap with the start / end positions of the list of highly expressed liver genes.
4. Assess the enrichment of overlap between the regions of accessible chromatin from HEPG2 with the gene boundaries of highly expressed liver genes.
5. Identify the subset of genes which are both highly expressed in liver *and* have regions of accessible chromatin in HEPG2 overlapping with them, as observed from your analysis in item 3, above.
**Provide your pipeline / code below.**
**This may feel like a daunting task! But you're ready and you have everything that you need to get started.**
Some points that may help you:
* Starting by writing down what you need to accomplish. Use comments (`#`) to articulate these tasks.
* Then, take each task and break them down further. If you get to a point where you have a specific piece of code in mind, you are in good shape.
* For ease, you might also consider making each of these "points" in their own "cell" in your notebook. That way, you don't need to re-execute all of your code each time you want to test if a *specific* block of code you write does what you want.
* Remember when you are coding, write a *little* bit, then test it. Once a little bit of code works, build upon it until you get a longer chain that works and complete the task you want to accomplish.
* Print statements are your friend! `print()`, or `head()` to make sure the data, table, or variables you have created *actually* are what you want / intend.
* For parts of item 3/4/5, you might consider using `bedtools` for this, which means that your pipeline might have a series of R that you run, then you step out into UNIX to do some work. You can always do that work, then return to your R pipeline in your notebook once that work is complete!
* For Item 5, you might check out the `-wa` argument in `bedtools intersect`. This might help you output the specific list of gene intervals that you can use as a cross-reference!
| github_jupyter |
<img src="https://raw.githubusercontent.com/brazil-data-cube/code-gallery/master/img/logo-bdc.png" align="right" width="64"/>
# <span style="color: #336699">Land use and land cover classification in the Brazilian Cerrado biome using Brazil Data Cube</span>
<hr style="border:2px solid #0077b9;">
<br/>
<div style="text-align: center;font-size: 90%;">
Rolf E. O. Simões <sup><a href="mailto:rolf.simoes@inpe.br"><i class="far fa-lg fa-envelope"></i></a> <a href="https://orcid.org/0000-0003-0953-4132"><i class="fab fa-lg fa-orcid" style="color: #a6ce39"></i></a></sup>, Alber H. Sanchez <sup><a href="mailto:alber.ipia@inpe.br"><i class="far fa-lg fa-envelope"></i></a> <a href="https://orcid.org/0000-0001-7966-2880"><i class="fab fa-lg fa-orcid" style="color: #a6ce39"></i></a></sup>, Felipe M. Carlos <sup><a href="mailto:felipe.carlos@inpe.br"><i class="far fa-lg fa-envelope"></i></a> <a href="https://orcid.org/0000-0002-3334-4315"><i class="fab fa-lg fa-orcid" style="color: #a6ce39"></i></a></sup>, Leonardo S. Vieira <sup><a href="mailto:leonardo.vieira@inpe.br"><i class="far fa-lg fa-envelope"></i></a> <a href="https://orcid.org/0000-0002-3397-6232"><i class="fab fa-lg fa-orcid" style="color: #a6ce39"></i></a></sup>,<br/>
Karine R. Ferreira <sup><a href="mailto:karine.ferreira@inpe.br"><i class="far fa-lg fa-envelope"></i></a> <a href="https://orcid.org/0000-0003-2656-5504"><i class="fab fa-lg fa-orcid" style="color: #a6ce39"></i></a></sup>, Lubia Vinhas <sup><a href="mailto:lubia.vinhas@inpe.br"><i class="far fa-lg fa-envelope"></i></a> <a href="https://orcid.org/0000-0003-1104-3607"><i class="fab fa-lg fa-orcid" style="color: #a6ce39"></i></a></sup>, Gilberto R. Queiroz<sup>* <a href="mailto:gilberto.queiroz@inpe.br"><i class="far fa-lg fa-envelope"></i></a> <a href="https://orcid.org/0000-0001-7534-0219"><i class="fab fa-lg fa-orcid" style="color: #a6ce39"></i></a></sup>
<br/><br/>
Earth Observation and Geoinformatics Division, National Institute for Space Research (INPE)
<br/>
Avenida dos Astronautas, 1758, Jardim da Granja, São José dos Campos, SP 12227-010, Brazil
<br/><br/>
<sup>*</sup> Author to whom correspondence should be addressed.
<br/><br/>
February 24, 2021
</div>
<br/>
<div style="text-align: justify; margin-left: 10%; margin-right: 10%;">
<b>Abstract.</b> This Jupyter Notebook compendium contains useful information for the creation of land use and land cover (LULC) maps using Earth observations data cubes and machine learning (ML) techniques. The code is based on the research pipeline described in the paper <em>Earth Observation Data Cubes for Brazil: Requirements, Methodology and Products</em>. These notebooks access open data available in the Brazil Data Cube platform.
</div>
<br/>
<div style="text-align: justify; margin-left: 15%; margin-right: 15%;font-size: 75%; border-style: solid; border-color: #0077b9; border-width: 1px; padding: 5px;">
<b>This Jupyter Notebook is supplement to the <a href="https://www.mdpi.com/2072-4292/12/24/4033/htm#sec5-remotesensing-12-04033" target="_blank">Section 5</a> of the following paper:</b>
<div style="margin-left: 10px; margin-right: 10px">
Ferreira, K.R.; Queiroz, G.R.; Vinhas, L.; Marujo, R.F.B.; Simoes, R.E.O.; Picoli, M.C.A.; Camara, G.; Cartaxo, R.; Gomes, V.C.F.; Santos, L.A.; Sanchez, A.H.; Arcanjo, J.S.; Fronza, J.G.; Noronha, C.A.; Costa, R.W.; Zaglia, M.C.; Zioti, F.; Korting, T.S.; Soares, A.R.; Chaves, M.E.D.; Fonseca, L.M.G. 2020. Earth Observation Data Cubes for Brazil: Requirements, Methodology and Products. Remote Sens. 12, no. 24: 4033. DOI: <a href="https://doi.org/10.3390/rs12244033" target="_blank">10.3390/rs12244033</a>.
</div>
</div>
# <span style="color: #336699">Extracting time series from sample locations</span>
<hr style="border:1px solid #0077b9;">
This document describes the process of extracting time series from EO data cubes associated with LULC samples. This information is required during the creation and training of the model based on a MultiLayer Perceptron neural network described later in the notebooks related to the classification process.
In general, at this stage, as shown in the Figure 1, for each sample, a temporal spectral signature is associated considering different bands and vegetation indices. This allows each sample's labels to be linked to a specific temporal variation pattern, which is used by the algorithms for classification.
<div align="center">
<img src="https://raw.githubusercontent.com/brazil-data-cube/code-gallery/master/img/bdc-article/land-trajectory.png" width="600px">
</div>
<br/>
<center><b>Figure 1</b> - Temporal patterns based on spectral signature.<br/><b>Source:</b> Victor Maus</center>
# <span style="color: #336699">The SITS R package</span>
<hr style="border:1px solid #0077b9;">
For the time series extraction process we use the [SITS R package](https://github.com/e-sensing/sits). It can be loaded with the `library` command:
```
library(sits)
```
The access to the Brazil Data Cube (BDC) data products requires an **access key**. If you have an account for the Brazil Data Cube services, [sign in](https://brazildatacube.dpi.inpe.br/auth-app) and replace the key value in the code cell below. If you do not have an account, [sign up](https://brazildatacube.dpi.inpe.br/auth-app) to create an account and get the access key.
```
MY_ACCESS_KEY <- "change-me"
Sys.setenv(BDC_ACCESS_KEY = MY_ACCESS_KEY)
```
## <span style="color: #336699">Crop Calendar</span>
<hr style="border:0.5px solid #0077b9;">
We use the region's agricultural calendar year that ranges from September 2018 to August 2019:
```
start_date <- "2018-09-01"
end_date <- "2019-08-31"
```
## <span style="color: #336699">Sample Data Points</span>
<hr style="border:0.5px solid #0077b9;">
The sample set that will be used to train the MLP model described in the manuscript contains 922 data points organized into three classes:
* Crop (242)
* Natural Vegetation (422)
* Pasture (258)
The file `training-samples.csv` in the folder `training-samples` contains the labeled data points.
```
sample_file <- "https://brazildatacube.dpi.inpe.br/geo-knowledge-hub/bdc-article/training-samples/training-samples.csv"
```
As one can see, this file contains the location (`latitude`/`longitude`), a timestamp (`start_date`/`end_date`) and a label:
```
head( read.csv(sample_file) )
```
The SITS package contains some facilities to read the location of these samples and to extract the time series from data cubes. This process will result in a dataframe where each sample point is associated to time series from spectral bands and vegetation indices.
## <span style="color: #336699">Creating the Output Directory</span>
<hr style="border:0.5px solid #0077b9;">
Let's create an output folder for the datasets generated by this notebook. We are going to choose a folder named `training-samples`:
```
output_dir <- "training-samples"
dir.create(
path = output_dir,
showWarnings = FALSE,
recursive = TRUE
)
```
## <span style="color: #336699">Time Series Extraction</span>
<hr style="border:0.5px solid #0077b9;">
### <span style="color: #336699">CBERS-4/AWFI (16 days 'stack')</span>
The `sits_cube` function can be used to define the working data cube and the interest spectral bands and vegetation indices:
```
cb4_cube <- sits_cube(
type = "BDC",
url = "https://brazildatacube.dpi.inpe.br/stac/",
collection = "CB4_64_16D_STK-1",
start_date = start_date,
end_date = end_date,
bands = c("BAND15", "BAND14", "BAND13", "BAND16", "NDVI", "EVI")
)
```
In the above cell we provided the following arguments
* `type`: Inform that the data cube will be based on the Brazil Data Cube definitions.
* `url`: The SpatioTemporal Asset Catalog address for BDC data cubes. In this sandbox you can use the following address: https://brazildatacube.dpi.inpe.br/stac/.
* `collection`: Defines the use of CBERS-4/AWFI data cube.
* `start_date` and `end_date`: The temporal interval of interest.
* `bands`: The list of spectral bands and spectral indices that will be accessed.
The `sits_get_data` function retrieves the time series from the defined data cube `cb4_cube` using the locations in the sample file:
```
cb4_samples_with_ts <- sits_get_data(cube = cb4_cube, file = sample_file)
```
The structure of the returned dataframe can be seen below:
```
head(cb4_samples_with_ts, 2)
```
If you want to plot the NDVI time series for all the LULC classes, use the following `plot` function:
```
plot(sits_select(cb4_samples_with_ts, bands = c("NDVI")))
```
If you are interested only in a single class:
```
plot(sits_select(cb4_samples_with_ts[cb4_samples_with_ts$label == "Pasture",], bands = c("NDVI")))
```
Finally, let's save the dataframe with the samples and time series in a file named `CB4_64_16D_STK_1.rds`. We are going to use it later on the classification notebook.
```
saveRDS(cb4_samples_with_ts, paste0(output_dir, "/CB4_64_16D_STK_1.rds"))
```
### <span style="color: #336699">Landsat-8/OLI (16 days 'stack')</span>
The following code cells repeat the process described on the previous section to the Landsat-8 data cube.
```
l8_cube <- sits_cube(
type = "BDC",
name = "bdc-l8",
url = "https://brazildatacube.dpi.inpe.br/stac/",
collection = "LC8_30_16D_STK-1",
start_date = start_date,
end_date = end_date,
bands = c("band4", "band3", "band2", "band5", "NDVI", "EVI")
)
l8_samples_with_ts <- sits_get_data(cube = l8_cube, file = sample_file)
saveRDS(l8_samples_with_ts, paste0(output_dir, "/LC8_30_16D_STK_1.rds"))
```
### <span style="color: #336699">Sentinel-2/MSI (16 days 'stack')</span>
The following code cells repeat the process described on the CBERS-4/AWFI data cube section to the Sentinel-2 data cube.
```
s2_cube <- sits_cube(
type = "BDC",
name = "bdc-s2",
url = "https://brazildatacube.dpi.inpe.br/stac/",
collection = "S2_10_16D_STK-1",
start_date = start_date,
end_date = end_date,
bands = c("band4", "band3", "band2", "band8", "NDVI", "EVI")
)
s2_samples_with_ts <- sits_get_data(cube = s2_cube, file = sample_file)
saveRDS(s2_samples_with_ts, paste0(output_dir, "/S2_10_16D_STK_1.rds"))
```
# <span style="color: #336699">Final Remarks</span>
<hr style="border:1px solid #0077b9;">
* A copy of the training samples with the time series for each satellite/sensor data cube can be found in the folder `training-samples/rds`.
* If you want to download and run this notebook in a workflow as a script, you can perform its parameterization through the [papermill library](https://github.com/nteract/papermill).
* The data cubes used to extract the time series are also available on [BDC-STAC](https://brazildatacube.dpi.inpe.br/stac/).
| github_jupyter |
## Title :
Exercise: CS109A Olympics
## Description :
<img src="../fig/fig1.png" style="width: 500px;">
## Data Description:
## Instructions:
- In this exercise, you will simulate the 100m sprint race discussed during the lecture.
- We have already defined for you a Sprinter() class which has two characteristics for each sprinter:
- Base time
- Performance variance
- Run the code cell that makes four instances of the `Sprinter()` class. You will work with those for the entire exercise.
- Call the time attribute of the helper class to get the time taken by a competitor in the actual race.
- First run the race simulation five times; you will do this by creating a dictionary with participant name as keys, and time taken in a simulated race as the values. You will sort this dictionary by values and determine the winner of the simulated race.
- Repeat the simulation of the race for 10,000 times and count who won the race for how many times. Based on this observation, you will then investigate why a particular participant won as many times?
- Repeat the simulation for 10,000 times, but this time get the distribution of times for each participant over these runs.
- Calculate the mean race time, standard deviation of the race time and the confidence interval for each participant.
- Use the helper code to observe a plot similar to the one given below:
<img src="../fig/fig2.png" style="width: 500px;">
## Hints:
<a href="https://realpython.com/python-counter/" target="_blank">Counter()</a>
Helps accumulating counts of objects in a certain data structure.
<a href="https://numpy.org/doc/stable/reference/generated/numpy.mean.html" target="_blank">np.mean()</a>
Used to calculate the mean of an array.
<a href="https://www.w3schools.com/python/ref_func_sorted.asp" target="_blank">sorted()</a>
Used to sort data.
<a href="https://numpy.org/doc/stable/reference/generated/numpy.std.html" target="_blank">np.std()</a>
Used to calculate the std deviation of an array.
<a href="https://numpy.org/doc/stable/reference/generated/numpy.percentile.html" target="_blank">np.percentile</a>
Used to calculate percentile of data inbetween a given range. Frequently used for calculating confidence intervals.
## CS109A Olympics : 100m dash
We are going to have 4 of our team members compete against each other in the 100m dash.
```
# Importing libraries
import numpy as np
from time import sleep
import os
from IPython.display import clear_output
from collections import Counter
from helper import Sprinter
from helper import run_sim
import matplotlib.pyplot as plt
from prettytable import PrettyTable
plt.xkcd(scale=0,randomness=4)
```
## Taking a look at the competitors
Each participant has a characteristic assigned to them. The characteristic has 2 parts :
1. Base speed : This is the time they gave in a non-competitive environment.
2. Performance variance : Based on the mood, weather and other conditions this measure determines how much a participant's time will vary.
```
# Name of sprinters
sprinters = ['Pavlos','Tale','Varshini','Hayden']
# Defining charactersistics, ('Base pace','performance variance')
characteristics = [(13,0.25),(12.5,0.5),(12.25,1),(14.5,1)]
sprinters_dict = {}
for idx,sprinter in enumerate(sprinters):
# Take note of the * before characteristics
sprinters_dict[sprinter] = Sprinter(*characteristics[idx])
```
## Running a race
`sprinters_dict` has keys as the name of each participant, and the value as a class. The `time` attribute of the class is the time taken by that person to run a race.
- Call `sprinters_dict['Pavlos'].time` for 10 different times.
```
# Call time attribute
___
```
## ⏸ Pause & Think
Run the cell above, once again. What do you observe?
A. Output is different because the python compile memory location has changed
B. Output is the same
C. Output changes because it is a new sample from random process
```
### edTest(test_chow0) ###
# Submit an answer choice as a string below (eg. if you choose option A put 'A')
answer = '___'
```
- Get the times for each participant by calling the `time` attribute and create a dictionary called `race`, which has the key as the name of the participant and value as the time taken by participant to run the race.
- Sort `race.items()` according to time and get the item in dictionary with the least time taken to finish and assign it to `winner`.
```
### edTest(test_race) ###
# Get the times for each participant and make a dictionary
race = ___
# Sort the items of the dictionary to get the winner
# Hint: Remember to sort by the values and not the keys
winner = ___
```
## Race simulation
As you would have noticed, every time you make a new dictionary `race`, the results would differ.
Redefine the `race` dictionary, and run the cell below for a simulation of the race!
```
# Get the times for each participant and make a dictionary
race = {sprinter:dash.time for sprinter,dash in sprinters_dict.items()}
# Sort the items of the dictionary to get the winner
winner = sorted(race.items(),key=lambda x:x[1])[0]
# Uncomment and execute the following code
# run_sim(race,winner)
```
## Multiple simulations
That was just one race; we want to find out who performs better over multiple races. So let's run the race 5 times.
- Run a loop for 5 times
- In each loop generate the race dictionary as done earlier, and get the winner after sorting `race.items()`
- Append name of the winners to the `winner_list`
Keep track of everyone's timings
```
# Run the simulation and append winners to the winner_list
# Create an empty list
winner_list = []
# Run a simulation for 5 loops
for simulation in range(5):
    # Create a race dictionary
    race = {k: v.time for k, v in sprinters_dict.items()}
    # Sort the items
    winner = sorted(race.items(), key=lambda x: x[1])[0]
    # Append the *name* of the winner to winner_list.
    # BUGFIX: `winner` is a (name, time) tuple, so take element 0 --
    # the comment above (and the 10,000-run cell later in this notebook,
    # which uses winner[0]) expect names only.
    winner_list.append(winner[0])
# Take a look at the winners list
winner_list
```
### Even more simulations
We will run 10,000 simulations and use the `Counter` to see who wins how many times.
Check the hints for how to use `Counter()`.
```
# Run the simulation and append winners to the winner_list
# Create an empty list
winner_list = []
# Run a simulation for 10000 loops
for simulation in range(10000):
# Create race dictionary
race = {k:v.time for k,v in sprinters_dict.items()}
# Sort the items
winner = sorted(race.items(),key=lambda x:x[1])[0]
# Append the name of the winner to winners_list
winner_list.append(winner[0])
# Display first 5 entries from winner_list
winner_list___
### edTest(test_wins) ###
# Get the counts for each person winning the race
# Hint: Use counter, look at the hints
wins = Counter(winner_list)
# Print wins to see the output of the simulation
print(___)
# Helper code to plot the wins of each sprinter
plt.bar(list(wins.keys()),list(wins.values()),alpha=0.5)
plt.xlabel('Sprinters')
plt.ylabel('Race wins',rotation=0,labelpad=30)
plt.show();
```
## Why is Varshini winning so much ?
Let us analyze why exactly Varshini is winning so frequently in our simulations.
But first, we will need to record the sprint timings for each sprinter in every simulation.
We will again run 10,000 simulations but this time record the individual sprint timings for each simulation instead.
- Make a new dictionary `race_results` with keys as the name of sprinters and the value as an empty list. We will append race results to this list after each simulation.
- Run a simulation loop for 10000 times
- In each simulation loop over `sprinters_dict.items()` and for each participant:
- Calculate time by calling `.time`
- `append` time to the list for particular key of `race_results`
```
# Run the simulation 10,000 more times, but this time record every
# sprinter's individual timing from every simulated race in
# race_results (sprinter name -> list of 10,000 times).
race_results = {name: [] for name in sprinters_dict}
for _ in range(10000):
    for name, dash in sprinters_dict.items():
        # One fresh draw from this sprinter's time distribution.
        race_results[name].append(dash.time)
```
### Sample mean $\bar{x}$ sample standard deviation $s$
Now we have a list of times given by each participant. We have the complete distribution, so let's calculate the mean, standard deviation and confidence interval.
As discussed in the lecture, if we have a given sample, we can quickly compute the mean and standard deviation using `np.mean()` and `np.std()`.
Let's begin with the race results for `Pavlos`.
```
# Using the race_results dictionary, find the mean
# and std for 'Pavlos'
pavlos_mean = ___
pavlos_std = ___
print(f'The average pace of Pavlos is {pavlos_mean:.2f} and the sample std is {pavlos_std:2f}')
```
### Sample mean $\bar{x}$ sample standard deviation $s$ for all sprinters
For each sprinter in the `race_results` dictionary, find the mean and standard deviation of the 10,000 simulations using the `np.mean()` and `np.std()` functions.
Store your findings in a new dictionary called `race_stats`.
```
# Summarise each sprinter's 10,000 simulated runs as [mean, std]
# in the race_stats dictionary (name -> [sample mean, sample std]).
race_stats = {
    name: [np.mean(times), np.std(times)]
    for name, times in race_results.items()
}
# Use the helper code below to print your findings
pt = PrettyTable()
pt.field_names = ["Sprinter", "Sample mean", "Sample std"]
for sprinter, stats in race_stats.items():
    pt.add_row([sprinter, round(stats[0], 3), round(stats[1], 3)])
print(pt)
```
### Confidence Interval
A confidence interval is the range of values for which we can claim a certain confidence level (usually 95%). The confidence interval represents values of the population parameter for which the difference between the parameter and the observed estimate is not significant at the 5% level.
- Calculate the 95% CI by getting `np.percentile` at 2.5 and 97.5.
- Calculate and append these to the list of stats in the `race_stats` dictionary, for each participant
```
#By using the race_results dictionary defined above,
# Find the 2.5 and 97.5 percentile of Tale's race runs.
# Hint : Use race_results['Tale's']
CI = np.percentile(___,[___,___])
print(f'The 95% confidence interval for Tale is {round(CI[0],2),round(CI[1],2)}')
```
### Confidence intervals for all sprinters.
Let's repeat the above for each sprinter.
You will add this information to your `race_stats` dictionary.
We expect you to extend stats list with the $2.5$ and the $97.5$ percentile values for each sprinter.
For e.g., if for `Pavlos`, we have `mean=13.00`, `std=0.1`, and CI as `(12.8,13.2)`, your `race_stats['Pavlos']` must look like: `[13.00,0.1,12.8,13.2]`.
```
# Repeat the same as above, but for every sprinter
# run through the race_results dictionary for each sprinter
# find the confidence interval, and add it to the race_stats dictionary
# defined above
for sprinter,runs in race_results.items():
ci = np.percentile(runs,[2.5,97.5])
# Hint: You can use the .extend() method to add it to the
# existing list of stats
race_stats[sprinter].extend(ci)
# Use the helper code below to print your findings
pt = PrettyTable()
pt.field_names = ["Sprinter", "Sample mean", "Sample std","95% CI"]
for sprinter,stats in race_stats.items():
mean = round(stats[0],3)
std = round(stats[1],3)
confidence_interval = (round(stats[2],3),round(stats[3],3))
pt.add_row([sprinter, mean,std,confidence_interval])
print(pt)
```
## Histogram plot for each sprinter
Run the following cell to get a cool plot for distribution of times.
```
# Helper code to plot the distribution of times
fig = plt.gcf()
fig.set_size_inches(10,6)
bins = np.linspace(10, 17, 50)
for sprinter,runs in race_results.items():
height, bins, patches = plt.hist(runs, bins, alpha=0.5, \
label=sprinter,density=True,edgecolor='k')
plt.fill_betweenx([0, height.max()], race_stats[sprinter][2], race_stats[sprinter][3], alpha=0.2)
plt.legend(loc='upper left',fontsize=16)
plt.xlabel('Seconds')
plt.ylabel('Frequency',rotation=0,labelpad=25)
ax = plt.gca()
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.set_title('Time distribution for sprinters')
plt.show()
```
## ⏸ Pause & Think
Take a look at the histograms for each participant and comment on why you think Varshini is winning more races.
```
### edTest(test_chow1) ###
# Write your answer as a string below
answer = '___'
```
## ⏸ What **one parameter** should Tale change in order to win more races?
**Note : Pick one that is most influential**
A. Improve consistency
B. Reduce base time
C. Increase base time
D. Relax and hydrate before the race
```
### edTest(test_chow2) ###
# Submit an answer choice as a string below (eg. if you choose option A put 'A')
answer = '___'
# Before you click mark, please comment out the run_sim function above
```
## 👩🏻🎓 Bonus (Not graded)
Find out who would have the most podium finishes (top 3).
```
# Your code here
```
| github_jupyter |
# Fitting and error propagation
One of the most popular fitting tools is [SciPy curve_fitting](https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.curve_fit.html).
However, often we want to also get an estimate on the error of the fit-parameters and derived quantities.
This can be very easily done with the [lmfit](https://lmfit.github.io/lmfit-py/) and [uncertainty packages](https://pythonhosted.org/uncertainties/), which we will discuss here.
```
import numpy as np # widely used python library for data manipulation, the 'as' allows you to rename the package on import
import pandas as pd
import json, yaml
import sys # get some information about the current OS
import matplotlib.pyplot as plt
# show figures inline in the notebook
%matplotlib inline
from pathlib import Path # working with path objects - usefull for OS independent code
import lmfit # for more information see https://lmfit.github.io/lmfit-py/
from uncertainties import ufloat, umath, correlated_values,covariance_matrix # packages to work with uncertainties
```
## Generate the data
```
def gaussian(x, amp, cen, wid):
    """Unnormalised Gaussian peak: amp * exp(-(x - cen)^2 / wid).

    Note that `wid` plays the role of 2*sigma^2 in the textbook form,
    not sigma itself.
    """
    offset = x - cen
    return amp * np.exp(-(offset * offset) / wid)
x = np.linspace(-10, 10, 101)
y = gaussian(x, 2.33, 0.21, 1.51) + np.random.normal(0, 0.2, x.size)
```
## Fitting with uncertainties
```
gmodel = lmfit.Model(gaussian) # turn the function into a model
print('parameter names: {}'.format(gmodel.param_names))
print('independent variables: {}'.format(gmodel.independent_vars))
params = gmodel.make_params(cen=1, amp=20, wid=2) # generate the parametes from the function arguments and provide some initial guess
params['amp'].set(min = 0) # you can set limits for the parameters, e.g. the amplitude is always positive
# params['wid'].set(vary=False) # you can also fix some parameters - try with and without!
result = gmodel.fit(y, params, x=x) # fit the model to the data
# plot the data, the initial guess and the fit result
plt.plot(x, y, 'bo')
plt.plot(x, result.init_fit, 'k--', label='initial fit')
plt.plot(x, result.best_fit, 'r-', label='best fit')
plt.legend(loc='best')
plt.show()
params # show the initial guesses and intervals
```
show the fit report
```
result
```
## error propagation
### the uncertainties package
https://pythonhosted.org/uncertainties/
```
from uncertainties import ufloat
from uncertainties.umath import * # sin(), etc.
x = ufloat(1, 0.1) # x = 1+/-0.1
print(2*x)
print(sin(2*x))
```
Another strength of this package is its correct handling of **correlations**. For instance, the following quantity is exactly zero even though x has an uncertainty:
```
x-x
```
### correlated variables from fit
```
# extract the values preserving the correlations between them
values_corr = correlated_values(result.best_values.values(), result.covar)
# extract the values ignoring correlations between them
values = [ufloat(m,s) for m,s in zip(result.best_values.values(), np.sqrt(np.diag(result.covar)))]
# the mean and error are the same for both
print('mean:', [x1.n-x2.n for x1, x2 in zip(values, values_corr)])
print('std:', [x1.s-x2.s for x1, x2 in zip(values, values_corr)])
# define some function that uses two values with uncertainties
g = lambda x1, x2 : x1/x2
# calculate g with the correlations
g(values_corr[0], values_corr[2])
# calculate g without the correlations
g(values[0], values[2])
```
# save notebook as html
```
from edaipynb import save_notebook_as_html
save_notebook_as_html('../../html')
```
| github_jupyter |

<a href="https://hub.callysto.ca/jupyter/hub/user-redirect/git-pull?repo=https%3A%2F%2Fgithub.com%2Fcallysto%2Fcurriculum-notebooks&branch=master&subPath=TechnologyStudies/ComputerScienceSocrata/4-filtering-datasets.ipynb&depth=1" target="_parent"><img src="https://raw.githubusercontent.com/callysto/curriculum-notebooks/master/open-in-callysto-button.svg?sanitize=true" width="123" height="24" alt="Open in Callysto"/></a>
# Filtering Datasets
We have experimented with visualizing open source data in a browser. However, let's broaden our perspective and consider how designing queries is an important part of designing any data-driven application, including many of those that run on phones and tablets. For example, consider a mobile app that plots schools within walking distance of the user on a map. We want to plot only a small subset of all schools in Calgary. How should this application filter the data?
There are two general approaches, named after the location where the data actually gets filtered: the server (i.e. where the data is stored) or the client (where the data is used). These may be on same device, or they may be separated. They are also very likely running on different technologies. Filtering on either side has its pros and cons:
### Server-side filtering
You have already seen this type of filter through the use of the WHERE clause in SODA. This approach reduces the amount of data that you receive from wherever that data is stored. The data is likely being sent across a network (in the case of cloud-based data) or from a different application (in the case of a more traditional database system). If the speed of the data transmission is limited (e.g. slow internet) or if the data transmission is costly (e.g. the user's data plan is limited), then you may very well wish to use a server-side filter.
For example, below is our familiar SODA query for obtaining all schools within Calgary. I've created a WHERE clause that finds all schools within a 'square' around the 'current' location, which we will assume to be William Aberhart High School (51.07866950960592°, -114.11533747920511°). To keep things simple, I will make my square 2 $\times$ 0.01 degree latitude by 2 $\times$ 0.01 degree longitude.
If you are curious, according to [this calculator](http://www.csgnetwork.com/degreelenllavcalc.html), 0.01 degree latitude equals about 1.112 km, and 0.01 degree longitude at Calgary's latitude equals about 0.702 km, so this would be a box about 2.2 by 1.4 km.
```
import requests as rq
import pandas as pd
import io as io
domain = "https://data.calgary.ca/resource/"
uuid_school_locations = "fd9t-tdn2"
def run_query(domain, uuid, query):
    """Run a SODA query against dataset `uuid` at `domain` and return a DataFrame.

    The query string is sent as the `$query` URL parameter of the
    dataset's CSV endpoint, and the CSV response body is parsed with
    pandas.
    """
    url = domain + uuid + ".csv?$query=" + query
    response = rq.Session().get(url)
    csv_text = response.content.decode('utf-8')
    return pd.read_csv(io.StringIO(csv_text))
latitude = 51.07866950960592
longitude = -114.11533747920511
query_template = """
SELECT
*
WHERE
latitude BETWEEN '{0}' AND '{1}' AND
longitude BETWEEN '{2}' AND '{3}'
"""
# longitude BETWEEN '-114.10533747920511' AND '-114.12533747920511'
query = query_template.format(latitude - 0.01, latitude + 0.01, longitude + 0.01, longitude - 0.01)
local_schools = run_query(domain, uuid_school_locations, query)
local_schools
```
##### Note
- We can easily re-create the query by changing the latitude and the longitude, and creating a newly formatted query using the `query_template`.
- But then we would have to resubmit the query, and this may take quite some time.
### Client-side filtering
The opposite approach is to obtain an entire dataset and to then programmatically filter using your native language. The pandas library provides various tools that allow you to easily select specific records in a dataframe. In this case, we will use the [pandas dataframe's **loc** function](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.loc.html) on the *entire* dataset. The output should be equivalent to the server-side filter's output.
```
query = """
SELECT
*
"""
all_schools = run_query(domain, uuid_school_locations, query)
local_schools = all_schools.loc[(all_schools ['latitude'] > latitude - 0.01)
& (all_schools ['latitude'] < latitude + 0.01)
& (all_schools ['longitude'] > longitude - 0.01)
& (all_schools ['longitude'] < longitude + 0.01)]
local_schools
```
### Visualization
So, with all that work done, let's do the visualization.
```
import plotly.express as px
figure1 = px.scatter_mapbox(local_schools, lat="latitude", lon="longitude",
color_discrete_sequence=["blue"],
zoom=13, height=600,
size='latitude',
size_max = 15,
hover_name="name",
hover_data=["type", "grades", "address_ab"],)
figure1.update_layout(mapbox_style="open-street-map")
figure1.update_layout(margin={"r":0,"t":0,"l":0,"b":0})
```
But if we then changed our current location to be Sir Winston Churchill High School (51.10011269545306°, -114.1399900255147°), we can very quickly change our results without having to re-query the City of Calgary server! It may still take some time to re-plot the data, but latency is not a concern.
```
local_schools2 = all_schools.loc[(all_schools ['latitude'] > 51.09011269545306)
& (all_schools ['latitude'] < 51.11011269545306)
& (all_schools ['longitude'] < -114.1299900255147)
& (all_schools ['longitude'] > -114.1499900255147)]
local_schools2
figure1 = px.scatter_mapbox(local_schools2, lat="latitude", lon="longitude",
color_discrete_sequence=["blue"],
zoom=13, height=600,
size='latitude',
size_max = 15,
hover_name="name",
hover_data=["type", "grades", "address_ab"],)
figure1.update_layout(mapbox_style="open-street-map")
figure1.update_layout(margin={"r":0,"t":0,"l":0,"b":0})
```
#### Note
There are many many functions within the [pandas dataframe class](https://pandas.pydata.org/pandas-docs/stable/reference/frame.html) that allow you to filter, combine, and transform data. Learning these would take time, and is outside of our scope here. This course is about learning how to query for data.
## Which approach is best?
That depends, of course!
If your application will show multiple views of the data set within a short time, it makes sense to download it once in its entirety. Re-querying for the data for each view would obviously be inefficient.
However, loading the data in its entirety will cost more bandwidth, and may take time up front. Also, your data will at some point go stale; hence the 'within a short time' qualifier.
So why not just query for little chunks of data only when needed? Well, you probably have already noticed that executing a query takes some time. The query has to pass from your local device to a remote server, the server has to retrieve the data and format it, and then the server has to pass it back across a network. The lag in time between query and response is known as *latency*. Latency may make your application appear to freeze. This may be frustrating to your user. Never mind what your user may experience if they lose their network connection completely.
The time gaps in between the various steps may be fractions of seconds, but that is an eternity compared to how fast your local device can process locally stored data. Thus, loading your data up front *may* be a good design decision.
In the above example, the client-side approach would probably be best. The dataset is relatively small, so data usage is reasonable, unless the data plan really is restrictive. Also, the data is highly static, as not many schools will get added in the next month, and none will change location. Thus, it would be good design to download the dataset, and perhaps even store it locally in a csv file. You can then move all around the city, and the map can be updated very quickly and without extra data transmission. Of course, your app will still want to update the dataset once in a while, and may need to load the Edmonton data if we change cities!
An alternate example could be an app that checks if any of your friends are within walking distance. Perhaps this app has a few million users, and their locations are being stored on a server somewhere in Silicon Valley. You would not want to download the entire dataset, as 99.99% of it would be useless to you. Furthermore, the data is highly dynamic, and if you don't update continuously, you will have a useless app. In this example, the server-side approach would undoubtedly be best.
## Final Thoughts
- Hopefully you appreciate the point that you can 'massage' your data both locally and remotely, and that you choose which way you go based on performance.
- The technology you work with will also have an impact on which approach you take. Other open data APIs do not have the ability to perform server-side filtering (and aggregation), and you may be forced to go client-side.
- To be quite honest, there will also be times where it is just easier to do your filtering (and aggregation) on the client side, instead of breaking your brain in writing the perfect SQL statement!
- Whereas the previous lessons dealt with 'big data' (aggregating finding patterns in large sets), this lesson dealt with 'specific data' (querying for specific information).
[](https://github.com/callysto/curriculum-notebooks/blob/master/LICENSE.md)
| github_jupyter |
```
import numpy as np
import matplotlib.pyplot as plt
x_train = np.arange(20).reshape(-1,2)
x_train
y_train = np.array([0,1,1,0,0,1,1,1,1,0])
y_train.shape
x_train.shape
#可视化
print(x_train[y_train==0,0])
print(x_train[y_train==0,1])
plt.scatter(x_train[y_train==0, 0], x_train[y_train==0,1],color='r')
plt.scatter(x_train[y_train==1, 0], x_train[y_train==1,1],color='g')
plt.title("Hospital")
plt.show()
#单独绘制一个点
x = np.array([20, 21])
plt.scatter(x_train[y_train==0, 0], x_train[y_train==0,1],color='r')
plt.scatter(x_train[y_train==1, 0], x_train[y_train==1,1],color='g')
plt.scatter(x[0], x[1], color='b')
plt.title("Hospital")
plt.show()
```
## 模拟实现 kNN 的过程
## 1. k 2. 距离计算 3. 下标排序 4. 统计 5. 可视化
```
k = 3
from math import sqrt
distances = []
for each_x_train in x_train:
d = sqrt(np.sum((each_x_train - x) ** 2))
distances.append(d)
distances
np.sort(distances)
np.argsort(distances) #知道距离排序不能解决应用问题,我需要知道的是哪几个点
nearest = np.argsort(distances)
nearest[:k]
for i in nearest[:k]:
print(i)
topK_y = [y_train[i] for i in nearest[:k]]
y_train
topK_y
```
### 统计
```
from collections import Counter
Counter(topK_y) #统计 返回字典
votes = Counter(topK_y)
votes
votes.most_common(1) #最多的几个元素 数组
votes.most_common(1)[0]
votes.most_common(1)[0][0]
predict_y = votes.most_common(1)[0][0]
if predict_y == 0 :
color = 'r'
else:
color = 'g'
plt.scatter(x_train[y_train==0, 0], x_train[y_train==0,1],color='r')
plt.scatter(x_train[y_train==1, 0], x_train[y_train==1,1],color='g')
plt.scatter(x[0], x[1], color=color)
plt.title("Hospital")
plt.show()
```
## scikit-learn中使用 kNN
```
from sklearn.neighbors import KNeighborsClassifier
kNN_classifier = KNeighborsClassifier(n_neighbors = 3) #k
kNN_classifier.fit(x_train, y_train)
#⚠️预测点 一维 [20,23] [[20,21]] #多个点
x = np.array([20,21,23,24,25,26]).reshape(-1,2)
x
```
##### predict
```
y_predict = kNN_classifier.predict(x)
y_predict
```
## 运行自己封装的 kNN
```
%run playML/kNN.py
knn_clf = KNNClassifier(k=3)
knn_clf.fit(x_train, y_train)
y_predict = knn_clf.predict(x)
y_predict
```
# 真实的数据集练习
```
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
```
## 1) 导入数据
```
iris = datasets.load_iris()
x = iris.data
x
x.shape
y = iris.target
y
y.shape
```
#### train test split
```
# 1. 乱序
shuffle_indexes = np.random.permutation(len(x))
shuffle_indexes
#2. 测试集获取
test_ratio = 0.2
test_size = (int)(len(x) * test_ratio) #有可能是浮点
# 3. 获取测试集的下标
test_indexes = shuffle_indexes[:test_size]
train_indexes = shuffle_indexes[test_size:]
test_indexes
train_indexes
x_train = x[train_indexes]
y_train = y[train_indexes]
x_test = x[test_indexes]
y_test = y[test_indexes]
y_train
y
```
## 使用封装
```
import numpy as np
from playML.model_selection import train_test_split
from playML import *
# from sklearn.model_selection import train_test_split
# Split the data, holding out 20% as a test set.
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.2)
y_train.shape
x_test.shape
my_knn_clf = KNNClassifier(k = 3)
# BUGFIX: fit on the training features *and labels*
# (the original passed x_test as the label argument) ...
my_knn_clf.fit(x_train, y_train)
# ... and predict on the held-out feature matrix, not on the labels.
y_predict = my_knn_clf.predict(x_test)
y_predict
y_test
# Classification accuracy: fraction of correct predictions on the test set.
sum(y_predict == y_test)
sum(y_predict == y_test) / len(y_test)
```
| github_jupyter |
```
#全景画像(Target)と背景画像(Background)を合体させます
#Copyright (c) 2018 Shotaro Ishigami
import cv2
import numpy as np
from IPython.display import display, Image as iP
from PIL import Image, ImageDraw, ImageFilter
from ipywidgets import interact
import glob,sys
import random
#画像表示用
import matplotlib.pyplot as plt
def display_cv_image(image, format='.png'):
    """Render an OpenCV image inline in the notebook.

    The image is encoded in-memory (PNG by default) and handed to
    IPython's display machinery.
    """
    encoded = cv2.imencode(format, image)[1]
    display(iP(data=encoded.tobytes()))
cv2.__version__
TARGET_DIR="./Target.bak/"
OUTDIR="./Output.bak/"
def Extraction_color(B,G,R,tar,dilate=0,erode=0):
    """Build an RGBA image whose alpha keeps only pixels at or below (B, G, R).

    Each channel is thresholded with THRESH_BINARY_INV, so a pixel is
    kept (alpha = 255) only when all three of its channel values are
    <= the corresponding threshold; brighter pixels become fully
    transparent.  The result is then cleaned up with a dilate/erode
    pass via Closing.
    """
    blue, green, red = cv2.split(tar)
    _, b_keep = cv2.threshold(blue, B, 1, cv2.THRESH_BINARY_INV)
    _, g_keep = cv2.threshold(green, G, 1, cv2.THRESH_BINARY_INV)
    _, r_keep = cv2.threshold(red, R, 1, cv2.THRESH_BINARY_INV)
    # A pixel survives only if every channel passed its threshold.
    keep = b_keep * g_keep * r_keep
    rgba = cv2.merge([blue, green, red, 255 * keep])
    return Closing(rgba, dilate=dilate, erode=erode)
def PasteTarget(x=0,y=0,background=None,mask=None):
    """Paste the staged target image ("./tmp/tmp.png") onto `background` at (x, y).

    If `mask` (a file path) is given, that image is used as the paste
    mask; otherwise the target's own alpha channel masks the paste.
    Returns the mutated `background` image.
    """
    target=Image.open("./tmp/tmp.png").convert("RGBA")
    print(target.mode)
    # When a mask image path was supplied (i.e. Extraction was called
    # beforehand), paste through that mask instead of the target's alpha.
    if mask is None:
        background.paste(target,(x,y),target)
    else:
        mask_img=Image.open(mask)#.convert("RGBA")
        print(mask_img.mode)
        background.paste(target,(x,y),mask_img)
    return background
# Binarize an image: grayscale -> Gaussian blur -> fixed threshold.
def Binarize(gray,im):
    """Grayscale-convert, smooth and binarize `im`.

    `gray` is the binarization threshold: pixels brighter than it
    become 255, everything else 0.  An 11x11 Gaussian blur is applied
    first to suppress noise.
    """
    grayscale = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    smoothed = cv2.GaussianBlur(grayscale, (11, 11), 0)
    return cv2.threshold(smoothed, gray, 255, cv2.THRESH_BINARY)[1]
# Build and save a paste mask from a binarized image.
def MakeMask(th1,dilate=0,erode=0):
    """Invert the binary image `th1`, clean it up, and write ./tmp/mask.png."""
    inverted = 255 - th1
    cv2.imwrite("./tmp/mask.png", Closing(inverted, dilate=dilate, erode=erode))
# Fit the target image onto a transparent canvas (canvas is `scale`
# times the thumbnail bound in each dimension).
def img_expansion(img,resize,width=1900,height=1080):
    """Shrink `img` and centre it on a transparent width x height canvas.

    `img` is resized *in place* (PIL's Image.thumbnail mutates its
    receiver) to fit within (width/scale * resize, height/scale * resize),
    then pasted at the centre of a new fully-transparent RGBA canvas,
    which is returned.
    """
    scale=2 # how much larger the canvas is than the thumbnail bound
    img.thumbnail((width/scale*resize,height/scale*resize))
    # Create the fully-transparent background canvas.
    bg = Image.new("RGBA",[width,height],(255,255,255,0))
    # Paste the (shrunken) image at the centre of the canvas.
    bg.paste(img,(int((width-img.size[0])/2),int((height-img.size[1])/2)))
    # bg.paste(img,(0,0))
    return bg
# Crop the target out of a near-white background.
def ImageTrim(img):
    """Crop `img` to the bounding box of its largest non-background contour.

    The image is binarized at threshold 245 (so near-white counts as
    background), contours are extracted, and the largest contour that is
    neither tiny noise (< 1e3 px) nor essentially the whole frame is
    cropped out.  Returns the cropped region, or None when no suitable
    contour is found.

    NOTE(review): cv2.rectangle draws onto `img` itself before cropping,
    so the input is mutated and the crop includes the drawn 1px border --
    confirm this is intended.  The 3-value findContours unpacking assumes
    OpenCV 3.x.
    """
    th1=Binarize(245,img)
    # Extract contours.
    # contours : [region][point index][0][x=0, y=1]
    # cv2.CHAIN_APPROX_NONE: keep intermediate points
    # cv2.CHAIN_APPROX_SIMPLE: drop intermediate points
    _,contours, hierarchy = cv2.findContours(th1, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
    h,w,_=img.shape[:3]
    img_area=h*w
    maxContour = 0
    # c=0###########################
    # Process each contour.
    for i in range(0, len(contours)):
        # Area enclosed by this contour.
        area = cv2.contourArea(contours[i])
        # Skip noise (too small) and contours of (almost) the whole image.
        if area < 1e3 or area>=img_area-100000:
            continue
        # Bounding rectangle (note: x, y, w, h shadow the image dims above).
        if len(contours[i]) > 0:
            if(area>maxContour):
                rect = contours[i]
                x, y, w, h = cv2.boundingRect(rect)
                cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0,0 ), 1)
                # cv2.imwrite("./tmp/"+str(c)+"trim.png",img)##############
                # c+=1####################
                # Keep the crop of the largest qualifying contour so far.
                img_pe=img[y:y + h, x:x + w]
                maxContour=area
    print("img_area:"+str(img_area))
    # NOTE(review): `area` is unbound when the loop never ran (no
    # contours), which would raise NameError here -- verify inputs.
    print("area:"+str(area))
    try:
        # img_pe is only bound when a qualifying contour was found.
        return img_pe
    except NameError:
        return None
# Dispatch on the extraction mode; when a mask is produced, the path to
# the mask image is returned.
def Extraction(Extraction_mode=None,
               Threshold=None,
               img_ex_Op=None,
               dilate=None,
               erode=None,
               B=None,G=None,R=None,
               Homography_mode=None,
               X=None,Y=None,tar_w=None,tar_h=None,
               x1_Ho=None,y1_Ho=None, x4_Ho=None,y4_Ho=None,
               x2_Ho=None,y2_Ho=None, x3_Ho=None,y3_Ho=None):
    """Extract the target from `img_ex_Op` by binarization or by colour.

    In "Extraction_binarization" mode the image is staged to
    ./tmp/tmp.png, binarized at `Threshold`, and a mask is written via
    MakeMask; the mask's path is returned.  Otherwise colour-based
    extraction (BGR thresholds) is performed and None is returned.

    NOTE(review): the Homography/X/Y/x*_Ho parameters are accepted but
    never used in this body -- presumably consumed elsewhere; confirm.
    NOTE(review): in the colour branch the Extraction_color result is
    discarded and ./tmp/tmp.png is not written -- verify this is the
    intended behaviour.
    """
    if(Extraction_mode=="Extraction_binarization"):
        cv2.imwrite("./tmp/tmp.png",img_ex_Op)
        # Contour extraction via binarization.
        th1=Binarize(Threshold,img_ex_Op)
        MakeMask(th1,dilate,erode)
        return str("./tmp/mask.png")
    else:
        Extraction_color(B,G,R,img_ex_Op,dilate=dilate,erode=erode)# contour extraction by colour (BGR)
        return None
# Closing-style cleanup (dilate then erode; counts configurable).
def Closing(img,kernel=None,dilate=0,erode=0):
    """Dilate `img` `dilate` times, then erode the result `erode` times.

    With equal counts this is a morphological closing (fills small
    holes); differing counts let callers grow or shrink a mask.

    Parameters
    ----------
    img : input image or mask.
    kernel : structuring element; defaults to a 5x5 uint8 block.
    dilate, erode : iteration counts for each operation (0 = no-op).
    """
    # BUGFIX: the original body ignored the `kernel` parameter and
    # hard-coded np.ones((5,5)); it was also a mutable default argument.
    # Use a None sentinel and honour the parameter (default behaviour
    # is unchanged for all existing callers).
    if kernel is None:
        kernel = np.ones((5, 5), np.uint8)
    dilated = cv2.dilate(img, kernel, iterations=dilate)
    return cv2.erode(dilated, kernel, iterations=erode)
# Brightness adjustment via PIL's per-pixel point mapping.
def PIL_point(point,img):
    """Scale every pixel value of `img` by `point` and return the result."""
    return img.point(lambda value: value * point)
# Brightness adjustment via OpenCV gamma correction.
def OpenCV_gamma(gamma,img):
    """Gamma-correct `img` through a 256-entry lookup table.

    Each value v maps to 255 * (v/255) ** (1/gamma); gamma > 1
    brightens the image, gamma < 1 darkens it.
    """
    # Build the LUT vectorised instead of a 256-iteration Python loop.
    # astype('uint8') truncates exactly like the original element-wise
    # assignment into a uint8 array, so the table is bit-identical.
    levels = np.arange(256, dtype=np.float64) / 255.0
    gamma_cvt = (255.0 * levels ** (1.0 / gamma)).astype('uint8').reshape(256, 1)
    return cv2.LUT(img, gamma_cvt)
# Projective (homography) transform.
def Homography(img,tar_w=None,tar_h=None,x1=0,y1=0,x2=0,y2=1080,x3=1900,y3=1080,x4=1900,y4=0):
    """Warp `img` by displacing its four corners by the given offsets.

    Corner order is (x1,y1)=top-left, (x2,y2)=bottom-left,
    (x3,y3)=bottom-right, (x4,y4)=top-right; each pair is added to the
    corresponding source corner.  The output keeps the input size and
    fills uncovered areas with white.

    NOTE(review): the tar_w/tar_h parameters are immediately
    overwritten from img.shape, so passing them has no effect --
    confirm before relying on them.  The 1080/1900 defaults look tuned
    for a 1900x1080 canvas; verify against callers.
    """
    tar_h,tar_w,_=img.shape[:3]
    pts1 = np.float32([[0,0],[0,tar_h],[tar_w,tar_h],[tar_w,0]])# the four source corners
    pts2 = np.float32([[x1,y1],[x2,y2+tar_h],[x3+tar_w,y3+tar_h],[x4+tar_w,y4]])# corners after the transform
    M = cv2.getPerspectiveTransform(pts1,pts2)
    return cv2.warpPerspective(img,M,(tar_w,tar_h),borderValue=(255,255,255))
# Blur applied to the whole target image.
def TargetSmoothingBlur(img=None,Blur=None):
    """Box-blur `img` when `Blur` is a positive odd kernel size.

    Invalid sizes leave the image untouched after printing a message.
    """
    if Blur > 0 and Blur % 2 != 0:
        return cv2.blur(img, ksize=(Blur, Blur))
    print("ぼかしに使うカーネルは1以上で奇数の値を指定してください。")
    return img
# Edge-preserving smoothing via repeated bilateral filtering.
def BilateralBlur(img=None,cnt=1):
    """Apply cv2.bilateralFilter (d=15, sigmas=20/20) `cnt` times when cnt > 1.

    With cnt <= 1 the image is returned unchanged.
    """
    if cnt <= 1:
        return img
    dst = img
    for _ in range(cnt):
        dst = cv2.bilateralFilter(dst, 15, 20, 20)
    return dst
# Affine (rotation) transform.
def Affine(img,angle):
    """Rotate `img` by `angle` degrees about its centre onto a doubled canvas.

    Areas outside the rotated image are filled with white.
    """
    height, width, _ = img.shape
    rotation = cv2.getRotationMatrix2D(center=(width/2, height/2), angle=angle, scale=1.)
    return cv2.warpAffine(img, rotation, dsize=(width*2, height*2), borderValue=(255, 255, 255))
# Sepia-tone conversion.
def Sepia(im):
    """Convert a BGR uint8 image to sepia tone in place and return it.

    Luma is computed with Rec.601 weights (0.2989 R + 0.5870 G +
    0.1140 B), then tinted by scaling the red/green/blue channels by
    240/200/145 out of 255.
    """
    blue, green, red = im[:, :, 0], im[:, :, 1], im[:, :, 2]
    luma = 0.2989 * red + 0.5870 * green + 0.1140 * blue
    im[:, :, 2] = luma * 240 / 255  # red tint
    im[:, :, 1] = luma * 200 / 255  # green tint
    im[:, :, 0] = luma * 145 / 255  # blue tint
    return im
#鮮鋭化
#鮮鋭化 / Sharpening with a fixed 3x3 kernel.
def Sharp(img=None):
    """Sharpen `img` with a 3x3 kernel (centre 9, all neighbours -1)."""
    sharpen_kernel = np.array([[-1, -1, -1],
                               [-1, 9, -1],
                               [-1, -1, -1]], np.float32)
    return cv2.filter2D(img, -1, sharpen_kernel)
#擬似遮蔽
#簡単な図形をランダムで生成し、遮蔽物を再現します。作成する個数は設定でき、初期値は0です。
#ターゲット周辺に生成されます。
def HideTarget(img,num):
    """Overlay `num` random occluder shapes on the current target image.

    Reads ./tmp/tmp.png (the target written by earlier pipeline steps),
    composites the generated shield over it and returns a PIL RGBA image.
    """
    # Generate the occluder canvas and persist it so PIL can re-read it as RGBA.
    cv2.imwrite("./tmp/Shield.png",MakeShield(img,num))
    target=Image.open("./tmp/tmp.png").convert("RGBA")
    shield=Image.open("./tmp/Shield.png").convert("RGBA")
    # Third argument uses the shield's own alpha channel as the paste mask.
    target.paste(shield,(0,0),shield)
    return target
# 図形生成
# 図形生成 / Generate random occluder shapes on a white canvas.
def MakeShield(img,num=0):
    """Draw `num` random shapes (rectangle / circle / ellipse) on a canvas
    matching `img`'s size, then key out the white background.

    Returns the shield image produced by Extraction_color.
    """
    h,w,_=img.shape
    Shield=np.full((h,w,3),255,np.uint8)
    for i in range(num):
        # 描画設定 / Random shape type, fill mode, anchor point and colour.
        ShieldType,FillOption,startX,startY,ColorR,ColorG,ColorB=Dise(w,h)
        if(ShieldType==0):#四角形 (rectangle)
            endX=random.randint(0,w)
            endY=random.randint(0,h)
            cv2.rectangle(Shield,(startX,startY),
                          (endX,endY),
                          (ColorB,ColorG,ColorR),FillOption)
        elif(ShieldType==1):#円 (circle)
            min_len=min(h,w)
            # BUG FIX: random.randint requires integer bounds; min_len/2 is a
            # float on Python 3 and raised ValueError. Use floor division and
            # keep the upper bound at least 1 for tiny images.
            radius=random.randint(1,max(1,min_len//2))
            cv2.circle(Shield,(startX,startY),
                       radius,(ColorB,ColorG,ColorR),FillOption)
        elif(ShieldType==2):#楕円 (ellipse)
            radiusX=random.randint(1,w)
            radiusY=random.randint(1,h)
            angle=random.randint(1,359)
            cv2.ellipse(Shield,((startX,startY),
                        (radiusX,radiusY),angle),
                        (ColorB,ColorG,ColorR),FillOption)
    # Key out the (near-)white background so only the shapes remain.
    Shield=Extraction_color(B=254,G=254,R=254,tar=Shield)
    return Shield
def Dise(w,h):
    """Roll the random parameters for one occluder shape.

    Returns (shape_type 0-2, fill_option: -1 filled / 5 outline,
    start_x, start_y, r, g, b) for a canvas of size w x h.
    """
    # Call order into `random` matches the original so seeded runs agree.
    shape_type = random.randint(0, 2)
    fill_option = random.choice([-1, 5])
    start_x = random.randint(0, w)
    start_y = random.randint(0, h)
    red = random.randint(0, 254)
    green = random.randint(0, 254)
    blue = random.randint(0, 254)
    return shape_type, fill_option, start_x, start_y, red, green, blue
# 画像を合成します。
#すべての引数は省略できます。
#bak:背景画像
#tar:前景画像(ターゲット)
#SharpBackground:背景を鮮鋭化するか
#Brightness_mode:明るさを変更する方法を選択(No,PIL,OpenCV)
#dilate,erode:クロージング時の拡大率と縮小率
#X,Y:前景画像の貼り付け位置(途中の処理で前景画像をリサイズするのでその後の貼り付け位置)
#resize:前景画像を倍数指定でリサイズします
#R,G,B:切り抜かれる色を指定(0〜255)
#Threshold:2値化の際の閾値(輪郭抽出)
#brightness:明るさ。
#BilateralFilter:バイラテラルフィルタ
#GausianFilter:ガウシアンフィルタ
#HideNum:遮蔽物の数
#x1_Ho,y1_Ho,x2_Ho,y2_Ho,x3_Ho,y3_Ho,x4_Ho,y4_Ho:射影変換。
def Union(label_name="?",
          bak="/home/kagamiwomiru/datasets/haikeigazou/1-1.jpg",
          tar="/home/kagamiwomiru/datasets/hyousiki/326-6.jpg",
          SharpBackground=False,
          SepiaAll=False,
          Brightness_mode="No",
          dilate=5,erode=5,
          X=0,Y=0,
          angle=0,
          resize=1,
          R=244,G=248,B=243,
          Threshold=200,
          brightness=1.0,
          BilateralFilter=1,
          GausianFilter=1,
          HideNum=0,
          x1_Ho=0,y1_Ho=0,x2_Ho=0,y2_Ho=0,x3_Ho=0,y3_Ho=0,x4_Ho=0,y4_Ho=0
          ):
    """Composite a foreground target image onto a background image.

    Pipeline: optional background sharpening -> foreground expansion ->
    bilateral blur -> binarize + mask -> homography -> affine rotation ->
    trim -> random occluders -> box blur -> resize -> brightness -> paste ->
    optional sepia. Prints "label,x1,y1,x2,y2" for the pasted bounding box
    and displays the composite. Intermediate files live under ./tmp/.
    Relies on helpers defined elsewhere in this notebook (img_expansion,
    Binarize, MakeMask, ImageTrim, PasteTarget).
    NOTE(review): R, G and B are accepted but unused on this path -- the
    colour-keying branch is commented out below.
    """
    if(SharpBackground is True):
        # Sharpen the background image first.
        cv2.imwrite("./tmp/background.jpg",Sharp(cv2.imread(bak)))
        background=Image.open("./tmp/background.jpg")
    else:
        background=Image.open(bak)
    target=Image.open(tar)
    w,h=background.size
    tar_w,tar_h=target.size
    # Expand the foreground image.
    target=img_expansion(target,resize,w,h)
    # target.save("./tmp/tmp_ex.png")
    # Convert PIL -> OpenCV array.
    target = cv2.cvtColor(np.array(target),cv2.COLOR_BGR2RGB)
    # Bilateral blur (edge-preserving smoothing).
    target=BilateralBlur(img=target,cnt=BilateralFilter)
    # # Foreground extraction by colour key (disabled):
    # target_ex=Extraction_color(tar=target,dilate=dilate, erode=erode,B=B,G=G,R=R)
    # cv2.imwrite("./tmp/tmp.png",target_ex)
    # Contour extraction via binarization.
    cv2.imwrite("./tmp/tmp.png",target)
    th1=Binarize(Threshold,target)
    MakeMask(th1,dilate,erode)
    target=Image.open("./tmp/tmp.png")
    mask=Image.open("./tmp/mask.png")
    target.putalpha(mask)
    target.save("./tmp/tmp.png")
    # Homography (perspective) transform.
    target=Homography(cv2.imread("./tmp/tmp.png",-1),tar_w=tar_w,tar_h=tar_h,
                      x1=x1_Ho,y1=y1_Ho,x2=x2_Ho,y2=y2_Ho,x3=x3_Ho,y3=y3_Ho,x4=x4_Ho,y4=y4_Ho)
    # Affine rotation.
    target=Affine(target,angle)
    # Trim empty borders.
    target=ImageTrim(target)
    cv2.imwrite("./tmp/tmp.png",target)
    try:
        tar_h,tar_w,_=target.shape[:3]
    except AttributeError:
        # ImageTrim returned a non-array (warp produced an empty image).
        print("射影変換に失敗しました。現在の値を0に近づけるとうまくいくかもしれません。")
        sys.exit(1)
    # Random occluders over the target.
    target=HideTarget(target,HideNum)
    target.save("./tmp/tmp.png")
    # Box blur over the whole target.
    target=TargetSmoothingBlur(cv2.imread("./tmp/tmp.png",-1),Blur=GausianFilter)
    cv2.imwrite("tmp/tmp.png",target)
    tar_h,tar_w,_=target.shape[:3]
    # Resize by the `resize` multiplier.
    target=cv2.resize(target,(int(tar_w*resize),int(tar_h*resize)))
    # target=cv2.resize(target,resize,resize)
    cv2.imwrite("./tmp/tmp.png",target)
    tar_h,tar_w,_=target.shape[:3]
    # Brightness via OpenCV gamma LUT.
    if (Brightness_mode == "OpenCV"):
        target=OpenCV_gamma(brightness,cv2.imread("./tmp/tmp.png",-1))
        cv2.imwrite("./tmp/tmp.png",target)
    # Brightness via PIL point mapping.
    if (Brightness_mode == "PIL"):
        target=PIL_point(brightness,Image.open("./tmp/tmp.png"))
        target.save("./tmp/tmp.png")
    out_image=PasteTarget(x=X,y=Y,background=background)  # paste target onto background
    out_image.save("./tmp/result.png")
    if(SepiaAll is True):
        SepiaImage=Sepia(cv2.imread("./tmp/result.png"))
        cv2.imwrite("./tmp/result.png",SepiaImage)
    # Emit the label and pasted bounding box as CSV: label,x1,y1,x2,y2.
    print(label_name+","+str(X)+","+str(Y)+","+str(X+tar_w)+","+str(Y+tar_h))
    out_image=Image.open("./tmp/result.png")
    return plt.imshow(out_image)
    # return out_image
```
# テスト用
様々な変数を調整して、最適なパラメータを探します。
## 色で抽出
初期設定では白色を消すようにしているので、前景画像によっては消えすぎてしまうことがある。
背景画像と前景画像を合成する関数 **Union**ではこの問題を回避するため、引数で消す色を指定できる。(RGB)
## 引数
- label_name:割り振るラベルIDを入力します。
- bak:背景画像
- tar:前景画像(ターゲット)
- SharpBackground:背景を鮮鋭化するか
- Extraction_mode:抽出方法を選択(輪郭抽出、色で抽出)
- Brightness_mode:明るさを変更する方法を選択(No,PIL,OpenCV)
- dilate,erode:クロージング時の拡大率と縮小率
- decrease:前景画像の縮小率(ターゲットが大きすぎるときに縮小率を整数で指定します)
- X,Y:前景画像の貼り付け位置(途中の処理で前景画像をリサイズするのでその後の貼り付け位置)
- R,G,B:切り抜かれる色を指定(0〜255)
- Threshold:2値化の際の閾値(輪郭抽出)
- brightness:明るさ。(Brightness modeがNo以外で適用)
- BilateralFilter:バイラテラルフィルタ
- GausianFilter:ガウシアンフィルタ
- HideNum:遮蔽物の数
- x1_Ho,y1_Ho,x2_Ho,y2_Ho,x3_Ho,y3_Ho,x4_Ho,y4_Ho:射影変換。
```
# Interactive parameter explorer (Jupyter ipywidgets): builds one widget per
# keyword argument and re-runs Union on every change. Tuples are
# (min, max, step) slider specs; lists become dropdowns.
# NOTE(review): img_expansion_mode and Ex_mode are not parameters of the
# Union defined above, so interact may reject or ignore them -- confirm.
interact(Union,img_expansion_mode=True,
         label_name="?",
         bak=glob.glob("/home/kagamiwomiru/datasets/haikeigazou/*"),  # background image dropdown
         tar=glob.glob("/home/kagamiwomiru/datasets/hyousiki/*"),  # foreground image dropdown
         SharpBackground=False,
         Ex_mode="No",
         SepiaAll=False,
         Brightness_mode=["No","PIL","OpenCV"],
         dilate=(0,5,1),erode=(0,5,1),
         angle=(0,180,1),
         resize=(0.1,10,0.1),
         X=(0,2000,50),Y=(0,2000,50),
         R=(0,255,1),G=(0,255,1),B=(0,255,1),
         Threshold=(0,255,1),
         brightness=(0.1,2.0,0.1),
         BilateralFilter=(1,10,1),
         GausianFilter=(1,9,2),
         HideNum=(0,10,1),
         x1_Ho=(-1500,1500,5),y1_Ho=(-1500,1500,5),x2_Ho=(-1500,1500,5),y2_Ho=(-1500,1500,5),x3_Ho=(-1500,1500,5),y3_Ho=(-1500,1500,5),x4_Ho=(-1500,1500,5),y4_Ho=(-1500,1500,5)
         )
```
| github_jupyter |
```
import pandas as pd
# Read the MediaEval 2015 training set: one tab-separated tweet per line,
# first line is the header row.
f = open('mediaeval-2015-trainingset.txt').read().split('\n')
f[1].split('\t')
headers = f[0].split('\t')
data_list = []
for tweet in f:
    t = tweet.split('\t')
    data_list.append(t)
import pandas as pd
# Row-by-row DataFrame build (slow but fine at this size); the header row is
# included in data_list, so iloc[1:] drops it afterwards.
df = pd.DataFrame(columns=headers)
for i in range(len(data_list)):
    df.loc[i] = data_list[i]
df = df.iloc[1:]
df.head()
df.shape
# Class balance of the training set.
fake = 0
true = 0
for index, row in df.iterrows():
    if row['label'] == 'fake':
        fake = fake + 1
    else:
        true = true + 1
fake, true
fake/true
2564/1217
# Rebuild the DataFrame from training + test sets combined.
f1 = open('mediaeval-2015-trainingset.txt').read().split('\n')
f2 = open('mediaeval-2015-testset.txt').read().split('\n')
headers = f1[0].split('\t')
f = f1[1:] + f2[1:]
data_list = []
for tweet in f:
    t = tweet.split('\t')
    data_list.append(t)
df = pd.DataFrame(columns=headers)
for i in range(len(data_list)):
    df.loc[i] = data_list[i]
df = df.iloc[1:]
df.head()
df.shape
# Class balance of the combined set.
fake = 0
true = 0
for index, row in df.iterrows():
    if row['label'] == 'fake':
        fake = fake + 1
    else:
        true = true + 1
fake,true
# Quick sanity check of the profanity detector on leetspeak.
from better_profanity import profanity
dirty_text = "That l3sbi4n did a very good H4ndjob."
profanity.contains_profanity(dirty_text)
import re
from textblob import TextBlob
import pointofview
from better_profanity import profanity
def get_word_count(row):
    """Number of space-separated tokens in the tweet text (row[1])."""
    text = row[1]
    return len(text.split(' '))
def get_character_count(row):
    """Length of the tweet text (row[1]) in characters."""
    text = row[1]
    return len(text)
def get_question_count(row):
    """Number of '?' characters in the tweet text."""
    text = row[1]
    return text.count('?')
def get_exclaimation_count(row):
    """Number of '!' characters in the tweet text."""
    text = row[1]
    return text.count('!')
def has_colon(row):
    """1 if the tweet text contains a colon, else 0."""
    text = row[1]
    return 1 if ':' in text else 0
def get_mention_count(row):
    """Number of '@' characters (mentions) in the tweet text."""
    text = row[1]
    return text.count('@')
def get_hashtag_count(row):
    """Number of '#' characters (hashtags) in the tweet text."""
    text = row[1]
    return text.count('#')
def get_url_count(row):
    """Number of http/https URLs found in the tweet text."""
    text = row[1]
    matches = re.findall('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', text)
    return len(matches)
def get_polarity(row):
    """Sentiment polarity of the tweet text via TextBlob (range [-1, 1])."""
    text = row[1]
    return TextBlob(text).sentiment[0]
def get_subjectivity(row):
    """Sentiment subjectivity of the tweet text via TextBlob (range [0, 1])."""
    text = row[1]
    return TextBlob(text).sentiment[1]
def get_first_pronouns(row):
    """Count of first-person point-of-view words in the tweet text."""
    text = row[1]
    return len(pointofview.parse_pov_words(text)['first'])
def get_second_pronouns(row):
    """Count of second-person point-of-view words in the tweet text."""
    text = row[1]
    return len(pointofview.parse_pov_words(text)['second'])
def get_third_pronouns(row):
    """Count of third-person point-of-view words in the tweet text."""
    text = row[1]
    return len(pointofview.parse_pov_words(text)['third'])
def has_profanity(row):
    """1 if the tweet text contains profanity (better_profanity), else 0."""
    text = row[1]
    return 1 if profanity.contains_profanity(text) else 0
def get_via_count(row):
    """Case-insensitive count of the substring 'via' in the tweet text.

    Note: counts occurrences inside longer words too (e.g. 'trivial').
    """
    text = row[1]
    return text.lower().count('via')
def get_uppercase_chars(row):
    """Number of uppercase ASCII letters in the tweet text."""
    text = row[1]
    return len(re.findall(r'[A-Z]', text))
# Derive one feature column per helper function defined above.
df['word_count'] = df.apply(lambda row: get_word_count(row), axis=1)
df['character_count'] = df.apply(lambda row: get_character_count(row), axis=1)
df['uppercase_count'] = df.apply(lambda row: get_uppercase_chars(row), axis=1)
df['question_count'] = df.apply(lambda row: get_question_count(row), axis=1)
df['exclaimation_count'] = df.apply(lambda row: get_exclaimation_count(row), axis=1)
df['mention_count'] = df.apply(lambda row: get_mention_count(row), axis=1)
df['hashtag_count'] = df.apply(lambda row: get_hashtag_count(row), axis=1)
df['url_count'] = df.apply(lambda row: get_url_count(row), axis=1)
df['polarity'] = df.apply(lambda row: get_polarity(row), axis=1)
df['subjectivity'] = df.apply(lambda row: get_subjectivity(row), axis=1)
df['first_pronoun_count'] = df.apply(lambda row: get_first_pronouns(row), axis=1)
df['second_pronoun_count'] = df.apply(lambda row: get_second_pronouns(row), axis=1)
df['third_pronoun_count'] = df.apply(lambda row: get_third_pronouns(row), axis=1)
df['via_count'] = df.apply(lambda row: get_via_count(row), axis=1)
df['has_profanity'] = df.apply(lambda row: has_profanity(row), axis=1)
df['has_colon'] = df.apply(lambda row: has_colon(row), axis=1)
# Drop identifier columns that are not used as features.
df.drop('tweetId', axis=1, inplace=True)
df.drop('timestamp', axis=1, inplace=True)
df.drop('userId', axis=1, inplace=True)
df.drop('imageId(s)', axis=1, inplace=True)
df.drop('username', axis=1, inplace=True)
df.head(20)
import numpy as np
# Encode labels: 'real' -> 0, everything else (fake/humor) -> 1.
df["label_code"] = np.where(df["label"]=='real', 0, 1)
# Persist the featurized dataset, then reload it (checkpoint).
df.to_pickle('complete_dataset_features.pkl')
df = pd.read_pickle("complete_dataset_features.pkl")
df.groupby('label_code').mean()
df.mean()
df_fake = df.loc[(df['label_code'] == 1)]
df_real = df.loc[(df['label_code'] == 0)]
import seaborn as sns
# Correlation heatmap of all numeric features.
corr = df.corr()
sns.heatmap(corr,
            xticklabels=corr.columns.values,
            yticklabels=corr.columns.values)
corr = df.corr()
corr.style.background_gradient(cmap='coolwarm').set_precision(2)
# 'RdBu_r' & 'BrBG' are other good diverging colormaps
type(corr)
# Flatten and rank absolute pairwise correlations.
c1 = corr.abs().unstack().drop_duplicates()
l = c1.sort_values(ascending = False)
l
def sort_correlation_matrix(correlation_matrix):
    """Reorder a correlation matrix by absolute correlation with its first column.

    The first column/row stays first; the remaining labels are sorted by
    |corr| with it in descending order, and the matrix is symmetrically
    re-indexed in that order.
    """
    magnitudes = correlation_matrix.abs()
    anchor = magnitudes.columns[0]
    ranked = magnitudes[anchor][1:].sort_values(ascending=False)
    order = [anchor] + list(ranked.index)
    return correlation_matrix[order].reindex(order)
sort_correlation_matrix(corr)
# Rank features by correlation with the label.
corr['label_code'].sort_values()
# Combined pronoun feature.
df['pronoun_count'] = df['first_pronoun_count'] + df['second_pronoun_count'] + df['third_pronoun_count']
corr = df.corr()
corr['label_code'].sort_values()
import seaborn as sns
corr = df.corr()
sns.heatmap(corr,
            xticklabels=corr.columns.values,
            yticklabels=corr.columns.values)
# Combined subjectivity + polarity feature.
df['sub_polar'] = df['subjectivity'] + df['polarity']
corr = df.corr()
corr['label_code'].sort_values()
corr = df.corr()
sns.heatmap(corr,
            xticklabels=corr.columns.values,
            yticklabels=corr.columns.values)
# Per-label means and point plots for a few individual features.
df2 = df[['label_code', 'exclaimation_count']]
df2.groupby('label_code').mean()
df2 = df[['label_code', 'via_count']]
import seaborn as sns
sns.catplot(y='via_count', x='label_code', data=df2, kind='point', aspect=1, )
df2 = df[['label_code', 'exclaimation_count']]
import seaborn as sns
sns.catplot(y='exclaimation_count', x='label_code', data=df2, kind='point', aspect=1, )
df2 = df[['label_code', 'question_count']]
import seaborn as sns
sns.catplot(y='question_count', x='label_code', data=df2, kind='point', aspect=1, )
df2 = df[['label_code', 'has_profanity']]
import seaborn as sns
sns.catplot(y='has_profanity', x='label_code', data=df2, kind='point', aspect=1, )
```
| github_jupyter |
```
"""Notes/instructions on how to run experiments.
"""
from nna.fileUtils import list_files,save_to_csv
from pathlib import Path
from nna.pre_process_func import read_queue
import pandas as pd
# originally from ../src/notes.md
```
* start with screen
* get GPU Memory so I can use it later
* one file file at a time (~2 gb)
* I will divide files to 1 hour slices
* I will use 25 cpus so 25 processes
* embeddings file name will be filename_embeddings.
1) create a text file with paths to input audio files (one full path per line)
* `find /tank/data/nna/real/ -iname "*flac" > ExperimentRunVXXX.txt`
* or following code, list_files function
v2) since we now have new files in the old folders, we need to find which files have results and which files do not,
we could let the code ignore existing results however, ffmpeg divides files into couple of hours of segments without checking results.
```
# Build the list of input audio files, excluding folders already handled.
datafiles_path="/home/enis/projects/nna/data/"
search_path="/tank/data/nna/real/"
ignore_folders=["/tank/data/nna/real/ivvavik/","/tank/data/nna/real/stinchcomb/dups/","/tank/data/nna/real/stinchcomb/excerpts/"]
files_path_list=list_files(search_path,ignore_folders)
files_path_list=list(set(files_path_list))  # de-duplicate
datafiles_path="/home/enis/projects/nna/data/"
input_filename = "ExperimentRunV5.txt"
thepath = datafiles_path + input_filename
# One absolute audio path per line, consumed by the pre-processing job.
with open(thepath,"w") as myfile:
    myfile.writelines("\n".join(files_path_list))
```
2) update input, output directory, input list, in /nna/src/params.py
3-1) sync files
`rsync -av --recursive --update ./ enis@crescent:/home/enis/projects/nna/ `
3-2) run codes
```bash
python3 ./src/scripts/pre_process.py &>> job_logs/logs.txt; python /home/enis/projects/nna/src/nna/slack_message.py -t "cpu job ended" -f job_logs/logs.txt &
python3 ./src/scripts/watch_VGGish.py &>> job_logs/logs_gpu.txt; python /home/enis/projects/nna/src/nna/slack_message.py -t "gpu job stopped" &
```
4) to re-run, update the code and remove temporary flac files
`rsync -av --recursive --update /Users/berk/Documents/workspace/speech_audio_understanding/src/ enis@crescent:/home/enis/projects/nna/`
`find /scratch/enis/data/nna/real/ -iname "*flac" -delete`
`find /scratch/enis/data/nna/real/ -path "*/*_segments*/*" -delete`
`find /scratch/enis/data/nna/real/ -name "*_segments" -type d -delete`
also remove `job_logs/pre_processing_queue.csv` if jobs left unfinished
and STOP all processes
5) tracking progress
```bash
cat job_logs/pre_processing_queue.csv | wc -l; cat job_logs/pre_processed_queue.csv | wc -l; cat job_logs/VGGISH_processing_queue.csv | wc -l; cat job_logs/vggish_embeddings_queue.csv | wc -l;
```
6) backup
```bash
tar cf - /scratch/enis/data/nna/backup/NUI_DATA/ -P | pv -s $(du -sb /scratch/enis/data/nna/backup/NUI_DATA/ | awk '{print $1}') | gzip > embeddings_backup.tar.gz
```
```
# Load each pipeline stage's queue log to compare progress between stages.
q1=read_queue("../src/job_logs/pre_processing_queue.csv")
q2=read_queue("../src/job_logs/pre_processed_queue.csv")
q3=read_queue("../src/job_logs/VGGISH_processing_queue.csv")
q4=read_queue("../src/job_logs/vggish_embeddings_queue.csv")
q5=read_queue("../src/job_logs/Audioset_processing_queue.csv")
q6=read_queue("../src/job_logs/Audioset_output_queue.csv")
CABLE=read_queue("../src/job_logs/_CABLE_output_queue.csv")
# Strip paths/suffixes so entries from the two stages are comparable.
q1set=set(["/".join(i.split("/")[-1:])[:-5] for i in q1])
q2set=set(["/".join(i.split("/")[-2:-1])[:-13] for i in q2])
# Files present in one stage but not the other (stuck or extra).
print(q1set.difference(q2set))
print(q2set.difference(q1set))
# files without results (subDirectoryAddons)
# this one goes through files in the properties_df and checks if there are corresponding files with given file name add ons.
from nna.fileUtils import standard_path_style
import pandas as pd
# file_properties_df_ST=pd.read_pickle("../data/stinchcomb_dataV1.pkl")
file_properties_df_ST = []
# file_properties_df = pd.read_pickle("../data/realdata_v2No_stinchcomb.pkl")
# file_properties_df = pd.read_pickle("../data/prudhoeAndAnwr4photoExp_dataV1.pkl")
file_properties_df = pd.read_pickle("/home/enis/projects/nna/data/prudhoeAndAnwr4photoExp_dataV1.pkl")
# file_properties_df = pd.read_pickle("../data/allFields_dataV4.pkl")
subDirectoryAddon="vgg"
# subDirectoryAddon="_CABLE"
# subDirectoryAddon="_CABLE"
subDirectoryAddons=["CABLE","RUNNINGWATER","INSECT", "RAIN", "WATERBIRD", "WIND", "SONGBIRD", "AIRCRAFT"]
notDoneFilesDict={}
# For each model tag, collect input files that have no result file yet.
for subDirectoryAddon in subDirectoryAddons:
    print(subDirectoryAddon)
    notDoneFiles=[]
    for afile,row in file_properties_df.iterrows():
        checkFile = standard_path_style("/scratch/enis/data/nna/real/",row,sub_directory_addon=subDirectoryAddon,file_name_addon="")
        if not checkFile.exists():
            notDoneFiles.append(afile)
    # Same check for the (optional) stinchcomb dataframe.
    if file_properties_df_ST != []:
        for afile,row in file_properties_df_ST.iterrows():
            checkFile = standard_path_style("/scratch/enis/data/nna/real/",row,sub_directory_addon=subDirectoryAddon,file_name_addon="")
            if not checkFile.exists():
                # print(afile)
                # print(checkFile)
                notDoneFiles.append(afile)
    notDoneFiles.sort()
    notDoneFiles=[str(i) for i in notDoneFiles]
    notDoneFilesDict[subDirectoryAddon]=notDoneFiles[:]
# input_filename = "ExperimentRunV6.txt"
# thepath=datafiles_path+input_filename
# with open(thepath,"w") as myfile:
#     myfile.writelines("\n".join(notDoneFiles))
len(notDoneFiles)
# len(notDoneFiles) 13729
# len(noEmbeddings)
thepath
input_filename = "allModels6tagRunV1_20191102.txt"
thepath=datafiles_path+"inferenceInputs/"+input_filename
# thepath
# NOTE(review): yesEmbeddings is defined in a later cell -- this write only
# works after that cell has run (notebook scratch ordering).
with open(thepath,"w") as myfile:
    myfile.writelines("\n".join([str(i) for i in yesEmbeddings]))
# from missing results, find missing embeddings
# in the next part, we find missing embeddings by searching folders, here with by iterating file_properties_df
noEmbeddings=[]
yesEmbeddings=[]
for f in notDoneFiles:
    row = file_properties_df.loc[Path(f)]
    # NOTE(review): keyword names here (subDirectoryAddon/fileNameAddon)
    # differ from the sub_directory_addon/file_name_addon used in the cell
    # above -- confirm the standard_path_style signature before running.
    checkFile= standard_path_style("/scratch/enis/data/nna/real/",row,subDirectoryAddon="_vgg",fileNameAddon="_rawembeddings000.npy")
    if not checkFile.exists():
        # print(checkFile)
        noEmbeddings.append(checkFile)
    else:
        yesEmbeddings.append(checkFile)
len(noEmbeddings)
# check if any of the missing files were in the previous experiment
inputFilesOld="/scratch/enis/data/nna/real/input_2020-10-12_17:11:26.txt"
with open(inputFilesOld) as ff:
    ffL=ff.readlines()
ffL = [i.strip().split("\t")[0] for i in ffL]
noEmbeddings2=[]
yesEmbeddings2=[]
# Partition again: files absent from the previous experiment's input list.
for f in notDoneFiles:
    row = file_properties_df.loc[Path(f)]
    checkFile= standard_path_style("/scratch/enis/data/nna/real/",row,subDirectoryAddon="_vgg",fileNameAddon="_rawembeddings000.npy")
    if f not in ffL:
        # print(checkFile)
        noEmbeddings2.append(checkFile)
    else:
        yesEmbeddings2.append(checkFile)
setnoEmbeddings = set(noEmbeddings)
setnoEmbeddings2 = set(noEmbeddings2)
print("Following ones are in the input but do not have embeddings")
setnoEmbeddings.difference(setnoEmbeddings2)
len(setnoEmbeddings2)
# Sample a few entries with embeddings to form a small test input list.
t=[]
for i in yesEmbeddings[0:10]:
    t.append(str(i))
for i in yesEmbeddings[-20:-10]:
    t.append(str(i))
### FIND missing prediction results
```
import glob
inputCSV=[]
# for i in ["ivvavik","anwr", "prudhoe" , "stinchcomb"]:
# Gather every raw-embedding file for the selected field sites.
for i in ["anwr", "prudhoe" ]:
    x=glob.glob( f"/scratch/enis/data/nna/real/{i}/**/*_rawembeddings*.npy",recursive=True)
    print(i,len(x))
    inputCSV.extend(x)
# save_to_csv("../src/job_logs/inferenceRun3.csv",[[str(afile)] for afile in inputCSV])
# Write the small sample list `t` (built above) as a test inference input.
save_to_csv("/home/enis/projects/nna/job_logs/inferenceRunVtest.csv",[[str(afile)] for afile in t])
# _preprocessed.npy
# Rebuild the master input list of raw audio files (excluding some folders).
search_path="/tank/data/nna/real/"
ignore_folders=["/tank/data/nna/real/ivvavik/","/tank/data/nna/real/stinchcomb/dups/","/tank/data/nna/real/stinchcomb/excerpts/"]
files_path_list=list_files(search_path,ignore_folders)
files_path_list=list(set(files_path_list))
datafiles_path="/home/enis/projects/nna/data/"
input_filename = "ExperimentRunV5.txt"
thepath=datafiles_path+input_filename
with open(thepath,"w") as myfile:
    myfile.writelines("\n".join(files_path_list))
```
### Run predictions
```
#TODO all these are running on raw embeddings, makes sure thats checked before running on the input
inputCsvFile="./job_logs/inferenceRun3.csv"
inputCsvFile="/home/enis/projects/nna/data/inferenceInputs/allModels6tagRunV1_20191102.txt"
outputLogs="job_logs/MLlogs.txt"
i=0
python3 ./src/scripts/watchMLInference.py --inputCsv $inputCsvFile --modelPath "./src/nna/assets/sklearnModels/Cable_Gaussian Process_Raw_Concat_2020-03-02--14-06.joblib" --modelName "_CABLE" &>> $outputLogs &
pids[i]=$! && ((i=i+1));
python3 ./src/scripts/watchMLInference.py --inputCsv $inputCsvFile --modelPath "./src/nna/assets/sklearnModels/Running Water_Neural Net_Raw_Concat_2020-03-02--14-06.joblib" --modelName "_RUNNINGWATER" &>> $outputLogs &
pids[i]=$! && ((i=i+1));
python3 ./src/scripts/watchMLInference.py --inputCsv $inputCsvFile --modelPath "./src/nna/assets/sklearnModels/Insect_Linear SVM_Raw_Concat_2020-03-02--14-06.joblib" --modelName "_INSECT" &>> $outputLogs &
pids[i]=$! && ((i=i+1));
python3 ./src/scripts/watchMLInference.py --inputCsv $inputCsvFile --modelPath "./src/nna/assets/sklearnModels/Rain_Gaussian Process_Raw_Concat_2020-03-02--14-06.joblib" --modelName "_RAIN" &>> $outputLogs &
pids[i]=$! && ((i=i+1));
python3 ./src/scripts/watchMLInference.py --inputCsv $inputCsvFile --modelPath "./src/nna/assets/sklearnModels/Water Bird_RBF SVM_Raw_many2one_2020-03-02--14-06.joblib" --modelName "_WATERBIRD" &>> $outputLogs &
pids[i]=$! && ((i=i+1));
python3 ./src/scripts/watchMLInference.py --inputCsv $inputCsvFile --modelPath "./src/nna/assets/sklearnModels/Wind_Neural Net_Raw_many2one_2020-03-02--14-06.joblib" --modelName "_WIND" &>> $outputLogs &
pids[i]=$! && ((i=i+1));
python3 ./src/scripts/watchMLInference.py --inputCsv $inputCsvFile --modelPath "./src/nna/assets/sklearnModels/Songbird_Neural Net_Raw_many2one_2020-03-02--14-06.joblib" --modelName "_SONGBIRD" &>> $outputLogs &
pids[i]=$! && ((i=i+1));
python3 ./src/scripts/watchMLInference.py --inputCsv $inputCsvFile --modelPath "./src/nna/assets/sklearnModels/Aircraft_AdaBoost_Raw_Average_2020-03-02--14-06.joblib" --modelName "_AIRCRAFT" &>> $outputLogs &
pids[i]=$! && ((i=i+1));
for pid in ${pids[*]}; do
wait $pid
done;
python3 slack_message.py -t "MLInference job ended" -f $outputLogs &
AIRCRAFT,SONGBIRD,WATERBIRD,WIND
# set(inputCSV)
import time
# Compare the inference input list against each model's output queue to
# count files that are still unprocessed per tag.
logsPath="../src/job_logs/"
inputCSVset=set(inputCSV)
for subDirectoryAddon in subDirectoryAddons:
    csvFile=logsPath+subDirectoryAddon+"_output_queue.csv"
    print(subDirectoryAddon)
    csvFileSet=set(read_queue(csvFile))
    print(len(inputCSVset.difference(csvFileSet)))
    # for i in inputCSVset.difference(csvFileSet):
    #     print(i)
```
| github_jupyter |
# Image Classification using MobileNetV2
This Code Template is for simple image classification using MobileNetV2.
<img src="https://cdn.blobcity.com/assets/gpu_required.png" height="25" style="margin-bottom:-15px" />
### Required Packages
```
import os
import shutil
import numpy as np
from glob import glob
import itertools
from sklearn.metrics import plot_confusion_matrix, classification_report, confusion_matrix
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras import Input
from tensorflow.keras.layers import Dense, Conv2D, MaxPool2D, GlobalMaxPool2D, Dropout
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras import models
from tensorflow.keras import layers
from tensorflow.keras import optimizers
from tensorflow.keras.applications import MobileNetV2
import matplotlib.pyplot as plt
import cv2
import random
import seaborn as sns
```
### Initialization
Path of the main folder containing train and test folders
```
# Dataset root. os.path.dirname('') yields '', so the train/test paths
# below resolve relative to the current working directory.
base_dir = os.path.dirname('')
```
Path of the train and test folders containing image files
```
train_dir = os.path.join(base_dir, 'train') #train images root (one subfolder per class)
test_dir = os.path.join(base_dir, 'test') #test images root (same layout)
```
List of target categories which are required for model training and predictions.
```
# Class names; must match the subdirectory names under train/ and test/.
# NOTE(review): left empty in this template -- fill in before running.
target = []
```
### Data Fetching
The OS module in Python provides functions for interacting with the operating system. This module provides a portable way of using operating system-dependent functionality.
OpenCV-Python is a library of Python bindings designed to solve computer vision problems.
We will use the <Code>os.path</Code> module to interact with the file system and the <Code>cv2.imread()</Code> method to load an image from the specified file path.
### Directory Structure
<pre style="font-size: 10.0pt; font-family: Arial; line-height: 2; letter-spacing: 1.0pt;" >
<b>Master Directory</b>
|__<b>train</b>
|______ <b>Class 1</b>: [1.jpg, 2.jpg, 3.jpg ....]
|______ <b>Class 2</b>: [1.jpg, 2.jpg, 3.jpg ....]
|______ <b>Class 3</b>: [1.jpg, 2.jpg, 3.jpg ....]
:
:
|__<b>test</b>
|______ <b>Class 1</b>: [1.jpg, 2.jpg, 3.jpg ....]
|______ <b>Class 2</b>: [1.jpg, 2.jpg, 3.jpg ....]
|______ <b>Class 3</b>: [1.jpg, 2.jpg, 3.jpg ....]
:
:
</pre>
```
# useful for getting number of files
train_image_files = glob(train_dir + '/*/*.jp*g')
test_image_files = glob(test_dir + '/*/*.jp*g')
# Loading the image dataset and plotting some images from all target categories
def plot_image(data, target):
    """Plot the first two images of each class directory under `data`."""
    count = 0
    for category in target:
        path=os.path.join(data, category)
        flg = 0
        ar = []
        # storing image paths in an array (only the first two files)
        for img in os.listdir(path):
            ar.append(os.path.join(path,img))
            flg+= 1
            if flg==2:
                break
        # plotting the images in dataset
        plt.figure(figsize=(5,5))
        for i in range(2):
            d1 = ar[i]
            # OpenCV loads BGR; convert to RGB for matplotlib display.
            img_array = cv2.imread(d1,cv2.IMREAD_COLOR)
            img_array = cv2.cvtColor(img_array, cv2.COLOR_BGR2RGB)
            ax = plt.subplot(1,2,i+1)
            plt.imshow(img_array)
            plt.title(category)
            plt.axis("off")
        count+= 1
        if count == len(target):
            break
plot_image(train_dir,target)
```
### Data Preprocessing
An image occupies a significant amount of space. Since images represent huge datasets, loading them all into memory (RAM) at once can lead to severe scalability issues and memory limitations. Therefore, in practice, images are loaded directly from the directory structure.
TensorFlow 2 comes with a handy class called <Code>ImageDataGenerator</Code> that augments images. It takes the following parameters:
1. **rotation_range**: Int.
>Degree range for random rotations.
2. **width_shift_range**: Float, 1-D array-like or int
* float: fraction of total width, if < 1, or pixels if >= 1.
* 1-D array-like: random elements from the array.
* int: integer number of pixels from interval (-width_shift_range, +width_shift_range)
* With width_shift_range=2 possible values are integers [-1, 0, +1], same as with width_shift_range=[-1, 0, +1], while with width_shift_range=1.0 possible values are floats in the interval [-1.0, +1.0).
3. **height_shift_range**: Float, 1-D array-like or int.
>Values have similar meaning as width_shift_range
4. **zoom_range**: Float or [lower, upper].
>Range for random zoom. If a float, [lower, upper] = [1-zoom_range, 1+zoom_range].
channel_shift_range Float. Range for random channel shifts.
5. **horizontal_flip**: Boolean.
>Randomly flip inputs horizontally.
6. **rescale**: rescaling factor. Defaults to None.
>If None or 0, no rescaling is applied, otherwise we multiply the data by the value provided (after applying all other transformations).
7. **data_format** Image data format, either "channels_first" or "channels_last".
>"channels_last" mode means that the images should have shape (samples, height, width, channels), "channels_first" mode means that the images should have shape (samples, channels, height, width).
For more information, refer to the [API](https://www.tensorflow.org/api_docs/python/tf/keras/preprocessing/image/ImageDataGenerator#args)
```
# Augmenting generator for training; pixel values rescaled to [0, 1].
train_datagenerator = ImageDataGenerator(rescale=1./255,
                                         rotation_range=20,
                                         width_shift_range=.15,
                                         height_shift_range=.15,
                                         horizontal_flip=True,
                                         zoom_range=0.2,
                                         data_format = "channels_last")
test_datagenerator = ImageDataGenerator(rescale = 1./255) #No need for augmentation for test dataset
```
### Flow From Directory
<Code>ImageDataGenerator</Code> class uses the method <Code>flow_from_directory</Code> to load the images. However, for this method to work, it is necessary the directory follow the above directory structure.
<Code>flow_from_directory</Code> takes the path to a directory & generates batches of augmented data. It takes following parameters:
1. **directory**: string, path to the target directory.
>It should contain one subdirectory per class. Any PNG, JPG, BMP, PPM or TIF images inside each of the subdirectories directory tree will be included in the generator.
2. **target_size**: Tuple of integers (height, width), defaults to (256,256).
>The dimensions to which all images found will be resized.
color_mode One of "grayscale", "rgb", "rgba". Default: "rgb". Whether the images will be converted to have 1, 3, or 4 channels.
3. **classes**: Default: None.
>Optional list of class subdirectories (e.g. ['dogs', 'cats']).
4. **class_mode**: Default: "categorical".
>One of "categorical", "binary", "sparse", "input", or None.
5. **batch_size**: Default: 32
>Size of the batches of data.
6. **shuffle**: Default: True
>Whether to shuffle the data. If set to False, sorts the data in alphanumeric order.
For more information, refer to the [API](https://www.tensorflow.org/api_docs/python/tf/keras/preprocessing/image/ImageDataGenerator#flow_from_directory)
```
batch_size = 64 # number of images to process at a time
IMG_SHAPE = 224 # convert all images to be 224 x 224 (MobileNetV2 input size)
train_data = train_datagenerator.flow_from_directory(directory = train_dir,
                                                     batch_size = batch_size,
                                                     target_size = (IMG_SHAPE, IMG_SHAPE),
                                                     shuffle = True, #for training only
                                                     class_mode = 'sparse', #type of problem (sparse, binary, or categorical, etc.)
                                                     classes = target)
test_data = test_datagenerator.flow_from_directory(directory = test_dir,
                                                   batch_size = batch_size,
                                                   target_size = (IMG_SHAPE, IMG_SHAPE),
                                                   shuffle = False, # keep order so labels align with predictions
                                                   class_mode = 'sparse',
                                                   classes = target)
```
### Model
**MobileNetV2**
This function returns a Keras image classification model, optionally loaded with weights pre-trained on ImageNet.
For image classification use cases, see this page for detailed examples.
For transfer learning use cases, make sure to read the guide to transfer learning & fine-tuning.
Note: each Keras Application expects a specific kind of input preprocessing. For MobileNet, call tf.keras.applications.mobilenet.preprocess_input on your inputs before passing them to the model. mobilenet.preprocess_input will scale input pixels between -1 and 1.
For more Information:
[Refer](https://keras.io/api/applications/mobilenet/)
```
loss = "binary_crossentropy"
output_activation = 'sigmoid'
learning_rate = 0.001
epochs = 5
NUM_COLOR_CHANNELS = 3
conv_base = MobileNetV2(weights='imagenet',
include_top=False,
input_shape=(224, 224, 3))
conv_base.summary()
model = models.Sequential()
model.add(conv_base)
model.add(layers.Flatten())
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(1, activation=output_activation))
model.summary()
# compile model
model.compile(loss=loss,
optimizer=optimizers.RMSprop(learning_rate=learning_rate),
metrics=['accuracy'])
# Fit the model
history = model.fit(train_data,
epochs=epochs,
validation_data=test_data)
```
### Plot Curves
Plot loss and accuracy metrics to see how the model trains
```
def plot_model(model_name_history, metric_name):
    """Plot training/validation loss and the given metric side by side."""
    # Plot loss per iteration
    fig, ax = plt.subplots(1, 2,figsize=(15,5))
    ax[0].plot(model_name_history.history['loss'], label='loss')
    ax[0].plot(model_name_history.history['val_loss'], label='val_loss')
    ax[0].legend()
    # Plot accuracy per iteration
    ax[1].plot(model_name_history.history[metric_name], label='accuracy')
    ax[1].plot(model_name_history.history[f"val_{metric_name}"], label='val_accuracy')
    ax[1].legend()
plot_model(history, 'accuracy')
```
### Model Evaluation (Accuracy)
```
predictions = model.predict(test_data)
# NOTE(review): with a single sigmoid output, predictions have shape (n, 1)
# and argmax along axis 1 is always 0 -- for binary output threshold at 0.5
# instead; confirm the head layout before trusting y_pred.
y_pred = np.argmax(predictions,axis = 1)
y_test = test_data.classes
class_labels = list(test_data.class_indices.keys())
loss, accuracy = model.evaluate(test_data)
print("Loss: ", loss)
print("Accuracy Score: ",accuracy)
```
### Confusion Matrix
A confusion matrix is utilized to understand the performance of the classification model or algorithm in machine learning for a given test set where results are known.
```
def get_confusion_matrix(data, N):
    """Run the global `model` over `data` until N samples are seen and
    return the confusion matrix of true vs. predicted labels."""
    print("Generating confusion matrix", N)
    predictions = []
    targets = []
    batch_count = 0
    for x, y in data:
        batch_count += 1
        if batch_count % 50 == 0:
            print(batch_count)  # progress indicator every 50 batches
        p = model.predict(x)
        # BUG FIX: the model's single sigmoid output has shape (batch, 1);
        # argmax over axis=1 is always 0. Threshold at 0.5 instead.
        y_pred = (p > 0.5).astype(int).ravel()
        predictions = np.concatenate((predictions, y_pred))
        targets = np.concatenate((targets, y))
        # `data` is a generator that loops forever; stop after N samples.
        if len(targets) >= N:
            break
    return confusion_matrix(targets, predictions)
cm = get_confusion_matrix(test_data, len(test_image_files))
# Render the confusion matrix as an annotated heatmap ('d' = integer counts).
sns.heatmap(cm,
annot = True,
fmt = 'd',
xticklabels=class_labels,
yticklabels= class_labels,
cmap = plt.cm.Blues)
plt.show()
```
#### Classification Report
A Classification report is used to measure the quality of predictions from a classification algorithm. How many predictions are True, how many are False.
* **where**:
- Precision:- Accuracy of positive predictions.
- Recall:- Fraction of positives that were correctly identified.
- f1-score:- the harmonic mean of precision and recall (a single balanced measure of positive-class performance)
- support:- Support is the number of actual occurrences of the class in the specified dataset.
```
print(classification_report(y_test, y_pred, target_names=class_labels))
```
#### Creator: Jay Shimpi, Github: [Profile]( https://github.com/JayShimpi22)
| github_jupyter |
# Streaming Data to your Data Lake
This workshop will walk through another common scenario when building your data lake. We will generate a Kinesis Firehose to send data to our data lake in real-time. We will walk through manually adding the metadata to the Glue Data Catalog and add the partitions for the data coming in. Finally, we will create an aggregate query that can utilize the partitions of the data generated.
```
import boto3
import botocore
import json
import project_path # path to helper methods
from lib import workshop
from pandas import read_sql
# AWS service clients used throughout this notebook.
iam = boto3.client('iam')
logs = boto3.client('logs')
firehose = boto3.client('firehose')
glue = boto3.client('glue')
s3 = boto3.client('s3')
# General variables for the region and account id for the location of the resources being created
session = boto3.session.Session()
region = session.region_name
account_id = boto3.client('sts').get_caller_identity().get('Account')
# Suffix appended to every resource name so multiple workshop users can
# share one account without name collisions.
workshop_user = 'bdw'
# Kinesis Firehose
delivery_stream_name = 'taxi-streaming_' + workshop_user # Name of the firehose to send Apache log simulations
firehose_role_name = 'taxi-firehose-role_' + workshop_user # Role used for the Kinesis firehose
firehose_policy_name = 'taxi-firehose-policy_' + workshop_user # Inline policy of the Kinesis Firehose Role
cloudwatch_logs_group_name = '/taxi_' + workshop_user
cloudwatch_logs_stream_name = 'ingestion-stream'
# Glue
database_name = 'taxi_' + workshop_user
table_name = 'yellow_streaming_' + workshop_user
```
### [Create S3 Bucket](https://docs.aws.amazon.com/AmazonS3/latest/gsg/CreatingABucket.html)
We will create an S3 bucket that will be used throughout the workshop for storing our data.
[s3.create_bucket](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#S3.Client.create_bucket) boto3 documentation
```
# Generate a unique bucket name with the 'taxi-' prefix and create the
# bucket in the notebook's region.
bucket = workshop.create_bucket_name('taxi-')
session.resource('s3').create_bucket(Bucket=bucket, CreateBucketConfiguration={'LocationConstraint': region})
print(bucket)
```
### [Create the Role for the Kinesis Firehose](https://docs.aws.amazon.com/firehose/latest/dev/controlling-access.html)
When you're using an Amazon S3 destination, Kinesis Data Firehose delivers data to your S3 bucket and can optionally use an AWS KMS key that you own for data encryption. If error logging is enabled, Kinesis Data Firehose also sends data delivery errors to your CloudWatch log group and streams. You are required to have an IAM role when creating a delivery stream. Kinesis Data Firehose assumes that IAM role and gains access to the specified bucket, key, and CloudWatch log group and streams.
[Kinesis Firehose access to S3 Destination](https://docs.aws.amazon.com/firehose/latest/dev/controlling-access.html#using-iam-s3)
```
# Trust policy: only the Kinesis Data Firehose service from THIS account
# may assume the role (the sts:ExternalId condition pins the account id).
role_doc = {
"Version": "2012-10-17",
"Statement": [
{"Sid": "",
"Effect": "Allow",
"Principal": {
"Service": "firehose.amazonaws.com"
},
"Action": "sts:AssumeRole",
"Condition": {
"StringEquals": {
"sts:ExternalId": account_id
}
}
}]
}
# Permissions policy: CloudWatch metrics/logs for delivery-error logging,
# plus S3 access scoped to the workshop bucket (bucket and its objects).
inline_policy = {
"Version": "2012-10-17",
"Statement": [
{
"Action": [
"cloudwatch:*",
"cloudwatchlogs:*"
],
"Resource": [
"*"
],
"Effect": "Allow"
},
{
"Action": [
"s3:*"
],
"Resource": [
"arn:aws:s3:::" + bucket + "/*",
"arn:aws:s3:::" + bucket
],
"Effect": "Allow"
}
]
}
# Helper creates the role with the trust policy and attaches the inline policy.
role_arn = workshop.create_role(iam, firehose_role_name, json.dumps(role_doc), firehose_policy_name, json.dumps(inline_policy))
print(role_arn)
```
## Create the CloudWatch Log Group and Stream
[Monitoring Kinesis Data Firehose](https://docs.aws.amazon.com/firehose/latest/dev/monitoring-with-cloudwatch-logs.html)
```
# Create the CloudWatch log group and stream that the Firehose will use
# for delivery-error logging; print the HTTP status of each call.
response = logs.create_log_group(logGroupName=cloudwatch_logs_group_name)
print('CloudWatch Log Group status: ' + str(response['ResponseMetadata']['HTTPStatusCode']))

response = logs.create_log_stream(logGroupName=cloudwatch_logs_group_name,
                                  logStreamName=cloudwatch_logs_stream_name)
print('CloudWatch Log Stream status: ' + str(response['ResponseMetadata']['HTTPStatusCode']))
```
### [Copy Sample Data to S3 bucket](https://boto3.amazonaws.com/v1/documentation/api/latest/guide/s3-example-download-file.html)
We will download some files from New York City Taxi and Limousine Commission (TLC) Trip Record Data dataset available on the [AWS Open Data Registry](https://registry.opendata.aws/nyc-tlc-trip-records-pds/).
```
!aws s3 cp s3://nyc-tlc/trip\ data/yellow_tripdata_2017-01.csv .
```
### [Create the Kinesis Firehose we will use to send Apache Logs to our Data Lake](https://docs.aws.amazon.com/firehose/latest/dev/what-is-this-service.html)
Amazon Kinesis Data Firehose is a fully managed service for delivering real-time streaming data to destinations such as Amazon Simple Storage Service (Amazon S3), Amazon Redshift, Amazon Elasticsearch Service (Amazon ES), and Splunk. Kinesis Data Firehose is part of the Kinesis streaming data platform, along with Kinesis Data Streams, Kinesis Video Streams, and Amazon Kinesis Data Analytics. With Kinesis Data Firehose, you don't need to write applications or manage resources. You configure your data producers to send data to Kinesis Data Firehose, and it automatically delivers the data to the destination that you specified. You can also configure Kinesis Data Firehose to transform your data before delivering it.
In this example, we will create custom S3 prefixes for when the data lands in S3. This will allow us to precreate the partitions that will be cataloged in the Glue Data Catalog. To find more information follow this [link](https://docs.aws.amazon.com/firehose/latest/dev/s3-prefixes.html)
[firehose.create_delivery_stream](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/firehose.html#Firehose.Client.create_delivery_stream)
```
# Create a DirectPut delivery stream writing GZIP-compressed records into
# Hive-style year/month/day/hour prefixes — these prefixes become the Glue
# table partitions later in the notebook. Buffers flush at 50 MB or 60 s,
# whichever comes first; errors go to a parallel taxi-error/ prefix.
response = firehose.create_delivery_stream(
DeliveryStreamName=delivery_stream_name,
DeliveryStreamType='DirectPut',
S3DestinationConfiguration={
'RoleARN': role_arn,
'BucketARN': 'arn:aws:s3:::' + bucket,
'Prefix': 'datalake/taxi/streaming/year=!{timestamp:yyyy}/month=!{timestamp:MM}/day=!{timestamp:dd}/hour=!{timestamp:HH}/',
'ErrorOutputPrefix': "datalake/taxi-error/streaming/year=!{timestamp:yyyy}/month=!{timestamp:MM}/day=!{timestamp:dd}/hour=!{timestamp:HH}/!{firehose:error-output-type}",
'BufferingHints': {
'SizeInMBs': 50,
'IntervalInSeconds': 60
},
'CompressionFormat': 'GZIP',
'EncryptionConfiguration': {
'NoEncryptionConfig': 'NoEncryption'
},
'CloudWatchLoggingOptions': {
'Enabled': True,
'LogGroupName': cloudwatch_logs_group_name,
'LogStreamName': cloudwatch_logs_stream_name
}
}
)
```
### Wait for the Kinesis Firehose to become 'Active'
The Kinesis Firehose Delivery Stream is in the process of being created.
```
import time

def _stream_status():
    """Return the current status of the workshop delivery stream."""
    desc = firehose.describe_delivery_stream(DeliveryStreamName=delivery_stream_name)
    return desc['DeliveryStreamDescription']['DeliveryStreamStatus']

# Poll every 30 seconds until the stream leaves the CREATING state.
status = _stream_status()
print(status)
while status == 'CREATING':
    time.sleep(30)
    status = _stream_status()
    print(status)
print('Kinesis Firehose created.')
```
### Send simulated Taxi events to Kinesis Firehose
The code below will generate taxi events from the file we downloaded and send them to the Kinesis Data Firehose. We will generate new times for the data to allow them to fit into the current time window. To optimize writes to the firehose we could also leverage the [firehose.put_record_batch](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/firehose.html#Firehose.Client.put_record_batch) call as well.
[firehose.put_record](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/firehose.html#Firehose.Client.put_record)
```
import csv
import datetime
import random

max_records = 1000  # cap on the number of simulated events to send
fmt = "%Y-%m-%d %H:%M:%S"
cnt = 0
# FIX: use a context manager so the CSV file handle is always closed
# (the original opened the file and never closed it).
with open('yellow_tripdata_2017-01.csv') as f:
    csv_f = csv.reader(f)
    next(csv_f, None)  # skip the header record for the csv file
    for row in csv_f:
        if not row:
            continue
        # Re-stamp pickup/dropoff into the current time window so the
        # records land in today's Firehose year/month/day/hour partition.
        drop_off = datetime.datetime.now()
        pick_up = drop_off - datetime.timedelta(minutes=random.randint(5, 60))
        data_row = {
            'vendorid': row[0],
            'tpep_pickup_datetime': pick_up.strftime(fmt),  # change to updated time
            'tpep_dropoff_datetime': drop_off.strftime(fmt),  # change to updated time
            'passenger_count': row[3],
            'trip_distance': row[4],
            'ratecodeid': row[5],
            'store_and_fwd_flag': row[6],
            'pulocationid': row[7],
            'dolocationid': row[8],
            'payment_type': row[9],
            'fare_amount': row[10],
            'extra': row[11],
            'mta_tax': row[12],
            'tip_amount': row[13],
            'tolls_amount': row[14],
            'improvement_surcharge': row[15],
            'total_amount': row[16]
        }
        # Newline-delimited JSON so each record is one parseable line in S3.
        res = firehose.put_record(
            DeliveryStreamName=delivery_stream_name,
            Record={
                'Data': json.dumps(data_row) + '\n'
            }
        )
        time.sleep(0.1)  # crude rate limit to avoid Firehose throttling
        cnt = cnt + 1
        if cnt >= max_records:
            break
```
### Wait for Firehose to persist data to S3
```
time.sleep(60)
```
### Finding out the current execution role of the Notebook
We are using SageMaker Python SDK to retrieve the current role for this Notebook which needs to be enhanced to support the functionality in AWS Glue.
```
# Import SageMaker Python SDK to get the Session and execution_role
import sagemaker
from sagemaker import get_execution_role
sess = sagemaker.Session()
role = get_execution_role()
# The role ARN ends in '/<role-name>'; keep only the name for console links.
role_name = role[role.rfind('/') + 1:]
print(role_name)
```
### Adding AWS Glue as an additional trusted entity to this role
This step is needed if you want to pass the execution role of this Notebook while calling Glue APIs as well without creating an additional **Role**. If you have not used AWS Glue before, then this step is mandatory.
If you have used AWS Glue previously, then you should have an already existing role that can be used to invoke Glue APIs. In that case, you can pass that role while calling Glue (later in this notebook) and skip this next step.
On the IAM dashboard, please click on **Roles** on the left sidenav and search for this Role. Once the Role appears, click on the Role to go to its **Summary** page. Click on the **Trust relationships** tab on the **Summary** page to add AWS Glue as an additional trusted entity.
Click on **Edit trust relationship** and replace the JSON with this JSON.
```
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"Service": [
"sagemaker.amazonaws.com",
"glue.amazonaws.com"
]
},
"Action": "sts:AssumeRole"
}
]
}
```
Once this is complete, click on **Update Trust Policy** and you are done.

```
print("https://console.aws.amazon.com/iam/home?region={0}#/roles/{1}".format(region, role_name))
```
### Create the [AWS Glue Catalog Database](https://docs.aws.amazon.com/glue/latest/dg/define-database.html)
When you define a table in the AWS Glue Data Catalog, you add it to a database. A database is used to organize tables in AWS Glue. You can organize your tables using a crawler or using the AWS Glue console. A table can be in only one database at a time.
There is a central Glue Catalog for each AWS account. When creating the database you will use your account id declared above as `account_id`
[glue.create_database](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/glue.html#Glue.Client.create_database)
```
def create_db(glue_client, database_name):
    """Create the specified Glue database if it does not exist"""
    try:
        # EAFP: probe for the database; a miss raises EntityNotFoundException.
        glue_client.get_database(Name=database_name)
        return  # already present — nothing to do
    except glue_client.exceptions.EntityNotFoundException:
        pass
    print("Creating database: %s" % database_name)
    glue_client.create_database(DatabaseInput={'Name': database_name})

create_db(glue, database_name)
```
### [Create the Streaming table in Glue](https://docs.aws.amazon.com/glue/latest/dg/tables-described.html)
When you define a table in AWS Glue, you also specify the value of a classification field that indicates the type and format of the data that's stored in that table. If a crawler creates the table, these classifications are determined by either a built-in classifier or a custom classifier. If you create a table manually in the console or by using an API, you specify the classification when you define the table. For more information about creating a table using the AWS Glue console, see [Working with Tables on the AWS Glue Console](https://docs.aws.amazon.com/glue/latest/dg/console-tables.html).
We also create the partitions that will be generated by using the Kinesis Firehose. You can find those values in the `PartitionKeys` section of the `create_table` call in Glue.
[glue.create_table](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/glue.html#Glue.Client.create_table)
```
prefix = 'datalake/taxi/streaming/'
location = 's3://{0}/{1}'.format(bucket,prefix)

# Column schema of the streaming yellow-taxi table (name, Hive type).
_columns = [
    ('vendorid', 'bigint'),
    ('tpep_pickup_datetime', 'string'),
    ('tpep_dropoff_datetime', 'string'),
    ('passenger_count', 'bigint'),
    ('trip_distance', 'double'),
    ('ratecodeid', 'bigint'),
    ('store_and_fwd_flag', 'string'),
    ('pulocationid', 'bigint'),
    ('dolocationid', 'bigint'),
    ('payment_type', 'bigint'),
    ('fare_amount', 'double'),
    ('extra', 'double'),
    ('mta_tax', 'double'),
    ('tip_amount', 'double'),
    ('tolls_amount', 'double'),
    ('improvement_surcharge', 'double'),
    ('total_amount', 'double'),
]

# Register the external JSON table. PartitionKeys mirror the Firehose
# prefix layout (year/month/day/hour) created earlier.
response = glue.create_table(
    CatalogId=account_id,
    DatabaseName=database_name,
    TableInput={
        'Name': table_name,
        'Description': 'Yellow Taxi Streaming dataset',
        'StorageDescriptor': {
            'Columns': [{'Name': name, 'Type': typ} for name, typ in _columns],
            'Location': location,
            'InputFormat': 'org.apache.hadoop.mapred.TextInputFormat',
            'OutputFormat': 'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',
            'SerdeInfo': {
                'SerializationLibrary': 'org.openx.data.jsonserde.JsonSerDe',
                'Parameters': {
                    # Comma-separated JSON paths, in column order.
                    'paths': ','.join(name for name, _ in _columns)
                }
            },
        },
        'PartitionKeys': [
            {'Name': key, 'Type': 'bigint'}
            for key in ('year', 'month', 'day', 'hour')
        ],
        'TableType': 'EXTERNAL_TABLE',
        'Parameters': {
            'classification': 'json'
        }
    }
)
print('https://{0}.console.aws.amazon.com/glue/home?region={0}#database:name={1}'.format(region, database_name))
```
### Update partitions for Glue Table
When manually creating a table with partitions you need to notify the Glue Data Catalog of the partitions. There are a number of ways to populate the partitions, you could use Athena and run `MSCK REPAIR TABLE table_name` to recover partitions and data associated with partitions as an option, but in this workshop we will discover the associated partitions in the S3 bucket and add them with the Glue Partition API.
[glue.batch_create_partition](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/glue.html#Glue.Client.batch_create_partition)
```
response = s3.list_objects_v2(
    Bucket=bucket,
    Prefix=prefix
)

# Load the table created above to reuse its StorageDescriptor (columns,
# serde, formats) in every partition definition.
streaming_table = glue.get_table(
    CatalogId=account_id,
    DatabaseName=database_name,
    Name=table_name
)
storage_descriptor = streaming_table['Table']['StorageDescriptor']

# De-dupe: many S3 objects share a single year/month/day/hour prefix, so
# collect JSON-serialized partition inputs in a set.
partitions = set()
for obj in response['Contents']:
    # Drop the first 3 prefixes (the datalake/taxi/streaming/ data-lake
    # location) and the trailing object name, keeping year=/month=/day=/hour=.
    keys = obj['Key'].split('/')[3:-1]
    # The values after '=' are the partition values, in key order.
    values = [k.split('=')[1] for k in keys]
    storage_descriptor['Location'] = '{0}{1}'.format(location, '/'.join(keys))
    partitions.add(json.dumps({"StorageDescriptor": storage_descriptor, "Values": list(values)}))

# Batch-register the discovered partitions with the Glue Data Catalog.
response = glue.batch_create_partition(
    CatalogId=account_id,
    DatabaseName=database_name,
    TableName=table_name,
    PartitionInputList=[json.loads(part) for part in partitions]
)
```
### Get the result of the partition load
If you run the above commands multiple times or over partitions that have already been registered you will see an error message `Partition already exists.` and a list of partition values that already exist.
```
print(response)
```
### Query the Data Lake with Athena
To query the tables created by the crawler we will be installing a python library for querying the data in the Glue Data Catalog with Athena. For more information jump to [PyAthena](https://pypi.org/project/PyAthena/)
```
!pip install PyAthena
```
### Simple Select Query
Notice you have the values for the partitions as part of the table returned. You can use these values to minimize the number of objects scanned in the S3 location helping to improve both performance and cost of the Athena queries.
```
from pyathena import connect
from pyathena.util import as_pandas
# Athena requires an S3 staging location where it writes query results.
cursor = connect(region_name=region, s3_staging_dir='s3://'+bucket+'/athena/temp').cursor()
cursor.execute('select * from ' + database_name + '.' + table_name + ' limit 10')
df = as_pandas(cursor)
df.head(5)
```
### Query with where clause using partitions
In this example, if we had more partitions you would benefit from the partitions in S3 for the `year`, `month`, and `day` when looking for specific data that leverages the partitions. Here we parse the variables from the partition we just added to Glue and use them in an aggregate query to return the records generated for the day.
```
# Parse year/month/day/hour back out of one registered partition's S3
# location. Splitting 's3://bucket/datalake/taxi/streaming/year=.../...'
# on '/' puts the partition components at indices 6-9.
path = json.loads(list(partitions)[0])['StorageDescriptor']['Location'].split('/')
year = path[6].split('=')[1]
month = path[7].split('=')[1]
day = path[8].split('=')[1]
hour = path[9].split('=')[1]
# NOTE(review): %%time is a cell magic and must be the first line of a
# notebook cell; as positioned here it will not take effect — confirm the
# original cell boundaries.
%%time
cursor.execute('''SELECT count(vendorid) as cnt FROM ''' + database_name + '''.''' + table_name + '''
WHERE year = ''' + year + '''
AND month = ''' + month + '''
AND day = ''' + day)
df = as_pandas(cursor)
df
```
## Clean Up
That's it, in this notebook we pulled data from the open data registry, cataloged it by creating a new table and partition in the Glue Data Catalog, and finally queried the results with Athena.
```
# Tear down everything the notebook created: inline policy + role, log
# group, delivery stream, Glue database, and finally the S3 bucket.
response = iam.delete_role_policy(
RoleName=firehose_role_name,
PolicyName=firehose_policy_name
)
iam.delete_role(RoleName=firehose_role_name)
response = logs.delete_log_group(
logGroupName=cloudwatch_logs_group_name
)
firehose.delete_delivery_stream(
DeliveryStreamName=delivery_stream_name
)
response = glue.delete_database(
CatalogId = account_id,
Name = database_name
)
!aws s3 rb s3://$bucket --force
```
| github_jupyter |
## 4.3 컬러 이미지를 분류하는 CNN 구현
CNN을 이용해 사진을 분류하는 방법을 다룹니다.
### 4.3.1 분류 CNN 패키지 임포트
1. 필요한 패키지들을 임포트합니다.
---
### 4.3.6 전체 코드
```
# set to use CPU
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
# 1. 분류 CNN 패키지 임포트
from sklearn import model_selection, metrics
from sklearn.preprocessing import MinMaxScaler
import numpy as np
import matplotlib.pyplot as plt
import os
from keras import backend as K
from keras.utils import np_utils
from keras.models import Model
from keras.layers import Input, Conv2D, MaxPooling2D, Flatten, Dense, Dropout
from keraspp import skeras
from keraspp import sfile
# 2. 분류 CNN 모델링
class CNN(Model):
    """Small convolutional classifier built with the Keras functional API.

    Exposes `cl_part` (convolutional feature extractor) and `fl_part`
    (fully-connected feature layer) as sub-models for feature inspection.
    """

    def __init__(model, nb_classes, in_shape=None):
        super(CNN,model).__init__() # added 2021-10-01
        model.nb_classes = nb_classes
        model.in_shape = in_shape
        model.build_model()
        # Re-initialize as a functional Model once the graph is defined.
        super().__init__(model.x, model.y)
        model.compile()

    def build_model(model):
        """Define the conv/pool/dense graph and the feature sub-models."""
        nb_classes = model.nb_classes
        in_shape = model.in_shape
        x = Input(in_shape)
        h = Conv2D(32, kernel_size=(3, 3), activation='relu',
                   input_shape=in_shape)(x)
        h = Conv2D(64, (3, 3), activation='relu')(h)
        h = MaxPooling2D(pool_size=(2, 2))(h)
        h = Dropout(0.25)(h)
        h = Flatten()(h)
        z_cl = h  # output of the convolutional feature extractor
        h = Dense(128, activation='relu')(h)
        h = Dropout(0.5)(h)
        z_fl = h  # output of the fully-connected feature layer
        y = Dense(nb_classes, activation='softmax', name='preds')(h)
        # CONSISTENCY FIX: this cell imports `Model` directly; the original
        # referenced `keras.models.Model`, which is a NameError when the cell
        # runs before `import keras` (done only in a later cell).
        model.cl_part = Model(x, z_cl)
        model.fl_part = Model(x, z_fl)
        model.x, model.y = x, y

    def compile(model):
        """Compile with categorical cross-entropy and the Adadelta optimizer."""
        Model.compile(model, loss='categorical_crossentropy',
                      optimizer='adadelta', metrics=['accuracy'])
# 3. 분류 CNN을 위한 데이터 준비
class DataSet:
    """Split (X, y) into train/test sets, scale pixels to [0, 1], and
    one-hot encode labels for an `nb_classes`-way classifier."""

    def __init__(self, X, y, nb_classes, scaling=True,
                 test_size=0.2, random_state=0):
        """
        X is originally vector. Hence, it will be transformed
        to 2D images with a channel (i.e, 3D).
        """
        self.X = X
        self.add_channels()
        X = self.X
        # the data, shuffled and split between train and test sets
        # BUG FIX: honor the `test_size` parameter (it was hard-coded to 0.2,
        # silently ignoring the caller's value).
        X_train, X_test, y_train, y_test = model_selection.train_test_split(
            X, y, test_size=test_size, random_state=random_state)
        print(X_train.shape, y_train.shape)
        X_train = X_train.astype('float32')
        X_test = X_test.astype('float32')
        if scaling:
            # scaling to have (0, 1) for each feature (each pixel)
            # Fit the scaler on train only, then apply it to test.
            scaler = MinMaxScaler()
            n = X_train.shape[0]
            X_train = scaler.fit_transform(
                X_train.reshape(n, -1)).reshape(X_train.shape)
            n = X_test.shape[0]
            X_test = scaler.transform(
                X_test.reshape(n, -1)).reshape(X_test.shape)
            self.scaler = scaler
        print('X_train shape:', X_train.shape)
        print(X_train.shape[0], 'train samples')
        print(X_test.shape[0], 'test samples')
        # convert class vectors to binary class matrices
        Y_train = np_utils.to_categorical(y_train, nb_classes)
        Y_test = np_utils.to_categorical(y_test, nb_classes)
        self.X_train, self.X_test = X_train, X_test
        self.Y_train, self.Y_test = Y_train, Y_test
        self.y_train, self.y_test = y_train, y_test

    def add_channels(self):
        """Reshape grayscale (N, H, W) input to carry an explicit channel
        axis, honoring the backend's channel ordering; record input_shape."""
        X = self.X
        if len(X.shape) == 3:
            N, img_rows, img_cols = X.shape
            # FIX: K.image_dim_ordering() was removed from modern Keras;
            # image_data_format() is the supported equivalent
            # ('th' == 'channels_first').
            if K.image_data_format() == 'channels_first':
                X = X.reshape(X.shape[0], 1, img_rows, img_cols)
                input_shape = (1, img_rows, img_cols)
            else:
                X = X.reshape(X.shape[0], img_rows, img_cols, 1)
                input_shape = (img_rows, img_cols, 1)
        else:
            input_shape = X.shape[1:]  # channel is already included.
        self.X = X
        self.input_shape = input_shape
# 4. 분류 CNN의 학습 및 성능 평가를 위한 머신 클래스
class Machine():
    """Bundle data preparation, model construction, training, evaluation,
    and result persistence into one driver object."""

    def __init__(self, X, y, nb_classes=2, fig=True):
        self.nb_classes = nb_classes
        self.set_data(X, y)
        self.set_model()
        self.fig = fig

    def set_data(self, X, y):
        """Wrap raw arrays in a DataSet (split, scaled, one-hot encoded)."""
        self.data = DataSet(X, y, self.nb_classes)
        print('data.input_shape', self.data.input_shape)

    def set_model(self):
        """Build the CNN sized to the prepared data's input shape."""
        self.model = CNN(nb_classes=self.nb_classes,
                         in_shape=self.data.input_shape)

    def fit(self, epochs=10, batch_size=128, verbose=1):
        """Train on the prepared split and return the Keras History."""
        return self.model.fit(self.data.X_train, self.data.Y_train,
                              batch_size=batch_size, epochs=epochs,
                              verbose=verbose,
                              validation_data=(self.data.X_test, self.data.Y_test))

    def run(self, epochs=100, batch_size=128, verbose=1):
        """Train, evaluate, persist history/weights, optionally plot curves.

        Returns the name of the output folder holding the saved artifacts.
        """
        data, model = self.data, self.model
        history = self.fit(epochs=epochs,
                           batch_size=batch_size, verbose=verbose)
        score = model.evaluate(data.X_test, data.Y_test, verbose=0)
        print('Confusion matrix')
        Y_test_pred = model.predict(data.X_test, verbose=0)
        y_test_pred = np.argmax(Y_test_pred, axis=1)
        print(metrics.confusion_matrix(data.y_test, y_test_pred))
        print('Test score:', score[0])
        print('Test accuracy:', score[1])
        # Save results under a unique, timestamped output folder.
        suffix = sfile.unique_filename('datatime')
        foldname = 'output_' + suffix
        os.makedirs(foldname)
        skeras.save_history_history(
            'history_history.npy', history.history, fold=foldname)
        model.save_weights(os.path.join(foldname, 'dl_model.h5'))
        print('Output results are saved in', foldname)
        if self.fig:
            # Accuracy and loss curves side by side.
            plt.figure(figsize=(12, 4))
            plt.subplot(1, 2, 1)
            skeras.plot_acc(history)
            plt.subplot(1, 2, 2)
            skeras.plot_loss(history)
            plt.show()
        self.history = history
        return foldname
# 5. 분류 CNN의 학습 및 성능 평가 수행
from keras import datasets
import keras

# This driver assumes the TensorFlow channel ordering.
assert keras.backend.image_data_format() == 'channels_last'


class MyMachine(Machine):
    """Machine specialized to the CIFAR-10 training split (10 classes)."""

    def __init__(self):
        (X, y), (x_test, y_test) = datasets.cifar10.load_data()
        super(MyMachine, self).__init__(X, y, nb_classes=10)


def main():
    """Train the CIFAR-10 model for a quick two-epoch smoke run."""
    m = MyMachine()
    m.run(epochs=2)


if __name__ == '__main__':
    main()
```
| github_jupyter |
This is a companion notebook for the book [Deep Learning with Python, Second Edition](https://www.manning.com/books/deep-learning-with-python-second-edition?a_aid=keras&a_bid=76564dff). For readability, it only contains runnable code blocks and section titles, and omits everything else in the book: text paragraphs, figures, and pseudocode.
**If you want to be able to follow what's going on, I recommend reading the notebook side by side with your copy of the book.**
This notebook was generated for TensorFlow 2.6.
# Generative deep learning
## Text generation
### A brief history of generative deep learning for sequence generation
### How do you generate sequence data?
### The importance of the sampling strategy
**Reweighting a probability distribution to a different temperature**
```
import numpy as np
def reweight_distribution(original_distribution, temperature=0.5):
    """Return `original_distribution` reweighted to the given temperature.

    Lower temperatures sharpen the distribution (more deterministic
    sampling); higher temperatures flatten it. The result is renormalized
    to sum to 1.
    """
    scaled_logits = np.log(original_distribution) / temperature
    weights = np.exp(scaled_logits)
    return weights / np.sum(weights)
```
### Implementing text generation with Keras
#### Preparing the data
**Downloading and uncompressing the IMDB movie reviews dataset**
```
!wget https://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz
!tar -xf aclImdb_v1.tar.gz
```
**Creating a dataset from text files (one file = one sample)**
```
import tensorflow as tf
from tensorflow import keras
# One file = one sample; label_mode=None because the language model only
# needs raw text, not sentiment labels.
dataset = keras.utils.text_dataset_from_directory(
directory="aclImdb", label_mode=None, batch_size=256)
# Strip the HTML line breaks embedded in the reviews.
dataset = dataset.map(lambda x: tf.strings.regex_replace(x, "<br />", " "))
```
**Preparing a `TextVectorization` layer**
```
from tensorflow.keras.layers import TextVectorization
# Model context window (tokens per sample) and vocabulary size.
sequence_length = 100
vocab_size = 15000
text_vectorization = TextVectorization(
max_tokens=vocab_size,
output_mode="int",
output_sequence_length=sequence_length,
)
# Build the vocabulary from the review corpus.
text_vectorization.adapt(dataset)
```
**Setting up a language modeling dataset**
```
def prepare_lm_dataset(text_batch):
    """Vectorize a batch of strings into (inputs, next-token targets)."""
    token_ids = text_vectorization(text_batch)
    # Inputs are tokens [0..n-2]; targets are the same sequence shifted
    # left by one, so the model learns to predict the next token.
    return token_ids[:, :-1], token_ids[:, 1:]

lm_dataset = dataset.map(prepare_lm_dataset, num_parallel_calls=4)
```
#### A Transformer-based sequence-to-sequence model
```
import tensorflow as tf
from tensorflow.keras import layers
class PositionalEmbedding(layers.Layer):
    """Token embedding summed with a learned position embedding."""

    def __init__(self, sequence_length, input_dim, output_dim, **kwargs):
        super().__init__(**kwargs)
        self.token_embeddings = layers.Embedding(
            input_dim=input_dim, output_dim=output_dim)
        self.position_embeddings = layers.Embedding(
            input_dim=sequence_length, output_dim=output_dim)
        self.sequence_length = sequence_length
        self.input_dim = input_dim
        self.output_dim = output_dim

    def call(self, inputs):
        # Positions 0..len-1; broadcasting adds them across the batch.
        seq_len = tf.shape(inputs)[-1]
        positions = tf.range(start=0, limit=seq_len, delta=1)
        embedded_tokens = self.token_embeddings(inputs)
        embedded_positions = self.position_embeddings(positions)
        return embedded_tokens + embedded_positions

    def compute_mask(self, inputs, mask=None):
        # Token id 0 is padding; propagate a mask hiding it downstream.
        return tf.math.not_equal(inputs, 0)

    def get_config(self):
        config = super().get_config()
        config.update({
            "output_dim": self.output_dim,
            "sequence_length": self.sequence_length,
            "input_dim": self.input_dim,
        })
        return config
class TransformerDecoder(layers.Layer):
    """Transformer decoder block: causal self-attention, cross-attention
    over encoder outputs, and a position-wise feed-forward projection,
    each followed by a residual connection and layer normalization."""

    def __init__(self, embed_dim, dense_dim, num_heads, **kwargs):
        super().__init__(**kwargs)
        self.embed_dim = embed_dim
        self.dense_dim = dense_dim
        self.num_heads = num_heads
        self.attention_1 = layers.MultiHeadAttention(
            num_heads=num_heads, key_dim=embed_dim)
        self.attention_2 = layers.MultiHeadAttention(
            num_heads=num_heads, key_dim=embed_dim)
        self.dense_proj = keras.Sequential(
            [layers.Dense(dense_dim, activation="relu"),
             layers.Dense(embed_dim),]
        )
        self.layernorm_1 = layers.LayerNormalization()
        self.layernorm_2 = layers.LayerNormalization()
        self.layernorm_3 = layers.LayerNormalization()
        self.supports_masking = True

    def get_config(self):
        config = super().get_config()
        config.update({
            "embed_dim": self.embed_dim,
            "num_heads": self.num_heads,
            "dense_dim": self.dense_dim,
        })
        return config

    def get_causal_attention_mask(self, inputs):
        """Build a lower-triangular (batch, seq, seq) mask so each position
        attends only to itself and earlier positions."""
        input_shape = tf.shape(inputs)
        batch_size, sequence_length = input_shape[0], input_shape[1]
        i = tf.range(sequence_length)[:, tf.newaxis]
        j = tf.range(sequence_length)
        mask = tf.cast(i >= j, dtype="int32")
        mask = tf.reshape(mask, (1, input_shape[1], input_shape[1]))
        mult = tf.concat(
            [tf.expand_dims(batch_size, -1),
             tf.constant([1, 1], dtype=tf.int32)], axis=0)
        return tf.tile(mask, mult)

    def call(self, inputs, encoder_outputs, mask=None):
        causal_mask = self.get_causal_attention_mask(inputs)
        # BUG FIX: the original left `padding_mask` undefined when
        # `mask is None`, raising a NameError at the attention_2 call.
        # With None, MultiHeadAttention applies no extra mask.
        padding_mask = None
        if mask is not None:
            # Combine the padding mask with the causal mask element-wise.
            padding_mask = tf.cast(
                mask[:, tf.newaxis, :], dtype="int32")
            padding_mask = tf.minimum(padding_mask, causal_mask)
        attention_output_1 = self.attention_1(
            query=inputs,
            value=inputs,
            key=inputs,
            attention_mask=causal_mask)
        attention_output_1 = self.layernorm_1(inputs + attention_output_1)
        attention_output_2 = self.attention_2(
            query=attention_output_1,
            value=encoder_outputs,
            key=encoder_outputs,
            attention_mask=padding_mask,
        )
        attention_output_2 = self.layernorm_2(
            attention_output_1 + attention_output_2)
        proj_output = self.dense_proj(attention_output_2)
        return self.layernorm_3(attention_output_2 + proj_output)
```
**A simple Transformer-based language model**
```
from tensorflow.keras import layers
embed_dim = 256
latent_dim = 2048
num_heads = 2
inputs = keras.Input(shape=(None,), dtype="int64")
x = PositionalEmbedding(sequence_length, vocab_size, embed_dim)(inputs)
# Decoder-only language model: the sequence attends over itself (x passed
# as both the decoder input and the "encoder outputs").
x = TransformerDecoder(embed_dim, latent_dim, num_heads)(x, x)
outputs = layers.Dense(vocab_size, activation="softmax")(x)
model = keras.Model(inputs, outputs)
model.compile(loss="sparse_categorical_crossentropy", optimizer="rmsprop")
```
### A text-generation callback with variable-temperature sampling
**The text-generation callback**
```
import numpy as np
tokens_index = dict(enumerate(text_vectorization.get_vocabulary()))
def sample_next(predictions, temperature=1.0):
    """Sample one token index from `predictions` after temperature reweighting.

    Lower temperatures concentrate probability on the most likely tokens;
    higher temperatures flatten the distribution.
    """
    logits = np.log(np.asarray(predictions).astype("float64")) / temperature
    probs = np.exp(logits)
    probs = probs / np.sum(probs)
    # A single multinomial draw yields a one-hot vector; its argmax is the
    # sampled token index.
    draw = np.random.multinomial(1, probs, 1)
    return np.argmax(draw)
class TextGenerator(keras.callbacks.Callback):
    """Callback that generates sample text from a fixed prompt at several
    sampling temperatures at the end of (every `print_freq`-th) epoch."""

    def __init__(self,
                 prompt,
                 generate_length,
                 model_input_length,
                 temperatures=(1.,),
                 print_freq=1):
        self.prompt = prompt
        self.generate_length = generate_length
        self.model_input_length = model_input_length
        self.temperatures = temperatures
        self.print_freq = print_freq

    def on_epoch_end(self, epoch, logs=None):
        if (epoch + 1) % self.print_freq != 0:
            return
        for temperature in self.temperatures:
            print("== Generating with temperature", temperature)
            sentence = self.prompt
            for i in range(self.generate_length):
                tokenized_sentence = text_vectorization([sentence])
                predictions = self.model(tokenized_sentence)
                # BUG FIX: the original never passed `temperature` here, so
                # every temperature in the loop sampled at the default 1.0.
                next_token = sample_next(predictions[0, i, :], temperature)
                sampled_token = tokens_index[next_token]
                sentence += " " + sampled_token
            print(sentence)
# Seed prompt every generated continuation starts from.
prompt = "This movie"
# Sample 50 tokens at several temperatures (low = conservative, high = random).
text_gen_callback = TextGenerator(
    prompt,
    generate_length=50,
    model_input_length=sequence_length,
    temperatures=(0.2, 0.5, 0.7, 1., 1.5))
```
**Fitting the language model**
```
model.fit(lm_dataset, epochs=200, callbacks=[text_gen_callback])
```
### Wrapping up
| github_jupyter |
```
import os
import sys
module_path = os.path.abspath(os.path.join('../../src'))
print(module_path)
if module_path not in sys.path:
sys.path.append(module_path)
import json
import csv
!wget https://raw.githubusercontent.com/audioset/ontology/master/ontology.json
!wget http://storage.googleapis.com/us_audioset/youtube_corpus/v1/csv/unbalanced_train_segments.csv
# Load the AudioSet ontology: a list of label records with "id"/"name" fields.
with open("ontology.json") as f:
    ont=json.load(f)
len(ont)
# Count how many segments carry each label id.
histogram={}
with open("unbalanced_train_segments.csv") as f:
    segments=csv.reader(f)
    # The CSV begins with three comment/header rows; skip them all.
    header=next(segments)
    header=next(segments)
    header=next(segments)
    for line in segments:
#         print(line[3:])
        if len(line)>3:
            # The label list is quoted as "id1, id2, ..."; strip the quotes
            # from the first and last label fields.
            line[3]=line[3].strip()[1:]
            line[-1]=line[-1].strip()[:-1]
        for label in line[3:]:
            histogram.setdefault(label,0)
            histogram[label]+=1
total_count=sum(histogram.values())
tag2labelmappings={
"Songbird": ['Bird','Owl','Bird vocalization, bird call, bird song',
'Pigeon, dove','Coo','Chirp, tweet','Squawk',
'Bird flight, flapping wings','Gull, seagull','Chirp tone',
'Hoot'],
"Water Bird":['Duck','Goose','Quack','Frog',"Croak",'Caw'],
"Insect": ['Fly, housefly','Insect', 'Bee, wasp, etc.', 'Buzz','Mosquito',"Cricket","Rustle"],
"Vehicle": ['Vehicle','Car', 'Engine','Boat, Water vehicle','Train','Rail transport',
'Motor vehicle (road)','Railroad car, train wagon',
'Motorboat, speedboat', 'Motorcycle','Subway, metro, underground',
'Lawn mower','Electric shaver, electric razor',
'Jet engine', 'Light engine (high frequency)','Sewing machine','Power tool'],
"Aircraft": ['Engine','Fixed-wing aircraft, airplane','Aircraft engine',
'Propeller, airscrew','Aircraft', 'Helicopter'],
"Running Water":["Waterfall","Waves, surf"],
"Rain":["Rain","Raindrop","Rain on surface",],
"Cable":["Bang","Slap, smack","Whack, thwack",
"Smash, crash","Breaking","Knock","Tap",
"Thump, thud","Whip","Flap","Clip-clop"],
"Wind":["Wind","Howl"]}
del tag2labelmappings["Vehicle"]
# Invert tag->labels into label->tags (a label name may map to several tags).
label2tagmappings={}
for tag in tag2labelmappings:
    for label in tag2labelmappings[tag]:
        label2tagmappings.setdefault(label,[])
        label2tagmappings[label].append(tag)
# Flat list of every ontology label name referenced by any tag.
all_val=[]
for val in tag2labelmappings.values():
    all_val.extend(val)
# Cross-check: which of those names actually exist in the ontology?
found=[]
for label in ont:
    if label["name"] in all_val:
#         print(label["name"])
        found.append(label["name"])
len(set(found)),len(set(all_val))
set(all_val).difference(set(found))
label
# Accumulate segment counts per label name and per tag to estimate coverage.
count=0
hist_label={}
hist_tag={}
for label in ont:
    if label["name"] in all_val:
#         print(label["name"])
        x=histogram.get(label["id"],0)
        hist_label.setdefault(label["name"],0)
        hist_label[label["name"]]+=x
        # NOTE(review): only the first tag of a multi-tag label is credited
        # here — confirm that is intended.
        tag=label2tagmappings[label["name"]][0]
        hist_tag.setdefault(tag,0)
        hist_tag[tag]+=x
        count+=x
        if x ==0:
            # Report labels that never occur in the segments file.
            print(label["name"])
#             found.append(label["name"])
# Total covered occurrences and their fraction of all label occurrences.
count, count/total_count
# histogram.keys()
a=list(hist_label.items())
a.sort(key=lambda x: x[1],reverse=True)
a
# a
b=list(hist_tag.items())
b.sort(key=lambda x: x[1],reverse=True)
b
other_hist={"Songbird":0.18,
"Water Bird": 0.088,
"Insect": 0.085,
"Running Water":0.094,
"Rain" :0.046,
"Cable": 0.2,
"Wind" :0.29,
"Aircraft" :0.013}
for k in b:
print(k[0],"{:.2}, {:.2}, {:.3}".format(k[1]/count,other_hist[k[0]],(k[1]/count)/other_hist[k[0]] ))
# Comparison notes: AudioSet has ~2x more Songbird and ~17x more Aircraft;
# our data has ~2x more Cable and ~5x more Wind.
```
| github_jupyter |
<a href="https://colab.research.google.com/github/gtbook/robotics/blob/main/S73_drone_sensing.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
%pip install -q -U gtbook
import math
import numpy as np
import plotly.express as px
import pandas as pd
try:
import google.colab
except:
import plotly.io as pio
pio.renderers.default = "png"
import gtsam
from gtbook.drone import axes
```
# Sensing for Drones
> Drones rely on inertial sensing and cameras.
**This Section is still in draft mode and was released for adventurous spirits (and TAs) only.**
```
from gtbook.display import randomImages
from IPython.display import display
display(randomImages(7, 3, "steampunk", 1))
```
## Inertial Measurement Units
Inertial measurement units (IMUs) often bundle the following three sensors: gyroscopes, accelerometers, and magnetometers.
### Gyroscopes
Drones fly in 3D, and have to carefully control their attitude.
A **gyroscope** measures *changes* in orientation around three orthogonal axes, i.e., an angular velocity $\omega$.
We can integrate the angular velocity $\omega(t)$ over time:
$$
R^n_b(t) = R^n_b(0) \int_{\tau=0}^t \exp \hat{\omega}(\tau) d\tau
$$
Above, the exponential map $\exp \hat{\omega}(\tau)$ is as defined in the previous section, where we have also shown how to numerically integrate forward in rotation space.
However, because the gyroscope measurements are corrupted by random noise, this noise is also integrated over time.
Even more damning, if we do *not* know the bias, the error grows linearly over time, proportional to the bias error.
Both effects mean that we will gradually lose track of the correct attitude $R^n_b(t)$, a process known as "drift".
Good (expensive) gyroscopes are able to track the attitude for a long time, whereas cheaper (MEMS) gyroscopes such as those found in drones (and phones) can drift from a usable attitude estimate in 100s or even 10s of seconds.
### Accelerometers
An accelerometer measures the linear acceleration in 3D.
The most frequent and dependable use of an accelerometer is to "aid" a gyroscope, maintaining absolute orientation over time.
As discussed above, integrating the angular velocity $\omega(t)$ over time accumulates error.
Because gravity is such a strong signal it often dwarfs the accelerations due to maneuvering, and hence we can use it to correct our attitude estimate $\hat{R}^n_b$. This is known as "aiding" the gyroscope. Note that the attitude has *three* degrees of freedom, and the accelerometer can only correct two of them: pitch and roll. The absolute heading of the drone is still *unobservable*.
In theory, we can also *doubly* integrate the measured acceleration to obtain the position $r^n(t)$.
However, because of the double integration, the effect of random noise and bias error is doubly compounded, making the use of an accelerometer for estimating a drone's position rather tricky. It can be done, but it requires great care and careful initialization of the biases. In fact, aircraft equipped with *inertial navigation systems* typically have an "INS alignment procedure" where the aircraft remains stationary on the taxi-way for a short period prior to take-off. Even then, unless the accelerometer can be aided using absolute sources of position, such as GPS, an INS is bound to diverge sooner or later.
### Magnetometers
A magnetometer measures a 3D vector that points along Earth's local magnetic field.
The magnetic field roughly points to the magnetic north, although it really is 3-dimensional.
A magnetometer is really like a sophisticated compass, and hence its main use is to "aid" the gyroscope, providing a measurement on the absolute heading, i.e., the last remaining degree of freedom.
## Camera Extrinsics
The second frequently used sensor for drones is a camera, or multiple cameras. Cameras are light-weight, cheap, and they provide some amazing capabilities. By tracking features in the image(s) over time, cameras can provide relative motion measurements, i.e., **visual odometry**. If you have a pre-existing map of the environment, you can use a camera to **localize**, providing absolute orientation and position even without an IMU. *If* an IMU is available it can be used to track the high frequency *relative* motion of the drone, while the visual information provides a lower frequency but *absolute* measurement of the drone's pose. In that way, IMU and camera measurements are perfectly complementary. In addition, if no map is available, cameras can be used to build a 3-D map of the environment in real time, using a paradigm called **visual SLAM**.
In this section we will concentrate on the extrinsic calibration of cameras and camera rigs.
We already discussed cameras as sensors in section 5.2, including their *intrinsic* calibration parameters such as focal length, image center, etc.
However, when using cameras on a drone, it is very important to know the relative position and orientation of the camera with respect to the drone's body frame, the so called **extrinsic calibration parameters**, consisting of a position $t^b_c$ and orientation $R^b_c$ of the camera in the body frame.
We first need to specify the *position* of the camera(s) on the drone.
Recall that the drone *body coordinate frame* is forward-left-up (FLU), and hence this is how we need to think about where the camera is:
a camera towards the front of the drone will have a positive $X$ value, etc. Below is a simple example with two cameras in front and one towards the back of the drone:
```
t1 = gtsam.Point3(0.1, 0.05, 0.01) # front-left
t2 = gtsam.Point3(0.1,-0.05, 0.01) # front-right
t3 = gtsam.Point3(-0.1, 0, 0.01) # back
fig = px.scatter_3d(x=[t1[0], t2[0], t3[0]], y=[t1[1], t2[1], t3[1]], z=[t1[2], t2[2], t3[2]])
fig.add_traces(axes(gtsam.Pose3(), scale=0.08, labels=["F","L","U"])) # add FLU drone body frame
fig.update_yaxes(scaleanchor = "x", scaleratio = 1);
fig.show()
```
To specify the *orientation* $R^b_c$ for each of the cameras, we need to remember that (a) the Z-axis points into the scene, and (b) the Y-axis points down. The easiest way to specify this is by using the `gtsam.Rot3` constructor that takes three column vectors:
```
F,L,U = np.eye(3)
bTc1 = gtsam.Pose3(gtsam.Rot3(-L,-U,F), t1); fig.add_traces(axes(bTc1, scale=0.05, labels=["X1","Y1","Z1"]))
bTc2 = gtsam.Pose3(gtsam.Rot3(-L,-U,F), t2); fig.add_traces(axes(bTc2, scale=0.05, labels=["X2","Y2","Z2"]))
bTc3 = gtsam.Pose3(gtsam.Rot3(L,-U,-F), t3); fig.add_traces(axes(bTc3, scale=0.05, labels=["X3","Y3","Z3"]))
fig.show()
```
Try to understand the code above, which made camera 1 and 2 look forward ($F$), creating a *forward-looking stereo pair*, and camera 3 look backwards ($-F$). Especially for visual odometry, which we will cover in the next section, having both forward and backward looking cameras is a good idea, yielding high quality estimates of the drone's rotation. Cameras pointed to the side will often suffer from motion blur in forward flight mode, especially with close obstacles at high speed.
## Projecting 3D Points
> Transforming from navigation to drone to camera.
We already know how to project points specified in the camera frame from chapter 5. However, for visual odometry or visual slam, an additional step is needed: we need to transform the 3-D points from the navigation frame into the camera frame. This does not only involve the camera extrinsics, but also the drone's pose in the navigation frame itself. Let us start by reviewing the fundamental projection equation from chapter 5:
$$
u = u_0 + f \frac{X^c}{Z^c} ~~~~ v = v_0 + f \frac{Y^c}{Z^c}.
$$
where $u_0$, $v_0$, and $f$ are the *intrinsic* camera calibration parameters, and $P^c=(X^c,Y^c,Z^c)$ are the coordinates of a 3D point in the *camera* coordinate frame, hence the superscript $c$. However, what if we are given the 3D coordinates $P^n=(X^n,Y^n,Z^n)$ in the ENU navigation frame, rather than in the camera frame? Because the camera is mounted on the drone, we have to do this in two steps:
- convert from navigation to body frame: $P^b = (R^n_b)^T (P^n - t^n_b)$
- convert from body to camera frame: $P^c = (R^b_c)^T (P^b - t^b_c)$
where $T^n_b=(R^n_b,t^n_b)$ is the drone's FLU body pose with respect to the ENU navigation frame, and $T^b_c=(R^b_c,t^b_c)$ is the camera extrinsics specified in that body frame. In case there are multiple cameras the first conversion needs to be done only once, but the second conversion will be camera specific.
## A Stereo Example in Code
> It all works, folks!
As an example, let us assume the drone is at position $t^n_b=(100, 300, 10)$, i.e., 10 meters high, flying north:
```
E,N,U = np.eye(3)
ntb = gtsam.Point3(100, 300, 10)
nRb = gtsam.Rot3(N,-E,U) # flying north, left of drone facing west
nTb = gtsam.Pose3(nRb, ntb)
```
Let's project a point $P^n=(103,310,12)$ 10 meters in front of the drone (check this!) into the stereo pair. We make use of the GTSAM method `Pose3.TransformTo` to convert from navigation to body (once) and then from body to camera (twice):
```
wP = gtsam.Point3(103,310,12)
bP = nTb.transformTo(wP)
print(f"bP = {bP} in (F,L,U) body frame")
c1P = bTc1.transformTo(bP)
print(f"c1P = {c1P} in camera frame 1")
c2P = bTc2.transformTo(bP)
print(f"c2P = {c2P} in camera frame 2")
```
As you can see, the point in body coordinates is $10m$ ahead, because the x-coordinate is $10$ in the FLU body frame. Moreover, the points expressed in the two forward-looking camera frames are identical *except* for the X coordinates, which is exactly what we expect from a stereo rig. We can then apply the intrinsics to get the final image coordinates, for example using a $640\times 480$ image and a focal length of $300$:
```
w, h, f = 640, 480, 300
u0, v0 = float(w/2), float(h/2)
u1, v1 = u0 + f * c1P[0]/c1P[2], v0 + f * c1P[1]/c1P[2]
print(f"u1,v1 = {np.round([u1,v1],1)} in image 1")
u2, v2 = u0 + f * c2P[0]/c2P[2], v0 + f * c2P[1]/c2P[2]
print(f"u2,v2 = {np.round([u2,v2],1)} in image 2")
```
Again, exactly what we expect for a stereo rig. In this case the disparity is $412.4-409.4=3$ pixels, and if we plug that into the fundamental stereo equation from Section 5.2, with baseline $10cm$ (check the extrinsics!), we indeed obtain that the point is at a depth of $10m$:
$$
Z = B \frac{f}{d} = 0.1 \frac{300}{3} = 10
$$
| github_jupyter |
```
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
EXPERIMENT = 'pairwise_power'
# Results were written in two halves; `sep=', '` requires the python engine.
df_1 = pd.read_csv(f'./results/{EXPERIMENT}_results_first_half.csv', sep=', ', engine='python')
df_2 = pd.read_csv(f'./results/{EXPERIMENT}_results_second_half.csv', sep=', ', engine='python')
# Shift the second half's parameter indices so they continue after the first.
df_2['params_index'] += df_1['params_index'].max() + 1
df = pd.concat([df_1, df_2], ignore_index=True)
# Drop rows where either metric is missing.
df = df.loc[df['Precision'].notna(), :]
df = df.loc[df['Average precision'].notna(), :]
df['Fraction of shifting mechanisms'] = df['sparsity'] / df['n_variables']
# plot_df = df.melt(
# id_vars=[
# 'params_index', 'n_variables', 'n_total_environments', 'sparsity', 'Fraction of shifting mechanisms',
# 'sample_size', 'dag_density', 'reps', 'data_simulator', 'dag_simulator',
# 'Method', 'Number of environments', 'Rep', 'MEC size', 'Soft'],
# # value_vars=['True orientation rate', 'False orientation rate', 'Average precision'], # 'Fraction of possible DAGs'],
# value_vars=['Precision', 'Recall'], #'Average precision'],
# var_name='Metric',
# value_name='Average fraction',
# )
plot_df = df.rename(
{
'n_variables': 'Number of variables',
'dag_density': 'Edge density',
'sparsity': 'Number of shifts',
'sample_size': '# samples',
'n_total_environments': 'Total environments',
}, axis=1
).replace(
{'er': 'Erdos-Renyi', 'ba': 'Hub', 'PC (pool all)': 'Full PC (oracle)'}
)
sns.set_context('paper')
grid_vars = [
# 'Number of environments',
'Fraction of shifting mechanisms', 'Edge density', 'Number of variables'
]
n_settings = [5, 3, 3]
indices = [
(a, b) for a, b in zip(np.cumsum([0] + n_settings)[:-1], np.cumsum(n_settings))
]
for graph_model in plot_df['dag_simulator'].unique():
fig, axes = plt.subplots(3, len(grid_vars), sharey='row', sharex='col', figsize=(3*len(grid_vars), 8))
for row, metric in zip(axes, ['Recall', 'Precision', 'Average precision']):
# for row, metric in zip(axes, ['Recall', 'Precision', 'Average precision']):
for g_var, (lb, ub), ax in zip(grid_vars, indices, row):
sns.lineplot(
data=plot_df[
(plot_df['params_index'] >= lb)
& (plot_df['params_index'] < ub)
& (plot_df['dag_simulator'] == graph_model)
& (~plot_df['Method'].isin(['Full PC (oracle)']))
& (plot_df['Soft'] == False)
# IMPORTANT! otherwise average over all number of environments
& (
(plot_df['Number of environments'] == plot_df['Number of environments'].max())
if not (g_var == 'Number of environments') else True)
],
# data=plot_df.groupby([g_var, 'Test']).mean().reset_index(),
x=g_var,
y=metric,
hue='Method',
ax=ax,
palette=[
sns.color_palette("tab10")[i]
for i in [1, 2, 3, 4, 5]
],
# markers=['d', 'P', 's'],
# palette='Set1',
legend='full',
# alpha=1,
)
# ax.axvline(0.05, ls=':', c='grey')
# ax.set_title('')
# ax.legend(loc='upper right')
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
for ax in np.concatenate(axes)[:-1]:
ax.get_legend().remove()
plt.ylim([0.4, 1.03])
# plt.suptitle(f'DAG model: {graph_model}', fontsize=14, y=0.96, x=0.45)
plt.tight_layout()
# plt.savefig(f'./figures/empirical_select_rates_{graph_model}.pdf')
plt.show()
```
## multi-panel
```
EXPERIMENT = 'pairwise_power'
df = pd.read_csv(f'./results/{EXPERIMENT}_results_50_reps.csv', sep=', ', engine='python')
df['Fraction of shifting mechanisms'] = df['sparsity'] / df['n_variables']
# plot_df = df.melt(
# id_vars=[
# 'params_index', 'n_variables', 'n_total_environments', 'sparsity', 'Fraction of shifting mechanisms',
# 'sample_size', 'dag_density', 'reps', 'data_simulator', 'dag_simulator',
# 'Method', 'Number of environments', 'Rep', 'MEC size', 'Soft'],
# # value_vars=['True orientation rate', 'False orientation rate', 'Average precision'], # 'Fraction of possible DAGs'],
# value_vars=['Precision', 'Recall'], #'Average precision'],
# var_name='Metric',
# value_name='Average fraction',
# )
df = df.rename(
{
'n_variables': 'Number of variables',
'dag_density': 'Edge density',
'sparsity': 'Number of shifts',
'sample_size': '# samples',
'n_total_environments': 'Total environments',
}, axis=1
).replace(
{'er': 'Erdos-Renyi', 'ba': 'Hub', 'PC (pool all)': 'Full PC (oracle)'}
)
sns.set_context('paper')
plot_df = df
grid_vars = [
# 'Number of environments',
'Fraction of shifting mechanisms', 'Edge density', 'Number of variables'
]
# indices = [
# (16, 30), (8, 16), (30, 40), (0, 8),
# ]
indices = [
(4, 9), (9, 12), (0, 4)
]
for graph_model in plot_df['dag_simulator'].unique():
fig, axes = plt.subplots(3, len(grid_vars), sharey='row', sharex='col', figsize=(3*len(grid_vars), 8))
# for row, metric in zip(axes, ['True orientation rate', 'False orientation rate', 'Average precision']):
for row, metric in zip(axes, ['Recall', 'Precision', 'Average precision']):
for g_var, (lb, ub), ax in zip(grid_vars, indices, row):
sns.lineplot(
data=plot_df[
(plot_df['params_index'] >= lb)
& (plot_df['params_index'] < ub)
& (plot_df['dag_simulator'] == graph_model)
& (plot_df['Soft'] == False)
# IMPORTANT! otherwise average over all number of environments
& (
(plot_df['Number of environments'] == plot_df['Number of environments'].max())
if not (g_var == 'Number of environments') else True)
],
# data=plot_df.groupby([g_var, 'Test']).mean().reset_index(),
x=g_var,
y=metric,
hue='Method',
ax=ax,
# markers=['d', 'P', 's'],
# palette='Set1',
legend='full',
# alpha=1,
)
# ax.axvline(0.05, ls=':', c='grey')
# ax.set_title('')
# ax.legend(loc='upper right')
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
for ax in np.concatenate(axes)[:-1]:
ax.get_legend().remove()
plt.ylim([0.4, 1.03])
# plt.suptitle(f'DAG model: {graph_model}', fontsize=14, y=0.96, x=0.45)
plt.tight_layout()
# plt.savefig(f'./figures/empirical_select_rates_{graph_model}.pdf')
plt.show()
```
## 2 environments, old
```
sns.set_context('paper')
plot_df = df
grid_vars = [
# 'Number of environments',
'Fraction of shifting mechanisms', 'Edge density', 'Number of variables'
]
# indices = [
# (16, 30), (8, 16), (30, 40), (0, 8),
# ]
indices = [
(5, 10), (10, 14), (0, 5)
]
for graph_model in plot_df['dag_simulator'].unique():
fig, axes = plt.subplots(3, len(grid_vars), sharey='row', sharex='col', figsize=(3*len(grid_vars), 8))
for row, metric in zip(axes, ['True orientation rate', 'False orientation rate', 'Average precision']):
# for row, metric in zip(axes, ['Recall', 'Precision', 'Average precision']):
for g_var, (lb, ub), ax in zip(grid_vars, indices, row):
sns.lineplot(
data=plot_df[
(plot_df['params_index'] >= lb)
& (plot_df['params_index'] < ub)
& (plot_df['dag_simulator'] == graph_model)
# IMPORTANT! otherwise average over all number of environments
& (
(plot_df['Number of environments'] == plot_df['Number of environments'].max())
if not (g_var == 'Number of environments') else True)
],
# data=plot_df.groupby([g_var, 'Test']).mean().reset_index(),
x=g_var,
y=metric,
hue='Method',
ax=ax,
# markers=['d', 'P', 's'],
# palette='Set1',
legend='full',
# alpha=1,
)
# ax.axvline(0.05, ls=':', c='grey')
# ax.set_title('')
# ax.legend(loc='upper right')
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
for ax in np.concatenate(axes)[:-1]:
ax.get_legend().remove()
plt.ylim([0.4, 1.03])
# plt.suptitle(f'DAG model: {graph_model}', fontsize=14, y=0.96, x=0.45)
plt.tight_layout()
# plt.savefig(f'./figures/empirical_select_rates_{graph_model}.pdf')
plt.show()
```
## vary sparsity
```
EXPERIMENT = 'pairwise_power'
df = pd.read_csv(f'./results/{EXPERIMENT}_results_sparsity.csv', sep=', ', engine='python')
plot_df = df.melt(
id_vars=[
'params_index', 'n_variables', 'n_total_environments', 'sparsity',
'sample_size', 'dag_density', 'reps', 'data_simulator', 'dag_simulator',
'Method', 'Number of environments', 'Rep', 'MEC size', 'Soft'],
# value_vars=['True orientation rate', 'False orientation rate', 'Average precision'], # 'Fraction of possible DAGs'],
value_vars=['Precision', 'Recall', 'Average precision'],
var_name='Metric',
value_name='Average fraction',
)
for ds in df['dag_simulator'].unique():
g = sns.relplot(
data=plot_df[
# (plot_df['sample_size'] == plot_df['sample_size'].max())
# & (plot_df['dag_simulator'] == ds)
# (plot_df['Method'].isin(['Full PC (oracle)', 'Min changes (oracle)', 'Min changes (KCI)']))
(plot_df['Soft'] == False)
# & (plot_df['sparsity'].isin([2, 4]))
],
x='sparsity',
y='Average fraction',
hue='Method',
# row='sparsity',
col='Metric',
# ci=None,
kind='line',
# style='Soft',
# height=3,
# aspect=2, # 3,
# legend='Full',
facet_kws={'sharey': False, 'sharex': True},
)
# col_vals = g.data[g._col_var].unique()
# for c, col_val in enumerate(col_vals):
# g.axes[0, c].set_ylabel(col_val, visible=True)
# row_vals = g.data[g._row_var].unique()
# for r, row_val in enumerate(row_vals):
# for c, col_val in enumerate(col_vals):
# g.axes[r, c].set_title(f'{g._row_var} = {row_val}')
# g.axes[r, c].set_ylabel(col_val, visible=True)
# g.fig.suptitle(f'DAG model: {title_dict[ds]}', fontsize=14, y=1.02, x=0.45)
plt.subplots_adjust(wspace=0.06)
plt.show()
```
## vary n_vars, fixed sparsity ratio
```
EXPERIMENT = 'pairwise_power'
df = pd.read_csv(f'./results/{EXPERIMENT}_results_n_vars.csv', sep=', ', engine='python')
plot_df = df.melt(
id_vars=[
'n_variables', 'n_total_environments', 'sparsity',
'sample_size', 'dag_density', 'reps', 'data_simulator', 'dag_simulator',
'Method', 'Number of environments', 'Rep', 'MEC size', 'Soft'],
# value_vars=['True orientation rate', 'False orientation rate', 'Average precision'], # 'Fraction of possible DAGs'],
value_vars=['Precision', 'Recall', 'Average precision'],
var_name='Metric',
value_name='Average fraction',
)
sns.set_context('paper')
for ds in df['dag_simulator'].unique():
g = sns.relplot(
data=plot_df[
# (plot_df['sample_size'] == plot_df['sample_size'].max())
# & (plot_df['dag_simulator'] == ds)
(plot_df['Method'].isin(['Full PC (oracle)', 'Min changes (oracle)', 'Min changes (GAM)']))
# (plot_df['Soft'] == False)
& (plot_df['Number of environments'] == 2)
],
x='n_variables',
y='Average fraction',
hue='Method',
# row='sparsity',
col='Metric',
# ci=None,
kind='line',
style='Soft',
height=3,
# aspect=2, # 3,
# legend='Full',
facet_kws={'sharey': False, 'sharex': True},
)
# col_vals = g.data[g._col_var].unique()
# for c, col_val in enumerate(col_vals):
# g.axes[0, c].set_ylabel(col_val, visible=True)
# row_vals = g.data[g._row_var].unique()
# for r, row_val in enumerate(row_vals):
# for c, col_val in enumerate(col_vals):
# g.axes[r, c].set_title(f'{g._row_var} = {row_val}')
# g.axes[r, c].set_ylabel(col_val, visible=True)
# g.fig.suptitle(f'DAG model: {title_dict[ds]}', fontsize=14, y=1.02, x=0.45)
# plt.subplots_adjust(wspace=0.06)
plt.show()
```
| github_jupyter |
Our first [public data set](http://deeplearnphysics.org/DataChallenge/#ChallengeClassification) is available! We are very excited for this 1st step to have public data set with [training tutorials](http://deeplearnphysics.org/Blog/tutorial_summary.html). This notebook is an announcement + browsing of the file contents. This may be a useful guide before jumping into the tutorial. Since this notebook is a bit lengthy, here's an outline of what's covered.
0. [Overall summary of sample generation configuration](#overview)
1. [Exploring file contents](#file_contents)
2. [Example image dump](#image_dump) (data analysis)
3. [Particle-wise event fraction](#event_fraction) (data analysis)
4. [Energy and momentum distribution](#kinematics) (data analysis)
5. [Image filtering effect](#filter_effect) (data analysis)
Before anything, make sure you have necessary software stacks (`larcv`, python packages) and can execute these imports.
```
from __future__ import print_function
import ROOT
from ROOT import TChain
from larcv import larcv
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
```
<a name="overview"></a>
## Conditions to make the samples
Here's the big picture:
* All files (train, test, and tutorial files) contain single-particle events.
* This means each 2D image contain only 1 particle.
* There are five particle types including electron, gamma ray, muon, pion, and proton.
* An event fraction per particle type is equal for all samples.
* A particle's generation point is uniformly distributed within 5.12 m cubic volume.
* A particle's (momentum) direction is isotropic.
* A particle's momentum is uniformly distributed in the range specified per type.
* electrons ... 35.5 to 800 MeV/c
* gamma rays ... 35 to 800 MeV/c
* muons ... 90 to 800 MeV/c
* pions ... 105 to 800 MeV/c
* protons ... 275 to 800 MeV/c
* 2.56 m cubic 3D volume is chosen to maximize the particle's trajectory within the volume and recorded in the file.
* 2D images are created as 2D projections (xy, yz, zx) of the 3D data
* These are 3 channels of 2D images in each file.
* Events that contain any 2D projection **image with less than 10 non-zero pixels are filtered out.**
  * This is to remove empty and almost empty images from the set.
<a name="file_contents"></a>
## Exploring the contents
I think the file description is best done with the data. So let's explore them in this notebook. This also serves as an example of how to browse through `larcv` data products! We use this [test file](http://deeplearnphysics.org/DataChallenge/#ChallengeClassification) which I already downloaded in this directory. Let's check what data products are in the file. But **before going further**, please go over [this tutorial](http://deeplearnphysics.org/Blog/tutorials/tutorial-02.html) if you have not done so!
```
ROOT.TFile.Open('test_40k.root').ls()
```
As described in the linked tutorial, TTree names tell us what they are: we got `image2d` and `particle` data products with "data" and "mctruth" instance labels. `image2d` is, as its name suggests, 2D image.
<a name="image_dump"></a>
## Closer look: image
Let's take a peek of it using [TChain](https://root.cern.ch/root/html526/TChain.html), the same technique introduced in the linked tutorial.
```
from larcv import larcv
# Create TChain, count # of entries
chain_image2d = ROOT.TChain('image2d_data_tree')
chain_image2d.AddFile('test_40k.root')
print(chain_image2d.GetEntries(),'entries found!')
# Get a specific event (first entry)
chain_image2d.GetEntry(0)
cpp_object = chain_image2d.image2d_data_branch
print('Object type:',cpp_object)
# Get std::vector<larcv::Image2D> (one Image2D per 2D projection)
image2d_array = cpp_object.as_vector()
# Dump images, one subplot per projection
fig, axes = plt.subplots(1, image2d_array.size(), figsize=(12,4), facecolor='w')
for index,image2d in enumerate(image2d_array):
    image2d_numpy = larcv.as_ndarray(image2d)
    axes[index].imshow(image2d_numpy, interpolation='none',cmap='jet')
    # Find bounds for non-zero pixels + padding of 5 pixels
    nz_pixels=np.where(image2d_numpy>0.0)
    ylim = (np.min(nz_pixels[0])-5,np.max(nz_pixels[0])+5)
    xlim = (np.min(nz_pixels[1])-5,np.max(nz_pixels[1])+5)
    # Adjust for allowed image range.
    # Bug fix: rows (y) are axis 0 and columns (x) are axis 1 of the numpy
    # array; the original clamped y with shape[1] and x with shape[0]
    # (swapped), which is only harmless for square images.
    ylim = (np.max((ylim[0],0)), np.min((ylim[1],image2d_numpy.shape[0]-1)))
    xlim = (np.max((xlim[0],0)), np.min((xlim[1],image2d_numpy.shape[1]-1)))
    # Set range
    axes[index].set_ylim(ylim)
    axes[index].set_xlim(xlim)
plt.show()
```
Looks like this is an electron EM-shower. Why electron and not gamma ray? That's because, to my eyes, this looks like low dE/dX at the beginning of the shower trunk. But to check if I am right or not, we have to access simulation information (coming up next)!
<a name="event_fraction"></a>
## Closer look: particle (label)
We can look at `particle` information in the file, which you will use to generate a _label_ for image classification training, to check if the image we saw was electron or not.
```
# Create TChain, count # of entries
chain_particle = ROOT.TChain('particle_mctruth_tree')
chain_particle.AddFile('test_40k.root')
# Bug fix: the original printed chain_image2d's entry count here; this cell
# inspects the particle tree, so report chain_particle's entries instead.
print(chain_particle.GetEntries(),'entries found!')
# Get a specific event (first entry)
chain_particle.GetEntry(0)
cpp_object = chain_particle.particle_mctruth_branch
print('Object type: {}\n'.format(str(cpp_object)))
# Dump particle information
print('Checking particle information for 1st entry...')
for particle in cpp_object.as_vector():
    print('PDG Code: {}'.format(particle.pdg_code()))
    print('Initial energy: {:.3} GeV'.format(particle.energy_init()))
[PDG code](http://pdg.lbl.gov/2007/reviews/montecarlorpp.pdf) 11 corresponds to electron! So I guessed it right :)
<a name="kinematics"></a>
## Closer look: kinematics
Next, let's look at the fraction of each particle type and their energy distributions for fun. We first make `pandas.DataFrame` with numpy arrays for simple analysis and plotting.
```
# Per-event arrays: PDG code, kinetic energy [MeV], and momentum [MeV/c].
pdg_array = np.zeros([chain_particle.GetEntries()],dtype=np.int32)
energy_array = np.zeros([chain_particle.GetEntries()],dtype=np.float64)
momentum_array = np.zeros([chain_particle.GetEntries()],dtype=np.float64)
for index in range(chain_particle.GetEntries()):
    chain_particle.GetEntry(index)
    particle = chain_particle.particle_mctruth_branch.as_vector().front()
    pdg = int(particle.pdg_code())
    # energy_init() is in GeV; convert to MeV.
    total_energy = particle.energy_init() * 1000.
    kinetic_energy = total_energy - larcv.ParticleMass(pdg)
    # Relativistic relation p = sqrt(E^2 - m^2) in natural units.
    momentum = np.sqrt(np.power(total_energy,2) - np.power(larcv.ParticleMass(pdg),2))
    pdg_array[index] = pdg
    energy_array[index] = kinetic_energy
    momentum_array[index] = momentum
    #if momentum > 800:
    #    print(pdg,kinetic_energy,momentum)
import pandas as pd
df = pd.DataFrame(data={'pdg' : pdg_array, 'energy' : energy_array, 'momentum' : momentum_array})
```
List of particles and their event counts:
```
pdg_list, pdg_counts = np.unique(df.pdg.values,return_counts=True)
print('PDGs found: {}'.format(pdg_list))
print('PDG counts: {}'.format(pdg_counts))
```
... as expected, we have equal partition of 5 particle types. Check the range of momentum per particle type:
```
PDG2NAME = {11 : 'electron',
22 : 'gamma',
13 : 'muon',
211 : 'pion',
2212 : 'proton'}
for pdg in pdg_list:
sub_df = df.query('pdg=={}'.format(pdg))
min_value = sub_df.momentum.values.min()
max_value = sub_df.momentum.values.max()
print('{:10s} momentum range: {:.3g} => {:.3g} MeV/c'.format(PDG2NAME[pdg], min_value, max_value))
```
This is also as expected from the prescription. Next is more interesting: plot energy distributions of gamma ray and muon
```
fig, ax = plt.subplots(figsize=(12,8),facecolor='w')
pdg_list = [13,22]
sub_df = df.query('pdg in {}'.format(pdg_list))
min_value = sub_df.energy.values.min()
max_value = sub_df.energy.values.max()
for pdg in pdg_list:
pdg_df = sub_df.query('pdg == {}'.format(pdg))
values = pdg_df.energy.values
plt.hist(values, bins=20, range=(min_value,max_value), label='PDG {}'.format(pdg), alpha=0.5)
plt.tick_params(labelsize=20)
plt.grid(True,which='both')
plt.xlabel('Initial Kinetic Energy [MeV]',fontsize=20,fontweight='bold',fontname='Georgia')
plt.ylabel('Number of entries',fontsize=20,fontweight='bold',fontname='Georgia')
leg=plt.legend(fontsize=16,loc=4)
leg_frame=leg.get_frame()
leg_frame.set_facecolor('white')
plt.show()
```
We see two features here:
* Muon appears to have non-uniform kinetic energy distribution while that of gamma ray appears to be uniform.
* Muon kinetic energy distribution falls off beyond (about) 700 MeV while that of gamma ray continues up to 800 MeV
The first point is due to the fact that particles are generated **uniformly in momentum space** and not in kinetic energy. The non-negligible muon mass (105.6 MeV) in the generated momentum range causes its kinetic energy to be non-uniform. On the other hand, gamma ray has 0 mass (no please don't ask "what if photons are massive..." question). That means momentum = energy, and hence you see a _uniform kinetic energy_ distribution ranging up to 800 MeV.
Let's actually plot the momentum distribution.
```
fig, ax = plt.subplots(figsize=(12,8),facecolor='w')
pdg_list = [13,22]
sub_df = df.query('pdg in {}'.format(pdg_list))
min_value = sub_df.momentum.values.min()
max_value = sub_df.momentum.values.max()
for pdg in pdg_list:
pdg_df = sub_df.query('pdg == {}'.format(pdg))
values = pdg_df.momentum.values
plt.hist(values, bins=20, range=(min_value,max_value), label='PDG {}'.format(pdg), alpha=0.5)
plt.tick_params(labelsize=20)
plt.grid(True,which='both')
plt.xlabel('Initial Momentum [MeV/c]',fontsize=20,fontweight='bold',fontname='Georgia')
plt.ylabel('Number of entries',fontsize=20,fontweight='bold',fontname='Georgia')
leg=plt.legend(fontsize=16,loc=4)
leg_frame=leg.get_frame()
leg_frame.set_facecolor('white')
plt.show()
```
Which looks _almost_ perfect, except there's a drop in the 1st bin for muon. This is a binning effect. Remember the momentum range of muon is set to 90 to 800 MeV/c. If we are to add this condition in our analysis so that we remove the binning effect, we can see a uniform momentum distribution.
```
fig, ax = plt.subplots(figsize=(12,8),facecolor='w')
pdg_list = [13,22]
sub_df = df.query('pdg in {} and momentum > 90'.format(pdg_list))
min_value = sub_df.momentum.values.min()
max_value = sub_df.momentum.values.max()
for pdg in pdg_list:
pdg_df = sub_df.query('pdg == {}'.format(pdg))
values = pdg_df.momentum.values
plt.hist(values, bins=20, range=(min_value,max_value), label='PDG {}'.format(pdg), alpha=0.5)
plt.tick_params(labelsize=20)
plt.grid(True,which='both')
plt.xlabel('Initial Momentum [MeV/c]',fontsize=20,fontweight='bold',fontname='Georgia')
plt.ylabel('Number of entries',fontsize=20,fontweight='bold',fontname='Georgia')
leg=plt.legend(fontsize=16,loc=4)
leg_frame=leg.get_frame()
leg_frame.set_facecolor('white')
plt.show()
```
<a name="filter_effect"></a>
## Closer look: image filtering effect
Recall one of the sample preparation procedures: "_filtering of events for which any 2D image contains less than 10 non-zero pixels_". This was intended to remove (almost) empty images. But it's important to note this condition **filters out a short particle trajectory** which typically means a low energy particle. So this filter biases a particle's energy!
How significant is this effect? How far a particle travels depends on its type because of [different stopping power](http://pdg.lbl.gov/2014/reviews/rpp2014-rev-passage-particles-matter.pdf). In our energy range, the most severely (and actually only) affected is proton. Let's compare proton and gamma ray momentum distribution.
```
fig, ax = plt.subplots(figsize=(12,8),facecolor='w')
pdg_list = [2212,22]
sub_df = df.query('pdg in {}'.format(pdg_list))
min_value = sub_df.momentum.values.min()
max_value = sub_df.momentum.values.max()
for pdg in pdg_list:
pdg_df = sub_df.query('pdg == {}'.format(pdg))
values = pdg_df.momentum.values
plt.hist(values, bins=20, range=(min_value,max_value), label='PDG {}'.format(pdg), alpha=0.5)
plt.tick_params(labelsize=20)
plt.grid(True,which='both')
plt.xlabel('Initial Momentum [MeV/c]',fontsize=20,fontweight='bold',fontname='Georgia')
plt.ylabel('Number of entries',fontsize=20,fontweight='bold',fontname='Georgia')
leg=plt.legend(fontsize=16,loc=2)
leg_frame=leg.get_frame()
leg_frame.set_facecolor('white')
plt.show()
```
From earlier analysis in this notebook, we already know that the momentum range of protons in this sample is 276 to 800 MeV/c. In this plot, we can clearly see that proton events are not uniformly distributed over its momentum range.
So how many protons are affected by the "10 pixels filter"? To figure this out, we have to know what 10 pixels correspond to in the real world's distance scale. This information is stored in 2D image _meta_ data.
```
for index,image2d in enumerate(image2d_array):
print(image2d.meta().dump())
```
"Distance Unit: 1" means it is in cm scale (`larcv::DistanceUnit_t::kUnitCM`). So each 2D image spans 128cm x 128cm space with 256 x 256 pixels, meaning each pixel corresponds to 0.5 cm in vertical and horizontal size. This means a proton must travel at least 5 cm in all 2D projections, or
$$ x^2+y^2 \geq 25 \text{ cm}^2,$$
$$ y^2+z^2 \geq 25 \text{ cm}^2,$$
$$ z^2+x^2 \geq 25 \text{ cm}^2.$$
Summing them over we find $\sqrt{x^2+y^2+z^2}\geq\sqrt{75/2}\approx6.12$ cm as the minimum distance required for a proton to travel. So what proton energy does this trajectory size corresponds to? Here's a plot I made from [NIST](https://www.nist.gov/pml/stopping-power-range-tables-electrons-protons-and-helium-ions) that shows the proton trajectory size in liquid Argon medium versus its kinetic energy. <img src="imgs/proton_dist.png"> ... from which we can tell $\approx6$ cm corresponds to $\approx80$ MeV. Given the proton mass (938.28 MeV), this corresponds to $\approx419$ MeV/c momentum. **Indeed we see a sharp drop** around that momentum value in the plot above.
### What is affecting the higher momentum range?
Note "$\geq$" sign in the equation we derived above: 6.12 cm is the bare minimum requirement. The actual requirement depends on the initial momentum direction because the projected 2D distances depends on that angle and the projection angles. For instance, a proton with momentum higher than $\approx419$ MeV/c may travel $\approx7.1$ cm from point A $(x,y,z) = (0,0,0)$ [cm] to point B $(4.5,4.5,0)$ [cm]. This satisfies the minimum trajectory length requirement. But it fails to satisfy both $y^2+z^2\geq25$ cm and $z^2+x^2\geq25$, so it won't pass the filter.
In addition, the projection angle also affects the projected trajectory size per pixel. 0.5 cm/pixel is the shortest possible case. If a trajectory runs along the diagonal direction of a projected 2D pixel, a requirement of 10 pixels means 7.1 cm instead of 5 cm, an increase of about 41% in the required trajectory size.
### Why are there entries in the lower momentum range?
There could be more than one reason. But one thing we definitely expect is a production of other particle types through a proton-nucleus interaction. Recall the filter requires "10 non-zero pixels on each 2D projection planes" but does not specify the particle type. A proton with 70 MeV kinetic energy may be below the threshold to travel long enough, but may produce gamma rays or other particles that can travel longer with lower energy.
So why don't we take a look at them? Let's pick an event with a proton energy below 50 MeV and list them.
```
sub_df = df.query('pdg==2212 and energy < 50')
print('Found {} entries'.format(sub_df.index.size))
print(sub_df)
```
Let's take a look at just 1st projection of the first entry (32020)
```
# Create TChain for the 2D image tree and count its entries.
chain_image2d = ROOT.TChain('image2d_data_tree')
chain_image2d.AddFile('test_40k.root')
print(chain_image2d.GetEntries(),'entries found!')
# Get a specific event (entry 32020, a low-energy proton event found above)
chain_image2d.GetEntry(32020)
cpp_object = chain_image2d.image2d_data_branch
print('Object type:',cpp_object)
image2d = cpp_object.as_vector().front()
fig, ax = plt.subplots(figsize=(12,12), facecolor='w')
image2d_numpy = larcv.as_ndarray(image2d)
ax.imshow(image2d_numpy, interpolation='none', vmin=0., vmax=image2d_numpy.mean(), cmap='gray')
# Find bounds for non-zero pixels + padding of 5 pixels
nz_pixels=np.where(image2d_numpy>0.0)
ylim = (np.min(nz_pixels[0])-5,np.max(nz_pixels[0])+5)
xlim = (np.min(nz_pixels[1])-5,np.max(nz_pixels[1])+5)
# Adjust for allowed image range.
# Bug fix: rows (y) are bounded by shape[0] and columns (x) by shape[1];
# the two indices were previously swapped (harmless only for square images).
ylim = (np.max((ylim[0],0)), np.min((ylim[1],image2d_numpy.shape[0]-1)))
xlim = (np.max((xlim[0],0)), np.min((xlim[1],image2d_numpy.shape[1]-1)))
# Set range
ax.set_ylim(ylim)
ax.set_xlim(xlim)
plt.show()
```
These scattered charge depositions are likely from Compton scattering of gamma rays and definitely not from a single proton trajectory (which would have been continuous).
## Ending remark
I hope this notebook covered enough ground to make you feel more familiar with the public data contents we provided for single-particle image classification. You might find some of the sample generation configuration not suitable for your physics research. Feel free to [contact us](contact@deeplearnphysics.org) if you want samples generated under a different condition. Enabling your research goal is an important mission for our group!
| github_jupyter |
# Sentiment Analysis - Text Classification with Universal Embeddings
Textual data in spite of being highly unstructured, can be classified into two major types of documents.
- __Factual documents__ which typically depict some form of statements or facts with no specific feelings or emotion attached to them. These are also known as objective documents.
- __Subjective documents__ on the other hand have text which expresses feelings, mood, emotions and opinion.
Sentiment Analysis is also popularly known as opinion analysis or opinion mining. The key idea is to use techniques from text analytics, NLP, machine learning and linguistics to extract important information or data points from unstructured text. This in turn can help us derive the sentiment from text data

Here we will be looking at building supervised sentiment analysis classification models thanks to the advantage of labeled data! The dataset we will be working with is the IMDB Large Movie Review Dataset having 50000 reviews classified into positive and negative sentiment. I have provided a compressed version of the dataset in this repository itself for your benefit!
Do remember that the focus here is not sentiment analysis but text classification by leveraging universal sentence embeddings.

We will leverage the following sentence encoders here for demonstration from [TensorFlow Hub](https://tfhub.dev/):
- [__Neural-Net Language Model (nnlm-en-dim128)__](https://tfhub.dev/google/nnlm-en-dim128/1)
- [__Universal Sentence Encoder (universal-sentence-encoder)__](https://tfhub.dev/google/universal-sentence-encoder/2)
_Developed by [Dipanjan (DJ) Sarkar](https://www.linkedin.com/in/dipanzan/)_
# Install Tensorflow Hub
```
!pip install tensorflow-hub
```
# Load up Dependencies
```
import tensorflow as tf
import tensorflow_hub as hub
import numpy as np
import pandas as pd
```
# Check if GPU is available for use!
```
tf.test.is_gpu_available()
tf.test.gpu_device_name()
```
# Load and View Dataset
```
dataset = pd.read_csv('movie_reviews.csv.bz2', compression='bz2')
dataset.info()
dataset['sentiment'] = [1 if sentiment == 'positive' else 0 for sentiment in dataset['sentiment'].values]
dataset.head()
```
# Build train, validation and test datasets
```
reviews = dataset['review'].values
sentiments = dataset['sentiment'].values
train_reviews = reviews[:30000]
train_sentiments = sentiments[:30000]
val_reviews = reviews[30000:35000]
val_sentiments = sentiments[30000:35000]
test_reviews = reviews[35000:]
test_sentiments = sentiments[35000:]
train_reviews.shape, val_reviews.shape, test_reviews.shape
```
# Basic Text Wrangling
```
!pip install contractions
!pip install beautifulsoup4
import contractions
from bs4 import BeautifulSoup
import unicodedata
import re
def strip_html_tags(text):
    """Remove HTML markup from *text*, dropping <iframe>/<script> content."""
    soup = BeautifulSoup(text, "html.parser")
    # Remove embedded iframe/script elements entirely, including their bodies.
    [s.extract() for s in soup(['iframe', 'script'])]
    stripped_text = soup.get_text()
    # Collapse runs of carriage returns / newlines into a single newline.
    # Bug fix: inside a character class '|' is a literal character, so the
    # old pattern r'[\r|\n|\r\n]+' also deleted literal '|' characters.
    stripped_text = re.sub(r'[\r\n]+', '\n', stripped_text)
    return stripped_text
def remove_accented_chars(text):
    """Transliterate accented characters to their closest ASCII equivalents."""
    # NFKD decomposition splits base letters from combining accent marks;
    # the ASCII round-trip then drops the marks that have no ASCII form.
    decomposed = unicodedata.normalize('NFKD', text)
    return decomposed.encode('ascii', 'ignore').decode('utf-8', 'ignore')
def expand_contractions(text):
    """Expand English contractions (e.g. "don't" -> "do not") using the
    third-party `contractions` package."""
    return contractions.fix(text)
def remove_special_characters(text, remove_digits=False):
    """Strip everything except letters, whitespace and (optionally) digits."""
    if remove_digits:
        disallowed = r'[^a-zA-Z\s]'
    else:
        disallowed = r'[^a-zA-Z0-9\s]'
    return re.sub(disallowed, '', text)
def pre_process_document(document):
    """Normalize one raw review: strip HTML, lowercase, drop accents,
    expand contractions, remove special characters/digits and collapse
    extra whitespace. Returns the cleaned string."""
    # strip HTML
    document = strip_html_tags(document)
    # lower case
    document = document.lower()
    # remove extra newlines/tabs (often might be present in really noisy text)
    # Bug fix: str.maketrans(x, y) requires len(x) == len(y); mapping the
    # three characters "\n\t\r" to a single " " raised ValueError.
    document = document.translate(document.maketrans("\n\t\r", "   "))
    # remove accented characters
    document = remove_accented_chars(document)
    # expand contractions
    document = expand_contractions(document)
    # remove special characters and\or digits
    # insert spaces between special characters to isolate them
    special_char_pattern = re.compile(r'([{.(-)!}])')
    document = special_char_pattern.sub(" \\1 ", document)
    document = remove_special_characters(document, remove_digits=True)
    # remove extra whitespace
    document = re.sub(' +', ' ', document)
    document = document.strip()
    return document
pre_process_corpus = np.vectorize(pre_process_document)
train_reviews = pre_process_corpus(train_reviews)
val_reviews = pre_process_corpus(val_reviews)
test_reviews = pre_process_corpus(test_reviews)
```
# Build Data Ingestion Functions
```
# Training input on the whole training set with no limit on training epochs.
train_input_fn = tf.estimator.inputs.numpy_input_fn(
{'sentence': train_reviews}, train_sentiments,
batch_size=256, num_epochs=None, shuffle=True)
# Prediction on the whole training set.
predict_train_input_fn = tf.estimator.inputs.numpy_input_fn(
{'sentence': train_reviews}, train_sentiments, shuffle=False)
# Prediction on the whole validation set.
predict_val_input_fn = tf.estimator.inputs.numpy_input_fn(
{'sentence': val_reviews}, val_sentiments, shuffle=False)
# Prediction on the test set.
predict_test_input_fn = tf.estimator.inputs.numpy_input_fn(
{'sentence': test_reviews}, test_sentiments, shuffle=False)
```
# Build Deep Learning Model with Universal Sentence Encoder
```
embedding_feature = hub.text_embedding_column(
key='sentence',
module_spec="https://tfhub.dev/google/universal-sentence-encoder/2",
trainable=False)
dnn = tf.estimator.DNNClassifier(
hidden_units=[512, 128],
feature_columns=[embedding_feature],
n_classes=2,
activation_fn=tf.nn.relu,
dropout=0.1,
optimizer=tf.train.AdagradOptimizer(learning_rate=0.005))
```
### Train for approx 12 epochs
```
256*1500 / 30000
```
# Model Training
```
tf.logging.set_verbosity(tf.logging.ERROR)
import time
TOTAL_STEPS = 1500
STEP_SIZE = 100
for step in range(0, TOTAL_STEPS+1, STEP_SIZE):
print()
print('-'*100)
print('Training for step =', step)
start_time = time.time()
dnn.train(input_fn=train_input_fn, steps=STEP_SIZE)
elapsed_time = time.time() - start_time
print('Train Time (s):', elapsed_time)
print('Eval Metrics (Train):', dnn.evaluate(input_fn=predict_train_input_fn))
print('Eval Metrics (Validation):', dnn.evaluate(input_fn=predict_val_input_fn))
```
# Model Evaluation
```
dnn.evaluate(input_fn=predict_train_input_fn)
dnn.evaluate(input_fn=predict_test_input_fn)
```
# Build a Generic Model Trainer on any Input Sentence Encoder
```
import time
TOTAL_STEPS = 1500
STEP_SIZE = 500
my_checkpointing_config = tf.estimator.RunConfig(
keep_checkpoint_max = 2, # Retain the 2 most recent checkpoints.
)
def train_and_evaluate_with_sentence_encoder(hub_module, train_module=False, path=''):
    """Train a DNN sentiment classifier on top of a TF-Hub sentence encoder.

    hub_module:   URL/path of the TF-Hub text-embedding module to use.
    train_module: if True, fine-tune the encoder weights as well.
    path:         estimator model_dir for checkpoints.

    Relies on module-level TOTAL_STEPS, STEP_SIZE, my_checkpointing_config
    and the train/predict input functions defined earlier in the notebook.
    Returns a dict of train/test accuracy, AUC, precision and recall.
    """
    embedding_feature = hub.text_embedding_column(
        key='sentence', module_spec=hub_module, trainable=train_module)
    print()
    print('='*100)
    print('Training with', hub_module)
    print('Trainable is:', train_module)
    print('='*100)
    dnn = tf.estimator.DNNClassifier(
        hidden_units=[512, 128],
        feature_columns=[embedding_feature],
        n_classes=2,
        activation_fn=tf.nn.relu,
        dropout=0.1,
        optimizer=tf.train.AdagradOptimizer(learning_rate=0.005),
        model_dir=path,
        config=my_checkpointing_config)
    # Train in STEP_SIZE chunks so intermediate metrics can be printed.
    for step in range(0, TOTAL_STEPS+1, STEP_SIZE):
        print('-'*100)
        print('Training for step =', step)
        start_time = time.time()
        dnn.train(input_fn=train_input_fn, steps=STEP_SIZE)
        elapsed_time = time.time() - start_time
        print('Train Time (s):', elapsed_time)
        print('Eval Metrics (Train):', dnn.evaluate(input_fn=predict_train_input_fn))
        print('Eval Metrics (Validation):', dnn.evaluate(input_fn=predict_val_input_fn))
    # Final evaluation on the full train and test sets.
    train_eval_result = dnn.evaluate(input_fn=predict_train_input_fn)
    test_eval_result = dnn.evaluate(input_fn=predict_test_input_fn)
    return {
        "Model Dir": dnn.model_dir,
        "Training Accuracy": train_eval_result["accuracy"],
        "Test Accuracy": test_eval_result["accuracy"],
        "Training AUC": train_eval_result["auc"],
        "Test AUC": test_eval_result["auc"],
        "Training Precision": train_eval_result["precision"],
        "Test Precision": test_eval_result["precision"],
        "Training Recall": train_eval_result["recall"],
        "Test Recall": test_eval_result["recall"]
    }
```
# Train Deep Learning Models on different Sentence Encoders
- NNLM - pre-trained and fine-tuning
- USE - pre-trained and fine-tuning
```
tf.logging.set_verbosity(tf.logging.ERROR)
results = {}
results["nnlm-en-dim128"] = train_and_evaluate_with_sentence_encoder(
"https://tfhub.dev/google/nnlm-en-dim128/1", path='/storage/models/nnlm-en-dim128_f/')
results["nnlm-en-dim128-with-training"] = train_and_evaluate_with_sentence_encoder(
"https://tfhub.dev/google/nnlm-en-dim128/1", train_module=True, path='/storage/models/nnlm-en-dim128_t/')
results["use-512"] = train_and_evaluate_with_sentence_encoder(
"https://tfhub.dev/google/universal-sentence-encoder/2", path='/storage/models/use-512_f/')
results["use-512-with-training"] = train_and_evaluate_with_sentence_encoder(
"https://tfhub.dev/google/universal-sentence-encoder/2", train_module=True, path='/storage/models/use-512_t/')
```
# Model Evaluations
```
results_df = pd.DataFrame.from_dict(results, orient="index")
results_df
best_model_dir = results_df[results_df['Test Accuracy'] == results_df['Test Accuracy'].max()]['Model Dir'].values[0]
best_model_dir
embedding_feature = hub.text_embedding_column(
key='sentence', module_spec="https://tfhub.dev/google/universal-sentence-encoder/2", trainable=True)
dnn = tf.estimator.DNNClassifier(
hidden_units=[512, 128],
feature_columns=[embedding_feature],
n_classes=2,
activation_fn=tf.nn.relu,
dropout=0.1,
optimizer=tf.train.AdagradOptimizer(learning_rate=0.005),
model_dir=best_model_dir)
dnn
def get_predictions(estimator, input_fn):
    """Return the predicted class id for every example yielded by *input_fn*."""
    predicted_ids = []
    for output in estimator.predict(input_fn=input_fn):
        predicted_ids.append(output["class_ids"][0])
    return predicted_ids
predictions = get_predictions(estimator=dnn, input_fn=predict_test_input_fn)
predictions[:10]
!pip install seaborn
import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib inline
with tf.Session() as session:
cm = tf.confusion_matrix(test_sentiments, predictions).eval()
LABELS = ['negative', 'positive']
sns.heatmap(cm, annot=True, xticklabels=LABELS, yticklabels=LABELS, fmt='g')
xl = plt.xlabel("Predicted")
yl = plt.ylabel("Actuals")
from sklearn.metrics import classification_report
print(classification_report(y_true=test_sentiments, y_pred=predictions, target_names=LABELS))
```
| github_jupyter |
## Model Distillation
In this notebook we train models using model distillation.
```
from google.colab import drive
drive.mount('/content/drive')
from google.colab import files
uploaded = files.upload()
!unzip dataset.zip -d dataset
import warnings
import os
import shutil
import glob
import random
import random
import cv2
from fastai.vision import *
from fastai.utils.mem import *
warnings.filterwarnings("ignore", category=UserWarning, module="torch.nn.functional")
dataset="dataset"
classesPaths=sorted(glob.glob(dataset+'/*'))
# Directories are class folders; loose files are unlabelled images.
classes=[pt.split(os.sep)[-1] for pt in classesPaths if os.path.isdir(pt)]
images=[pt for pt in classesPaths if not os.path.isdir(pt)]
os.makedirs(dataset+'/train')
os.makedirs(dataset+'/valid')
os.makedirs(dataset+'/images')
# Unlabelled images go to dataset/images (used later for pseudo-labelling).
for im in images:
    shutil.move(im,dataset+'/images/')
for cl in classes:
    os.mkdir(dataset+'/train/'+cl)
    images=sorted(glob.glob(dataset+'/'+cl+'/*'))
    # Move a random 75% of each class into train/ (re-globbing each pass
    # because the folder shrinks as files are moved)...
    for i in range(int(len(images)*0.75)):
        images=sorted(glob.glob(dataset+'/'+cl+'/*'))
        j=random.randint(0,len(images)-1)
        shutil.move(images[j],dataset+'/train/'+cl)
    # ...and the remaining 25% into valid/.
    os.mkdir(dataset+'/valid/'+cl)
    images=sorted(glob.glob(dataset+'/'+cl+'/*'))
    for i in range(len(images)):
        shutil.move(images[i],dataset+'/valid/'+cl)
def learn_with_model(dataset,model):
    """Train a fastai CNN of the given architecture on *dataset*.

    Trains the head for 2 epochs, unfreezes, picks a learning rate from
    lr_find (falling back to 1e-3 when the suggestion is suspiciously
    small), then fine-tunes for 8 epochs. Returns (learner, data).
    """
    data=ImageDataBunch.from_folder(dataset,
        ds_tfms=get_transforms(), size=224,bs=32).normalize(imagenet_stats)
    learn = cnn_learner(data, model, metrics=accuracy)
    learn.fit_one_cycle(2)
    learn.unfreeze()
    learn.lr_find()
    # Learning rate at the minimum of the lr_find loss curve.
    lr=learn.recorder.lrs[np.argmin(learn.recorder.losses)]
    if lr<1e-05:
        lr=1e-03
    # Discriminative learning rates: lower layers get lr/100, head gets lr.
    learn.fit_one_cycle(8,max_lr=slice(lr/100,lr))
    return learn,data
def learn_best_model(dataset,models):
    """Train one learner per architecture in *models* and return the learner
    with the highest validation accuracy, together with its DataBunch.

    Bug fix: the DataBunch returned used to be the one from the *last*
    trained model rather than the one belonging to the best model.
    """
    best_accuracy=0
    best_model,best_data=None,None
    for model in models:
        learn,data=learn_with_model(dataset,model)
        # validate() returns [loss, accuracy]; take the accuracy metric.
        acc=learn.validate()[1].item()
        if acc>best_accuracy:
            best_model=learn
            best_data=data
            best_accuracy=acc
    return best_model,best_data
def moda(lista):
    """Average the per-class probability tensors of several predictions.

    Each element of *lista* is a fastai-style prediction tuple whose third
    item is a tensor of class probabilities. Returns a pair
    (index of the highest mean probability, that mean probability).
    """
    num_classes = len(lista[0][2])
    mean_probs = np.zeros(num_classes)
    for prediction in lista:
        mean_probs += prediction[2].numpy()
    mean_probs /= len(lista)
    best = mean_probs.argmax()
    return best, mean_probs[best]
def omniModel(dataset,learners,th):
    """Pseudo-label the unlabelled images with an ensemble of learners.

    Every image in ``dataset/images`` is predicted by each learner; the
    class probabilities are averaged via moda(). When the top mean
    probability exceeds *th*, the image is copied into the matching
    train/<class> folder (prefixed with the class name) and removed from
    the unlabelled pool.

    NOTE(review): class names are read from the module-level ``data``
    DataBunch — assumes its class ordering matches every learner; confirm.
    """
    images=sorted(glob.glob(dataset+"/images/*"))
    for image in images:
        h=open_image(image)
        lista=[]
        for learn in learners:
            p=learn.predict(h)
            lista.append(p)
        # mod: index of the most probable class; predMax: its mean probability.
        mod, predMax=moda(lista)
        if predMax>th:
            shutil.copyfile(image,dataset+'/train/'+data.classes[mod]+'/'+data.classes[mod]+'_'+image.split('/')[-1])
            os.remove(image)
            print(image+" --> "+dataset+'/train/'+data.classes[mod]+'/'+data.classes[mod]+'_'+image.split('/')[-1])
listaModels=[]
learner_resnet34,data=learn_with_model(dataset,models.resnet34)
listaModels.append(learner_resnet34)
learner_resnet50,data=learn_with_model(dataset,models.resnet50)
listaModels.append(learner_resnet50)
learner_resnet101,data=learn_with_model(dataset,models.resnet101)
listaModels.append(learner_resnet101)
learner_densenet121,data=learn_with_model(dataset,models.densenet121)
listaModels.append(learner_densenet121)
shutil.copytree(dataset, 'dataset_MD1')
omniModel('dataset_MD1',listaModels,0.0)
learner2MD,data=learn_best_model('dataset_MD1',[models.resnet34,models.resnet50,models.resnet101,models.densenet121])
learner2MD.export('/content/drive/My Drive/learnerMD.pkl')
```
| github_jupyter |
```
import numpy as np
import pandas as pd
from tqdm import tqdm
import yaml
```
***
## data loading
```
split_date = "2021-03-01"
data = pd.read_parquet("../data/train-m1.parquet")
unpredictable = pd.read_csv("../data/unpredictable.csv")
data = data.query("sku not in @unpredictable.sku").reset_index(drop=True)
print(len(unpredictable))
train = data.query("date <= @split_date").reset_index(drop=True)
valid = data.query("date > @split_date").reset_index(drop=True)
print("dates on train:", train.date.nunique())
print("dates on valid:", valid.date.nunique())
```
***
remove skus without sales in either train or validation periods
```
to_remove1 = set(train.groupby("sku")["sold_quantity"].sum()[train.groupby("sku")["sold_quantity"].sum() == 0].index)
to_remove2 = set(valid.groupby("sku")["sold_quantity"].sum()[valid.groupby("sku")["sold_quantity"].sum() == 0].index)
print(f"len(to_remove1): {len(to_remove1)}")
print(f"len(to_remove2): {len(to_remove2)}")
assert len(to_remove1 & to_remove2) == 0
to_remove = to_remove1 | to_remove2
train = train.query("sku not in @to_remove").reset_index(drop=True)
valid = valid.query("sku not in @to_remove").reset_index(drop=True)
train.sku.nunique()
valid.sku.nunique()
in_both = set(train.sku) & set(valid.sku)
len(in_both)
train = train.query("sku in @in_both").reset_index(drop=True)
valid = valid.query("sku in @in_both").reset_index(drop=True)
```
***
remove skus with huge variation of sales between train and validation periods
```
sales_prop = valid.groupby("sku")["sold_quantity"].mean() / train.groupby("sku")["sold_quantity"].mean()
sales_prop = sales_prop.reset_index(name="p")
to_remove = sales_prop.query("p >= 10").sku.values
print(f"len(to_remove): {len(to_remove)}")
train = train.query("sku not in @to_remove").reset_index(drop=True)
valid = valid.query("sku not in @to_remove").reset_index(drop=True)
```
***
remove skus with few data-points in train period
```
tr_count = train.groupby("sku")["date"].count().reset_index(name="c")
to_remove = tr_count.query("c <= 7").sku.values
print(f"len(to_remove): {len(to_remove)}")
train = train.query("sku not in @to_remove").reset_index(drop=True)
valid = valid.query("sku not in @to_remove").reset_index(drop=True)
```
***
save the skus
```
skus_assess_m1 = train.sku.unique().tolist()
with open("../data/skus_assess_m1.yaml", "w") as file:
yaml.dump(skus_assess_m1, file)
file.close()
len(skus_assess_m1)
```
***
## generating multiple validation sets
```
def create_validation_set(dataset, seed, harder=False):
    """Build (sku, target_stock, inventory_days) targets from 30-day sales.

    For every sku with a complete 30-day history and at least one sale, a
    random cumulative-sales value is drawn as ``target_stock`` and the first
    day on which that stock level is reached becomes ``inventory_days``.
    With ``harder=True`` the draw is over *unique* cumulative values,
    removing the bias towards long runs of repeated totals (i.e. towards
    late inventory days).
    """
    np.random.seed(seed)
    print('Sorting records...')
    temp_df = dataset.loc[:, ['sku','date','sold_quantity']].sort_values(['sku','date'])
    print('Grouping quantity...')
    # sku -> chronologically ordered list of daily sold quantities.
    temp_dict = temp_df.groupby('sku').agg({'sold_quantity':lambda x: [i for i in x]})['sold_quantity'].to_dict()
    result = []
    for idx, list_quantity in tqdm(temp_dict.items(), desc='Making targets...'):
        cumsum = np.array(list_quantity).cumsum()
        target_stock = 0
        # Only skus with sales and a full 30-day history are usable.
        if cumsum[-1] > 0 and len(cumsum)==30:
            # choose a random target different from 0
            while target_stock == 0:
                if harder:
                    target_stock = np.random.choice(np.unique(cumsum))
                else:
                    target_stock = np.random.choice(cumsum)
            # get the first day with this amount of sales
            inventory_days = np.argwhere(cumsum==target_stock).min() + 1
            # add to a list
            result.append({'sku':idx, 'target_stock':target_stock, 'inventory_days':inventory_days})
    return result
def create_m3_dataset(dataset, seed, sample_by_sku=3):
    """Like create_validation_set, but draws up to ``sample_by_sku`` distinct
    target_stock values per sku (without replacement), producing an augmented
    validation set.

    NOTE(review): unlike create_validation_set, there is no 30-day-history
    check here — presumably intentional for the augmented set; confirm.
    """
    np.random.seed(seed)
    print('Sorting records...')
    temp_df = dataset.loc[:, ['sku','date','sold_quantity']].sort_values(['sku','date'])
    print('Grouping quantity...')
    # sku -> chronologically ordered list of daily sold quantities.
    temp_dict = temp_df.groupby('sku').agg({'sold_quantity':lambda x: [i for i in x]})['sold_quantity'].to_dict()
    result = []
    for idx, list_quantity in tqdm(temp_dict.items(), desc='Making targets...'):
        cumsum = np.array(list_quantity).cumsum()
        # Unique, strictly positive cumulative totals are the candidate targets.
        cumsum_unique = np.unique(cumsum[cumsum > 0])
        sample_size = min(sample_by_sku, len(cumsum_unique))
        target_stocks = np.random.choice(cumsum_unique, size=sample_size, replace=False)
        for target_stock in target_stocks:
            # First day on which the cumulative sales reach target_stock.
            inventory_days = np.argwhere(cumsum==target_stock).min() + 1
            result.append({'sku':idx, 'target_stock':target_stock, 'inventory_days':inventory_days})
    return result
seed_list = [2, 17, 19, 23]
for seed in seed_list:
valid_dataset = create_validation_set(valid, seed=seed)
valid_dataset = pd.DataFrame(valid_dataset)
print("Number of skus:", valid_dataset.sku.nunique())
valid_dataset.to_csv(f"../data/validation_seed{seed}.csv", index=False)
seed_list = [2, 17, 19, 23]
for seed in seed_list:
valid_dataset = create_validation_set(valid, seed=seed, harder=True)
valid_dataset = pd.DataFrame(valid_dataset)
print("Number of skus:", valid_dataset.sku.nunique())
valid_dataset.to_csv(f"../data/validation_seed{seed}_harder.csv", index=False)
valid_m3 = create_m3_dataset(valid, seed=42, sample_by_sku=3)
valid_m3 = pd.DataFrame(valid_m3)
valid_m3.to_csv(f"../data/validation_m3.csv", index=False)
```
***
inspecting the validation datasets
```
import matplotlib.pyplot as plt
valid1 = pd.read_csv("../data/validation_seed2.csv")
valid2 = pd.read_csv("../data/validation_seed2_harder.csv")
plt.figure(figsize=(20,5))
plt.hist([valid1.inventory_days, valid2.inventory_days], bins=np.arange(0,31), rwidth=0.8, align="right", label=["normal","harder"])
plt.grid()
plt.legend(loc="best")
plt.show()
plt.figure(figsize=(20,5))
plt.hist(valid_m3.inventory_days, bins=np.arange(0,31),
rwidth=0.8, align="right", density=False)
plt.grid()
plt.show()
plt.figure(figsize=(20,5))
plt.hist([valid2.inventory_days,valid_m3.inventory_days], bins=np.arange(0,31),
rwidth=0.8, align="right", density=True, label=["harder","augmented"])
plt.grid()
plt.legend(loc="best")
plt.show()
plt.figure(figsize=(20,5))
valid1.target_stock.quantile(np.arange(0,1,0.01)).plot()
valid2.target_stock.quantile(np.arange(0,1,0.01)).plot()
plt.grid()
plt.xlabel("quantile")
plt.ylabel("target_stock")
plt.show()
plt.figure(figsize=(20,5))
valid1.target_stock.quantile(np.arange(0.8,1,0.01)).plot()
valid2.target_stock.quantile(np.arange(0.8,1,0.01)).plot()
plt.grid()
plt.xlabel("quantile")
plt.ylabel("target_stock")
plt.show()
```
***
| github_jupyter |
## Dependent random variable
Some times simple stochastically independent random variables are not enough,
and one have to use variables with stochastic dependencies. In `chaospy` such
variables can be created through parameter declarations.
To demonstrate, let us start through example: a Gaussian distribution that
depend on a gamma distribution through its mu and sigma parameter:
```
import chaospy
dist1 = chaospy.Gamma(1)
dist2 = chaospy.Normal(mu=dist1, sigma=dist1+1)
joint = chaospy.J(dist1, dist2)
joint
```
The resulting distribution can be used as any other distribution in
`chaospy`. For example, here is the contour plot of the probability density
function together with (quasi-)random samples:
```
import numpy
from matplotlib import pyplot
grid = numpy.mgrid[0:3:100j, -3:4:100j]
pyplot.contourf(grid[0], grid[1], joint.pdf(grid), 30)
pyplot.scatter(*joint.sample(100, rule="halton"), marker="x")
pyplot.axis([0, 3, -3, 4])
```
In `chaospy` dependencies are handled by assuming all distributions have a
known [chain rule
decomposition](https://en.wikipedia.org/wiki/Chain_rule_(probability)):
$$
p_{Q_1, Q_2, \dots}(q_1, q_2, \dots) =
p_{Q_1}(q_1)\ p_{Q_2\mid Q_1}(q_2)\ p_{Q_3\mid Q_2=q_1,Q_2}(q_2)\cdots
$$
As long as this decomposition is possible, `chaospy` will figure out how to
assemble the joint density function, and random sampling. For examples, is
the following allowed:
```
dist_lo = chaospy.Uniform(0, 1)
dist_up = chaospy.Uniform(1, 2)
dist_mid = chaospy.Uniform(dist_lo, dist_up)
joint_ordered = chaospy.J(dist_lo, dist_mid, dist_up)
joint_ordered.sample(5, rule="halton").round(5)
```
or visually:
```
samples = joint_ordered.sample(200, rule="halton")
pyplot.rc("figure", figsize=[12, 4])
pyplot.subplot(131)
pyplot.scatter(samples[0], samples[1])
pyplot.subplot(132)
pyplot.scatter(samples[1], samples[2])
pyplot.subplot(133)
pyplot.scatter(samples[0], samples[2])
pyplot.show()
```
Having random samples and probability density function available, allows for
use of [generalized polynomial chaos](./generalized_polynomial_chaos.ipynb).
### Limitations
The functionality described above allows for creating the joint probability
density function and joint random samples. In practice the latter is possible
because `chaospy` constructs the forward and inverse *Rosenblatt
transformation*. However, it is important to note that dependent random
variables like these cannot be used for everything. For example, when
creating quadrature nodes and weights, rules not dependent on distributions,
like Fejér, work fine:
```
nodes, weights = chaospy.generate_quadrature(2, joint, rule="fejer_2")
nodes.round(4), weights.round(6)
```
However, rules that are built directly on properties taken from the
distributions, and in particular those assuming stochastic independence,
cannot work. For example, optimal Gaussian quadrature:
```
import pytest
with pytest.raises(chaospy.StochasticallyDependentError):
chaospy.generate_quadrature(2, joint, rule="gaussian")
```
The same limitation also prevents the construction of joint *cumulative
distribution function*.
### Illegal distribution
Note that the distribution of interest here, `joint`, is the joint
distribution including both the Gaussian and the Gamma distribution. The
conditional Gaussian distribution `dist2` is created as part of this, but on
its own it cannot be used for anything. In fact, trying to use a conditional
distribution on its own in `chaospy` will cause an error:
```
with pytest.raises(chaospy.StochasticallyDependentError):
dist2.sample(10)
```
In other words, though many kinds of dependencies can be modeled in
`chaospy`, declaring those distributions might sometimes be challenging. For
example, a transformation like:
```
dist1 = chaospy.Uniform(0, 1)
dist2 = chaospy.Normal(0, 1)
joint_illegal = chaospy.J(dist1+dist2, dist1-dist2)
with pytest.raises(chaospy.StochasticallyDependentError):
joint_illegal.sample(10)
```
| github_jupyter |
# Games on Networks
```
%%capture
# Housekeeping
import networkx as nx
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
import matplotlib.gridspec as gridspec
import matplotlib.pylab as pl
import random as rn
import matplotlib.patches as mpatches
# Make sure you download econ46_library.py from our course material and save it in the same folder as then notebooks
# this file has some functions specifically coded for the class
from supporting_material import econ46_library as el
# These modules are only to have some interactive pieces of code in the notebooks
from ipywidgets import interact, interactive, fixed, interact_manual
import ipywidgets as widgets
from IPython.display import display
def complements_game(G,share,active_seed,max_iter=10000):
    """Best-response dynamics for a strategic-complements (threshold) game on G.

    Starting from the seed set `active_seed`, a node takes the action once at
    least a `share` fraction of its neighbors are acting; iterate until the
    action profile reaches a fixed point (or `max_iter` is hit), then plot the
    final profile (blue = acting, pink = not acting).
    NOTE(review): assumes nodes are labeled 0..n-1 so node ids can index numpy
    rows directly -- confirm against callers.
    """
    node_list = list(G.nodes())
    n = len(node_list)
    # Column vector of actions (1 = acting); seed nodes start active.
    actions = np.zeros((n,1))
    for node in active_seed:
        actions[node,0] = 1
    node_list.sort()
    # Adjacency matrix ordered by sorted node ids.
    # NOTE(review): nx.to_numpy_matrix is removed in networkx >= 3.0 -- confirm
    # the pinned networkx version before upgrading.
    A = nx.to_numpy_matrix(G,nodelist=node_list)
    degree= A.sum(axis=1)
    # Isolated nodes (degree 0): adding 1 to their threshold keeps them from
    # ever activating unless they are in the seed.
    pos_deg = degree==0
    actions1 = np.zeros((1,n))
    iteration=0
    # Iterate simultaneous best responses until the profile stops changing.
    while (actions != actions1).any() and iteration<max_iter:
        actions1 = actions.copy()
        # Act iff the count of acting neighbors meets the degree*share threshold.
        actions = ((np.dot(A,actions1)>=(degree*share+pos_deg)).astype(int))>0
        iteration = iteration+1
    if iteration == max_iter:
        print('Game did not converge in %s iterations' % iteration)
        return
    actions_l = actions.reshape(n,1)
    exposed_nodes = [node for node in node_list if actions_l[node]==1]
    clean_nodes = [node for node in node_list if actions_l[node]==0]
    # --- plotting: acting nodes in blue, non-acting in pink, labels on top ---
    plt.figure(figsize=(5,5))
    pos0 = nx.kamada_kawai_layout(G)
    ec = nx.draw_networkx_edges(G, pos0, alpha=1)
    # NOTE(review): `with_labels`/`font_color` are not valid kwargs of
    # draw_networkx_nodes in recent networkx versions (they raise or are
    # ignored) -- confirm the pinned version.
    nc = nx.draw_networkx_nodes(G, pos0, nodelist=exposed_nodes,node_color=('#0072b2'),
                                with_labels=True,font_color='black', font_weight='bold', node_size=500)
    nn = nx.draw_networkx_nodes(G, pos0, nodelist=clean_nodes,node_color=('#cc79a7'),
                                with_labels=True, font_color='black', font_weight='bold', node_size=500)
    nl = nx.draw_networkx(G, node_color='none',alpha=1,font_color='white', font_weight='bold',
                          pos=pos0,with_labels = True,node_size=500)
    blue_patch = mpatches.Patch(color='#0072b2', label='Action')
    red_patch = mpatches.Patch(color='#cc79a7', label='NO Action')
    plt.legend(bbox_to_anchor=(1, 1),handles=[blue_patch,red_patch])
    plt.axis('off')
    ax2= plt.gca()
    # Give all nodes a black outline.
    ax2.collections[0].set_edgecolor("#000000")
    plt.show()
```
## If you can't see any network under this line, run the cell of code and change the parameters when the buttons appear.
```
display(el.accordion_h,el.accordion_h2,el.lays_h,el.atts_h,el.y_h)
G = nx.from_pandas_edgelist(el.edgelist_h)
active_seed = [27,25,28]
for share in [.25,.3,.5,.6,.75]:
print('Share-of-friends-acting needed to get value from acting too:', share)
complements_game(G,share,active_seed)
```
# Peer Effects
Below you can observe the Dutch Highschool network at different points in time and the alcohol consumption of the nodes.
#### Note: Both drinking behavior and reported gender can be shown in the graph by way of coloring the nodes. Thus you cannot see both pieces of information at the same time.
### Are drinkers more likely to be friends? Are they more likely to be popular?
## Remember: excessive alcohol use leads to harmful outcomes in both the short and long terms. If you are underage you should not drink at all (no, it really is not cool, it's overrated).
```
display(el.nets_drink,el.types_drink,el.accordion_drink,el.lays_drink,el.atts_drink,el.y_drink)
```
| github_jupyter |
```
import sqlite3
from pprint import pprint
import pandas as pd
import math
import datetime
import matplotlib.pyplot as plt
tInicioGlobal = datetime.datetime.now()
```
## INICIO extraer datos (BBDD)
```
### Open the MongoDB connection
import pymongo
myclient = pymongo.MongoClient("mongodb://localhost:27017/")
mydb = myclient["stum_for_you"]
mydb = mydb  # (no-op placeholder removed)
```
## FIN extraer datos
## INICIO parsear datos
```
# Build the provider x game purchase-count matrix used for training.
# BUGFIX(review): `dfSubmission` was referenced but never defined anywhere in
# this notebook; reconstruct it from the `dataParse` purchase records
# extracted from MongoDB above.
dfSubmission = pd.DataFrame(dataParse)
dfSubmissionParaEntrenar = dfSubmission.copy()
dfUsersProblem = pd.DataFrame(0, index=tablaUsersBBDD, columns=tablaProblemBBDD)
for i in dfSubmissionParaEntrenar.itertuples():
    dfUsersProblem.at[i.puntero_proveedor, i.puntero_juego] += int(i.cantidad)
# Keep a copy of the full matrix before pruning empty rows/columns.
dfOriginalUsersProblem = dfUsersProblem.copy()
# Drop games that were never bought (column sum < 1)...
aux = dfUsersProblem.sum()
aux = aux[aux < 1]
dfUsersProblem = dfUsersProblem.drop(aux.index, axis=1)
# ...and providers that never bought anything (row sum < 1).
aux = dfUsersProblem.transpose().sum()
aux = aux[aux < 1]
dfUsersProblem = dfUsersProblem.drop(aux.index, axis=0)
# "Inverted" masks: 1 where there was no purchase, 0 where there was one.
# NOTE(review): cells with counts > 1 are untouched by these two assignments --
# confirm upstream that purchase counts are effectively binary.
dfOriginalUsersProblemInv = dfOriginalUsersProblem.copy()
dfOriginalUsersProblemInv[dfOriginalUsersProblem == 0] = 1
dfOriginalUsersProblemInv[dfOriginalUsersProblem == 1] = 0
dfUsersProblemInv = dfUsersProblem.copy()
dfUsersProblemInv[dfUsersProblem == 0] = 1
dfUsersProblemInv[dfUsersProblem == 1] = 0
```
## FIN parsear datos
## INICIO sacar lista de recomendación
#### Calculo probabilidad simple
```
def calculoProbabilidadSimple():
    """Prior probability of each game: its purchase count over the total
    number of providers."""
    numero_proveedores = len(dfUsersProblemInv.index)
    return dfUsersProblem.sum() / numero_proveedores

sProbSimple = calculoProbabilidadSimple()
```
#### probabilidad de resolver el problema COLUMNA habiendo resuelto el problema FILA
```
def calculoProbabilidadCondicionadaRR():
    """Laplace-smoothed conditional probability of buying the COLUMN game
    given that the ROW game was bought: (co-purchases + 1) / (purchases + 2)."""
    juegos = dfUsersProblem.columns
    condicionada = pd.DataFrame(index=juegos, columns=juegos)
    for juego in juegos:
        compradores = dfUsersProblem[juego]
        numerador = dfUsersProblem.mul(compradores, axis=0).sum() + 1
        condicionada[juego] = numerador / (compradores.sum() + 2)
    return condicionada

dfProblemProbabilidadSiOptimizado = calculoProbabilidadCondicionadaRR()
# dfProblemProbabilidadSiOptimizado.to_csv("probPriori.csv")
```
tInicio = datetime.datetime.now()
dfProblemProbabilidadSiOptimizado = pd.read_csv("probPriori.csv", index_col=0, parse_dates=True)
tFinal = datetime.datetime.now()
t = tFinal - tInicio
print(t)
#### Calculo para TODOS los usuarios
def calculoTablaProbTodosLosUsuarios():
dfFinal = pd.DataFrame()
for c in dfProblemProbabilidadSiOptimizado.columns:
aux = dfOriginalUsersProblem.mul(dfProblemProbabilidadSiOptimizado[c])
aux = aux[aux!=0].prod(axis=1)
aux2 = dfOriginalUsersProblemInv.mul(1-dfProblemProbabilidadSiOptimizado[c])
aux2 = aux2[aux2!=0].prod(axis=1)
dfFinal[c]=aux*aux2*sProbSimple[c]
dfFinal.to_csv("allUsersProb.csv")
return dfFinal
dfFinal = calculoTablaProbTodosLosUsuarios()
```
#### Cargar de archivo los datos generados por la funcion anterior (para la demo porque la funcion tarda mucho)
dfFinal = pd.read_csv("allUsersProb.csv", index_col=0, parse_dates=True)
dfFinal.columns = dfFinal.columns.astype(int)
#Ordenar recomendaciones para cada usuario
def generarListaDeRecomendacionOrdenada():
    """For each provider, rank the games they have NOT bought yet by
    descending posterior probability."""
    recomendaciones = pd.DataFrame(columns=[0], index=dfFinal.index)
    for proveedor in dfFinal.index:
        no_comprados = dfOriginalUsersProblem.loc[proveedor] != 1
        puntuaciones = dfFinal.loc[proveedor][no_comprados].copy()
        recomendaciones.at[proveedor, 0] = puntuaciones.sort_values(ascending=False)
    return recomendaciones

listaRecomendacion = generarListaDeRecomendacionOrdenada()
siguienteJuegoDeCadaProveedor = {}
for x in listaRecomendacion.index:
siguienteJuegoDeCadaProveedor[x] = listaRecomendacion.at[x,0].index[0]
siguienteJuegoDeCadaProveedor
def recomendacionUnUsuario(user):
    """Return the ordered recommendation Series for a single provider."""
    return listaRecomendacion.loc[user, 0]
tFinalGlobal = datetime.datetime.now()
t = tFinalGlobal - tInicioGlobal
print(t)
```
## FIN sacar lista de recomendación
| github_jupyter |
```
%load_ext autoreload
%autoreload 2
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import pandas as pd
from mmctools.helper_functions import covariance, w_s, e_s, theta
from mmctools.plotting import plot_timehistory_at_height
```
# TTU tower measured heat flux / stability
written by Eliot Quon <eliot.quon@nrel.gov>
Estimate stability and calculate heat flux using 1-Hz data from `process_TTU_tower.ipynb`. Findings:
- The virtual temperature is about 2-4% lower than the sonic temperature in this case, which translates into a difference of about 6-12 K.
- The temperature data (from a separate sensor with response time of 10s)--and quantities calculated from temperature, such as virtual temperature and potential temperature--are useful in the mean but not useful in terms of fluctuations (e.g., for calculating fluxes).
- The bulk Richardson number ($Ri_B$) is well correlated with the heat flux, and more clearly highlights periods of slight stability/instability than the Obukhov stability parameter ($z/L$).
```
towerdata = 'data/TTU_tilt_corrected_20131108-09.csv'
zhub = 74.7
```
## read pre-processed data
```
df = pd.read_csv(towerdata, parse_dates=['datetime']).set_index(['datetime','height'])
df.head()
times = df.index.levels[0]
heights = df.index.levels[1]
times
heights
```
## calculate reference quantities
```
df['wspd'] = np.sqrt(df['u']**2 + df['v']**2)
```
### virtual potential temperature calculation from Kelley2016, Eqns 2.19-21
calculate $\theta(T,p)$ and $\theta_v(\theta, r_s, RH)$
```
df['theta'] = theta(df['T'], df['p'])
e0 = 0.611 # [kPa]
eps = 0.622 # [-]
L_Rv = 5423. # [K]
# saturation vapor pressure [kPa]
es = e0 * np.exp(L_Rv*(1/273. - 1/df['T']))
# saturation mixing ratio
rs = eps * es / (df['p'] - es)
df['thetav'] = df['theta'] * (1 + 0.61*rs*df['RH']/100)
```
<font color='red'>Note: Calculating temperature fluxes based on $T$ measured by the RM Young 41382V T/RH probe produces nonsensical results, because the response time is 10s (0.1Hz); the Gill R3-50 sonics, in comparison, measure at 50 Hz.</font>
```
df['Tv'] = df['T'] * (1 + 0.61*rs*df['RH']/100)
```
### check if $T_s \approx T_v$
(assuming crosswind effects in the sonic measurements are properly corrected for and negligible)
```
# saturation mixing ratio, kg/kg
ws = w_s(df['T'], df['p'])
# mixing ratio
w = df['RH']/100. * ws
# specific humidity
q = w / (1+w)
df['Tv/Ts'] = df['Tv'] / df['Ts']
# actual TV/Ts
fig,ax = plot_timehistory_at_height(df.reset_index(1),
fields=['RH','Tv/Ts'],
fieldlabels={
'RH': 'RH [%]',
'Tv/Ts': r'$T_v / T_s$'
},
heights=heights)
# From Kaimal and Gaynor, Boundary Layer Meteorology, Vol 56, 1991:
# Tv = T (1 + 0.61 q) (2b)
# Ts = T (1 + 0.32 e/p) (3)
# = T (1 + 0.51 q) for q ~= 0.622 e/p
# Note:
# - Eqn 2b holds if the mixing ratio is small, i.e.,
# q == w/(1+w) ~= w
# Also see Wallace & Hobbs Eqn 3.60
# Tv ~= T(1 + 0.61w)
# For this case, this assumption holds.
# - Eqn 3 is the acoustically derived temperature, where the difference in coefficient
# stems from an additional dependency on the specific heats of water vapor and air.
# This appears to be the source of error in this approximation.
# - The same result as below is obtained from Eqn 5 from the same reference.
approx_Tv_Ts = (1 + 0.61*q) / (1 + 0.51*q)
approx_Tv_Ts.xs(74.7,level='height').plot()
plt.gca().ticklabel_format(axis='y',useOffset=False)
plt.title('Note: This approximation is invalid!')
plt.ylabel('Tv/Ts')
```
### estimate $\theta_v$ from $T_s$
to get high-frequency data
```
df['thetas'] = theta(df['Ts'], df['p'])
```
### calculate some stats
```
df_10min = df.unstack().resample('10min').mean().stack()
df_10min['wdir'] = 180./np.pi * np.arctan2(-df_10min['u'], -df_10min['v'])
df_10min.loc[df_10min['wdir'] < 0, 'wdir'] += 360.
df_10min['uw'] = covariance(df['u'], df['w'], '10min', resample=True)
df_10min['vw'] = covariance(df['v'], df['w'], '10min', resample=True)
df_10min['hflux'] = covariance(df['Ts'],df['w'], '10min', resample=True)
df_10min['hflux_pot'] = covariance(df['thetas'],df['w'], '10min', resample=True)
df_10min_var = df.unstack().resample('10min').var().stack()
```
derived quantities
```
ang = np.arctan2(df_10min['v'], df_10min['u'])
df_10min['TIdir'] = np.sqrt(
df_10min_var['u'] * np.cos(ang)**2
+ covariance(df['u'], df['v'], '10min', resample=True) * 2*np.sin(ang)*np.cos(ang)
+ df_10min_var['v'] * np.sin(ang)**2
)
df_10min['TIdir'] /= df_10min['wspd']
df_10min['TI'] = np.sqrt(df_10min_var['wspd']) / df_10min['wspd']
df_10min['k'] = 0.5 * (df_10min_var['u'] + df_10min_var['v'] + df_10min_var['w'])
df_10min['u*'] = (df_10min['uw']**2 + df_10min['vw']**2)**0.25
```
### SANITY CHECK: reproduce plots from Figure 3.4 in MMC Year 2 Report (PNNL-26267)
```
fig,ax = plot_timehistory_at_height(df_10min.reset_index(1),
fields=['wspd','wdir','thetav','TI'],
fieldlimits={
'wspd':(0,15),
'wdir':(180,260),
'thetav':(280,300),
'TI':(0,0.5),
},
timelimits=('2013-11-08 09:00','2013-11-09 12:00'),
heights=heights,
cmap='copper')
ax[3].xaxis.set_minor_locator(mdates.HourLocator(byhour=range(24),interval=3))
```
### estimate stability
Bulk Richardson number also plotted in Figure 3.4 of MMC Year 2 report
```
# selected stations, from report
z0,z1 = [2.4, 10.1]
dz = z1 - z0
z0,z1,dz
df0 = df_10min.xs(z0, level='height')
df1 = df_10min.xs(z1, level='height')
Tv_mean = 0.5*(df0['Tv'] + df1['Tv'])
bulk_Ri = 9.81/Tv_mean * (df1['thetav'] - df0['thetav']) * dz \
/ ((df1['u']-df0['u'])**2 + (df1['v']-df0['v'])**2)
fig,ax = plt.subplots(figsize=(10,4))
bulk_Ri.plot()
ax.set_xlabel('')
ax.set_ylabel('$Ri_B$', fontsize='x-large')
ax.set_xlim(('2013-11-08 09:00','2013-11-09 12:00'))
ax.set_ylim((-0.2, 0.4))
ax.grid(which='both')
```
Obukhov stability parameter, $z/L$
```
df_10min['L'] = -df_10min['u*']**3 * df_10min['Tv'] / (0.4 * 9.81 * df_10min['hflux'])
for z in heights:
df_10min.loc[(slice(None),z), 'z/L'] = z / df_10min.loc[(slice(None),z), 'L']
fig,ax = plt.subplots(figsize=(10,4))
bulk_Ri.plot(label=r'$Ri_B$ (z={:g}, {:g} m)'.format(z0,z1))
#for z in heights[:4]:
for z in heights[:1]:
z_L = df_10min.xs(z,level='height')['z/L']
z_L.plot(label=r'$z/L$ ($z$={:g} m)'.format(z))
ax.set_xlabel('')
ax.set_ylabel('stability metric',fontsize='x-large')
ax.grid(which='both')
ax.legend(fontsize='x-large')#,ncol=2)
ax.set_ylim((-0.7,0.7))
# Twin-axis comparison of the two stability metrics over the same period:
# bulk Richardson number (left, blue) vs Obukhov z/L (right, red).
fig, ax = plt.subplots(figsize=(10,4))
bulk_Ri.plot(color='b')
ax.set_ylim((-1,1))
ax.set_xlabel('')
# BUGFIX: corrected typo in the axis label ('Ricahrdson' -> 'Richardson').
ax.set_ylabel('Bulk Richardson number', fontsize='x-large')
ax.yaxis.label.set_color('b')
ax2 = ax.twinx()
for z in [10.1]:
    z_L = df_10min.xs(z, level='height')['z/L']
    # BUGFIX: was color='r'.format(z) -- .format() on a plain string with no
    # replacement fields is a no-op, so the intent is simply color='r'.
    z_L.plot(color='r')
ax2.set_ylim((-5,5))
ax2.set_ylabel('Obukhov stability parameter', fontsize='x-large')
ax2.yaxis.label.set_color('r')
ax.grid(which='both')
# After the loop, `z` holds the last (only) height plotted, 10.1 m.
labels = [r'$Ri_B$ (z={:g}, {:g})'.format(z0, z1),
          r'$z/L$ ($z$={:g} m)'.format(z)]
ax.legend([ax.get_lines()[0], ax2.get_lines()[0]], labels, fontsize='x-large')
```
## measured heat flux
```
# stable/unstable conditions
Ri_s = bulk_Ri.loc[bulk_Ri > 0]
Ri_u = bulk_Ri.loc[bulk_Ri <= 0]
zsurf = 0.9
surf = df_10min.xs(zsurf, level='height')
hflux = surf['hflux']
hflux_pot = surf['hflux_pot']
fig,ax = plt.subplots(figsize=(10,4))
ax.plot(hflux.index, hflux, 'k', label=r"$\overline{T_s'w'}$")
ax.plot(hflux_pot.index, hflux_pot, 'g', label=r"$\overline{\theta_s'w'}$")
ax.legend(loc='center left',fontsize='large',title='z={:g} m'.format(zsurf))
ax.set_xlabel('')
ax.set_ylabel('surface heat flux [K-m/s]', fontsize='x-large')
ax.axhline(0, color='k', lw=1)
ax2 = ax.twinx()
ax2.plot(Ri_u.index, Ri_u, 'r.', alpha=0.3)
ax2.plot(Ri_s.index, Ri_s, 'b.', alpha=0.3)
ax2.set_ylabel(r'$Ri_B > 0$', fontsize='x-large')
ax2.yaxis.label.set_color('b')
fig.savefig('figures/calculated_heat_flux.png',dpi=150)
```
| github_jupyter |
```
from ortools.linear_solver import pywraplp
from ortools.sat.python import cp_model
from search_engine.trip_planner.trip_classes.Item import Item
from haversine import haversine
import numpy as np
import pandas as pd
path = ['architecture',
'historic',
'architecture',
'religion',
'historic',
'religion',
'sport',
'religion',
'sport',
'historic',
'religion',
'historic',
'architecture',
'architecture',
'historic',
'historic',
'religion',
'religion',
'architecture',
'architecture',]
path = ['cultural',
'sport',
'sport',
'sport',
'hotel',
# 'food',
# 'food',
# 'food',
# 'food',
# 'food',
# 'food',
# 'food',
# 'food',
# 'food',
# 'food',
'shop',
'shop',
'architecture',
'cultural',
'cultural',
'cultural',
'cultural',
'cultural',
'cultural',
'shop',
'shop',
'sport',
'shop',
'cultural',
'cultural',
'architecture',
'architecture',
'architecture',
'architecture',
'architecture',
'architecture',
'architecture',
'architecture',
'architecture',
'cultural',
'cultural',
'cultural',
'architecture',
'sport',
'sport',
'sport',
'sport',
'sport',
'sport',
'architecture',
'sport',
'sport']
items = [Item("hotel",{"name":"","id":"247869","coordinate":{'lat': 52.515915, 'lon': 13.394078},"guestrating":"9.2"}),
Item("food",{"name":"","id":"N6121773112","coordinate":{'lat': 52.516552, 'lon': 13.403493},"guestrating":"7"}),
Item("food",{"name":"","id":"N1433120469","coordinate":{'lat': 52.519344, 'lon': 13.4025},"guestrating":"7"}),
Item("food",{"name":"","id":"Q869943","coordinate":{'lat': 52.513332, 'lon': 13.405833},"guestrating":"7"}),
Item("food",{"name":"","id":"N1244644982","coordinate":{'lat': 52.51564, 'lon': 13.406953},"guestrating":"7"}),
Item("food",{"name":"","id":"N622642560","coordinate":{'lat': 52.523018, 'lon': 13.388484},"guestrating":"7"}),
Item("food",{"name":"","id":"N89275075","coordinate":{'lat': 52.518013, 'lon': 13.407163},"guestrating":"7"}),
Item("food",{"name":"","id":"W48118905","coordinate":{'lat': 52.508587, 'lon': 13.38711},"guestrating":"7"}),
Item("food",{"name":"","id":"N2607087946","coordinate":{'lat': 52.518829, 'lon': 13.408701},"guestrating":"7"}),
Item("food",{"name":"","id":"N615149024","coordinate":{'lat': 52.523933, 'lon': 13.402408},"guestrating":"7"}),
Item("shop",{"name":"","id":"Q1901032","coordinate":{'lat': 52.519199, 'lon': 13.3836},"guestrating":"7"}),
Item("shop",{"name":"","id":"Q1901029","coordinate":{'lat': 52.508099, 'lon': 13.3881},"guestrating":"7"}),
Item("shop",{"name":"","id":"N1714250170","coordinate":{'lat': 52.522987, 'lon': 13.382868},"guestrating":"7"}),
Item("historic",{"name":"","id":"N262455591","coordinate":{'lat': 52.517254, 'lon': 13.392743},"guestrating":"7"}),
Item("historic",{"name":"","id":"N3058015348","coordinate":{'lat': 52.517815, 'lon': 13.393262},"guestrating":"7"}),
Item("historic",{"name":"","id":"N262457570","coordinate":{'lat': 52.513638, 'lon': 13.392648},"guestrating":"7"}),
Item("cultural",{"name":"","id":"W205728152","coordinate":{'lat': 52.516487, 'lon': 13.393826},"guestrating":"7"}),
Item("cultural",{"name":"","id":"W15976892","coordinate":{'lat': 52.516705, 'lon': 13.394738},"guestrating":"7"}),
Item("cultural",{"name":"","id":"N262455591","coordinate":{'lat': 52.517254, 'lon': 13.392743},"guestrating":"7"}),
Item("cultural",{"name":"","id":"N437319063","coordinate":{'lat': 52.517593, 'lon': 13.393472},"guestrating":"7"}),
Item("cultural",{"name":"","id":"N3058015349","coordinate":{'lat': 52.517849, 'lon': 13.39373},"guestrating":"7"}),
Item("sport",{"name":"","id":"R5758790","coordinate":{'lat': 52.529545, 'lon': 13.39354},"guestrating":"7"}),
Item("architecture",{"name":"","id":"Q435399","coordinate":{'lat': 52.516399, 'lon': 13.3933},"guestrating":"7"}),
Item("architecture",{"name":"","id":"Q439297","coordinate":{'lat': 52.516899, 'lon': 13.3928},"guestrating":"7"}),
Item("architecture",{"name":"","id":"Q1540327","coordinate":{'lat': 52.516899, 'lon': 13.3921},"guestrating":"7"}),
Item("architecture",{"name":"","id":"Q2110747","coordinate":{'lat': 52.516899, 'lon': 13.3961},"guestrating":"7"}),
Item("architecture",{"name":"","id":"Q806775","coordinate":{'lat': 52.5144, 'lon': 13.3955},"guestrating":"7"}),
Item("hotel",{"name":"","id":"360456","coordinate":{'lat': 51.529412, 'lon': -0.125847},"guestrating":"8.6"}),
Item("food",{"name":"","id":"N1980151259","coordinate":{'lat': 51.530228, 'lon': -0.12801},"guestrating":"7"}),
Item("food",{"name":"","id":"W132319674","coordinate":{'lat': 51.528236, 'lon': -0.128996},"guestrating":"7"}),
Item("food",{"name":"","id":"W46821178","coordinate":{'lat': 51.532394, 'lon': -0.125386},"guestrating":"7"}),
Item("food",{"name":"","id":"Q17361846","coordinate":{'lat': 51.531086, 'lon': -0.120813},"guestrating":"7"}),
Item("food",{"name":"","id":"W337127150","coordinate":{'lat': 51.526886, 'lon': -0.137008},"guestrating":"7"}),
Item("food",{"name":"","id":"W112564610","coordinate":{'lat': 51.523045, 'lon': -0.119031},"guestrating":"7"}),
Item("food",{"name":"","id":"Q18344592","coordinate":{'lat': 51.528084, 'lon': -0.138928},"guestrating":"7"}),
Item("food",{"name":"","id":"W158564484","coordinate":{'lat': 51.522205, 'lon': -0.11887},"guestrating":"7"}),
Item("food",{"name":"","id":"W225465155","coordinate":{'lat': 51.534748, 'lon': -0.138165},"guestrating":"7"}),
Item("shop",{"name":"","id":"Q4979489","coordinate":{'lat': 51.523998, 'lon': -0.124},"guestrating":"7"}),
Item("shop",{"name":"","id":"W143369836","coordinate":{'lat': 51.540443, 'lon': -0.140816},"guestrating":"7"}),
Item("shop",{"name":"","id":"Q18161799","coordinate":{'lat': 51.5186, 'lon': -0.1034},"guestrating":"7"}),
Item("historic",{"name":"","id":"Q23988281","coordinate":{'lat': 51.527199, 'lon': -0.1326},"guestrating":"7"}),
Item("historic",{"name":"","id":"Q4979489","coordinate":{'lat': 51.523998, 'lon': -0.124},"guestrating":"7"}),
Item("historic",{"name":"","id":"N544223148","coordinate":{'lat': 51.53537, 'lon': -0.130883},"guestrating":"7"}),
Item("cultural",{"name":"","id":"N2617232245","coordinate":{'lat': 51.529598, 'lon': -0.128129},"guestrating":"7"}),
Item("cultural",{"name":"","id":"W513152399","coordinate":{'lat': 51.527451, 'lon': -0.128528},"guestrating":"7"}),
Item("cultural",{"name":"","id":"W4253511","coordinate":{'lat': 51.526623, 'lon': -0.131859},"guestrating":"7"}),
Item("cultural",{"name":"","id":"Q23988281","coordinate":{'lat': 51.527199, 'lon': -0.1326},"guestrating":"7"}),
Item("cultural",{"name":"","id":"W306452420","coordinate":{'lat': 51.525036, 'lon': -0.129},"guestrating":"7"}),
Item("sport",{"name":"","id":"W29231803","coordinate":{'lat': 51.556442, 'lon': -0.151267},"guestrating":"7"}),
Item("architecture",{"name":"","id":"Q5025822","coordinate":{'lat': 51.528999, 'lon': -0.1255},"guestrating":"7"}),
Item("architecture",{"name":"","id":"W4680891","coordinate":{'lat': 51.529877, 'lon': -0.12772},"guestrating":"7"}),
Item("architecture",{"name":"","id":"N1980151259","coordinate":{'lat': 51.530228, 'lon': -0.12801},"guestrating":"7"}),
Item("architecture",{"name":"","id":"Q7107545","coordinate":{'lat': 51.528801, 'lon': -0.129},"guestrating":"7"}),
Item("architecture",{"name":"","id":"W132319674","coordinate":{'lat': 51.528236, 'lon': -0.128996},"guestrating":"7"})]
import re  # BUGFIX: `re` is used below but was never imported in this notebook
from collections import Counter

# Tag each item with a unique label: category name + running index,
# e.g. 'hotel0', 'food1', ...
path = [item.item_type + str(i) for i, item in enumerate(items)]
path[:10]
# Count how many items there are per category by stripping the numeric suffix.
groups = Counter([re.match(r"([a-z]+)([0-9]+)", p).groups()[0] for p in path])
cij = [[0.0, 10.496, 8.629, 16.989, 17.194, 8.601, 17.232, 8.637, 10.56, 17.249, 10.606, 4.975, 15.22, 137.228, 130.027, 8.639, 8.634, 16.815, 15.24, 10.257, 8.679, 4.485, 8.722, 15.956, 14.402, 4.565, 8.929, 138.506, 16.09, 28.968, 14.006, 8.66, 138.501, 8.553, 14.767, 14.915, 8.652, 8.613, 10.557, 8.973, 10.282, 10.003, 124.566, 14.463, 10.003, 34.54, 12.285, 34.452, 9.242, 11.133, 109.93, 11.871], [10.496, 0.0, 6.145, 8.003, 8.055, 6.104, 8.219, 6.113, 0.075, 8.212, 2.073, 7.107, 5.796, 127.01, 127.426, 6.162, 6.156, 7.663, 5.907, 0.876, 6.144, 6.313, 6.214, 6.541, 5.013, 14.835, 4.59, 128.139, 6.931, 19.208, 3.686, 6.131, 128.168, 6.17, 5.366, 5.725, 6.18, 6.081, 2.047, 4.783, 0.89, 1.107, 121.696, 5.076, 0.996, 25.911, 3.821, 25.693, 2.583, 3.908, 107.495, 5.266], [8.629, 6.145, 0.0, 8.986, 9.262, 0.049, 9.222, 0.033, 6.149, 9.252, 4.525, 3.654, 7.756, 129.336, 123.3, 0.019, 0.012, 8.931, 7.7, 5.311, 0.05, 4.656, 0.114, 8.367, 7.076, 11.731, 1.571, 130.887, 8.297, 20.96, 7.896, 0.035, 130.807, 0.08, 7.372, 7.326, 0.041, 0.066, 4.511, 1.395, 5.308, 5.044, 117.691, 7.12, 5.15, 25.984, 4.959, 25.923, 3.562, 3.614, 103.275, 3.651], [16.989, 8.003, 8.986, 0.0, 0.362, 8.989, 0.242, 8.962, 7.93, 0.266, 6.716, 12.28, 2.242, 120.403, 120.625, 8.986, 8.987, 0.448, 2.106, 7.618, 8.941, 12.518, 8.94, 1.584, 2.989, 20.615, 8.147, 121.905, 1.078, 12.027, 4.978, 8.951, 121.832, 9.065, 2.643, 2.284, 8.983, 8.967, 6.768, 8.15, 7.586, 7.735, 114.791, 2.927, 7.804, 17.976, 4.768, 17.793, 7.896, 5.856, 100.813, 5.338], [17.194, 8.055, 9.262, 0.362, 0.0, 9.264, 0.301, 9.238, 7.981, 0.252, 6.846, 12.517, 2.261, 120.159, 120.783, 9.262, 9.264, 0.396, 2.149, 7.706, 9.218, 12.716, 9.219, 1.541, 3.051, 20.861, 8.385, 121.641, 1.145, 11.794, 4.914, 9.227, 121.573, 9.34, 2.69, 2.379, 9.26, 9.241, 6.899, 8.394, 7.674, 7.835, 114.941, 2.988, 7.9, 17.88, 4.937, 17.684, 8.06, 6.069, 100.982, 5.611], [8.601, 6.104, 0.049, 8.989, 9.264, 0.0, 9.225, 0.037, 6.108, 9.255, 4.492, 3.626, 7.748, 
129.344, 123.347, 0.068, 0.061, 8.932, 7.694, 5.272, 0.086, 4.617, 0.16, 8.362, 7.065, 11.718, 1.534, 130.892, 8.295, 20.967, 7.871, 0.064, 130.812, 0.082, 7.362, 7.32, 0.09, 0.026, 4.477, 1.36, 5.269, 5.003, 117.738, 7.11, 5.109, 26.005, 4.944, 25.943, 3.521, 3.598, 103.322, 3.652], [17.232, 8.219, 9.222, 0.242, 0.301, 9.225, 0.0, 9.198, 8.146, 0.055, 6.951, 12.521, 2.442, 120.162, 120.504, 9.221, 9.223, 0.587, 2.315, 7.845, 9.177, 12.761, 9.175, 1.756, 3.206, 20.856, 8.389, 121.668, 1.288, 11.785, 5.148, 9.187, 121.594, 9.3, 2.855, 2.509, 9.218, 9.203, 7.004, 8.391, 7.812, 7.964, 114.665, 3.143, 8.032, 17.746, 5.008, 17.56, 8.137, 6.099, 100.699, 5.575], [8.637, 6.113, 0.033, 8.962, 9.238, 0.037, 9.198, 0.0, 6.116, 9.228, 4.491, 3.662, 7.727, 129.315, 123.315, 0.049, 0.043, 8.906, 7.672, 5.278, 0.051, 4.653, 0.129, 8.34, 7.046, 11.75, 1.537, 130.864, 8.271, 20.938, 7.862, 0.028, 130.784, 0.102, 7.342, 7.298, 0.068, 0.039, 4.477, 1.361, 5.275, 5.011, 117.705, 7.09, 5.117, 25.97, 4.928, 25.909, 3.529, 3.582, 103.29, 3.626], [10.56, 0.075, 6.149, 7.93, 7.981, 6.108, 8.146, 6.116, 0.0, 8.138, 2.036, 7.149, 5.721, 126.939, 127.373, 6.165, 6.16, 7.589, 5.833, 0.86, 6.146, 6.367, 6.216, 6.466, 4.94, 14.894, 4.59, 128.069, 6.857, 19.133, 3.612, 6.134, 128.097, 6.175, 5.292, 5.652, 6.183, 6.085, 2.012, 4.782, 0.872, 1.106, 121.641, 5.002, 0.999, 25.836, 3.762, 25.618, 2.588, 3.869, 107.443, 5.224], [17.249, 8.212, 9.252, 0.266, 0.252, 9.255, 0.055, 9.228, 8.138, 0.0, 6.956, 12.545, 2.43, 120.137, 120.541, 9.252, 9.253, 0.565, 2.305, 7.843, 9.207, 12.777, 9.206, 1.735, 3.199, 20.881, 8.412, 121.639, 1.282, 11.762, 5.124, 9.217, 121.566, 9.331, 2.846, 2.507, 9.249, 9.233, 7.008, 8.415, 7.81, 7.964, 114.701, 3.137, 8.032, 17.746, 5.019, 17.558, 8.147, 6.118, 100.738, 5.604], [10.606, 2.073, 4.525, 6.716, 6.846, 4.492, 6.951, 4.491, 2.036, 6.956, 0.0, 6.438, 4.698, 126.626, 125.422, 4.539, 4.535, 6.451, 4.752, 1.251, 4.511, 6.148, 4.569, 5.457, 3.868, 14.649, 2.972, 127.912, 
5.702, 18.457, 3.626, 4.503, 127.899, 4.568, 4.24, 4.473, 4.553, 4.466, 0.052, 3.133, 1.226, 1.19, 119.706, 3.93, 1.301, 24.682, 2.042, 24.507, 1.402, 1.836, 105.478, 3.202], [4.975, 7.107, 3.654, 12.28, 12.517, 3.626, 12.521, 3.662, 7.149, 12.545, 6.438, 0.0, 10.715, 132.675, 126.107, 3.664, 3.659, 12.155, 10.704, 6.547, 3.704, 1.704, 3.747, 11.412, 9.937, 8.356, 4.132, 134.109, 11.461, 24.307, 10.056, 3.685, 134.06, 3.578, 10.282, 10.352, 3.677, 3.639, 6.398, 4.134, 6.562, 6.251, 120.562, 9.992, 6.301, 29.604, 7.75, 29.531, 5.045, 6.478, 106.045, 7.019], [15.22, 5.796, 7.756, 2.242, 2.261, 7.748, 2.442, 7.727, 5.721, 2.43, 4.698, 10.715, 0.0, 122.025, 122.673, 7.76, 7.76, 1.872, 0.194, 5.475, 7.718, 10.734, 7.735, 0.762, 0.831, 19.066, 6.64, 123.399, 1.166, 13.769, 2.805, 7.723, 123.361, 7.829, 0.458, 0.49, 7.762, 7.723, 4.75, 6.688, 5.442, 5.62, 116.862, 0.768, 5.678, 20.125, 2.967, 19.918, 5.993, 4.258, 102.833, 4.231], [137.228, 127.01, 129.336, 120.403, 120.159, 129.344, 120.162, 129.315, 126.939, 120.137, 126.626, 132.675, 122.025, 0.0, 135.134, 129.333, 129.336, 120.52, 122.016, 127.081, 129.289, 132.751, 129.278, 121.301, 122.835, 141.019, 128.544, 9.821, 121.218, 108.382, 123.351, 129.301, 7.296, 129.416, 122.474, 122.354, 129.328, 129.323, 126.675, 128.552, 127.053, 127.31, 129.516, 122.776, 127.327, 104.608, 124.983, 104.445, 127.986, 126.208, 127.961, 125.72], [130.027, 127.426, 123.3, 120.625, 120.783, 123.347, 120.504, 123.315, 127.373, 120.541, 125.422, 126.107, 122.673, 135.134, 0.0, 123.282, 123.289, 121.07, 122.491, 126.67, 123.264, 127.655, 123.186, 122.172, 123.164, 129.702, 124.187, 143.915, 121.586, 116.34, 125.47, 123.286, 141.81, 123.342, 122.936, 122.467, 123.26, 123.353, 125.459, 124.008, 126.644, 126.596, 6.526, 123.119, 126.713, 107.89, 123.634, 108.505, 125.695, 123.623, 20.125, 122.252], [8.639, 6.162, 0.019, 8.986, 9.262, 0.068, 9.221, 0.049, 6.165, 9.252, 4.539, 3.664, 7.76, 129.333, 123.282, 0.0, 0.007, 8.932, 7.704, 5.328, 0.044, 
4.671, 0.096, 8.37, 7.081, 11.735, 1.586, 130.886, 8.298, 20.958, 7.906, 0.038, 130.805, 0.086, 7.376, 7.329, 0.022, 0.084, 4.525, 1.409, 5.324, 5.06, 117.673, 7.125, 5.166, 25.976, 4.966, 25.916, 3.579, 3.621, 103.256, 3.651], [8.634, 6.156, 0.012, 8.987, 9.264, 0.061, 9.223, 0.043, 6.16, 9.253, 4.535, 3.659, 7.76, 129.336, 123.289, 0.007, 0.0, 8.933, 7.704, 5.322, 0.047, 4.664, 0.103, 8.371, 7.08, 11.732, 1.581, 130.888, 8.299, 20.96, 7.903, 0.037, 130.807, 0.082, 7.376, 7.33, 0.029, 0.077, 4.521, 1.405, 5.319, 5.055, 117.68, 7.125, 5.161, 25.98, 4.964, 25.92, 3.573, 3.62, 103.264, 3.652], [16.815, 7.663, 8.931, 0.448, 0.396, 8.932, 0.587, 8.906, 7.589, 0.565, 6.451, 12.155, 1.872, 120.52, 121.07, 8.932, 8.933, 0.0, 1.756, 7.31, 8.888, 12.336, 8.89, 1.17, 2.656, 20.504, 8.025, 121.987, 0.749, 12.164, 4.56, 8.896, 121.923, 9.009, 2.297, 1.982, 8.93, 8.909, 6.504, 8.038, 7.278, 7.439, 115.235, 2.593, 7.504, 18.276, 4.55, 18.08, 7.67, 5.698, 101.26, 5.281], [15.24, 5.907, 7.7, 2.106, 2.149, 7.694, 2.315, 7.672, 5.833, 2.305, 4.752, 10.704, 0.194, 122.016, 122.491, 7.704, 7.704, 1.756, 0.0, 5.562, 7.661, 10.754, 7.676, 0.716, 0.904, 19.059, 6.614, 123.405, 1.027, 13.736, 2.983, 7.667, 123.363, 7.774, 0.541, 0.378, 7.706, 7.669, 4.804, 6.657, 5.53, 5.7, 116.681, 0.84, 5.761, 20.027, 2.967, 19.826, 6.026, 4.235, 102.648, 4.147], [10.257, 0.876, 5.311, 7.618, 7.706, 5.272, 7.845, 5.278, 0.86, 7.843, 1.251, 6.547, 5.475, 127.081, 126.67, 5.328, 5.322, 7.31, 5.562, 0.0, 5.307, 5.929, 5.375, 6.235, 4.658, 14.491, 3.747, 128.276, 6.565, 19.097, 3.751, 5.295, 128.287, 5.34, 5.026, 5.335, 5.345, 5.248, 1.217, 3.935, 0.032, 0.299, 120.951, 4.722, 0.256, 25.586, 3.167, 25.389, 1.766, 3.082, 106.729, 4.451], [8.679, 6.144, 0.05, 8.941, 9.218, 0.086, 9.177, 0.051, 6.146, 9.207, 4.511, 3.704, 7.718, 129.289, 123.264, 0.044, 0.047, 8.888, 7.661, 5.307, 0.0, 4.702, 0.081, 8.327, 7.039, 11.779, 1.563, 130.842, 8.255, 20.914, 7.871, 0.022, 130.761, 0.129, 7.334, 7.287, 0.045, 0.089, 
4.498, 1.384, 5.304, 5.041, 117.654, 7.084, 5.148, 25.933, 4.926, 25.873, 3.561, 3.583, 103.239, 3.607], [4.485, 6.313, 4.656, 12.518, 12.716, 4.617, 12.761, 4.653, 6.367, 12.777, 6.148, 1.704, 10.734, 132.751, 127.655, 4.671, 4.664, 12.336, 10.754, 5.929, 4.702, 0.0, 4.766, 11.47, 9.919, 8.562, 4.568, 134.06, 11.608, 24.483, 9.637, 4.681, 134.044, 4.593, 10.283, 10.429, 4.689, 4.621, 6.101, 4.645, 5.951, 5.655, 122.09, 9.979, 5.673, 30.191, 7.8, 30.081, 4.768, 6.669, 107.603, 7.498], [8.722, 6.214, 0.114, 8.94, 9.219, 0.16, 9.175, 0.129, 6.216, 9.206, 4.569, 3.747, 7.735, 129.278, 123.186, 0.096, 0.103, 8.89, 7.676, 5.375, 0.081, 4.766, 0.0, 8.339, 7.061, 11.798, 1.629, 130.837, 8.261, 20.905, 7.915, 0.102, 130.755, 0.173, 7.354, 7.301, 0.076, 0.169, 4.556, 1.447, 5.371, 5.11, 117.577, 7.105, 5.217, 25.901, 4.956, 25.843, 3.631, 3.615, 103.161, 3.609], [15.956, 6.541, 8.367, 1.584, 1.541, 8.362, 1.756, 8.34, 6.466, 1.735, 5.457, 11.412, 0.762, 121.301, 122.172, 8.37, 8.371, 1.17, 0.716, 6.235, 8.327, 11.47, 8.339, 0.0, 1.592, 19.767, 7.313, 122.697, 0.606, 13.021, 3.394, 8.333, 122.652, 8.442, 1.219, 1.06, 8.371, 8.338, 5.509, 7.351, 6.203, 6.382, 116.346, 1.528, 6.44, 19.372, 3.681, 19.162, 6.74, 4.937, 102.349, 4.776], [14.402, 5.013, 7.076, 2.989, 3.051, 7.065, 3.206, 7.046, 4.94, 3.199, 3.868, 9.937, 0.831, 122.835, 123.164, 7.081, 7.08, 2.656, 0.904, 4.658, 7.039, 9.919, 7.061, 1.592, 0.0, 18.28, 5.893, 124.192, 1.917, 14.596, 2.349, 7.043, 124.158, 7.146, 0.373, 0.732, 7.084, 7.039, 3.92, 5.953, 4.626, 4.797, 117.369, 0.063, 4.857, 20.93, 2.19, 20.73, 5.169, 3.516, 103.304, 3.665], [4.565, 14.835, 11.731, 20.615, 20.861, 11.718, 20.856, 11.75, 14.894, 20.881, 14.649, 8.356, 19.066, 141.019, 129.702, 11.735, 11.732, 20.504, 19.059, 14.491, 11.779, 8.562, 11.798, 19.767, 18.28, 0.0, 12.479, 142.464, 19.815, 32.639, 18.192, 11.766, 142.417, 11.651, 18.629, 18.708, 11.742, 11.738, 14.604, 12.467, 14.513, 14.217, 124.374, 18.336, 14.235, 37.577, 16.1, 37.553, 
13.248, 14.834, 109.579, 15.307], [8.929, 4.59, 1.571, 8.147, 8.385, 1.534, 8.389, 1.537, 4.59, 8.412, 2.972, 4.132, 6.64, 128.544, 124.187, 1.586, 1.581, 8.025, 6.614, 3.747, 1.563, 4.568, 1.629, 7.313, 5.893, 12.479, 0.0, 129.996, 7.336, 20.174, 6.44, 1.552, 129.941, 1.606, 6.221, 6.254, 1.602, 1.509, 2.955, 0.202, 3.742, 3.484, 118.542, 5.944, 3.593, 25.634, 3.708, 25.534, 2.015, 2.381, 104.186, 2.943], [138.506, 128.139, 130.887, 121.905, 121.641, 130.892, 121.668, 130.864, 128.069, 121.639, 127.912, 134.109, 123.399, 9.821, 143.915, 130.886, 130.888, 121.987, 123.405, 128.276, 130.842, 134.06, 130.837, 122.697, 124.192, 142.464, 129.996, 0.0, 122.662, 109.96, 124.526, 130.852, 2.565, 130.966, 123.839, 123.756, 130.883, 130.87, 127.96, 130.019, 128.249, 128.519, 138.191, 124.134, 128.527, 106.898, 126.366, 106.676, 129.294, 127.632, 136.055, 127.244], [16.09, 6.931, 8.297, 1.078, 1.145, 8.295, 1.288, 8.271, 6.857, 1.282, 5.702, 11.461, 1.166, 121.218, 121.586, 8.298, 8.299, 0.749, 1.027, 6.565, 8.255, 11.608, 8.261, 0.606, 1.917, 19.815, 7.336, 122.662, 0.0, 12.88, 3.938, 8.262, 122.604, 8.373, 1.566, 1.233, 8.298, 8.272, 5.755, 7.357, 6.532, 6.691, 115.764, 1.855, 6.756, 19.021, 3.813, 18.828, 6.928, 4.99, 101.758, 4.659], [28.968, 19.208, 20.96, 12.027, 11.794, 20.967, 11.785, 20.938, 19.133, 11.762, 18.457, 24.307, 13.769, 108.382, 116.34, 20.958, 20.96, 12.164, 13.736, 19.097, 20.914, 24.483, 20.905, 13.021, 14.596, 32.639, 20.174, 109.96, 12.88, 0.0, 15.547, 20.925, 109.859, 21.04, 14.226, 14.054, 20.954, 20.946, 18.508, 20.177, 19.066, 19.287, 110.251, 14.535, 19.325, 8.647, 16.683, 8.121, 19.762, 17.862, 97.007, 17.338], [14.006, 3.686, 7.896, 4.978, 4.914, 7.871, 5.148, 7.862, 3.612, 5.124, 3.626, 10.056, 2.805, 123.351, 125.47, 7.906, 7.903, 4.56, 2.983, 3.751, 7.871, 9.637, 7.915, 3.394, 2.349, 18.192, 6.44, 124.526, 3.938, 15.547, 0.0, 7.868, 124.541, 7.952, 2.538, 3.01, 7.916, 7.845, 3.663, 6.565, 3.725, 4.003, 119.663, 2.385, 4.004, 22.486, 3.192, 
22.233, 5.01, 4.379, 105.621, 5.159], [8.66, 6.131, 0.035, 8.951, 9.227, 0.064, 9.187, 0.028, 6.134, 9.217, 4.503, 3.685, 7.723, 129.301, 123.286, 0.038, 0.037, 8.896, 7.667, 5.295, 0.022, 4.681, 0.102, 8.333, 7.043, 11.766, 1.552, 130.852, 8.262, 20.925, 7.868, 0.0, 130.771, 0.115, 7.338, 7.292, 0.05, 0.067, 4.489, 1.375, 5.292, 5.028, 117.676, 7.087, 5.135, 25.95, 4.927, 25.89, 3.547, 3.583, 103.261, 3.616], [138.501, 128.168, 130.807, 121.832, 121.573, 130.812, 121.594, 130.784, 128.097, 121.566, 127.899, 134.06, 123.361, 7.296, 141.81, 130.805, 130.807, 121.923, 123.363, 128.287, 130.761, 134.044, 130.755, 122.652, 124.158, 142.417, 129.941, 2.565, 122.604, 109.859, 124.541, 130.771, 0.0, 130.886, 123.803, 123.71, 130.801, 130.79, 127.947, 129.96, 128.26, 128.527, 136.119, 124.1, 128.537, 106.607, 126.327, 106.4, 129.276, 127.582, 134.164, 127.168], [8.553, 6.17, 0.08, 9.065, 9.34, 0.082, 9.3, 0.102, 6.175, 9.331, 4.568, 3.578, 7.829, 129.416, 123.342, 0.086, 0.082, 9.009, 7.774, 5.34, 0.129, 4.593, 0.173, 8.442, 7.146, 11.651, 1.606, 130.966, 8.373, 21.04, 7.952, 0.115, 130.886, 0.0, 7.443, 7.4, 0.1, 0.108, 4.553, 1.435, 5.337, 5.07, 117.735, 7.191, 5.175, 26.062, 5.025, 26.002, 3.586, 3.68, 103.315, 3.728], [14.767, 5.366, 7.372, 2.643, 2.69, 7.362, 2.855, 7.342, 5.292, 2.846, 4.24, 10.282, 0.458, 122.474, 122.936, 7.376, 7.376, 2.297, 0.541, 5.026, 7.334, 10.283, 7.354, 1.219, 0.373, 18.629, 6.221, 123.839, 1.566, 14.226, 2.538, 7.338, 123.803, 7.443, 0.0, 0.473, 7.379, 7.337, 4.292, 6.275, 4.994, 5.167, 117.133, 0.309, 5.227, 20.566, 2.531, 20.363, 5.537, 3.84, 103.084, 3.9], [14.915, 5.725, 7.326, 2.284, 2.379, 7.32, 2.509, 7.298, 5.652, 2.507, 4.473, 10.352, 0.49, 122.354, 122.467, 7.329, 7.33, 1.982, 0.378, 5.335, 7.287, 10.429, 7.301, 1.06, 0.732, 18.708, 6.254, 123.756, 1.233, 14.054, 3.01, 7.292, 123.71, 7.4, 0.473, 0.0, 7.331, 7.295, 4.525, 6.293, 5.303, 5.458, 116.666, 0.676, 5.525, 20.252, 2.631, 20.061, 5.719, 3.877, 102.613, 3.769], [8.652, 6.18, 
0.041, 8.983, 9.26, 0.09, 9.218, 0.068, 6.183, 9.249, 4.553, 3.677, 7.762, 129.328, 123.26, 0.022, 0.029, 8.93, 7.706, 5.345, 0.045, 4.689, 0.076, 8.371, 7.084, 11.742, 1.602, 130.883, 8.298, 20.954, 7.916, 0.05, 130.801, 0.1, 7.379, 7.331, 0.0, 0.105, 4.539, 1.425, 5.341, 5.078, 117.651, 7.129, 5.184, 25.965, 4.972, 25.906, 3.597, 3.628, 103.234, 3.649], [8.613, 6.081, 0.066, 8.967, 9.241, 0.026, 9.203, 0.039, 6.085, 9.233, 4.466, 3.639, 7.723, 129.323, 123.353, 0.084, 0.077, 8.909, 7.669, 5.248, 0.089, 4.621, 0.169, 8.338, 7.039, 11.738, 1.509, 130.87, 8.272, 20.946, 7.845, 0.067, 130.79, 0.108, 7.337, 7.295, 0.105, 0.0, 4.452, 1.335, 5.245, 4.98, 117.743, 7.084, 5.086, 25.989, 4.917, 25.927, 3.497, 3.571, 103.328, 3.63], [10.557, 2.047, 4.511, 6.768, 6.899, 4.477, 7.004, 4.477, 2.012, 7.008, 0.052, 6.398, 4.75, 126.675, 125.459, 4.525, 4.521, 6.504, 4.804, 1.217, 4.498, 6.101, 4.556, 5.509, 3.92, 14.604, 2.955, 127.96, 5.755, 18.508, 3.663, 4.489, 127.947, 4.553, 4.292, 4.525, 4.539, 4.452, 0.0, 3.117, 1.193, 1.147, 119.744, 3.982, 1.259, 24.734, 2.092, 24.56, 1.359, 1.866, 105.515, 3.234], [8.973, 4.783, 1.395, 8.15, 8.394, 1.36, 8.391, 1.361, 4.782, 8.415, 3.133, 4.134, 6.688, 128.552, 124.008, 1.409, 1.405, 8.038, 6.657, 3.935, 1.384, 4.645, 1.447, 7.351, 5.953, 12.467, 0.202, 130.019, 7.357, 20.177, 6.565, 1.375, 129.96, 1.435, 6.275, 6.293, 1.425, 1.335, 3.117, 0.0, 3.931, 3.676, 118.366, 6.003, 3.786, 25.577, 3.776, 25.482, 2.211, 2.436, 104.004, 2.901], [10.282, 0.89, 5.308, 7.586, 7.674, 5.269, 7.812, 5.275, 0.872, 7.81, 1.226, 6.562, 5.442, 127.053, 126.644, 5.324, 5.319, 7.278, 5.53, 0.032, 5.304, 5.951, 5.371, 6.203, 4.626, 14.513, 3.742, 128.249, 6.532, 19.066, 3.725, 5.292, 128.26, 5.337, 4.994, 5.303, 5.341, 5.245, 1.193, 3.931, 0.0, 0.311, 120.924, 4.69, 0.279, 25.554, 3.136, 25.356, 1.766, 3.058, 106.702, 4.427], [10.003, 1.107, 5.044, 7.735, 7.835, 5.003, 7.964, 5.011, 1.106, 7.964, 1.19, 6.251, 5.62, 127.31, 126.596, 5.06, 5.055, 7.439, 5.7, 
0.299, 5.041, 5.655, 5.11, 6.382, 4.797, 14.217, 3.484, 128.519, 6.691, 19.287, 4.003, 5.028, 128.527, 5.07, 5.167, 5.458, 5.078, 4.98, 1.147, 3.676, 0.311, 0.0, 120.884, 4.861, 0.122, 25.71, 3.191, 25.52, 1.487, 2.977, 106.647, 4.351], [124.566, 121.696, 117.691, 114.791, 114.941, 117.738, 114.665, 117.705, 121.641, 114.701, 119.706, 120.562, 116.862, 129.516, 6.526, 117.673, 117.68, 115.235, 116.681, 120.951, 117.654, 122.09, 117.577, 116.346, 117.369, 124.374, 118.542, 138.191, 115.764, 110.251, 119.663, 117.676, 136.119, 117.735, 117.133, 116.666, 117.651, 117.743, 119.744, 118.366, 120.924, 120.884, 0.0, 117.323, 121.0, 101.764, 117.893, 102.372, 120.014, 117.917, 15.261, 116.548], [14.463, 5.076, 7.12, 2.927, 2.988, 7.11, 3.143, 7.09, 5.002, 3.137, 3.93, 9.992, 0.768, 122.776, 123.119, 7.125, 7.125, 2.593, 0.84, 4.722, 7.084, 9.979, 7.105, 1.528, 0.063, 18.336, 5.944, 124.134, 1.855, 14.535, 2.385, 7.087, 124.1, 7.191, 0.309, 0.676, 7.129, 7.084, 3.982, 6.003, 4.69, 4.861, 117.323, 0.0, 4.921, 20.866, 2.244, 20.666, 5.23, 3.566, 103.259, 3.697], [10.003, 0.996, 5.15, 7.804, 7.9, 5.109, 8.032, 5.117, 0.999, 8.032, 1.301, 6.301, 5.678, 127.327, 126.713, 5.166, 5.161, 7.504, 5.761, 0.256, 5.148, 5.673, 5.217, 6.44, 4.857, 14.235, 3.593, 128.527, 6.756, 19.325, 4.004, 5.135, 128.537, 5.175, 5.227, 5.525, 5.184, 5.086, 1.259, 3.786, 0.279, 0.122, 121.0, 4.921, 0.0, 25.778, 3.287, 25.584, 1.589, 3.096, 106.765, 4.47], [34.54, 25.911, 25.984, 17.976, 17.88, 26.005, 17.746, 25.97, 25.836, 17.746, 24.682, 29.604, 20.125, 104.608, 107.89, 25.976, 25.98, 18.276, 20.027, 25.586, 25.933, 30.191, 25.901, 19.372, 20.93, 37.577, 25.634, 106.898, 19.021, 8.647, 22.486, 25.95, 106.607, 26.062, 20.566, 20.252, 25.965, 25.989, 24.734, 25.577, 25.554, 25.71, 101.764, 20.866, 25.778, 0.0, 22.688, 0.679, 25.795, 23.601, 88.675, 22.694], [12.285, 3.821, 4.959, 4.768, 4.937, 4.944, 5.008, 4.928, 3.762, 5.019, 2.042, 7.75, 2.967, 124.983, 123.634, 4.966, 4.964, 4.55, 2.967, 3.167, 
4.926, 7.8, 4.956, 3.681, 2.19, 16.1, 3.708, 126.366, 3.813, 16.683, 3.192, 4.927, 126.327, 5.025, 2.531, 2.631, 4.972, 4.917, 2.092, 3.776, 3.136, 3.191, 117.893, 2.244, 3.287, 22.688, 0.0, 22.526, 3.129, 1.349, 103.715, 1.985], [34.452, 25.693, 25.923, 17.793, 17.684, 25.943, 17.56, 25.909, 25.618, 17.558, 24.507, 29.531, 19.918, 104.445, 108.505, 25.916, 25.92, 18.08, 19.826, 25.389, 25.873, 30.081, 25.843, 19.162, 20.73, 37.553, 25.534, 106.676, 18.828, 8.121, 22.233, 25.89, 106.4, 26.002, 20.363, 20.061, 25.906, 25.927, 24.56, 25.482, 25.356, 25.52, 102.372, 20.666, 25.584, 0.679, 22.526, 0.0, 25.642, 23.468, 89.306, 22.591], [9.242, 2.583, 3.562, 7.896, 8.06, 3.521, 8.137, 3.529, 2.588, 8.147, 1.402, 5.045, 5.993, 127.986, 125.695, 3.579, 3.573, 7.67, 6.026, 1.766, 3.561, 4.768, 3.631, 6.74, 5.169, 13.248, 2.015, 129.294, 6.928, 19.762, 5.01, 3.547, 129.276, 3.586, 5.537, 5.719, 3.597, 3.497, 1.359, 2.211, 1.766, 1.487, 120.014, 5.23, 1.589, 25.795, 3.129, 25.642, 0.0, 2.297, 105.72, 3.559], [11.133, 3.908, 3.614, 5.856, 6.069, 3.598, 6.099, 3.582, 3.869, 6.118, 1.836, 6.478, 4.258, 126.208, 123.623, 3.621, 3.62, 5.698, 4.235, 3.082, 3.583, 6.669, 3.615, 4.937, 3.516, 14.834, 2.381, 127.632, 4.99, 17.862, 4.379, 3.583, 127.582, 3.68, 3.84, 3.877, 3.628, 3.571, 1.866, 2.436, 3.058, 2.977, 117.917, 3.566, 3.096, 23.601, 1.349, 23.468, 2.297, 0.0, 103.671, 1.373], [109.93, 107.495, 103.275, 100.813, 100.982, 103.322, 100.699, 103.29, 107.443, 100.738, 105.478, 106.045, 102.833, 127.961, 20.125, 103.256, 103.264, 101.26, 102.648, 106.729, 103.239, 107.603, 103.161, 102.349, 103.304, 109.579, 104.186, 136.055, 101.758, 97.007, 105.621, 103.261, 134.164, 103.315, 103.084, 102.613, 103.234, 103.328, 105.515, 104.004, 106.702, 106.647, 15.261, 103.259, 106.765, 88.675, 103.715, 89.306, 105.72, 103.671, 0.0, 102.299], [11.871, 5.266, 3.651, 5.338, 5.611, 3.652, 5.575, 3.626, 5.224, 5.604, 3.202, 7.019, 4.231, 125.72, 122.252, 3.651, 3.652, 5.281, 4.147, 4.451, 3.607, 
7.498, 3.609, 4.776, 3.665, 15.307, 2.943, 127.244, 4.659, 17.338, 5.159, 3.616, 127.168, 3.728, 3.9, 3.769, 3.649, 3.63, 3.234, 2.901, 4.427, 4.351, 116.548, 3.697, 4.47, 22.694, 1.985, 22.591, 3.559, 1.373, 102.299, 0.0]]
# One CP-SAT boolean per (POI, time slot); iterations[poi] collects the
# POI's three slot variables in [morning, afternoon, evening] order.
iterations = {key: [] for key in path}
model = cp_model.CpModel()

# Per-slot variable lists, index-aligned with `path`.
mornings = []
afternoons = []
evenings = []

for name in path:
    # Variable creation order (morning, evening, afternoon) is kept from the
    # original so the model's variable indices are unchanged.
    morning_var = model.NewBoolVar("morning_" + name)
    evening_var = model.NewBoolVar("evening_" + name)
    afternoon_var = model.NewBoolVar("afternoon_" + name)
    iterations[name].extend([morning_var, afternoon_var, evening_var])
    mornings.append(morning_var)
    afternoons.append(afternoon_var)
    evenings.append(evening_var)
# Group the slot variables per POI: days[i] = (morning_i, afternoon_i, evening_i).
days = list(zip(mornings, afternoons, evenings))

# Each POI is assigned to exactly one slot of its day.
for m, a, e in days:
    model.Add(m + a + e == 1)

# Adjacent POIs alternate morning slots: exactly one of each consecutive
# pair gets the morning visit.
for prev, cur in zip(mornings, mornings[1:]):
    model.Add(prev + cur == 1)

# The analogous pairing on evenings, applied only at every third position
# (pairs (0,1), (3,4), (6,7), ...).
for i in range(1, len(evenings), 3):
    model.Add(evenings[i - 1] + evenings[i] == 1)
# Objective: weight each slot variable by the POI's distance-matrix entry.
# NOTE(review): cij[i][0] is the distance from POI i to POI 0 — presumably a
# proxy score; confirm the intended coefficient (column 0 looks suspicious).
pois = []
for i in range(len(cij)):
    for day in days:
        # BUG FIX: the original comprehension built `int(cij[i][0])` without
        # using `d`, so the objective was a constant and the solver had
        # nothing to optimize. Multiply the coefficient by the slot variable.
        pois.extend([int(cij[i][0]) * d for d in day])
model.Maximize(sum(pois))

solver = cp_model.CpSolver()
status = solver.Solve(model)

if status == cp_model.OPTIMAL:
    print('Maximum of objective function: %i' % solver.ObjectiveValue())
    print()
    # Each term is a linear expression; CpSolver.Value evaluates it.
    for term in pois:
        print(f'{term} value: ', solver.Value(term))
    # Collect (poi_index, slot_index) pairs for every slot set to 1.
    indices = []
    for i in range(len(days)):
        for j in range(len(days[i])):
            if solver.Value(days[i][j]) == 1:
                indices.append((i, j))
else:
    print('no Optimal Solution')
# Assignment matrix: one row per POI, columns = slots, entries = solved 0/1.
data = np.array([[solver.Value(day[0]) for day in days],
                 [solver.Value(day[1]) for day in days],
                 [solver.Value(day[2]) for day in days]]).T
mux = pd.MultiIndex.from_arrays([path])
df = pd.DataFrame(data, columns=['morning',
                                 'afternoon', 'evening'], index=mux)
df

def _chosen_pois(slot_vars, col):
    """Return a Series (name=col, RangeIndex) of the `path` labels whose slot
    variable solved to 1.

    Replaces three copy-pasted blocks; the original's extra `reset_index()`
    on two of them was a no-op after selecting the single column.
    """
    flags = np.array([solver.Value(v) for v in slot_vars])
    chosen = pd.DataFrame(flags, columns=[col])
    chosen = chosen[chosen[col] == 1]
    names = [path[i] for i in chosen.index]
    return pd.DataFrame(names, columns=[col])[col]

df_mornings = _chosen_pois(mornings, 'morning')
df_afternoon = _chosen_pois(afternoons, 'afternoon')
df_evening = _chosen_pois(evenings, 'evening')
df_evening
# One column per slot; rows pair up the i-th chosen POI of each slot.
df_poi = pd.DataFrame([df_mornings, df_afternoon, df_evening]).T
df_poi
import re

# Map each scheduled POI label (e.g. "shop12") back to its index in `items`,
# walking rows in morning -> afternoon -> evening order.
_label_pat = re.compile(r"([a-z]+)([0-9]+)")
indices = []
df_poi.dropna(inplace=True)
for _, row in df_poi.iterrows():
    for slot in ('morning', 'afternoon', 'evening'):
        indices.append(int(_label_pat.match(row[slot]).groups()[1]))
import matplotlib.pyplot as plt

def plot_path(path):
    """Plot a tour of Items as (lat, lon) points joined by a dashed line.

    The first and last stops are drawn as large red markers. Each point is
    annotated with its position in the tour plus the POI's category.
    """
    x_axes = [item.coordinate['lat'] for item in path]
    y_axes = [item.coordinate['lon'] for item in path]
    plt.figure(figsize=(3, 9))
    plt.plot(x_axes, y_axes, 'o--')
    plt.plot(x_axes[0], y_axes[0], 'go--', linewidth=2, markersize=12, color="r")
    plt.plot(x_axes[-1], y_axes[-1], 'go--', linewidth=2, linestyle="dashed", markersize=12, color='r')
    # BUG FIX: ticks were hard-coded to (39..42, 28.75..29) — an Istanbul-ish
    # window that hid e.g. London data (~51.5 / -0.1). Derive them from data.
    plt.xticks(np.linspace(min(x_axes), max(x_axes), 7))
    plt.yticks(np.linspace(min(y_axes), max(y_axes), 7))
    # Loop variable renamed: the original `x` shadowed the tick array.
    for i in range(len(x_axes)):
        plt.annotate(str(i) + path[i].item_type, (x_axes[i], y_axes[i]))
    plt.show()
# Re-plot the tour restricted to the slot-scheduled POIs.
plot_path([items[i] for i in indices])
# Non-null entries per slot column.
col_len = {}
for col in df_poi.columns:
    col_len[col] = df_poi[col].count()
col_len
# Slot columns with the fewest / most scheduled POIs, and their imbalance.
min_col = min(col_len,key=col_len.get)
max_col = max(col_len,key=col_len.get)
mean_val = int(np.mean(list(col_len.values())))
diff = col_len[max_col] - col_len[min_col]
# Surplus entries of the fullest column, to be moved into the emptiest one.
# NOTE(review): the slice starts at diff+1 — presumably intended to take the
# last `diff` entries; confirm the off-by-one.
values = df_poi[max_col][diff+1:]
# df_poi[min_col] = df_poi[min_col].fillna(values)
# df_poi[min_col] = df_poi[min_col].isna()
df_poi
# Row labels where the emptiest column is NaN.
# NOTE(review): df_poi.dropna(inplace=True) ran earlier, so this list is
# likely always empty — confirm whether the dropna should be removed.
na = df_poi.index[df_poi[min_col].isna()].tolist()
vals = list(values)
for row in df_poi[min_col].iloc[na]:
    print(row)
# Fill the gaps by (index-aligned) values from the fullest column.
df_poi[min_col] = df_poi[min_col].fillna(values)
# Write the truncated fullest column back into the assignment frame.
# NOTE(review): df has a MultiIndex of `path` labels while df_poi has a
# RangeIndex — this assignment aligns on index and may produce all-NaN;
# verify against the notebook output.
df[max_col] = df_poi[max_col][:len(df[max_col]) - 1]
df_poi
# Flatten the schedule into one list of POI labels (morning NaNs dropped).
morning_col = [f for f in df_poi['morning'].tolist() if not pd.isna(f)]
after_col = df_poi['afternoon'].tolist()
evening_col = df_poi['evening'].tolist()
all_col = morning_col + after_col + evening_col
# Peek at the frame with the last diff//2 rows trimmed.
df_poi.iloc[:-diff//2]
"""Simple travelling salesman problem between cities."""
from ortools.constraint_solver import routing_enums_pb2
from ortools.constraint_solver import pywrapcp
import requests
import json
API_KEY = '5b3ce3597851110001cf624859a9e4cf86a3409abd7387ad2d5cac7a'
url = 'https://api.openrouteservice.org/v2/matrix/driving-car'
# coordinates = []
def create_data_model():
    """Build the routing problem data: a full pairwise haversine distance
    matrix over `items`, one vehicle, depot at node 0.

    Returns a dict with keys 'distance_matrix', 'num_vehicles', 'depot'.
    """
    def get_distance(item1: Item, item2: Item):
        """Great-circle distance between two Items' (lat, lon) coordinates."""
        cord1 = item1.coordinate
        cord2 = item2.coordinate
        tuple1 = (cord1['lat'], cord1['lon'])
        tuple2 = (cord2['lat'], cord2['lon'])
        return haversine(tuple1, tuple2)

    global coordinates
    # BUG FIX: the module-level `coordinates = []` was commented out, so the
    # first append below raised NameError unless an earlier cell defined it.
    coordinates = []
    data = {}
    distance = []
    for i in range(len(items)):
        # `coordinates` accumulates row i, is snapshotted into `distance`,
        # then rebound to a fresh list for the next row.
        for j in range(len(items)):
            coordinates.append(get_distance(items[i], items[j]))
        distance.append(coordinates)
        coordinates = []
    data['distance_matrix'] = distance
    # The OpenRouteService matrix request that once filled this is disabled;
    # the haversine matrix above is used instead.
    data['num_vehicles'] = 1
    data['depot'] = 0
    return data
# Visit order accumulated by print_solution, in routing order.
path = []

def print_solution(manager, routing, solution):
    """Print the vehicle-0 route and its total distance; append each visited
    Item to the module-level `path` as a side effect."""
    print('Objective: {} miles'.format(solution.ObjectiveValue()))
    index = routing.Start(0)
    plan_output = 'Route for vehicle 0:\n'
    route_distance = 0
    while not routing.IsEnd(index):
        plan_output += ' {} ->'.format(manager.IndexToNode(index))
        path.append(items[manager.IndexToNode(index)])
        previous_index = index
        index = solution.Value(routing.NextVar(index))
        route_distance += routing.GetArcCostForVehicle(previous_index, index, 0)
    plan_output += ' {}\n'.format(manager.IndexToNode(index))
    # BUG FIX: the distance line was appended *after* print(plan_output) in
    # the original, so it was never displayed.
    plan_output += 'Route distance: {}miles\n'.format(route_distance)
    print(plan_output)
def main():
    """Entry point: build the model, solve the TSP, print the route."""
    # Instantiate the data problem.
    data = create_data_model()

    # Create the routing index manager.
    manager = pywrapcp.RoutingIndexManager(len(data['distance_matrix']),
                                           data['num_vehicles'], data['depot'])

    # Create Routing Model.
    routing = pywrapcp.RoutingModel(manager)

    def distance_callback(from_index, to_index):
        """Returns the (integer) distance between the two nodes."""
        # Convert from routing variable Index to distance matrix NodeIndex.
        from_node = manager.IndexToNode(from_index)
        to_node = manager.IndexToNode(to_index)
        # BUG FIX: OR-Tools transit callbacks must return integers, but the
        # haversine matrix holds floats — many entries are < 1 km and would
        # truncate to 0, degenerating the tour. Scale to metres first.
        return int(data['distance_matrix'][from_node][to_node] * 1000)

    transit_callback_index = routing.RegisterTransitCallback(distance_callback)

    # Define cost of each arc.
    routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)

    # Setting first solution heuristic.
    search_parameters = pywrapcp.DefaultRoutingSearchParameters()
    search_parameters.first_solution_strategy = (
        routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC)

    # Solve the problem.
    solution = routing.SolveWithParameters(search_parameters)

    # Print solution on console.
    if solution:
        print_solution(manager, routing, solution)

if __name__ == '__main__':
    main()
# Show the TSP visit order produced by print_solution.
path
plot_path(path)
import gmplot
# NOTE(review): create_data_model fills `coordinates` with scalar distances
# and leaves it rebound to [] — zip(*coordinates) will therefore fail unless
# the commented-out branch that stored [lon, lat] pairs is re-enabled; confirm.
attractions_lats, attractions_lngs = zip(*coordinates)
# Swap: the pairs were stored (lon, lat) but gmplot wants (lat, lon).
attractions_lats , attractions_lngs = attractions_lngs ,attractions_lats
# NOTE(review): hard-coded Google Maps API key — rotate it and load it from
# an environment variable before sharing this notebook.
apikey = 'AIzaSyCNvDXCZJSLvw9618045G3856O8x5EqeKw'
gmap = gmplot.GoogleMapPlotter(37.766956, 44, 14, apikey=apikey)
gmap.scatter(attractions_lats, attractions_lngs, color='#3B0B39', size=40, marker=False)
gmap.marker(37.770776, 44, color='cornflowerblue')
gmap.draw('map.html')
# Dump "lon,lat," per stop (side-effect comprehension; result discarded).
[print(f"{p.coordinate['lon']},{p.coordinate['lat']},") for p in path]
# Flat list of alternating latitude, longitude values (25 stops = 50 numbers).
tst = [41.00449,28.852695,
40.972599,28.782539,
40.964237,28.799963,
40.961559,28.81106,
40.964157,28.826265,
40.979065,28.854675,
40.9756,28.857127,
40.974094,28.867846,
40.974113,28.868027,
40.977695,28.873381,
40.993099,28.886101,
40.995377,28.909706,
40.995335,28.909763,
41.006756,28.915522,
41.045597,28.896212,
41.005661,28.975103,
41.005661,28.975103,
41.007801,28.9431,
41.006027,28.922728,
41.004036,28.927195,
41.003357,28.928682,
40.996113,28.928612,
40.996471,28.921476,
40.993034,28.92271,
40.99279,28.922642]
# Print "lat,lon," per stop. IDIOM FIX: a plain loop replaces the original
# side-effect-only list comprehension (its result list was discarded).
for i in range(1, len(tst), 2):
    print(f"{tst[i-1]},{tst[i]},")
tst=[Item("architecture",{"name":"","id":"Q7126259","coordinate":{'lat': 40.426304, 'lon': -3.690776},"guestrating":"7"}),
Item("historic",{"name":"","id":"Q7126259","coordinate":{'lat': 40.426304, 'lon': -3.690776},"guestrating":"7"}),
Item("religion",{"name":"","id":"N5033133939","coordinate":{'lat': 40.424965, 'lon': -3.69284},"guestrating":"7"}),
Item("historic",{"name":"","id":"N5033133939","coordinate":{'lat': 40.424965, 'lon': -3.69284},"guestrating":"7"}),
Item("religion",{"name":"","id":"W32897827","coordinate":{'lat': 40.424412, 'lon': -3.693882},"guestrating":"7"}),
Item("historic",{"name":"","id":"Q289693","coordinate":{'lat': 25.3575, 'lon': 55.391945},"guestrating":"7"}),
Item("architecture",{"name":"","id":"Q56275601","coordinate":{'lat': 24.382299, 'lon': 55.715698},"guestrating":"6"}),
Item("historic",{"name":"","id":"Q48969546","coordinate":{'lat': 25.522223, 'lon': 55.62611},"guestrating":"7"}),
Item("architecture",{"name":"","id":"N4477143891","coordinate":{'lat': 41.050713, 'lon': 29.011703},"guestrating":"7"}),
Item("historic",{"name":"","id":"N415157636","coordinate":{'lat': 41.005661, 'lon': 28.975103},"guestrating":"7"}),
Item("architecture",{"name":"","id":"W408822809","coordinate":{'lat': 40.428207, 'lon': 29.721098},"guestrating":"7"}),
Item("hotel",{"name":"","id":"230411","coordinate":{'lat': 25.249397, 'lon': 55.347909},"guestrating":"9.0"}),
Item("architecture",{"name":"","id":"Q3336839","coordinate":{'lat': 25.260555, 'lon': 55.314445},"guestrating":"3"}),
Item("religion",{"name":"","id":"W491182166","coordinate":{'lat': 25.265011, 'lon': 55.299423},"guestrating":"3"}),
Item("religion",{"name":"","id":"R7214752","coordinate":{'lat': 25.264427, 'lon': 55.296806},"guestrating":"3"}),
Item("hotel",{"name":"","id":"487455","coordinate":{'lat': 40.9756, 'lon': 28.857127},"guestrating":"8.0"}),
Item("historic",{"name":"","id":"Q6078807","coordinate":{'lat': 40.993099, 'lon': 28.886101},"guestrating":"3"}),
Item("religion",{"name":"","id":"N976474174","coordinate":{'lat': 41.00449, 'lon': 28.852695},"guestrating":"3"}),
Item("religion",{"name":"","id":"N4504319791","coordinate":{'lat': 41.006756, 'lon': 28.915522},"guestrating":"3"})]
[print(f"{tst[i].coordinate['lat']},{tst[i].coordinate['lon']},") for i in range(len(tst))]
```
| github_jupyter |
<a href="https://colab.research.google.com/github/mani2106/Competition-Notebooks/blob/master/Hitachi_data_engg/Hitachi_Data_Engg_Data_Prep.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
### Import data and libraries
```
# IPython shell magic: unpack the competition archive into /content/DataSet.
!unzip /content/d522aa9cf97211e9.zip
from pathlib import Path
import pandas as pd
DATA_PATH = Path('/content/DataSet')
# First CSV column is the row id; use it as the index.
train_data = pd.read_csv(DATA_PATH/'Train.csv', index_col=[0])
train_data.head()
```
### Look at the data
```
train_data.info()
```
### Missing data info
```
train_data.isna().sum()
columns_with_missing_vals = list((train_data.isna().sum()[lambda x: x>0].index))
columns_with_missing_vals
```
### Explore columns with missing variables
#### Country Code
```
train_data.country_code_destination.value_counts().plot.bar()
train_data.country_code_destination.value_counts()
```
### Plotting the Railway stations
```
import plotly.graph_objects as go
from sklearn.preprocessing import LabelEncoder
# Just for legend in the map: encode the target classes as integers.
le = LabelEncoder()
train_data['target_enc'] = le.fit_transform(train_data['target'])
le_name_mapping = dict(zip(le.classes_, le.transform(le.classes_)))
le_name_mapping
# One sub-frame per traffic class (0=high, 1=low, 2=med per le_name_mapping).
high_data = train_data[train_data['target_enc'] == 0]
low_data = train_data[train_data['target_enc'] == 1]
med_data = train_data[train_data['target_enc'] == 2]
df_data = []
for df, l, c in zip([high_data, low_data, med_data], ["high", "low", "med"], ['red', 'blue', 'green']):
    df_data.append(go.Scattergeo(
        # Bug fix: the original paired longitude_source with
        # latitude_destination, producing points that correspond to no real
        # station.  Use the source station's own coordinate pair.
        lon = df['longitude_source'],
        lat = df['latitude_source'],
        mode = 'markers',
        marker_color = c,
        name=l
    ))
fig = go.Figure(data=df_data)
fig.update_layout(
    title = 'Most trafficked Railway stations',
    geo = dict(
        scope='europe',
        showland=True
    )
)
fig.show()
```
### Look at rows with missing data
```
# Rows that contain at least one missing value.  Bug fix: the original
# indexed with the full boolean *frame* (train_data[train_data.isnull()]),
# which does not select rows -- it returns an equally shaped frame with
# non-null positions masked to NaN, so null_df.shape always equalled
# train_data.shape regardless of the data.
null_df = train_data[train_data.isnull().any(axis=1)]
null_df.shape, train_data.shape
# Dropping rows where *all* columns are NA leaves the shape unchanged,
# i.e. no row is entirely empty.
train_data.dropna(how='all').shape
```
This means that there are no rows in which every value is NA. The map made from the coordinates makes it clear that the data is about railway stations across Belgium, the Netherlands, France and the UK (possibly the City of London). There are station rows without coordinates that could only be filled in if the station names were known, which is not the case here.
### Data preprocessing
```
from sklearn.preprocessing import OneHotEncoder, RobustScaler, LabelEncoder
```
#### Missing values
As discussed before the missing data can be filled if we knew the names of the railway stations, especially the columns with **latitude**, **longitude** and **country codes**. So any imputation on other columns is useless, so let's drop the rows.
```
train_data.dropna(how='any', inplace=True)
```
#### Scaling and encoding data
```
# These need to be label encoded or one hot encoded
# columns with date time
# .filter(like=['current_date', 'current_time', 'country_code_source', 'country_code_destination'], axis=1)
object_col_df = train_data.select_dtypes(include=['object']).copy()
```
Peculiar columns include `current_date`, `current_time` which are datetime columns and they are represented as features in other columns eg. `current_year`, `current_week` so they can be safely removed from the dataset.
```
# Report the cardinality of the categorical columns that will be one-hot
# encoded.  pd.unique replaces the `pd.np` alias, which was deprecated in
# pandas 0.25 and removed in pandas 2.0.
print(f"""One hot encoded Columns that will be created for some data columns
Number of trains: {len(object_col_df.train_name.unique())}
Number of stations: {pd.unique(object_col_df[['source_name', 'destination_name']].values.ravel()).shape[0]}
Number of countries: {pd.unique(object_col_df[['country_code_source', 'country_code_destination']].values.ravel()).shape[0]}""")
# Keep the target aside before the label column is dropped from the frame.
target = object_col_df['target']
object_col_df.head()
```
Form `datetime` column from the `current_date` and `current_time` and remove the same
```
# Combine the separate date and time strings into one parseable datetime
# column, then drop the raw string columns (current_day is redundant with
# the day-of-week feature derived later).
object_col_df.loc[:, 'datetime'] = pd.to_datetime(object_col_df.loc[:, 'current_date']+ ' ' + object_col_df.loc[:, 'current_time'])
object_col_df.drop(columns=['current_date', 'current_time', 'current_day'], inplace=True)
object_col_df.head()
```
Get the date and time specific features from `datetime` column
```
import math
# Derive calendar features from the combined datetime column.
# Two pandas>=2.0 compatibility fixes: the `pd.np` alias was removed
# (string dtype names are equivalent and supported), and
# `Series.dt.weekofyear` was removed in favour of `dt.isocalendar().week`.
object_col_df['DT_M'] = object_col_df['datetime'].dt.month.astype('int8')
object_col_df['DT_W'] = object_col_df['datetime'].dt.isocalendar().week.astype('int8')
object_col_df['DT_D'] = object_col_df['datetime'].dt.dayofyear.astype('int16')
object_col_df['DT_hour'] = object_col_df['datetime'].dt.hour.astype('int8')
object_col_df['DT_day_week'] = object_col_df['datetime'].dt.dayofweek.astype('int8')
object_col_df['DT_day_month'] = object_col_df['datetime'].dt.day.astype('int8')
# Week-of-month: days 1-7 -> 1, 8-14 -> 2, etc.
object_col_df['DT_week_month'] = object_col_df['datetime'].dt.day / 7
object_col_df['DT_week_month'] = object_col_df['DT_week_month'].apply(lambda x: math.ceil(x)).astype('int8')
# The raw datetime and the target label are not model features.
object_col_df.drop(columns=['datetime', 'target'], inplace=True)
```
Filter again
```
# Split the newly created integer date features from the remaining string
# columns.  String dtype names replace the removed `pd.np.int8/int16`
# accessors (pandas>=2.0) and select the same columns.
date_feat = object_col_df.select_dtypes(exclude=['object']).copy()
object_col_df = object_col_df.select_dtypes(exclude=['int8', 'int16']).copy()
object_col_df.info()
```
Join date features with original dataframes
```
train_data = train_data.join(date_feat, how='outer')
```
Let's prep the data now.
```
ohe = OneHotEncoder(handle_unknown='error', drop='first')
```
###### Before
```
object_col_df.shape
```
###### After
```
ohe_arr = ohe.fit_transform(X=object_col_df)
ohe_arr.shape
```
As expected one hot encoding made too many columns so a **PCA** may be required.
##### Target Encoding
```
le = LabelEncoder()
target_enc = le.fit_transform(target)
le_name_mapping = dict(zip(le.classes_, le.transform(le.classes_)))
le_name_mapping
```
#### Filtering Scalable data
We have excluded the `date` features created above because they would lose meaning if they were scaled. We will also **exclude the coordinate columns** because we have the `source` and `destination` station names; since there can be only **one pair of coordinates** representing a station, we can confirm this by checking the unique counts of the respective columns.
```
# These need to be scaled
num_col_df = train_data.select_dtypes(include=['int', 'float']).copy()
assert len(num_col_df['latitude_source'].unique()) == len(train_data['source_name'].unique())
```
##### Scaling
```
num_col_df = num_col_df.filter(like="mean")
scaler = RobustScaler()
```
###### Before
```
num_col_df.describe()
```
###### After
```
scaled_num = scaler.fit_transform(num_col_df)
pd.DataFrame(scaled_num, columns=num_col_df.columns).describe()
scaled_num.shape
```
### Gather data and save preprocessing objects
```
# Columns the downstream model should ignore, with the reason for each group.
unneeded_cols = [
'current_date', 'current_time', 'current_year', 'current_week', 'current_day', # Since we made date features with the datetime column
'latitude_destination', 'latitude_source', 'longitude_destination', 'longitude_source' # Since we have the station names
]
# Save everything in a dictionary
encoders_and_data = {
'target': le,
'ohe': ohe,
'ohe_arr': ohe_arr,
'scaler': scaler,
'scaled_num': scaled_num,
'unneeded_cols': unneeded_cols,
'date_feat': date_feat
}
import joblib
# Persist the fitted encoders and transformed arrays so the modelling
# notebook can reuse them without refitting.
with open('encoders_and_data.joblib', 'wb') as f:
joblib.dump(encoders_and_data, f)
```
| github_jupyter |
```
# Scrape the public repository list of the "google" GitHub organization and
# collect name/description/language/stars/forks/license for every repo.
# NOTE(review): this relies on GitHub's old server-rendered "repo-list"
# markup and CSS classes -- fragile against page redesigns; verify the
# selectors still match before reuse.
import requests
from bs4 import BeautifulSoup
import re
import pandas as pd
res = requests.get("https://github.com/google")
soup = BeautifulSoup(res.content,"html.parser")
# Get the number of result pages from the pagination widget
pages = soup.find_all("div",class_ ="pagination")[0].find_all("a")
#print(pages)
# Slice the last page number out of one pagination anchor's raw HTML text.
a = str(pages[5])
pagess = (a[-6:-4])
#print(pagess)
# Parallel accumulator lists: name, description, language, stars, forks, license.
a_name, a_desc, a_dil, a_star, a_fork, a_lisans = [],[],[],[],[],[]
for pagesnumber in range(1, int(pagess) + 1 ):
pagesrequest = requests.get("https://github.com/google?page="+ str(pagesnumber))
#print(pagesrequest)
source = pagesrequest.text
soup = BeautifulSoup(source, "html.parser")
repo_list = soup.find('div', attrs={"class": "repo-list"})
repos = repo_list.find_all('li')
# Display template (labels intentionally left in Turkish:
# Isim=name, Aciklama=description, DİL=language, LİSANS=license).
Format = """
Isim: {name}
Aciklama: {desc}
DİL : {lang}
LİSANS:{lisans}
FORK: {fork}
STAR: {star}
"""
##FORK: {fork}
for repo in repos:
# repo name
name = repo.find("a").text.strip()
desc = "Açıklama yok"
if repo.find('p'): ## if the repo has a description
desc = repo.find('p').text.strip()
try:
lang = repo.find("span",class_="mr-3").text.strip()
except AttributeError:
lang = "Dil yok"
# When no language is set, the first "mr-3" span holds the license
# instead; treat an "Apache-2.0" hit here as "no language" ("Dil yok").
if lang == "Apache-2.0":
lang = "Dil yok"
star = repo.find("a" ,class_="mr-3").next_sibling.next_sibling.text.strip()
fork = repo.find("a", class_="mr-3").text.strip()
try:
lisans = repo.find("span",class_="mr-3").next_sibling.next_sibling.text.strip()
except:
lisans = "Lisans yok"
# The sibling walk above can land on a numeric counter or empty text;
# normalise both to "Lisans yok" (= no license).
if lisans.isdigit():
lisans = "Lisans yok"
elif lisans == "" :
lisans = "Lisans yok"
a_name.append(name)
a_desc.append(desc)
a_dil.append(lang)
a_star.append(star)
a_fork.append(fork)
a_lisans.append(lisans)
print(Format.format(name=a_name, desc=a_desc, lang=a_dil,star=a_star,fork=a_fork,lisans=a_lisans))
df = pd.DataFrame({'isim':a_name,'Aciklama':a_desc,'Dil':a_dil,'Star':a_star,'Fork':a_fork,'Lisans':a_lisans})
print (df.head())
# parsing finished -- save the collected data to a CSV file
df.to_csv('google-github-analiz.csv')
# Reload the scraped CSV and explore the language/license distributions.
df = pd.read_csv("google-github-analiz.csv")
df.head()
# Drop index columns written by earlier to_csv calls (saving without
# index=False creates the "Unnamed" columns).
df = df.drop(columns=["Unnamed: 0"])
df = df.drop(columns=["Unnamed: 0.1"])
df.head()
df.describe()
# Most common programming language: Python
# Most common license: Apache-2.0
# no missing data
df.isnull().sum()
# All languages used
df['Dil'].unique()
import matplotlib.pyplot as plt
plt.figure(figsize=(16,6))
df['Dil'].value_counts().plot.bar()
# About 50 repos show no language; out of 1666 rows that is not a significant share
plt.figure(figsize=(15,12))
df['Dil'].value_counts().plot.pie(autopct='%9.0f%%')
plt.xlabel(" ",fontsize = 20)
plt.ylabel(" ", fontsize = 20)
plt.title("Google Github Repository Programlama Dilleri")
import matplotlib.pyplot as plt
plt.figure(figsize=(16,6))
df['Lisans'].value_counts().plot.bar()
```
| github_jupyter |
<a href="https://colab.research.google.com/github/ravi-prakash1907/Machine-Learning-for-Cyber-Security/blob/main/Classifiers/Naive%20Bayes.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Naive Bayes Classifier
**Steps:**
1. Get the prediction labels' probability
2. Get the conditional probabilities of the dependent variables for each possible label
3. Use Naive Bayes' Classifier
4. Predict based on the Maximum A Posteriori (_MAP_)
```
## libraries
import pandas as pd
import numpy as np
df = pd.read_csv('Datasets/stolenCars.csv')
df.head()
df.shape
df.describe()
```
---
### Naive Bayes' Classifier
_It takes the tuples for set of the:_
1. Dependent variables
2. Values of any datapoint
3. Name of the column to be predicted
```
## gives rows with given set of cols
## gives rows with given set of cols
def getOneLabel(df, label, predLabel):
    """Return the rows of *df* whose value in column *label* is in *predLabel*.

    Parameters:
        df: source DataFrame.
        label: name of the column to filter on.
        predLabel: iterable of accepted values for that column.

    Returns a copy, so callers may mutate the result safely.
    """
    # Bug fix: the original re-read 'Datasets/stolenCars.csv' from disk and
    # filtered that, silently ignoring the `df` argument (and crashing when
    # the CSV is absent).  Vectorised isin() on the passed frame is both
    # correct and O(n), replacing the per-row .loc lookups + index drop.
    return df[df[label].isin(predLabel)].copy()
## basic classifier
def naiveBayesianPredictor(df, labels, given, predCol, describe = False):
possibleCases = list(df[predCol].unique())
finalPredDescription = {}
for possibleCase in possibleCases:
tempDF = getOneLabel(df, predCol, [possibleCase])
preds = len(tempDF) / len(df)
for index in range(len(given)):
partialProb = tempDF[labels[index]].value_counts() / len(tempDF)
preds *= partialProb[given[index]] #Naive bayesian classifier
finalPredDescription[possibleCase] = preds
## converting predictions to pandas Series
finalPredDescription = pd.Series(finalPredDescription)
## choosing MAP (Maximum A Posteriori)
finalPred = str(finalPredDescription[finalPredDescription == max(finalPredDescription)].index[0])
if describe:
return finalPred,finalPredDescription
else:
return finalPred
## testing set
labels = ('Color','Type', 'Origin')
given = ('Red', 'SUV', 'Domestic')
predCol = 'Stolen?'
## prediction
finalPrediction = naiveBayesianPredictor(df, labels, given, predCol, describe=True)
probTable = pd.DataFrame([finalPrediction[1].index, finalPrediction[1].values], columns = ['Predicted Labels', 'Probability'])
## result
print("""Hence, as per final prediction: \nWill a car with specification {} be {}? : {} \n
Probability Table: """.format(given,
predCol,
finalPrediction[0]))
pd.DataFrame([finalPrediction[1]])
```
---
## Intermediate Probability Tables
```
def getProbTable(df,dependentCol,predCol = 'Stolen?'):
    """Build a table of conditional probabilities P(dependentCol | predCol).

    One column per class label of `predCol`, named "P(<dependentCol>|<case>)";
    rows are indexed by the values of `dependentCol`.  Columns after the first
    are aligned to the row index established by the first class (original
    behaviour preserved).
    """
    possibleCases = list(df[predCol].unique())  # e.g. ['Yes', 'No']
    sets = pd.DataFrame()
    for possibleCase in possibleCases:
        # Filter directly instead of calling getOneLabel(), which re-read a
        # hard-coded CSV from disk and ignored `df`.
        targetDF = df[df[predCol] == possibleCase]
        PcolGIVENcase = targetDF[dependentCol].value_counts()/len(targetDF)
        header = "P("+dependentCol+"|"+possibleCase+")"
        sets[header] = PcolGIVENcase
        # The original also called DataFrame.append here and discarded the
        # result -- a no-op that raises AttributeError on pandas>=2.0 (the
        # method was removed), so it has been dropped.
    return sets
## printing intermediate tables (for all three dependent columns)
for col in df.columns[1:-1]:
print("\nProbability Table for",col,"-\n")
x = getProbTable(df,col)
print(x, "\n\n"+"-"*35+"\n")
```
| github_jupyter |
<a href="https://colab.research.google.com/github/deepmipt/DeepPavlov/blob/master/examples/gobot_md_yaml_configs_tutorial.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Use RASA DSL to Configure DeepPavlov's GO-Bot
At DeepPavlov, we support a variety of industry-wide and popular standards to support developing Conversational AI solutions.
DSLs, known as Domain-Specific Languages, provide a rich mechanism to define the behavior, or "the what", while
the underlying system uses the parser to transform these definitions into commands that implement this behavior, or "the how" using the system's components.
Until very recently we supported two such DSLs, including industry-standard [AIML](http://docs.deeppavlov.ai/en/master/features/skills/aiml_skill.html), as well as [DSL](http://docs.deeppavlov.ai/en/master/features/skills/dsl_skill.html) designed by one of our partners, EORA.
In this tutorial, you will learn how to use another industrial DSL, or, better said, set of DSLs, introduced by RASA.ai,
to build simple goal-oriented chatbots using DeepPavlov's GO-bot.
This is the very beginning of our work focused on supporting RASA DSLs as a way to configure DeepPavlov-based goal-oriented chatbots,
and therefore not all elements of the RASA DSLs are supported. It is also worth mentioning that in 0.12.0 release we want to focus on supporting tools
to define the domain logic behind the goal-oriented assistant, and files like `config.yml` and others are out of scope for this release.
To configure a DeepPavlov-based goal-oriented chatbot using these DSLs, you need to have at least three basic config files:
* `stories.md` (or `stories-{trn, tst, val}.md` but these are just subsamples)
* `nlu.md`
* `domain.yml`
These files allow you to define 3 key elements of the chatbot, including product-level stories, NLU training data, and your chatbot's domain.
## Concepts Behind Stories.md, NLU.md, and Domain.yml
### `stories.md`
`stories.md` is a mechanism used to teach your chatbot how to respond to user messages. It allows you to control your chatbot's dialog management.
These "stories" model real conversations between a user and a chatbot. This Markdown-based file is used to define a list of
*stories*, and each *story* can have a list of one or more *intents* with (optional) corresponding *slots*, where each *intent*
has one or more corresponding *actions* taken by the chatbot.
These actions, in general, can be anything, from simple message replies, to programmable actions that call APIs of other services.
*Note:* In this version, supported actions are limited to simple message replies.
In a way, it can be seen as a *dialogues dataset*.
*Note: Stories do not provide an ultimate instruction of how the bot should behave: it is up to the training process to infer the implicit underlying patterns controlling the dialogue.*
If you are looking for a way to make the bot follow the story templates strictly as defined,
there is a known hack:
the more times the model sees the training data, the better the model models the data,
so the desired behavior is achieved when the accuracy on the training data is 1.
Such a situation is illustrated in section Basic Chatbot.
#### format
Stories file is a markdown file of the following format:
```markdown
## story_title(not used by algorithm, but useful to work with for humans)
* user_action_label{"1st_slot_present_in_action": "slot1_value", .., "Nth_slot_present_in_action": "slotN_value"}
- system_respective_utterance
* another_user_action_of_the_same_format
- another_system_response
...
## another_story_title
...
```
**See examples below in this tutorial**
### `nlu.md`
`nlu.md` represents an NLU model of your chatbot. It allows you to provide training examples that show how your chatbot should
understand user messages, and then train a model through these examples.
While DeepPavlov's GO-bot supports JSON-based DSTC-2 format for training data, this Markdown Format introduced by RASA is the easiest one for humans to read and write.
#### format
NLU file is a markdown file of the following format:
```markdown
## intent:possible_user_action_label_1
- An example of user text that has the possible_user_action_label_1 action label
- Another example of user text that has the possible_user_action_label_1 action label
...
## intent:possible_user_action_label_N
- An example of user text that has the (possible_user_action_label_N)[action_label] action label
<!-- Slotfilling dataset is provided as an inline markup of user texts -->
...
```
**See examples below in this tutorial**
### `domain.yml`
`domain.yml` helps you to define the universe your chatbot lives in: what user inputs it expects to get, what actions it should be able to predict,
how to respond, and what information to store.
This YML format is relatively simple, and it can be seen as a dictionary of all components of your chatbot, including but not limited to intents,
actions, responses, and other things.
#### format
Domain file is a YAML file of the following format:
```yaml
# slots section lists the possible slot names (aka slot types)
# that are used in the domain (i.e. relevant for bot's tasks)
# currently only type: text is supported
slots:
slot1_name:
type: text
...
slotN_name:
type: text
# entities list now follows the slots list 2nd level keys
# and is present to support upcoming features. Stay tuned for updates with this!
entities:
- slot1_name
...
- slotN_name
# intents section lists the intents that can appear in the stories
# being kept together they do describe the user-side part of go-bot's experience
intents:
- user_action_label
- another_user_action_of_the_same_format
...
# responses section lists the system response templates.
# Despite system response' titles being usually informative themselves
# (one could even find them more appropriate when no actual "Natural Language" is needed
# (e.g. for buttons actions in bot apps))
# It is though extremely useful to be able to serialize the response title to text.
# That's what this section content is needed for.
responses:
system_utterance_1:
- text: "The text that system responds with"
another_system_response:
- text: "Here some text again"
```
**See examples below in this tutorial**
## Basic Chatbot
Let's build the simplest chatbot possible.
This chatbot will be capable of processing three intents: *greeting*, *goodbye*, and *thanks*.
```
DP_MIN_DEMO_DIR = "dp_minimal_demo_dir" # we will work in this folder
import os
%cd /content
os.makedirs(DP_MIN_DEMO_DIR, exist_ok=True)
%cd {DP_MIN_DEMO_DIR}
```
### Stories.md: Basic Stories Example
`stories.md` is pretty straightforward in this case. In it you define 3 stories, each having its own intent and response (utterance).
Take into account the fact that you can combine all of these intents under one story, or add two intents to one story, and third to another one.
```
%%writefile stories.md
## greet
* greet
- utter_greet
## thank
* thank
- utter_noworries
## goodbye
* bye
- utter_bye
```
### nlu.md: Basic NLU Training Data Example
`nlu.md` has an NLU training data that enables DeepPavlov to recognize user phrases as belonging to one of the intents defined in `domain.yml`.
```
%%writefile nlu.md
## intent:greet
- Hi
- Hey
- Hi bot
- Hey bot
- Hello
- Good morning
- hi again
- hi folks
## intent:bye
- goodbye
- goodnight
- good bye
- good night
- see ya
- toodle-oo
- bye bye
- gotta go
- farewell
## intent:thank
- Thanks
- Thank you
- Thank you so much
- Thanks bot
- Thanks for that
- cheers
```
### domain.yml: Basic Domain Example
In this demo, `domain.yml` contains the list of:
* possible user action intents, and
* possible system response actions
*Note:* Entities and slots are omitted in this example. See the more sophisticated example below to see how they can be defined in the `domain.yml`.
```
%%writefile domain.yml
intents:
- greet
- bye
- thank
responses:
utter_noworries:
- text: No worries!
utter_greet:
- text: Hi
utter_bye:
- text: Bye!
```
The next step is to install the `deeppavlov` library.
```
!pip install git+https://github.com/deepmipt/DeepPavlov.git@feature/gobot-md-yaml-config
!python -m deeppavlov install gobot_simple_dstc2
```
Define the path to our DSL-based configuration files above (the folder we are in right now) and the folder used to store the trained bot.
```
from deeppavlov import configs
from deeppavlov.core.common.file import read_json
# Start from the minimal md/yaml go-bot config shipped with DeepPavlov and
# point its data and model paths at the current working directory.
gobot_config = read_json(configs.go_bot.gobot_md_yaml_minimal)
gobot_config['metadata']['variables']['DATA_PATH'] = '.'
gobot_config['metadata']['variables']['MODEL_PATH'] = '.'
```
Since our data is basically the mock tutorial data we will use the same subsamples for all of the train (training set), test (test set) and valid (validation set) subsamples.
However, for a real DeepPavlov-based goal-oriented bot you should use different train, test, and valid sample stories.md files.
```
!cp stories.md stories-trn.md
!cp stories.md stories-tst.md
!cp stories.md stories-val.md
```
The next step is to train the bot:
```
from deeppavlov import train_model
train_model(gobot_config, download=True)
```
Finally, it's time to build our bot and experiment with it:
```
from deeppavlov import build_model
bot = build_model(gobot_config)
bot.reset()
bot(["start"])
bot(["Hi"])[0][0].actions_tuple
```
Our bot answers with "greeting" to our "greeting". How will it respond to some grateful message?
```
bot.reset()
bot(["start"])
bot(["Thanks!"])[0][0].actions_tuple
```
Ok, "no worries" is an expected response. Let's check if the "goodbye" user message is processed with the corresponding reply:
```
bot.reset()
bot(["start"])
bot_response_actions = bot(["bye"])[0][0].actions_tuple
import yaml
# Map the predicted action label back to its response text template.
# yaml.safe_load replaces yaml.load without an explicit Loader, which is
# deprecated and allows arbitrary object construction on untrusted input;
# the context manager also closes the file handle the original leaked.
with open("domain.yml") as f:
    system_utter2text = yaml.safe_load(f)["responses"]
system_utter2text[bot_response_actions[0]][0]["text"]
```
## Advanced Chatbot: Building a Restaurants Bot inspired by the DSTC Schema-Guided Dialogue Dataset
```
DP_BIG_DEMO_DIR = "dp_big_demo_dir" # we'll work in this directory
import os
%cd /content
os.makedirs(DP_BIG_DEMO_DIR, exist_ok=True)
%cd {DP_BIG_DEMO_DIR}
```
While the previous demo was focused on figuring out how to work with the very simple goal-oriented chatbot, the reality of chatbots is rarely that simple.
Take, for example, the use case for restaurants. People can search for them, ask about the menus, or book tables. These activities require a substantially
more advanced configuration.
In the purpose of this more realistic demo, we decided to go through a rather unusual route. To simplify the process of defining the domain and behavior
of this chatbot, we took a famous industrial research dataset provided by the Dialogue Systems Technology Challenge known as DSTC, also known as [Schema Dataset](https://github.com/google-research-datasets/dstc8-schema-guided-dialogue).
This dataset contains a huge number of the annotated human-machine conversations
crowdsourced in an [M2M manner](https://arxiv.org/pdf/1801.04871.pdf) for various real-life scenarios and domains.
One of these domains is dedicated to *Restaurants*. In it, users are performing a variety of the goal-oriented tasks like searching for restaurants or booking tables via interaction with the bot.
Given the power and elegance of the DSTC format, we took liberty to use our internal **automatic conversion tool** to directly **transform** its data into the set of _stories.md_, _nlu.md_, _domain.yml_.
*Note: the dataset used is this demo is quite large. The dataset files listings are provided in form of file subset listings. Feel free to examine the files yourself.*
##### Download the data used in this tutorial section
```
# let's get the mentioned converted Schema-dataset subset
!wget http://files.deeppavlov.ai/datasets/schema_resto_md_yaml_v2.tar.gz
!tar -zxf schema_resto_md_yaml_v2.tar.gz
```
#### Technical Note: Automatic Conversion from DSTC Schema Format to RASA DSLs
Schema dataset is provided in DSTC (Dialogue State Tracking Challenge) [format](https://github.com/google-research-datasets/dstc8-schema-guided-dialogue).
The DSTC format has its own advantages: it is very detailed and allows for various additional info to be incorporated into the dataset itself.
In it, there are two major components - Schema Representation, and Dialog Representation. The first component is dedicated to describing sets of Intents, Slots, and Entities that are used by a given service through an API. The second component is focused on describing actual dialogs that happen between users and services. It also includes actual labels for the aforementioned Intents, Slots, and Entities defined in the Schema component.
However, while DSTC format is quite rich for building state-of-the-art systems that participate in the annual DSTC competitions, it takes a serious effort for the real-world developers to collect and annotate data using this format. In contrast, RASA DSLs we're illustrating here are quite different from the DSTC: they are meant to be neat and minimalistic, and to allow developers to define their domains from a rather scarce input information.
As mentioned in the beginning of this part of the tutorial, we've performed an automatical conversion of the Schema Restaurants dataset from the DSTC format to RASA DSLs.
#### Slot Filler
Any typical goal-oriented chatbot system uses a standard approach to define the way it works in the form of a pipeline. DeepPavlov's Go-Bot is quite permissive in which components
can be used in it's pipeline; however, Slot Filler is the required one.
Slot Filler, also known as slotfiller, is necessary to recognize and normalize slot-filling information provided in the user's utterances.
For example, when user says that she wants to "book a table in *London*", slotfiller's job is to recognize that *London* in this phrase represents the required slot `city`.
For the purposes of this demo, we are providing the pretrained slotfiller for the dataset. The small notebook on how the slotfiller was trained will be provided in one of the upcoming releases.
```
from deeppavlov import configs, train_model, build_model
from deeppavlov.core.common.file import read_json
!python -m deeppavlov download schema_resto_md_yaml/ner_config.json
!python -m deeppavlov download schema_resto_md_yaml/slotfiller_config.json
slotfill_config = read_json("schema_resto_md_yaml/slotfiller_config.json")
slotfiller = build_model(slotfill_config, download=True)
slotfiller(["i'm looking for a thai food somewhere in SFO"])
```
Seems OK. Let's save our slotfiller config to train and evaluate the restaurants bot, finally.
```
import json
json.dump(slotfill_config, open('slotfill_config.json', 'wt'))
```
#### Known Limitations
While slotfilling technology uses the power of the industry-standard Named Entity Recognition (NER) method to recognize key slots in the given phrases,
the quality of slot recognition can be substantially increased by combining this process with the data already known to the bot's developer.
For example, having a finite list of cities that are supported by a given end-user solution (e.g., several cities in the Greater Seattle Area for local restaurant chain)
aids slotfiller in a significant way. Typically, this information is stored in the database, though it may also be provided in the loose files like CSV (comma-separated values).
However, in order to focus on the support of the RASA DSLs, we made a conscious decision to omit the support of such data in this demo. An additional demo highlighting usage of such data will be provided in one of the subsequent releases.
Nevertheless our demo goal-oriented bot should still be able to generalize and use the global patterns in the conversations.
### Stories.md: Advanced Stories Example
`stories.md`
*Note: As said above, this file has been auto-generated from the DSTC schema.*
Like in the Basic Demo, Stories here define a variety of interactions between user and our bot.
```
STORIES_FPATH = "schema_resto_md_yaml/stories.md"
!echo "stories file size (lines): $(wc -l {STORIES_FPATH})"
!echo -e '\n\npart of stories file is listed below\n'
!head -500 {STORIES_FPATH} | tail -30
```
### nlu.md: Advanced NLU Training Data Example
`nlu.md`
*Note: As said above, this file has been auto-generated from the DSTC schema, and it's quite large. Below you can see only a part of this file. Feel free to examine the entire file.*
Like in the Basic Demo, `nlu.md` shows the examples of the user utterances for the supported intent classes.
The slotfilling and NER information is provided in the form of the inline mark-up.
```
NLU_FPATH = "schema_resto_md_yaml/nlu.md"
!echo "nlu file size (lines): $(wc -l {NLU_FPATH})"
!echo -e '\n\npart of nlu file is listed below\n'
!head -50 {NLU_FPATH} | tail -20
```
Let's take a closer look to some specific intent examples.
```
!grep --no-group-separator -m 10 -A 1 -P "(INFORM_Cuisine|INFORM_City)" {NLU_FPATH}
```
### domain.yml: Advanced Domain Example
`domain.yml`
*Note: As said above, this file has been auto-generated from the DSTC schema, and it's quite large. Below you can see only a part of this file. Feel free to examine the entire file.*
The domain file now provides the list of slots and entities as defined by the DSTC schema, as well as the supported intent classes and system response text templates.
```
DOMAIN_FPATH = "schema_resto_md_yaml/domain.yml"
!echo "domain file size (lines): $(wc -l {DOMAIN_FPATH})"
!echo -e '\n\nmost of domain file is listed below, just some portion of intents and response templates is skipped \n'
!head -40 {DOMAIN_FPATH} && echo "..."
!grep -B 1 -A 10 responses {DOMAIN_FPATH} && echo "..."
!grep --no-group-separator -A 1 OFFER_City: {DOMAIN_FPATH} && echo "..."
!grep --no-group-separator -A 1 CONFIRM_Time: {DOMAIN_FPATH} && echo "..."
```
Now that we have all three key files, like in the Basic Demo, we can now proceed with our bot's training.
```
from deeppavlov import configs
from deeppavlov.core.common.file import read_json
gobot_config = read_json(configs.go_bot.gobot_md_yaml_minimal)
gobot_config['chainer']['pipe'][-1]['slot_filler'] = {"config_path": "slotfill_config.json"}
gobot_config['metadata']['variables']['DATA_PATH'] = 'schema_resto_md_yaml'
gobot_config['metadata']['variables']['MODEL_PATH'] = '.'
```
Since our data is the tutorial data we will use the same subsamples for all of train (training set), test (test set), and valid (validation set) subsamples.
However, for a real DeepPavlov-based goal-oriented bot you should use different train, test, and valid sample stories.md files.
```
!cp schema_resto_md_yaml/stories.md schema_resto_md_yaml/stories-trn.md
!cp schema_resto_md_yaml/stories.md schema_resto_md_yaml/stories-tst.md
!cp schema_resto_md_yaml/stories.md schema_resto_md_yaml/stories-val.md
from deeppavlov import train_model
train_model(gobot_config, download=False);
bot = build_model(gobot_config)
```
Let's see whether the bot works at all:
```
bot.reset()
bot(["Hey!"])[0][0].actions_tuple
bot.reset()
```
Ok, let's have a conversation:
```
bot.reset()
bot(["Hi!"])[0][0].actions_tuple
```
Awesome. Seems like our bot performs well. Let's get some action!
```
bot(["I'd like to find a restaurant for this evening"])[0][0].actions_tuple
```
The bot replies with the request to provide the necessary information, and we give it back:
```
bot(["Somewhere in Oakland, sushi and for two people please"])[0][0].actions_tuple
```
And so on.
```
bot(["Cool! That's what I was looking for, thanks!"])[0][0].actions_tuple
```
Let's say goodbye to our bot
```
bot(["Bye bot"])[0][0].actions_tuple
```
While it'd be nice for it to reply "Good bye!", it didn't. Why?
Given that the DSTC dataset doesn't support this utterance, our bot can't properly react to such a user response. So to make our bot a bit more polite we have to add the "bye" utterances to the training data.
Notice that you will have to add it to all 3 files (note that in case of `domain.yml` you have to add one line to *intents* and another two to *responses* sections of the file):
#### stories.md
```
...
## goodbye
* bye
- utter_bye
...
```
#### nlu.md
```
...
## intent:bye
- goodbye
- goodnight
...
```
#### domain.yml
```
...
intents:
- bye
...
responses:
utter_bye:
- text: Bye!
...
```
You will also have to re-use our stories.md for train, test, and valid stories.md files. Again, for the purposes of this demo, we use the same files for stories.md.
However, for a real DeepPavlov-based goal-oriented bot you should use different train, test, and valid sample stories.md files.
```
# Patch all three data files so the bot learns the "bye" intent, then refresh the train/test/valid copies.
!sed -i -e "s|^\s*$|\* bye\n - utter_BYE\n|g" {STORIES_FPATH} # add bye to each story
!echo -e " utter_BYE:\n - text: \"Bye!\"" >> {DOMAIN_FPATH} # add utter_BYE response template to domain.yml
!grep -m 1 -A 3 bye ../dp_minimal_demo_dir/nlu.md >> {NLU_FPATH} && echo "" >> {NLU_FPATH} # add bye intent examples to nlu.md
!sed -ie "s|intents:|intents:\n - bye|g" {DOMAIN_FPATH} # add bye to domain intents
!cp schema_resto_md_yaml/stories.md schema_resto_md_yaml/stories-trn.md
!cp schema_resto_md_yaml/stories.md schema_resto_md_yaml/stories-tst.md
!cp schema_resto_md_yaml/stories.md schema_resto_md_yaml/stories-val.md
```
Re-training our bot:
```
!rm -rf model # remove the previous trained model
from deeppavlov import train_model
train_model(gobot_config, download=False);
bot = build_model(gobot_config)
```
Checking it:
```
bot.reset()
bot(["Hi!"])[0][0].actions_tuple
bot(["I'd like to find a restaurant for this evening"])[0][0].actions_tuple
bot(["Somewhere in Oakland, sushi and for two people please"])[0][0].actions_tuple
bot(["Cool! That's what I was looking for, thanks!"])[0][0].actions_tuple
bot(["Bye bot"])[0][0].actions_tuple
```
## Comparing with RASA
Now that we've run through a couple of demos, let's make sure that our configs work the same way as they do using RASA framework.
```
%cd /content
!mkdir rasa_demo
%cd rasa_demo
```
Let's install the RASA library
```
!pip install rasa
!python -m spacy download en_core_web_md
!python -m spacy link en_core_web_md en
!mkdir data
!cp ../dp_big_demo_dir/schema_resto_md_yaml/{stories,nlu}.md data
!cp ../dp_big_demo_dir/schema_resto_md_yaml/domain.yml .
```
We'll also use some simple rasa environment config from their repo
```
!wget https://raw.githubusercontent.com/RasaHQ/rasa/1.10.x/examples/moodbot/config.yml
!rasa train
```
And when the bot is trained you can interact with it in the interactive mode:
```
!cat | rasa shell
```
Excellent. While this more Advanced demo uses the auto-generated set of stories, intents, slots, and entities, these files were successfully processed by RASA's framework, and RASA-based trained bot supports the same dialog flow.
## Final words
While DSTC dataset is quite rich and powerful, real-world use cases rarely if ever give bot developers a luxury of such well-annotated detailed dataset for their use cases.
By using the power of DSLs like RASA's, developers can significantly cut the time needed to design their bots from scratch.
In this tutorial, we showed you two demos that show how you can define the domain logic of your assistant using RASA DSLs and build your own bot using the power of the DeepPavlov Go-Bot ML-driven technology.
We encourage you to try it out, and we would love to hear your thoughts on it.
More in-depth examples of the ML-driven goal-oriented bots and their features are coming with the future releases, so stay tuned!
| github_jupyter |
# <div style="text-align: center">Top 5 Data Visualization Libraries Tutorial </div>
<img src='https://i1.wp.com/opensourceforu.com/wp-content/uploads/2017/01/Figure-1-Python-visualisation-libraries.jpg' height=600 width=700>
<div style="text-align:center">last update: <b>31/12/2018</b></div>
>###### You may be interested have a look at 10 Steps to Become a Data Scientist:
1. [Learn Python](https://www.kaggle.com/mjbahmani/the-data-scientist-s-toolbox-tutorial-1)
2. [Python Packages](https://www.kaggle.com/mjbahmani/the-data-scientist-s-toolbox-tutorial-2)
3. [Mathematics and Linear Algebra](https://www.kaggle.com/mjbahmani/linear-algebra-for-data-scientists)
4. [Programming & Analysis Tools](https://www.kaggle.com/mjbahmani/20-ml-algorithms-15-plot-for-beginners)
5. [Big Data](https://www.kaggle.com/mjbahmani/a-data-science-framework-for-quora)
6. [Data visualization](https://www.kaggle.com/mjbahmani/top-5-data-visualization-libraries-tutorial)
7. [Data Cleaning](https://www.kaggle.com/mjbahmani/machine-learning-workflow-for-house-prices)
8. [How to solve a Problem?](https://www.kaggle.com/mjbahmani/the-data-scientist-s-toolbox-tutorial-2)
9. [Machine Learning](https://www.kaggle.com/mjbahmani/a-comprehensive-ml-workflow-with-python)
10. [Deep Learning](https://www.kaggle.com/mjbahmani/top-5-deep-learning-frameworks-tutorial)
---------------------------------------------------------------------
You can Fork and Run this kernel on Github:
> ###### [ GitHub](https://github.com/mjbahmani/10-steps-to-become-a-data-scientist)
-------------------------------------------------------------------------------------------------------------
**I hope you find this kernel helpful and some <font color="red"><b>UPVOTES</b></font> would be very much appreciated**
-----------
<div style="text-align: center"> <font color="blue"> a simple example you will learn in this notebook</font></div>
```
from ipywidgets import interact
%matplotlib inline
import matplotlib.pyplot as plt
import networkx as nx
# wrap a few graph generation functions so they have the same signature
def random_lobster(n, m, k, p):
    """Random lobster graph; uses n, p and p/m (k is accepted only for the uniform signature)."""
    return nx.random_lobster(n, p, p / m)
def powerlaw_cluster(n, m, k, p):
    """Power-law cluster graph; uses n, m, p (k is accepted only for the uniform signature)."""
    return nx.powerlaw_cluster_graph(n, m, p)
def erdos_renyi(n, m, k, p):
    """Erdős–Rényi G(n, p) graph; uses n, p only (m, k accepted for the uniform signature)."""
    return nx.erdos_renyi_graph(n, p)
def newman_watts_strogatz(n, m, k, p):
    """Newman–Watts–Strogatz small-world graph; uses n, k, p (m accepted for the uniform signature)."""
    return nx.newman_watts_strogatz_graph(n, k, p)
def plot_random_graph(n, m, k, p, generator):
    """Build a graph with the selected generator and draw it with networkx's default layout."""
    g = generator(n, m, k, p)
    nx.draw(g)
    plt.show()
# interact() renders sliders for the numeric parameters and a dropdown for the generator
interact(plot_random_graph, n=(2,30), m=(1,10), k=(1,10), p=(0.0, 1.0, 0.001),
         generator={
             'lobster': random_lobster,
             'power law': powerlaw_cluster,
             'Newman-Watts-Strogatz': newman_watts_strogatz,
             u'Erdős-Rényi': erdos_renyi,
         });
```
<a id="top"></a> <br>
## Notebook Content
1. [Introduction](#1)
1. [Loading Packages](#2)
1. [version](#21)
1. [Setup](#22)
1. [Data Collection](#23)
1. [Matplotlib](#3)
1. [Scatterplots](#31)
1. [ Line Plots](#32)
1. [Bar Charts](#33)
1. [Histograms](#34)
1. [Box and Whisker Plots](#35)
1. [Heatmaps](#36)
1. [Animations](#37)
1. [Interactivity](#38)
1. [DataFrame.plot](#39)
1. [Seaborn](#40)
1. [Seaborn Vs Matplotlib](#37)
1. [Useful Python Data Visualization Libraries](#38)
1. [Plotly](#60)
1. [New to Plotly?](#61)
1. [Plotly Offline from Command Line](#62)
1. [Bokeh](#63)
1. [networkx](#64)
1. [Read more](#39)
1. [Courses](#40)
1. [Ebooks](#41)
1. [Cheat sheet](#41)
1. [Conclusion](#39)
1. [References](#40)
<a id="1"></a> <br>
## 1- Introduction
If you've followed my other kernels so far, you have noticed that for those who are <b>beginners</b>, I've introduced a course "<b> <a href='https://www.kaggle.com/mjbahmani/10-steps-to-become-a-data-scientist' >10 Steps to Become a Data Scientist</a> </b>". In this kernel we will take another step together. There are plenty of <b>Kernels</b> that can help you learn <b>Python's libraries</b> from scratch, but here in <b>Kaggle</b> I want to analyze <font color="green"><b>Meta Kaggle</b></font>, a popular dataset.
After reading it, you can use it to analyze other real datasets and as a template for dealing with <b>ML</b> problems.
It is clear that everyone in this community is familiar with Meta Kaggle dataset but if you need to review your information about the datasets please visit [meta-kaggle](https://www.kaggle.com/kaggle/meta-kaggle) .
I am open to getting your feedback for improving this **kernel** together.
<a id="2"></a> <br>
## 2- Loading Packages
In this kernel we are using the following packages:
```
from matplotlib.figure import Figure
import matplotlib.pylab as pylab
import matplotlib.pyplot as plt
import matplotlib as mpl
import seaborn as sns
import pandas as pd
import numpy as np
import matplotlib
import warnings
import string
import numpy
import csv
import os
```
<a id="21"></a> <br>
## 2-1 version
```
print('matplotlib: {}'.format(matplotlib.__version__))
print('seaborn: {}'.format(sns.__version__))
print('pandas: {}'.format(pd.__version__))
print('numpy: {}'.format(np.__version__))
#print('wordcloud: {}'.format(wordcloud.version))
```
<a id="22"></a> <br>
## 2-2 Setup
A few tiny adjustments for better **code readability**
```
sns.set(style='white', context='notebook', palette='deep')
pylab.rcParams['figure.figsize'] = 12,8
warnings.filterwarnings('ignore')
mpl.style.use('ggplot')
sns.set_style('white')
%matplotlib inline
```
<a id="23"></a> <br>
## 2-3 Data Collection
**Data collection** is the process of gathering and measuring data, information or any variables of interest in a standardized and established manner that enables the collector to answer or test hypothesis and evaluate outcomes of the particular collection.[techopedia]
<img src='https://www.iaeemwc.com/assets/images/Blog/data-collection.jpg' height=300 width=300>
I start Collection Data by the Users and Kernels datasets into **Pandas DataFrames**
```
# import kernels and users to play with it
#command--> 1
users = pd.read_csv("../input/Users.csv")
kernels = pd.read_csv("../input/Kernels.csv")
messages = pd.read_csv("../input/ForumMessages.csv")
```
**<< Note 1 >>**
* Each row is an observation (also known as : sample, example, instance, record)
* Each column is a feature (also known as: Predictor, attribute, Independent Variable, input, regressor, Covariate)
###### [Go to top](#top)
```
#command--> 2
users.sample(1)
```
### Please **replace** your username and find your userid
We suppose that userid==authoruserid and use userid for both kernels and users dataset
```
# Look the user up via the `username` variable instead of repeating the
# literal, so changing the name in one place is enough.
username = "mjbahmani"
# int(...) extracts the scalar Id; assumes UserName values are unique in Users.csv
userid = int(users[users['UserName'] == username].Id)
userid
```
But if we had missing values, we could simply use **dropna()** (be careful — sometimes you should not do this!)
```
# remove rows that have NA's
print('Before Droping',messages.shape)
#command--> 3
messages = messages.dropna()
print('After Droping',messages.shape)
```
<a id="231"></a> <br>
## 2-3-1 Features
Features can be from following types:
1. numeric
1. categorical
1. ordinal
1. datetime
1. coordinates
Find the type of features in **Meta Kaggle**?!
<br>
For getting some information about the dataset you can use **info()** command
###### [Go to top](#top)
```
#command--> 4
print(users.info())
```
<a id="232"></a> <br>
## 2-3-2 Explorer Dataset
1. Dimensions of the dataset.
1. Peek at the data itself.
1. Statistical summary of all attributes.
1. Breakdown of the data by the class variable.
Don’t worry, each look at the data is **one command**. These are useful commands that you can use again and again on future projects.
###### [Go to top](#top)
```
# shape
#command--> 5
print(users.shape)
#columns*rows
#command--> 6
users.size
```
We can get a quick idea of how many instances (rows) and how many attributes (columns) the data contains with the shape property.
You can see the number of unique values of **Medal** with the commands below:
```
#command--> 7
kernels['Medal'].unique()
#command--> 8
kernels["Medal"].value_counts()
```
To check the first 5 rows of the data set, we can use head(5).
```
kernels.head(5)
```
To check out last 5 row of the data set, we use tail() function
```
#command--> 9
users.tail()
```
To pop up 5 random rows from the data set, we can use **sample(5)** function
```
kernels.sample(5)
```
To give a statistical summary about the dataset, we can use **describe()**
```
kernels.describe()
```
<a id="235"></a> <br>
## 2-3-5 Find yourself in the Users dataset
```
#command--> 12
users[users['Id']==userid]
```
<a id="236"></a> <br>
## 2-3-6 Find your kernels in Kernels dataset
```
#command--> 13
yourkernels=kernels[kernels['AuthorUserId']==userid]
yourkernels.head(2)
```
<a id="3"></a> <br>
## 3- Data Visualization Libraries
Before you start learning , I am giving an overview of 10 interdisciplinary **Python data visualization libraries**, from the well-known to the obscure.
* 1- matplotlib
matplotlib is the O.G. of Python data visualization libraries. Despite being over a decade old, it’s still the most widely used library for plotting in the Python community. It was designed to closely resemble MATLAB, a proprietary programming language developed in the 1980s.
* 2- Seaborn
Seaborn harnesses the power of matplotlib to create beautiful charts in a few lines of code. The key difference is Seaborn’s default styles and color palettes, which are designed to be more aesthetically pleasing and modern. Since Seaborn is built on top of matplotlib, you’ll need to know matplotlib to tweak Seaborn’s defaults.
* 3- ggplot
ggplot is based on ggplot2, an R plotting system, and concepts from The Grammar of Graphics. ggplot operates differently than matplotlib: it lets you layer components to create a complete plot. For instance, you can start with axes, then add points, then a line, a trendline, etc. Although The Grammar of Graphics has been praised as an “intuitive” method for plotting, seasoned matplotlib users might need time to adjust to this new mindset.
* 4- Bokeh
Like ggplot, Bokeh is based on The Grammar of Graphics, but unlike ggplot, it’s native to Python, not ported over from R. Its strength lies in the ability to create interactive, web-ready plots, which can be easily outputted as JSON objects, HTML documents, or interactive web applications. Bokeh also supports streaming and real-time data.
* 5- pygal
Like Bokeh and Plotly, pygal offers interactive plots that can be embedded in the web browser. Its prime differentiator is the ability to output charts as SVGs. As long as you’re working with smaller datasets, SVGs will do you just fine. But if you’re making charts with hundreds of thousands of data points, they’ll have trouble rendering and become sluggish.
* 6- Plotly
You might know Plotly as an online platform for data visualization, but did you also know you can access its capabilities from a Python notebook? Like Bokeh, Plotly’s forte is making interactive plots, but it offers some charts you won’t find in most libraries, like contour plots, dendrograms, and 3D charts.
* 7- geoplotlib
geoplotlib is a toolbox for creating maps and plotting geographical data. You can use it to create a variety of map-types, like choropleths, heatmaps, and dot density maps. You must have Pyglet (an object-oriented programming interface) installed to use geoplotlib. Nonetheless, since most Python data visualization libraries don’t offer maps, it’s nice to have a library dedicated solely to them.
* 8- Gleam
Gleam is inspired by R’s Shiny package. It allows you to turn analyses into interactive web apps using only Python scripts, so you don’t have to know any other languages like HTML, CSS, or JavaScript. Gleam works with any Python data visualization library. Once you’ve created a plot, you can build fields on top of it so users can filter and sort data.
* 9- missingno
Dealing with missing data is a pain. missingno allows you to quickly gauge the completeness of a dataset with a visual summary, instead of trudging through a table. You can filter and sort data based on completion or spot correlations with a heatmap or a dendrogram.
* 10- Leather
Leather’s creator, Christopher Groskopf, puts it best: “Leather is the Python charting library for those who need charts now and don’t care if they’re perfect.” It’s designed to work with all data types and produces charts as SVGs, so you can scale them without losing image quality. Since this library is relatively new, some of the documentation is still in progress. The charts you can make are pretty basic—but that’s the intention.
At the end, nice cheatsheet on how to best visualize your data. I think I will print it out as a good reminder of "best practices". Check out the link for the complete cheatsheet, also as a PDF.
* 11- Chartify
Chartify is a Python library that makes it easy for data scientists to create charts.
Why use Chartify?
1. Consistent input data format: Spend less time transforming data to get your charts to work. All plotting functions use a consistent tidy input data format.
1. Smart default styles: Create pretty charts with very little customization required.
1. Simple API: We've attempted to make to the API as intuitive and easy to learn as possible.
1. Flexibility: Chartify is built on top of Bokeh, so if you do need more control you can always fall back on Bokeh's API.
Link: https://github.com/mjbahmani/Machine-Learning-Workflow-with-Python
![cheatsheet ][1]
[Reference][2]
[1]: http://s8.picofile.com/file/8340669884/53f6a826_d7df_4b55_81e6_7c23b3fff0a3_original.png
[2]: https://blog.modeanalytics.com/python-data-visualization-libraries/
<a id="4"></a> <br>
## 4- Matplotlib
This Matplotlib tutorial takes you through the basics Python data visualization: the anatomy of a plot, pyplot and pylab, and much more
###### [Go to top](#top)
You can show matplotlib figures directly in the notebook by using the `%matplotlib notebook` and `%matplotlib inline` magic commands.
`%matplotlib notebook` provides an interactive environment.
We can use html cell magic to display the image.
```
plt.plot([1, 2, 3, 4], [10, 20, 25, 30], color='lightblue', linewidth=3)
plt.scatter([0.3, 3.8, 1.2, 2.5], [11, 25, 9, 26], color='darkgreen', marker='^')
plt.xlim(0.5, 4.5)
plt.show()
```
Simple and powerful visualizations can be generated using the Matplotlib Python Library. More than a decade old, it is the most widely-used library for plotting in the Python community. A wide range of graphs from histograms to heat plots to line plots can be plotted using Matplotlib.
Many other libraries are built on top of Matplotlib and are designed to work in conjunction with analysis, it being the first Python data visualization library. Libraries like pandas and matplotlib are “wrappers” over Matplotlib allowing access to a number of Matplotlib’s methods with less code.
<a id="41"></a> <br>
## 4-1 Scatterplots
```
x = np.array([1,2,3,4,5,6,7,8])
y = x
plt.figure()
plt.scatter(x, y) # similar to plt.plot(x, y, '.'), but the underlying child objects in the axes are not Line2D
x = np.array([1,2,3,4,5,6,7,8])
y = x
# create a list of colors for each point to have
# ['green', 'green', 'green', 'green', 'green', 'green', 'green', 'red']
colors = ['green']*(len(x)-1)
colors.append('red')
plt.figure()
# plot the point with size 100 and chosen colors
plt.scatter(x, y, s=100, c=colors)
plt.figure()
# plot a data series 'Tall students' in red using the first two elements of x and y
plt.scatter(x[:2], y[:2], s=100, c='red', label='Tall students')
# plot a second data series 'Short students' in blue using the last three elements of x and y
plt.scatter(x[2:], y[2:], s=100, c='blue', label='Short students')
x = np.random.randint(low=1, high=11, size=50)
y = x + np.random.randint(1, 5, size=x.size)
data = np.column_stack((x, y))
fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2,
figsize=(8, 4))
ax1.scatter(x=x, y=y, marker='o', c='r', edgecolor='b')
ax1.set_title('Scatter: $x$ versus $y$')
ax1.set_xlabel('$x$')
ax1.set_ylabel('$y$')
ax2.hist(data, bins=np.arange(data.min(), data.max()),
label=('x', 'y'))
ax2.legend(loc=(0.65, 0.8))
ax2.set_title('Frequencies of $x$ and $y$')
ax2.yaxis.tick_right()
# Modify the graph above by assigning each species an individual color.
#command--> 19
x=yourkernels["TotalVotes"]
y=yourkernels["TotalViews"]
plt.scatter(x, y)
plt.legend()
plt.show()
```
<a id="52"></a> <br>
## 4-2 Line Plots
```
linear_data = np.array([1,2,3,4,5,6,7,8])
exponential_data = linear_data**2
plt.figure()
# plot the linear data and the exponential data
plt.plot(linear_data, '-o', exponential_data, '-o')
# plot another series with a dashed red line
plt.plot([22,44,55], '--r')
```
<a id="43"></a> <br>
## 4-3 Bar Charts
```
plt.figure()
xvals = range(len(linear_data))
plt.bar(xvals, linear_data, width = 0.3)
new_xvals = []
# plot another set of bars, adjusting the new xvals to make up for the first set of bars plotted
for item in xvals:
new_xvals.append(item+0.3)
plt.bar(new_xvals, exponential_data, width = 0.3 ,color='red')
from random import randint
linear_err = [randint(0,15) for x in range(len(linear_data))]
# This will plot a new set of bars with errorbars using the list of random error values
plt.bar(xvals, linear_data, width = 0.3, yerr=linear_err)
# stacked bar charts are also possible
plt.figure()
xvals = range(len(linear_data))
plt.bar(xvals, linear_data, width = 0.3, color='b')
plt.bar(xvals, exponential_data, width = 0.3, bottom=linear_data, color='r')
# or use barh for horizontal bar charts
plt.figure()
xvals = range(len(linear_data))
plt.barh(xvals, linear_data, height = 0.3, color='b')
plt.barh(xvals, exponential_data, height = 0.3, left=linear_data, color='r')
# Initialize the plot
fig = plt.figure(figsize=(20,10))
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
# or replace the three lines of code above by the following line:
#fig, (ax1, ax2) = plt.subplots(1,2, figsize=(20,10))
# Plot the data
ax1.bar([1,2,3],[3,4,5])
ax2.barh([0.5,1,2.5],[0,1,2])
# Show the plot
plt.show()
plt.figure()
# subplot with 1 row, 2 columns, and current axis is 1st subplot axes
plt.subplot(1, 2, 1)
linear_data = np.array([1,2,3,4,5,6,7,8])
plt.plot(linear_data, '-o')
exponential_data = linear_data**2
# subplot with 1 row, 2 columns, and current axis is 2nd subplot axes
plt.subplot(1, 2, 2)
plt.plot(exponential_data, '-o')
# plot exponential data on 1st subplot axes
plt.subplot(1, 2, 1)
plt.plot(exponential_data, '-x')
plt.figure()
ax1 = plt.subplot(1, 2, 1)
plt.plot(linear_data, '-o')
# pass sharey=ax1 to ensure the two subplots share the same y axis
ax2 = plt.subplot(1, 2, 2, sharey=ax1)
plt.plot(exponential_data, '-x')
```
<a id="44"></a> <br>
## 4-4 Histograms
```
# create 2x2 grid of axis subplots
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, sharex=True)
axs = [ax1,ax2,ax3,ax4]
# draw n = 10, 100, 1000, and 10000 samples from the normal distribution and plot corresponding histograms
for n in range(0,len(axs)):
sample_size = 10**(n+1)
sample = np.random.normal(loc=0.0, scale=1.0, size=sample_size)
axs[n].hist(sample)
axs[n].set_title('n={}'.format(sample_size))
# repeat with number of bins set to 100
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, sharex=True)
axs = [ax1,ax2,ax3,ax4]
for n in range(0,len(axs)):
sample_size = 10**(n+1)
sample = np.random.normal(loc=0.0, scale=1.0, size=sample_size)
axs[n].hist(sample, bins=100)
axs[n].set_title('n={}'.format(sample_size))
plt.figure()
Y = np.random.normal(loc=0.0, scale=1.0, size=10000)
X = np.random.random(size=10000)
plt.scatter(X,Y)
```
It looks like perhaps two of the input variables have a Gaussian distribution. This is useful to note as we can use algorithms that can exploit this assumption.
```
yourkernels["TotalViews"].hist();
yourkernels["TotalComments"].hist();
sns.factorplot('TotalViews','TotalVotes',data=yourkernels)
plt.show()
```
<a id="45"></a> <br>
## 4-5 Box and Whisker Plots
In descriptive statistics, a **box plot** or boxplot is a method for graphically depicting groups of numerical data through their quartiles. Box plots may also have lines extending vertically from the boxes (whiskers) indicating variability outside the upper and lower quartiles, hence the terms box-and-whisker plot and box-and-whisker diagram.[wikipedia]
```
normal_sample = np.random.normal(loc=0.0, scale=1.0, size=10000)
random_sample = np.random.random(size=10000)
gamma_sample = np.random.gamma(2, size=10000)
df = pd.DataFrame({'normal': normal_sample,
'random': random_sample,
'gamma': gamma_sample})
plt.figure()
# create a boxplot of the normal data, assign the output to a variable to supress output
_ = plt.boxplot(df['normal'], whis='range')
# clear the current figure
plt.clf()
# plot boxplots for all three of df's columns
_ = plt.boxplot([ df['normal'], df['random'], df['gamma'] ], whis='range')
plt.figure()
_ = plt.hist(df['gamma'], bins=100)
import mpl_toolkits.axes_grid1.inset_locator as mpl_il
plt.figure()
plt.boxplot([ df['normal'], df['random'], df['gamma'] ], whis='range')
# overlay axis on top of another
ax2 = mpl_il.inset_axes(plt.gca(), width='60%', height='40%', loc=2)
ax2.hist(df['gamma'], bins=100)
ax2.margins(x=0.5)
# switch the y axis ticks for ax2 to the right side
ax2.yaxis.tick_right()
# if `whis` argument isn't passed, boxplot defaults to showing 1.5*interquartile (IQR) whiskers with outliers
plt.figure()
_ = plt.boxplot([ df['normal'], df['random'], df['gamma'] ] )
sns.factorplot('TotalComments','TotalVotes',data=yourkernels)
plt.show()
```
<a id="46"></a> <br>
## 4-6 Heatmaps
```
plt.figure()
Y = np.random.normal(loc=0.0, scale=1.0, size=10000)
X = np.random.random(size=10000)
_ = plt.hist2d(X, Y, bins=25)
plt.figure()
_ = plt.hist2d(X, Y, bins=100)
```
<a id="47"></a> <br>
## 4-7 Animations
```
import matplotlib.animation as animation
n = 100
x = np.random.randn(n)
# create the function that will do the plotting, where curr is the current frame
# create the function that will do the plotting, where curr is the current frame
def update(curr):
    """Redraw the histogram of the first `curr` samples for one animation frame.

    Relies on notebook globals: `n` (total sample count), `x` (the samples)
    and `a` (the FuncAnimation object created after this definition).
    """
    # check if animation is at the last frame, and if so, stop the animation
    if curr == n:
        a.event_source.stop()
    plt.cla()  # clear the axes so each frame is drawn fresh
    bins = np.arange(-4, 4, 0.5)
    plt.hist(x[:curr], bins=bins)
    # fixed axis limits so the view doesn't jump between frames
    plt.axis([-4,4,0,30])
    plt.gca().set_title('Sampling the Normal Distribution')
    plt.gca().set_ylabel('Frequency')
    plt.gca().set_xlabel('Value')
    plt.annotate('n = {}'.format(curr), [3,27])
fig = plt.figure()
a = animation.FuncAnimation(fig, update, interval=100)
```
<a id="48"></a> <br>
## 4-8 Interactivity
```
plt.figure()
data = np.random.rand(10)
plt.plot(data)
def onclick(event):
    """Mouse-click callback: redraw the plot and show the click position in the title.

    `event` is a matplotlib button_press_event; reads the notebook global `data`.
    """
    plt.cla()
    plt.plot(data)
    plt.gca().set_title('Event at pixels {},{} \nand data {},{}'.format(event.x, event.y, event.xdata, event.ydata))
# tell mpl_connect we want to pass a 'button_press_event' into onclick when the event is detected
plt.gcf().canvas.mpl_connect('button_press_event', onclick)
from random import shuffle
origins = ['China', 'Brazil', 'India', 'USA', 'Canada', 'UK', 'Germany', 'Iraq', 'Chile', 'Mexico']
shuffle(origins)
df = pd.DataFrame({'height': np.random.rand(10),
'weight': np.random.rand(10),
'origin': origins})
df
plt.figure()
# picker=5 means the mouse doesn't have to click directly on an event, but can be up to 5 pixels away
plt.scatter(df['height'], df['weight'], picker=5)
plt.gca().set_ylabel('Weight')
plt.gca().set_xlabel('Height')
def onpick(event):
    """Pick-event callback: show the origin country of the selected point in the title.

    `event.ind` holds the indices of the picked points; only the first is used.
    Reads the notebook global DataFrame `df`.
    """
    origin = df.iloc[event.ind[0]]['origin']
    plt.gca().set_title('Selected item came from {}'.format(origin))
# tell mpl_connect we want to pass a 'pick_event' into onpick when the event is detected
plt.gcf().canvas.mpl_connect('pick_event', onpick)
# use the 'seaborn-colorblind' style
plt.style.use('seaborn-colorblind')
```
<a id="49"></a> <br>
## 4-9 DataFrame.plot
```
np.random.seed(123)
df = pd.DataFrame({'A': np.random.randn(365).cumsum(0),
'B': np.random.randn(365).cumsum(0) + 20,
'C': np.random.randn(365).cumsum(0) - 20},
index=pd.date_range('1/1/2017', periods=365))
df.head()
df.plot('A','B', kind = 'scatter');
```
You can also choose the plot kind by using the `DataFrame.plot.kind` methods instead of providing the `kind` keyword argument.
`kind` :
- `'line'` : line plot (default)
- `'bar'` : vertical bar plot
- `'barh'` : horizontal bar plot
- `'hist'` : histogram
- `'box'` : boxplot
- `'kde'` : Kernel Density Estimation plot
- `'density'` : same as 'kde'
- `'area'` : area plot
- `'pie'` : pie plot
- `'scatter'` : scatter plot
- `'hexbin'` : hexbin plot
###### [Go to top](#top)
```
# create a scatter plot of columns 'A' and 'C', with changing color (c) and size (s) based on column 'B'
df.plot.scatter('A', 'C', c='B', s=df['B'], colormap='viridis')
ax = df.plot.scatter('A', 'C', c='B', s=df['B'], colormap='viridis')
ax.set_aspect('equal')
df.plot.box();
df.plot.hist(alpha=0.7);
```
[Kernel density estimation plots](https://en.wikipedia.org/wiki/Kernel_density_estimation) are useful for deriving a smooth continuous function from a given sample.
```
df.plot.kde();
```
<a id="5"></a> <br>
# 5- Seaborn
As you have just read, **Seaborn** is complementary to Matplotlib and it specifically targets statistical data visualization. But it goes even further than that: Seaborn extends Matplotlib and that’s why it can address the two biggest frustrations of working with Matplotlib. Or, as Michael Waskom says in the “introduction to Seaborn”: “If matplotlib “tries to make easy things easy and hard things possible”, seaborn tries to make a well-defined set of hard things easy too.”
One of these hard things or frustrations had to do with the default Matplotlib parameters. Seaborn works with different parameters, which undoubtedly speaks to those users that don’t use the default looks of the Matplotlib plots
Seaborn is a library for making statistical graphics in Python. It is built on top of matplotlib and closely integrated with pandas data structures.
Here is some of the functionality that seaborn offers:
A dataset-oriented API for examining relationships between multiple variables
Specialized support for using categorical variables to show observations or aggregate statistics
Options for visualizing univariate or bivariate distributions and for comparing them between subsets of data
Automatic estimation and plotting of linear regression models for different kinds dependent variables
Convenient views onto the overall structure of complex datasets
High-level abstractions for structuring multi-plot grids that let you easily build complex visualizations
Concise control over matplotlib figure styling with several built-in themes
Tools for choosing color palettes that faithfully reveal patterns in your data
Seaborn aims to make visualization a central part of exploring and understanding data. Its dataset-oriented plotting functions operate on dataframes and arrays containing whole datasets and internally perform the necessary semantic mapping and statistical aggregation to produce informative plots.
Here’s an example of what this means:
[Go to top](#top)
<a id="51"></a> <br>
## 5-1 Seaborn Vs Matplotlib
It is summarized that if Matplotlib “tries to make easy things easy and hard things possible”, Seaborn tries to make a well defined set of hard things easy too.”
Seaborn helps resolve the two major problems faced by Matplotlib; the problems are
* Default Matplotlib parameters
* Working with data frames
As Seaborn complements and extends Matplotlib, the learning curve is quite gradual. If you know Matplotlib, you are already halfway through Seaborn.
Important Features of Seaborn
Seaborn is built on top of Python’s core visualization library Matplotlib. It is meant to serve as a complement, and not a replacement. However, Seaborn comes with some very important features. Let us see a few of them here. The features help in −
* Built in themes for styling matplotlib graphics
* Visualizing univariate and bivariate data
* Fitting in and visualizing linear regression models
* Plotting statistical time series data
* Seaborn works well with NumPy and Pandas data structures
* It comes with built in themes for styling Matplotlib graphics
In most cases, you will still use Matplotlib for simple plotting. The knowledge of Matplotlib is recommended to tweak Seaborn’s default plots.
[Go to top](#top)
```
def sinplot(flip=1):
    """Draw four offset, progressively damped sine curves.

    flip: multiply the curves by -1 to mirror them about the x-axis.
    """
    x = np.linspace(0, 14, 100)
    for i in range(1, 5):
        plt.plot(x, np.sin(x + i * .5) * (7 - i) * flip)

# First, render with matplotlib's default styling...
sinplot()
plt.show()

# ...then the identical plot after switching on seaborn's default theme,
# to contrast the two styles.  (The original cell re-defined sinplot()
# verbatim here; the duplicate definition has been removed.)
sns.set()
sinplot()
plt.show()
# Fix the RNG seed so the two synthetic series below are reproducible.
np.random.seed(1234)
v1 = pd.Series(np.random.normal(0,10,1000), name='v1')
# v2 is a noisy linear function of v1, so the two series are strongly correlated.
v2 = pd.Series(2*v1 + np.random.normal(60,15,1000), name='v2')
plt.figure()
plt.hist(v1, alpha=0.7, bins=np.arange(-50,150,5), label='v1');
plt.hist(v2, alpha=0.7, bins=np.arange(-50,150,5), label='v2');
plt.legend();
plt.figure()
# we can pass keyword arguments for each individual component of the plot
sns.distplot(v2, hist_kws={'color': 'Teal'}, kde_kws={'color': 'Navy'});
# NOTE(review): positional jointplot(v1, v2) and distplot/factorplot are
# deprecated or removed in seaborn >= 0.11 -- confirm the seaborn version in use.
sns.jointplot(v1, v2, alpha=0.4);
grid = sns.jointplot(v1, v2, alpha=0.4);
# Force equal axis scaling on the central scatter axes of the joint plot.
grid.ax_joint.set_aspect('equal')
sns.jointplot(v1, v2, kind='hex');
# set the seaborn style for all the following plots
sns.set_style('white')
sns.jointplot(v1, v2, kind='kde', space=0);
sns.factorplot('TotalComments','TotalVotes',data=yourkernels)
plt.show()
# Violin plots: distribution of TotalVotes conditioned on views/comments/medal.
#command--> 24
sns.violinplot(data=yourkernels,x="TotalViews", y="TotalVotes")
# Same, conditioned on comment counts and on medal class.
sns.violinplot(data=yourkernels,x="TotalComments", y="TotalVotes")
sns.violinplot(data=yourkernels,x="Medal", y="TotalVotes")
sns.violinplot(data=yourkernels,x="Medal", y="TotalComments")
```
How many NA elements in every column.
<a id="52"></a> <br>
## 5-2 kdeplot
```
# seaborn's kdeplot, plots univariate or bivariate density estimates.
# Size can be changed by tweaking the value used.
# NOTE(review): FacetGrid's `size` argument was renamed `height` in
# seaborn 0.9 -- confirm the installed version still accepts `size`.
#command--> 25
sns.FacetGrid(yourkernels, hue="Medal", size=5).map(sns.kdeplot, "TotalComments").add_legend()
plt.show()
sns.FacetGrid(yourkernels, hue="Medal", size=5).map(sns.kdeplot, "TotalVotes").add_legend()
plt.show()
# One row of three histograms: vote distribution for each medal class.
f,ax=plt.subplots(1,3,figsize=(20,8))
sns.distplot(yourkernels[yourkernels['Medal']==1].TotalVotes,ax=ax[0])
ax[0].set_title('TotalVotes in Medal 1')
sns.distplot(yourkernels[yourkernels['Medal']==2].TotalVotes,ax=ax[1])
ax[1].set_title('TotalVotes in Medal 2')
sns.distplot(yourkernels[yourkernels['Medal']==3].TotalVotes,ax=ax[2])
ax[2].set_title('TotalVotes in Medal 3')
plt.show()
```
<a id="53"></a> <br>
## 5-3 jointplot
```
# Use seaborn's jointplot to make a hexagonal bin plot.
# Set desired size and ratio and choose a color.
# NOTE(review): `size` was renamed `height` in seaborn 0.9 -- confirm version.
#command--> 25
sns.jointplot(x="TotalVotes", y="TotalViews", data=yourkernels, size=10,ratio=10, kind='hex',color='green')
plt.show()
```
<a id="54"></a> <br>
## 5-4 andrews_curves
```
# seaborn's jointplot shows a bivariate density (here a KDE) together with the
# univariate marginal distributions in the same figure.
sns.jointplot(x="TotalVotes", y="TotalViews", data=yourkernels, size=6, kind='kde', color='#800000', space=0)
```
<a id="55"></a> <br>
## 5-5 Heatmap
```
#command--> 26
plt.figure(figsize=(10,7))
# Heatmap of the pairwise correlation matrix of the numeric columns.
sns.heatmap(yourkernels.corr(),annot=True,cmap='cubehelix_r') #draws heatmap with input as the correlation matrix calculated by yourkernels.corr()
plt.show()
# NOTE(review): factorplot was renamed catplot in seaborn 0.9 -- confirm version.
sns.factorplot('TotalComments','TotalVotes',data=yourkernels)
plt.show()
```
<a id="56"></a> <br>
## 5-6 distplot
```
sns.distplot(yourkernels['TotalVotes']);
```
<a id="6"></a> <br>
## 6- Plotly
How to use **Plotly** offline inside IPython notebooks.
<a id="61"></a> <br>
## 6-1 New to Plotly?
Plotly, also known by its URL, Plot.ly, is a technical computing company headquartered in Montreal, Quebec, that develops online data analytics and visualization tools. Plotly provides online graphing, analytics, and statistics tools for individuals and collaboration, as well as scientific graphing libraries for Python, R, MATLAB, Perl, Julia, Arduino, and REST.
[Go to top](#top)
```
# example for plotly
import plotly.offline as py
import plotly.graph_objs as go
py.init_notebook_mode(connected=True)  # render plotly figures inside the notebook
from plotly import tools
from sklearn import datasets
import plotly.figure_factory as ff
# Load the iris data and keep only the first two features (sepal length/width).
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
Y = iris.target
# Axis limits with a 0.5 margin around the data (computed but not used below).
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
# Scatter of the 150 samples; marker colors are random values, not the classes.
trace = go.Scatter(x=X[:, 0],
y=X[:, 1],
mode='markers',
marker=dict(color=np.random.randn(150),
size=10,
colorscale='Viridis',
showscale=False))
layout = go.Layout(title='Training Points',
xaxis=dict(title='Sepal length',
showgrid=False),
yaxis=dict(title='Sepal width',
showgrid=False),
)
fig = go.Figure(data=[trace], layout=layout)
py.iplot(fig)
```
<a id="62"></a> <br>
## 6-2 Plotly Offline from Command Line
You can plot your graphs from a python script from command line. On executing the script, it will open a web browser with your Plotly Graph drawn.
[Go to top](#top)
```
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
import plotly.graph_objs as go
# plot() writes a standalone HTML file and opens it in the default browser.
plot([go.Scatter(x=[1, 2, 3], y=[3, 1, 6])])
```
<a id="7"></a> <br>
# 7- Bokeh
**Bokeh** is a large library that exposes many capabilities, so this section is only a quick tour of some common Bokeh use cases and workflows. For more detailed information please consult the full User Guide.
Let’s begin with some examples. Plotting data in basic Python lists as a line plot including zoom, pan, save, and other tools is simple and straightforward:
[Go to top](#top)
```
from ipywidgets import interact
import numpy as np
from bokeh.io import push_notebook, show, output_notebook
from bokeh.plotting import figure
# Route bokeh output into the notebook (instead of a standalone HTML file).
output_notebook()
# NOTE(review): the original cell assigned x = np.linspace(0, 2*np.pi, 2000)
# and y = np.sin(x) here, but both were immediately overwritten below without
# ever being used (leftover from a different example); the dead stores were removed.
from bokeh.plotting import figure, output_file, show
# prepare some data
x = [1, 2, 3, 4, 5]
y = [6, 7, 2, 4, 5]
# create a new plot with a title and axis labels
p = figure(title="simple line example", x_axis_label='x', y_axis_label='y')
# add a line renderer with legend and line thickness
p.line(x, y, legend="Temp.", line_width=2)
# show the results
show(p)
```
If you direct the output to a file by calling output_file("lines.html") (rather than output_notebook(), which embeds the plot in the notebook as shown above), executing this script creates a new file "lines.html" and automatically opens a browser tab to display it. (For presentation purposes we have included the plot output directly inline in this document.)
The basic steps to creating plots with the bokeh.plotting interface are:
Prepare some data
In this case plain python lists, but could also be NumPy arrays or Pandas series.
Tell Bokeh where to generate output
In this case using output_file(), with the filename "lines.html". Another option is output_notebook() for use in Jupyter notebooks.
Call figure()
This creates a plot with typical default options and easy customization of title, tools, and axes labels.
Add renderers
In this case, we use line() for our data, specifying visual customizations like colors, legends and widths.
Ask Bokeh to show() or save() the results.
These functions save the plot to an HTML file and optionally display it in a browser.
Steps three and four can be repeated to create more than one plot, as shown in some of the examples below.
The bokeh.plotting interface is also quite handy if we need to customize the output a bit more by adding more data series, glyphs, logarithmic axis, and so on. It’s also possible to easily combine multiple glyphs together on one plot as shown below:
[Go to top](#top)
```
from bokeh.plotting import figure, output_file, show
# prepare some data
x = [0.1, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0]
y0 = [i**2 for i in x]
y1 = [10**i for i in x]
y2 = [10**(i**2) for i in x]
# create a new plot with a log-scale y axis wide enough for all three series
p = figure(
tools="pan,box_zoom,reset,save",
y_axis_type="log", y_range=[0.001, 10**11], title="log axis example",
x_axis_label='sections', y_axis_label='particles'
)
# add some renderers: several glyphs combined on the one plot
# NOTE(review): the `legend` keyword was split into legend_label/legend_field
# in bokeh 1.4 -- confirm the installed bokeh version still accepts `legend`.
p.line(x, x, legend="y=x")
p.circle(x, x, legend="y=x", fill_color="white", size=8)
p.line(x, y0, legend="y=x^2", line_width=3)
p.line(x, y1, legend="y=10^x", line_color="red")
p.circle(x, y1, legend="y=10^x", fill_color="red", line_color="red", size=6)
p.line(x, y2, legend="y=10^x^2", line_color="orange", line_dash="4 4")
# show the results
show(p)
```
<a id="8"></a> <br>
# 8- NetworkX
NetworkX is a Python package for the creation, manipulation, and study of the structure, dynamics, and functions of complex networks.
```
import sys
import matplotlib.pyplot as plt
import networkx as nx
G = nx.grid_2d_graph(5, 5) # 5x5 grid
# print the adjacency list
for line in nx.generate_adjlist(G):
print(line)
# write edgelist to grid.edgelist
nx.write_edgelist(G, path="grid.edgelist", delimiter=":")
# read edgelist from grid.edgelist
H = nx.read_edgelist(path="grid.edgelist", delimiter=":")
nx.draw(H)
plt.show()
from ipywidgets import interact
%matplotlib inline
import matplotlib.pyplot as plt
import networkx as nx
# wrap a few graph generation functions so they have the same signature
def random_lobster(n, m, k, p):
return nx.random_lobster(n, p, p / m)
def powerlaw_cluster(n, m, k, p):
return nx.powerlaw_cluster_graph(n, m, p)
def erdos_renyi(n, m, k, p):
return nx.erdos_renyi_graph(n, p)
def newman_watts_strogatz(n, m, k, p):
return nx.newman_watts_strogatz_graph(n, k, p)
def plot_random_graph(n, m, k, p, generator):
g = generator(n, m, k, p)
nx.draw(g)
plt.show()
interact(plot_random_graph, n=(2,30), m=(1,10), k=(1,10), p=(0.0, 1.0, 0.001),
generator={
'lobster': random_lobster,
'power law': powerlaw_cluster,
'Newman-Watts-Strogatz': newman_watts_strogatz,
u'Erdős-Rényi': erdos_renyi,
});
```
<a id="9"></a> <br>
# 9- Read more
You can start to learn and review your knowledge of machine learning with a good dataset, and memorize the workflow for your journey in the data-science world. To read more, here are some courses, e-books, and cheat sheets:
<a id="91"></a> <br>
## 9-1 Courses
There are a lot of online courses that can help you develop your knowledge, here I have just listed some of them:
1. [Machine Learning Certification by Stanford University (Coursera)](https://www.coursera.org/learn/machine-learning/)
2. [Machine Learning A-Z™: Hands-On Python & R In Data Science (Udemy)](https://www.udemy.com/machinelearning/)
3. [Deep Learning Certification by Andrew Ng from deeplearning.ai (Coursera)](https://www.coursera.org/specializations/deep-learning)
4. [Python for Data Science and Machine Learning Bootcamp (Udemy)](https://www.udemy.com/python-for-data-science-and-machine-learning-bootcamp/)
5. [Mathematics for Machine Learning by Imperial College London](https://www.coursera.org/specializations/mathematics-machine-learning)
6. [Deep Learning A-Z™: Hands-On Artificial Neural Networks](https://www.udemy.com/deeplearning/)
7. [Complete Guide to TensorFlow for Deep Learning Tutorial with Python](https://www.udemy.com/complete-guide-to-tensorflow-for-deep-learning-with-python/)
8. [Data Science and Machine Learning Tutorial with Python – Hands On](https://www.udemy.com/data-science-and-machine-learning-with-python-hands-on/)
9. [Machine Learning Certification by University of Washington](https://www.coursera.org/specializations/machine-learning)
10. [Data Science and Machine Learning Bootcamp with R](https://www.udemy.com/data-science-and-machine-learning-bootcamp-with-r/)
11. [Creative Applications of Deep Learning with TensorFlow](https://www.class-central.com/course/kadenze-creative-applications-of-deep-learning-with-tensorflow-6679)
12. [Neural Networks for Machine Learning](https://www.class-central.com/mooc/398/coursera-neural-networks-for-machine-learning)
13. [Practical Deep Learning For Coders, Part 1](https://www.class-central.com/mooc/7887/practical-deep-learning-for-coders-part-1)
14. [Machine Learning](https://www.cs.ox.ac.uk/teaching/courses/2014-2015/ml/index.html)
<a id="92"></a> <br>
## 9-2 Ebooks
So you love reading — here are **10 free machine learning books**:
1. [Probability and Statistics for Programmers](http://www.greenteapress.com/thinkstats/)
2. [Bayesian Reasoning and Machine Learning](http://web4.cs.ucl.ac.uk/staff/D.Barber/textbook/091117.pdf)
2. [An Introduction to Statistical Learning](http://www-bcf.usc.edu/~gareth/ISL/)
2. [Understanding Machine Learning](http://www.cs.huji.ac.il/~shais/UnderstandingMachineLearning/index.html)
2. [A Programmer’s Guide to Data Mining](http://guidetodatamining.com/)
2. [Mining of Massive Datasets](http://infolab.stanford.edu/~ullman/mmds/book.pdf)
2. [A Brief Introduction to Neural Networks](http://www.dkriesel.com/_media/science/neuronalenetze-en-zeta2-2col-dkrieselcom.pdf)
2. [Deep Learning](http://www.deeplearningbook.org/)
2. [Natural Language Processing with Python](https://www.researchgate.net/publication/220691633_Natural_Language_Processing_with_Python)
2. [Machine Learning Yearning](http://www.mlyearning.org/)
<a id="93"></a> <br>
## 9-3 Cheat Sheets
Data Science is an ever-growing field, there are numerous tools & techniques to remember. It is not possible for anyone to remember all the functions, operations and formulas of each concept. That’s why we have cheat sheets. But there are a plethora of cheat sheets available out there, choosing the right cheat sheet is a tough task. So, I decided to write this article.
Here I have selected the cheat sheets on the following criteria: comprehensiveness, clarity, and content [26]:
1. [Quick Guide to learn Python for Data Science ](https://github.com/mjbahmani/10-steps-to-become-a-data-scientist/blob/master/cheatsheets/Data-Science-in-Python.pdf)
1. [Python for Data Science Cheat sheet ](https://github.com/mjbahmani/10-steps-to-become-a-data-scientist/blob/master/cheatsheets/beginners_python_cheat_sheet.pdf)
1. [Python For Data Science Cheat Sheet NumPy](https://github.com/mjbahmani/10-steps-to-become-a-data-scientist/blob/master/cheatsheets/Numpy_Python_Cheat_Sheet.pdf)
1. Exploratory Data Analysis in Python
1. [Data Exploration using Pandas in Python](https://github.com/mjbahmani/10-steps-to-become-a-data-scientist/blob/master/cheatsheets/Data-Exploration-in-Python.pdf)
1. [Data Visualisation in Python](https://github.com/mjbahmani/10-steps-to-become-a-data-scientist/blob/master/cheatsheets/data-visualisation-infographics1.jpg)
1. [Python For Data Science Cheat Sheet Bokeh](https://github.com/mjbahmani/10-steps-to-become-a-data-scientist/blob/master/cheatsheets/Python_Bokeh_Cheat_Sheet.pdf)
1. [Cheat Sheet: Scikit Learn ](https://github.com/mjbahmani/10-steps-to-become-a-data-scientist/blob/master/cheatsheets/Scikit-Learn-Infographic.pdf)
1. [MLalgorithms CheatSheet](https://github.com/mjbahmani/10-steps-to-become-a-data-scientist/blob/master/cheatsheets/MLalgorithms-.pdf)
1. [Probability Basics Cheat Sheet ](https://github.com/mjbahmani/10-steps-to-become-a-data-scientist/blob/master/cheatsheets/probability_cheatsheet.pdf)
[Go to top](#top)
<a id="10"></a> <br>
# 10- conclusion
Some of the other popular data visualisation libraries in Python are Bokeh, Geoplotlib, Gleam, Missingno, Dash, Leather, and Altair, among others. Python gives a lot of options to visualise data, so it is important to identify the method best suited to your needs — from basic plotting to sophisticated and complicated statistical charts. It may also depend on functionalities such as generating vector and interactive files, and on the flexibility offered by these tools.
This kernel is not complete yet — more to follow!
[Go to top](#top)
You can follow me on:
> ###### [ GitHub](https://github.com/mjbahmani/10-steps-to-become-a-data-scientist)
--------------------------------------
**I hope you find this kernel helpful and some <font color="red"><b>UPVOTES</b></font> would be very much appreciated**
<a id="11"></a> <br>
# 11- References
1. [Coursera](https://www.coursera.org/specializations/data-science-python)
1. [GitHub](https://github.com/mjbahmani)
1. [analyticsindiamag](https://www.analyticsindiamag.com/top-5-best-data-visualisation-libraries-in-python/)
[Go to top](#top)
Go to first step: [**Course Home Page**](https://www.kaggle.com/mjbahmani/10-steps-to-become-a-data-scientist)
Go to next step : [**Titanic**](https://www.kaggle.com/mjbahmani/a-comprehensive-ml-workflow-with-python)
| github_jupyter |
# NCS Experiments
© 2020 Nokia
Licensed under the BSD 3 Clause license
SPDX-License-Identifier: BSD-3-Clause
```
%load_ext autoreload
%autoreload 2
timeout = 3600 * 10 # 10 hours
from pathlib import Path
import json
import os
os.environ["snippets_collection"] = "so-ds-feb20"
os.environ["train_snippets_collection"] = "so-ds-feb20"
os.environ["valid_dataset"] = "so-ds-feb20-valid"
os.environ["test_dataset"] = "so-ds-feb20-test"
output_dir = Path("so-ds-feb20")
os.environ["snippets_collection"] = "conala-curated"
os.environ["train_snippets_collection"] = "conala-curated"
os.environ["valid_dataset"] = "conala-curated-0.5-test"
os.environ["test_dataset"] = "conala-curated-0.5-test"
output_dir = Path("conala")
os.environ["snippets_collection"] = "staqc-py-cleaned"
os.environ["train_snippets_collection"] = "staqc-py-cleaned"
os.environ["valid_dataset"] = "staqc-py-raw-valid"
os.environ["test_dataset"] = "staqc-py-raw-test"
output_dir = Path("staqc-py")
os.environ["output_dir"] = str(output_dir)
if not output_dir.exists():
output_dir.mkdir()
```
## Preprocessing hyperparameters
```
text_overrides_ = [{}, {"lemmatize": False}, {"remove_stop": False}] + 8 * [{}]
code_overrides_ = [{},
{"lemmatize": False},
{"remove_stop": False},
{"keep_comments": False},
{"identifier_types": ["call", "import"]}, # without other identifiers
{"identifier_types": [ "attribute", "argument", "keyword_argument", "generic", "import"]}, # without calls
{"identifier_types": [ "attribute", "argument", "keyword_argument", "generic", "call"]}, # without import
{"rstrip_numbers": False},
{"keep_loops": False},
{"keep_bin_ops": False},
{"case_split": False},
]
os.environ["fast_text_overrides"] = "{}"
os.environ.pop("zip_fn", None)
for i, (text_overrides, code_overrides) in enumerate(zip(text_overrides_, code_overrides_)):
os.environ["text_overrides"] = json.dumps(text_overrides)
os.environ["code_overrides"] = json.dumps(code_overrides)
output_base = str(output_dir/f"ncs_preprocess_{i}")
!python -m nbconvert ncs.ipynb --execute --NbConvertApp.output_base=$output_base --ExecutePreprocessor.timeout=$timeout
```
## Original ncs
```
# Reproduce the preprocessing of the original NCS setup: no lemmatization,
# drop loops/binary ops/trailing numbers, keep only call+import identifiers.
text_overrides = {"lemmatize": False}
code_overrides = {"lemmatize": False, "keep_loops": False, "keep_bin_ops": False, "rstrip_numbers": False, "identifier_types": ["call", "import"]}
os.environ.pop("zip_fn", None)  # make sure no zip function leaks in from a previous cell
os.environ["text_overrides"] = json.dumps(text_overrides)
os.environ["code_overrides"] = json.dumps(code_overrides)
# NOTE(review): fast_text_overrides is not set here, so it is inherited from
# whichever cell ran last -- confirm that is intentional.
output_base = str(output_dir/f"original_ncs")
!python -m nbconvert ncs.ipynb --execute --NbConvertApp.output_base=$output_base --ExecutePreprocessor.timeout=$timeout
```
## Original ncs + variable names
```
# Original NCS preprocessing, but keeping ALL identifier types (including
# variable names) instead of only calls and imports.
text_overrides = {"lemmatize": False}
code_overrides = {"lemmatize": False, "keep_loops": False, "keep_bin_ops": False, "rstrip_numbers": False}
os.environ.pop("zip_fn", None)
os.environ["text_overrides"] = json.dumps(text_overrides)
os.environ["code_overrides"] = json.dumps(code_overrides)
os.environ["fast_text_overrides"] = json.dumps({})  # reset to fasttext defaults
output_base = str(output_dir/f"original_ncs+varnames")
!python -m nbconvert ncs.ipynb --execute --NbConvertApp.output_base=$output_base --ExecutePreprocessor.timeout=$timeout
```
## Original ncs + zip fn
```
# Original NCS preprocessing plus an explicit zip function for combining
# description and code tokens.
text_overrides = {"lemmatize": False}
code_overrides = {"lemmatize": False, "keep_loops": False, "keep_bin_ops": False, "rstrip_numbers": False,"identifier_types": ["call", "import"]}
# NOTE(review): this pop is immediately followed by an assignment of zip_fn,
# so it is a no-op here.
os.environ.pop("zip_fn", None)
os.environ["text_overrides"] = json.dumps(text_overrides)
os.environ["code_overrides"] = json.dumps(code_overrides)
os.environ["zip_fn"] = "zip_descr_middle_and_start_end"
os.environ["fast_text_overrides"] = json.dumps({})
output_base = str(output_dir/f"original_ncs+zipfn")
!python -m nbconvert ncs.ipynb --execute --NbConvertApp.output_base=$output_base --ExecutePreprocessor.timeout=$timeout
```
## Original ncs + epochs
```
# Original NCS preprocessing with fasttext trained for 30 epochs.
text_overrides = {"lemmatize": False}
code_overrides = {"lemmatize": False, "keep_loops": False, "keep_bin_ops": False, "rstrip_numbers": False, "identifier_types": ["call", "import"]}
fasttext_overrides = {"epoch": 30}
os.environ.pop("zip_fn", None)
os.environ["text_overrides"] = json.dumps(text_overrides)
os.environ["code_overrides"] = json.dumps(code_overrides)
os.environ["fast_text_overrides"] = json.dumps(fasttext_overrides)
output_base = str(output_dir/f"original_ncs+epochs")
!python -m nbconvert ncs.ipynb --execute --NbConvertApp.output_base=$output_base --ExecutePreprocessor.timeout=$timeout
```
## Original ncs + window size
```
# Original NCS preprocessing with a fasttext context window of 20.
text_overrides = {"lemmatize": False}
code_overrides = {"lemmatize": False, "keep_loops": False, "keep_bin_ops": False, "rstrip_numbers": False, "identifier_types": ["call", "import"]}
fasttext_overrides = {"ws": 20}
os.environ.pop("zip_fn", None)
os.environ["text_overrides"] = json.dumps(text_overrides)
os.environ["code_overrides"] = json.dumps(code_overrides)
os.environ["fast_text_overrides"] = json.dumps(fasttext_overrides)
output_base = str(output_dir/f"original_ncs+ws")
!python -m nbconvert ncs.ipynb --execute --NbConvertApp.output_base=$output_base --ExecutePreprocessor.timeout=$timeout
```
## Original ncs + minCount
```
# Original NCS preprocessing with fasttext minCount=1 (keep every token).
text_overrides = {"lemmatize": False}
code_overrides = {"lemmatize": False, "keep_loops": False, "keep_bin_ops": False, "rstrip_numbers": False, "identifier_types": ["call", "import"]}
fasttext_overrides = {"minCount": 1}
os.environ.pop("zip_fn", None)
os.environ["text_overrides"] = json.dumps(text_overrides)
os.environ["code_overrides"] = json.dumps(code_overrides)
os.environ["fast_text_overrides"] = json.dumps(fasttext_overrides)
output_base = str(output_dir/f"original_ncs+mincount")
!python -m nbconvert ncs.ipynb --execute --NbConvertApp.output_base=$output_base --ExecutePreprocessor.timeout=$timeout
```
## Original ncs + minCount + epoch
```
text_overrides = {"lemmatize": False}
code_overrides = {"lemmatize": False, "keep_loops": False, "keep_bin_ops": False, "rstrip_numbers": False, "identifier_types": ["call", "import"]}
fasttext_overrides = {"minCount": 1, "epoch": 30}
os.environ.pop("zip_fn", None)
os.environ["text_overrides"] = json.dumps(text_overrides)
os.environ["code_overrides"] = json.dumps(code_overrides)
os.environ["fast_text_overrides"] = json.dumps(fasttext_overrides)
output_base = str(output_dir/f"original_ncs+mincount")
!python -m nbconvert ncs.ipynb --execute --NbConvertApp.output_base=$output_base --ExecutePreprocessor.timeout=$timeout
```
### Conclusions
## Fasttext hyperparameters
### Fasttext 1 (initial exploration)
```
os.environ["text_overrides"] = "{}"
os.environ["code_overrides"] = "{}"
os.environ.pop("zip_fn", None)
fast_text_overrides_ = [{"ws": 10}, {"ws": 20}, {"ws": 30}, {"dim": 50}, {"epoch": 10}, {"neg": 10}, {"t": 0.01},{"t": 0.001}, {"t": 0.00001}]
for i, fast_text_overrides in enumerate(fast_text_overrides_):
os.environ["fast_text_overrides"] = json.dumps(fast_text_overrides)
output_base = str(output_dir/f"fasttext_{i}")
!python -m nbconvert ncs.ipynb --execute --NbConvertApp.output_base=$output_base --ExecutePreprocessor.timeout=$timeout
```
#### Observations:
- Increasing window size helps
- Increasing the number of epochs helps
- Increasing the number of negative samples helps
- Lowering the sampling threshold does not help
- Decreasing the embedding dimensionality does not help
Window size, number of epochs, and negative samples all increase the number of times an embedding is updated. The next step is to tune the number of epochs and then check if increasing the window and number of negative samples still helps
### Fasttext 2: epochs
```
os.environ["text_overrides"] = "{}"
os.environ["code_overrides"] = "{}"
os.environ.pop("zip_fn", None)
fast_text_overrides_ = [{"epoch": 15}, {"epoch": 20}, {"epoch": 25}, {"epoch": 30}, {"epoch": 40}, {"epoch": 50}]
for i, fast_text_overrides in enumerate(fast_text_overrides_):
os.environ["fast_text_overrides"] = json.dumps(fast_text_overrides)
output_base = str(output_dir/f"fasttext_2.{i}")
!python -m nbconvert ncs.ipynb --execute --NbConvertApp.output_base=$output_base --ExecutePreprocessor.timeout=$timeout
```
#### Observations
Training for more than 30 epochs does not help.
### Fasttext 3: epochs and windows
```
os.environ["text_overrides"] = "{}"
os.environ["code_overrides"] = "{}"
os.environ.pop("zip_fn", None)
epochs = [30]
windows = [10, 15, 20, 25, 30, 35, 40]
fast_text_overrides_ = [{"epoch": epoch, "ws": ws} for epoch in epochs for ws in windows]
for i, fast_text_overrides in enumerate(fast_text_overrides_):
os.environ["fast_text_overrides"] = json.dumps(fast_text_overrides)
output_base = str(output_dir/f"fasttext_3.{i}")
!python -m nbconvert ncs.ipynb --execute --NbConvertApp.output_base=$output_base --ExecutePreprocessor.timeout=$timeout
```
#### Observations
Increasing window size still helps a lot.
### Fasttext 4: mincount
```
# Fasttext #4: best-so-far settings plus minCount=1 (keep every token).
# NOTE(review): unlike the previous fasttext sweeps, this cell also sets
# zip_fn to "zip_descr_end" -- confirm that is intentional.
os.environ["text_overrides"] = "{}"
os.environ["code_overrides"] = "{}"
os.environ["fast_text_overrides"] = json.dumps({"minCount": 1, "epoch": 30, "ws": 20})
os.environ["zip_fn"] = "zip_descr_end"
output_base = str(output_dir/f"fasttext_4")
!python -m nbconvert ncs.ipynb --execute --NbConvertApp.output_base=$output_base --ExecutePreprocessor.timeout=$timeout
```
## Zip function
(How you combine code tokens and description tokens to a single fasttext *sentence/context*.)
```
os.environ["text_overrides"] = "{}"
os.environ["code_overrides"] = "{}"
os.environ["fast_text_overrides"] = json.dumps({"epoch": 30, "ws": 20, "minCount": 1})
zip_fns = ["zip_descr_start_end", "zip_descr_middle_and_start_end", "zip_descr_middle", "zip_descr_end"]
for i, zip_fn in enumerate(zip_fns):
os.environ["zip_fn"] = zip_fn
output_base = str(output_dir/f"zip_fn.{i}")
!python -m nbconvert ncs.ipynb --execute --NbConvertApp.output_base=$output_base --ExecutePreprocessor.timeout=$timeout
```
## Save best NCS hyperparam configuration
```
# Train the best configuration found above and persist the embedder.
os.environ["text_overrides"] = json.dumps({"lemmatize": False})
os.environ["code_overrides"] = json.dumps({"lemmatize":False, "keep_loops": False, "keep_bin_ops": False, "rstrip_numbers": False})
os.environ["fast_text_overrides"] = json.dumps({"epoch": 30, "ws": 20, "dim":100, "minCount": 1})
os.environ["zip_fn"] = "zip_descr_middle_and_start_end"
os.environ["model_filename"] = str(output_dir/"best_ncs_embedder")  # save the trained model here
output_base = str(output_dir/f"best")
!python -m nbconvert ncs.ipynb --execute --NbConvertApp.output_base=$output_base --ExecutePreprocessor.timeout=$timeout
os.environ["model_filename"] = ""  # reset so later cells do not overwrite the saved model
```
## Best NCS ablation epoch
```
# Ablation: best config WITHOUT the epoch override (fasttext default epochs).
os.environ["text_overrides"] = json.dumps({"lemmatize": False})
os.environ["code_overrides"] = json.dumps({"lemmatize":False, "keep_loops": False, "keep_bin_ops": False, "rstrip_numbers": False})
os.environ["fast_text_overrides"] = json.dumps({"ws": 20, "minCount": 1})
os.environ["zip_fn"] = "zip_descr_middle_and_start_end"
#os.environ["model_filename"] = "../trained_models/ncs-embedder-so.feb20"
output_base = str(output_dir/f"best-epoch")
!python -m nbconvert ncs.ipynb --execute --NbConvertApp.output_base=$output_base --ExecutePreprocessor.timeout=$timeout
os.environ["model_filename"] = ""
```
## Best NCS ablation variables
```
# Ablation: best config WITHOUT variable names (identifier_types restricted
# to calls and imports only).
os.environ["text_overrides"] = json.dumps({"lemmatize": False})
os.environ["code_overrides"] = json.dumps({"lemmatize":False, "keep_loops": False, "keep_bin_ops": False, "rstrip_numbers": False,"identifier_types": ["call", "import"] })
os.environ["fast_text_overrides"] = json.dumps({"epoch": 30, "ws": 20, "minCount": 1})
os.environ["zip_fn"] = "zip_descr_middle_and_start_end"
#os.environ["model_filename"] = "../trained_models/ncs-embedder-so.feb20"
output_base = str(output_dir/f"best-variables")
!python -m nbconvert ncs.ipynb --execute --NbConvertApp.output_base=$output_base --ExecutePreprocessor.timeout=$timeout
os.environ["model_filename"] = ""
```
## Best NCS ablation zip fn
```
# Ablation: best config WITHOUT a zip function (zip_fn removed -> default).
os.environ["text_overrides"] = json.dumps({"lemmatize": False})
os.environ["code_overrides"] = json.dumps({"lemmatize":False, "keep_loops": False, "keep_bin_ops": False, "rstrip_numbers": False})
os.environ["fast_text_overrides"] = json.dumps({"epoch": 30, "ws": 20, "minCount": 1})
os.environ.pop("zip_fn", None)
#os.environ["model_filename"] = "../trained_models/ncs-embedder-so.feb20"
output_base = str(output_dir/f"best-zip")
!python -m nbconvert ncs.ipynb --execute --NbConvertApp.output_base=$output_base --ExecutePreprocessor.timeout=$timeout
os.environ["model_filename"] = ""
```
## Best NCS ablation window size
```
# Ablation: best config WITHOUT the window-size override (default fasttext ws).
os.environ["text_overrides"] = json.dumps({"lemmatize": False})
os.environ["code_overrides"] = json.dumps({"lemmatize":False, "keep_loops": False, "keep_bin_ops": False, "rstrip_numbers": False})
os.environ["fast_text_overrides"] = json.dumps({"epoch": 30, "minCount": 1})
os.environ["zip_fn"] = "zip_descr_middle_and_start_end"
#os.environ["model_filename"] = "../trained_models/ncs-embedder-so.feb20"
output_base = str(output_dir/f"best-ws")
!python -m nbconvert ncs.ipynb --execute --NbConvertApp.output_base=$output_base --ExecutePreprocessor.timeout=$timeout
os.environ["model_filename"] = ""
```
## Best NCS ablation minCount
```
# Ablation: best config WITHOUT the minCount override (default fasttext minCount).
os.environ["text_overrides"] = json.dumps({"lemmatize": False})
os.environ["code_overrides"] = json.dumps({"lemmatize":False, "keep_loops": False, "keep_bin_ops": False, "rstrip_numbers": False})
os.environ["fast_text_overrides"] = json.dumps({"epoch": 30, "ws": 20})
os.environ["zip_fn"] = "zip_descr_middle_and_start_end"
#os.environ["model_filename"] = "../trained_models/ncs-embedder-so.feb20"
output_base = str(output_dir/f"best-mincount")
!python -m nbconvert ncs.ipynb --execute --NbConvertApp.output_base=$output_base --ExecutePreprocessor.timeout=$timeout
os.environ["model_filename"] = ""
```
| github_jupyter |
# In this notebook a simple Q learner will be trained and evaluated. The Q learner recommends when to buy or sell shares of one particular stock, and in which quantity (in fact it determines the desired fraction of shares in the total portfolio value). One initial attempt was made to train the Q-learner with multiple processes, but it was unsuccessful.
```
# Basic imports
import os
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import datetime as dt
import scipy.optimize as spo
import sys
from time import time
from sklearn.metrics import r2_score, median_absolute_error
from multiprocessing import Pool
%matplotlib inline
%pylab inline
pylab.rcParams['figure.figsize'] = (20.0, 10.0)
%load_ext autoreload
%autoreload 2
sys.path.append('../../')
import recommender.simulator as sim
from utils.analysis import value_eval
from recommender.agent import Agent
from functools import partial
NUM_THREADS = 1
LOOKBACK = 252*2 + 28
STARTING_DAYS_AHEAD = 20
POSSIBLE_FRACTIONS = np.arange(0.0, 1.1, 0.1).round(decimals=3).tolist()
# Get the data
SYMBOL = 'SPY'
total_data_train_df = pd.read_pickle('../../data/data_train_val_df.pkl').stack(level='feature')
data_train_df = total_data_train_df[SYMBOL].unstack()
total_data_test_df = pd.read_pickle('../../data/data_test_df.pkl').stack(level='feature')
data_test_df = total_data_test_df[SYMBOL].unstack()
if LOOKBACK == -1:
total_data_in_df = total_data_train_df
data_in_df = data_train_df
else:
data_in_df = data_train_df.iloc[-LOOKBACK:]
total_data_in_df = total_data_train_df.loc[data_in_df.index[0]:]
# Create many agents
index = np.arange(NUM_THREADS).tolist()
env, num_states, num_actions = sim.initialize_env(total_data_in_df,
SYMBOL,
starting_days_ahead=STARTING_DAYS_AHEAD,
possible_fractions=POSSIBLE_FRACTIONS)
agents = [Agent(num_states=num_states,
num_actions=num_actions,
random_actions_rate=0.98,
random_actions_decrease=0.9999,
dyna_iterations=0,
name='Agent_{}'.format(i)) for i in index]
POSSIBLE_FRACTIONS
def show_results(results_list, data_in_df, graph=False):
    """Print performance metrics for each simulation result and optionally
    plot the normalized portfolio value against the benchmark close price.

    Parameters
    ----------
    results_list : list of DataFrame
        Each DataFrame holds per-position values; rows are dates.
    data_in_df : DataFrame
        Price data with a 'Close' column, indexed by date.
    graph : bool
        When True, plot portfolio vs. benchmark, both normalized to 1.0
        at the first simulated date.
    """
    for values in results_list:
        total_value = values.sum(axis=1)
        print('Sharpe ratio: {}\nCum. Ret.: {}\nAVG_DRET: {}\nSTD_DRET: {}\nFinal value: {}'.format(*value_eval(pd.DataFrame(total_value))))
        print('-'*100)
        initial_date = total_value.index[0]
        compare_results = data_in_df.loc[initial_date:, 'Close'].copy()
        compare_results.name = SYMBOL
        compare_results_df = pd.DataFrame(compare_results)
        compare_results_df['portfolio'] = total_value
        # Normalize both series to 1.0 at the first date so they are comparable.
        std_comp_df = compare_results_df / compare_results_df.iloc[0]
        if graph:
            # Plot into an explicit axes: the previous bare plt.figure()
            # followed by DataFrame.plot() left an extra empty figure behind,
            # because DataFrame.plot creates its own figure by default.
            ax = plt.figure().gca()
            std_comp_df.plot(ax=ax)
```
## Let's show the symbols data, to see how good the recommender has to be.
```
print('Sharpe ratio: {}\nCum. Ret.: {}\nAVG_DRET: {}\nSTD_DRET: {}\nFinal value: {}'.format(*value_eval(pd.DataFrame(data_in_df['Close'].iloc[STARTING_DAYS_AHEAD:]))))
# Simulate (with new envs, each time)
n_epochs = 10
for i in range(n_epochs):
tic = time()
env.reset(STARTING_DAYS_AHEAD)
results_list = sim.simulate_period(total_data_in_df,
SYMBOL,
agents[0],
starting_days_ahead=STARTING_DAYS_AHEAD,
possible_fractions=POSSIBLE_FRACTIONS,
verbose=False,
other_env=env)
toc = time()
print('Epoch: {}'.format(i))
print('Elapsed time: {} seconds.'.format((toc-tic)))
print('Random Actions Rate: {}'.format(agents[0].random_actions_rate))
show_results([results_list], data_in_df)
env.reset(STARTING_DAYS_AHEAD)
results_list = sim.simulate_period(total_data_in_df,
SYMBOL, agents[0],
learn=False,
starting_days_ahead=STARTING_DAYS_AHEAD,
possible_fractions=POSSIBLE_FRACTIONS,
other_env=env)
show_results([results_list], data_in_df, graph=True)
```
## Let's run the trained agent, with the test set
### First a non-learning test: this scenario would be worse than what is possible (in fact, the q-learner can learn from past samples in the test set without compromising the causality).
```
TEST_DAYS_AHEAD = 20
env.set_test_data(total_data_test_df, TEST_DAYS_AHEAD)
tic = time()
results_list = sim.simulate_period(total_data_test_df,
SYMBOL,
agents[0],
learn=False,
starting_days_ahead=TEST_DAYS_AHEAD,
possible_fractions=POSSIBLE_FRACTIONS,
verbose=False,
other_env=env)
toc = time()
print('Epoch: {}'.format(i))
print('Elapsed time: {} seconds.'.format((toc-tic)))
print('Random Actions Rate: {}'.format(agents[0].random_actions_rate))
show_results([results_list], data_test_df, graph=True)
```
### And now a "realistic" test, in which the learner continues to learn from past samples in the test set (it even makes some random moves, though very few).
```
env.set_test_data(total_data_test_df, TEST_DAYS_AHEAD)
tic = time()
results_list = sim.simulate_period(total_data_test_df,
SYMBOL,
agents[0],
learn=True,
starting_days_ahead=TEST_DAYS_AHEAD,
possible_fractions=POSSIBLE_FRACTIONS,
verbose=False,
other_env=env)
toc = time()
print('Epoch: {}'.format(i))
print('Elapsed time: {} seconds.'.format((toc-tic)))
print('Random Actions Rate: {}'.format(agents[0].random_actions_rate))
show_results([results_list], data_test_df, graph=True)
import pickle
with open('../../data/simple_q_learner_10_actions.pkl', 'wb') as best_agent:
pickle.dump(agents[0], best_agent)
```
| github_jupyter |
## Dependencies
```
# Tensorflow
import tensorflow as tf
print('Tested with TensorFLow 1.2.0')
print('Your TensorFlow version:', tf.__version__)
# Feeding function for enqueue data
from tensorflow.python.estimator.inputs.queues import feeding_functions as ff
# Rnn common functions
from tensorflow.contrib.learn.python.learn.estimators import rnn_common
# Model builder
from tensorflow.python.estimator import model_fn as model_fn_lib
# Run an experiment
from tensorflow.contrib.learn.python.learn import learn_runner
# Helpers for data processing
import pandas as pd
import numpy as np
import argparse
import random
```
## Loading Data
First, we want to create our word vectors. For simplicity, we're going to be using a pretrained model.
As one of the biggest players in the ML game, Google was able to train a Word2Vec model on a massive Google News dataset that contained over 100 billion different words! From that model, Google [was able to create 3 million word vectors](https://code.google.com/archive/p/word2vec/#Pre-trained_word_and_phrase_vectors), each with a dimensionality of 300.
In an ideal scenario, we'd use those vectors, but since the word vectors matrix is quite large (3.6 GB!), we'll be using a much more manageable matrix that is trained using [GloVe](http://nlp.stanford.edu/projects/glove/), a similar word vector generation model. The matrix will contain 400,000 word vectors, each with a dimensionality of 50.
We're going to be importing two different data structures, one will be a Python list with the 400,000 words, and one will be a 400,000 x 50 dimensional embedding matrix that holds all of the word vector values.
```
# data from: http://ai.stanford.edu/~amaas/data/sentiment/
TRAIN_INPUT = 'data/train.csv'
TEST_INPUT = 'data/test.csv'
# data manually generated
MY_TEST_INPUT = 'data/mytest.csv'
# wordtovec
# https://nlp.stanford.edu/projects/glove/
# the matrix will contain 400,000 word vectors, each with a dimensionality of 50.
word_list = np.load('word_list.npy')
word_list = word_list.tolist() # originally loaded as numpy array
word_list = [word.decode('UTF-8') for word in word_list] # encode words as UTF-8
print('Loaded the word list, length:', len(word_list))
word_vector = np.load('word_vector.npy')
print ('Loaded the word vector, shape:', word_vector.shape)
```
We can also search our word list for a word like "baseball", and then access its corresponding vector through the embedding matrix.
```
baseball_index = word_list.index('baseball')
print('Example: baseball')
print(word_vector[baseball_index])
```
Now that we have our vectors, our first step is taking an input sentence and then constructing its vector representation. Let's say that we have the input sentence "I thought the movie was incredible and inspiring". In order to get the word vectors, we can use Tensorflow's embedding lookup function. This function takes in two arguments, one for the embedding matrix (the wordVectors matrix in our case), and one for the ids of each of the words. The ids vector can be thought of as the integerized representation of the training set. This is basically just the row index of each of the words. Let's look at a quick example to make this concrete.
```
max_seq_length = 10 # maximum length of sentence
num_dims = 50 # dimensions for each word vector
first_sentence = np.zeros((max_seq_length), dtype='int32')
first_sentence[0] = word_list.index("i")
first_sentence[1] = word_list.index("thought")
first_sentence[2] = word_list.index("the")
first_sentence[3] = word_list.index("movie")
first_sentence[4] = word_list.index("was")
first_sentence[5] = word_list.index("incredible")
first_sentence[6] = word_list.index("and")
first_sentence[7] = word_list.index("inspiring")
# first_sentence[8] = 0
# first_sentence[9] = 0
print(first_sentence.shape)
print(first_sentence) # shows the row index for each word
```
###TODO### Insert image
The 10 x 50 output should contain the 50 dimensional word vectors for each of the 10 words in the sequence.
```
with tf.Session() as sess:
print(tf.nn.embedding_lookup(word_vector, first_sentence).eval().shape)
```
Before creating the ids matrix for the whole training set, let’s first take some time to visualize the type of data that we have. This will help us determine the best value for setting our maximum sequence length. In the previous example, we used a max length of 10, but this value is largely dependent on the inputs you have.
The training set we're going to use is the Imdb movie review dataset. This set has 25,000 movie reviews, with 12,500 positive reviews and 12,500 negative reviews. Each of the reviews is stored in a txt file that we need to parse through. The positive reviews are stored in one directory and the negative reviews are stored in another. The following piece of code will determine total and average number of words in each review.
```
from os import listdir
from os.path import isfile, join
positiveFiles = ['positiveReviews/' + f for f in listdir('positiveReviews/') if isfile(join('positiveReviews/', f))]
negativeFiles = ['negativeReviews/' + f for f in listdir('negativeReviews/') if isfile(join('negativeReviews/', f))]
numWords = []
for pf in positiveFiles:
with open(pf, "r", encoding='utf-8') as f:
line=f.readline()
counter = len(line.split())
numWords.append(counter)
print('Positive files finished')
for nf in negativeFiles:
with open(nf, "r", encoding='utf-8') as f:
line=f.readline()
counter = len(line.split())
numWords.append(counter)
print('Negative files finished')
numFiles = len(numWords)
print('The total number of files is', numFiles)
print('The total number of words in the files is', sum(numWords))
print('The average number of words in the files is', sum(numWords)/len(numWords))
```
We can also use the Matplot library to visualize this data in a histogram format.
```
import matplotlib.pyplot as plt
%matplotlib inline
plt.hist(numWords, 50)
plt.xlabel('Sequence Length')
plt.ylabel('Frequency')
plt.axis([0, 1200, 0, 8000])
plt.show()
```
From the histogram as well as the average number of words per file, we can safely say that most reviews will fall under 250 words, which is the max sequence length value we will set.
```
max_seq_len = 250
```
### Data
```
ids_matrix = np.load('ids_matrix.npy').tolist()
```
## Parameters
```
# Parameters for training
STEPS = 100000
BATCH_SIZE = 64
# Parameters for data processing
REVIEW_KEY = 'review'
SEQUENCE_LENGTH_KEY = 'sequence_length'
```
## Separating train and test data
The training set we're going to use is the Imdb movie review dataset. This set has 25,000 movie reviews, with 12,500 positive reviews and 12,500 negative reviews.
Let's first give a positive label [1, 0] to the first 12500 reviews, and a negative label [0, 1] to the other reviews.
```
POSITIVE_REVIEWS = 12500
# copying sequences
data_sequences = [np.asarray(v, dtype=np.int32) for v in ids_matrix]
# generating labels
data_labels = [[1, 0] if i < POSITIVE_REVIEWS else [0, 1] for i in range(len(ids_matrix))]
# also creating a length column, this will be used by the Dynamic RNN
# see more about it here: https://www.tensorflow.org/api_docs/python/tf/nn/dynamic_rnn
data_length = [max_seq_len for i in range(len(ids_matrix))]
```
Then, let's shuffle the data and use 90% of the reviews for training and the other 10% for testing.
```
data = list(zip(data_sequences, data_labels, data_length))
random.shuffle(data) # shuffle
data = np.asarray(data)
# separating train and test data
limit = int(len(data) * 0.9)
train_data = data[:limit]
test_data = data[limit:]
```
### Verifying if the train and test data have enough positive and negative examples
```
LABEL_INDEX = 1
def _number_of_pos_labels(df):
pos_labels = 0
for value in df:
if value[LABEL_INDEX] == [1, 0]:
pos_labels += 1
return pos_labels
pos_labels_train = _number_of_pos_labels(train_data)
total_labels_train = len(train_data)
pos_labels_test = _number_of_pos_labels(test_data)
total_labels_test = len(test_data)
print('Total number of positive labels:', pos_labels_train + pos_labels_test)
print('Proportion of positive labels on the Train data:', pos_labels_train/total_labels_train)
print('Proportion of positive labels on the Test data:', pos_labels_test/total_labels_test)
```
## Input functions
```
def get_input_fn(df, batch_size, num_epochs=1, shuffle=True):
    """Build an Estimator input_fn over the (sequence, label, length) rows of `df`.

    Parameters
    -----------
    df: numpy array
        Rows of (sequence, label, length) as produced by the shuffled split.
    batch_size: int
        Number of examples per batch.
    num_epochs: int or None
        How many times to repeat the data (None = forever).
    shuffle: bool
        Whether to shuffle examples (disabled for the "manual" test).
    """
    def input_fn():
        # https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/data
        sequences = np.asarray([v for v in df[:,0]], dtype=np.int32)
        labels = np.asarray([v for v in df[:,1]], dtype=np.int32)
        length = np.asarray(df[:,2], dtype=np.int32)
        dataset = tf.contrib.data.Dataset.from_tensor_slices((sequences, labels, length))
        # Shuffle at the example level BEFORE repeat/batch: shuffling after
        # .batch() (as before) only permutes whole batches, so the examples
        # inside each batch stay in dataset order.
        # For our "manual" test we don't want to shuffle the data.
        if shuffle:
            dataset = dataset.shuffle(buffer_size=100000)
        dataset = (
            dataset
            .repeat(num_epochs)  # repeat dataset the number of epochs
            .batch(batch_size)
        )
        # create iterator
        review, label, length_batch = dataset.make_one_shot_iterator().get_next()
        features = {
            REVIEW_KEY: review,
            SEQUENCE_LENGTH_KEY: length_batch,
        }
        return features, label
    return input_fn
features, label = get_input_fn(train_data, 2)()
with tf.Session() as sess:
items = sess.run(features)
print(items[REVIEW_KEY])
print
items = sess.run(features)
print(items[REVIEW_KEY])
print
train_input_fn = get_input_fn(train_data, BATCH_SIZE, None)
test_input_fn = get_input_fn(test_data, BATCH_SIZE)
```
## Creating the Estimator model
```
def get_model_fn(rnn_cell_sizes,
                 label_dimension,
                 dnn_layer_sizes=[],
                 optimizer='SGD',
                 learning_rate=0.01,
                 embed_dim=128):
    """Build an Estimator model_fn for an LSTM-based sequence classifier.

    Parameters
    -----------
    rnn_cell_sizes: list of int
        Output size of each stacked LSTM cell.
    label_dimension: int
        Number of output classes.
    dnn_layer_sizes: list of int
        Sizes of the dense ReLU layers placed on top of the last RNN output.
    optimizer: str
        Optimizer name understood by tf.contrib.layers.optimize_loss.
    learning_rate: float
        Optimizer learning rate.
    embed_dim: int
        Unused here (embeddings come from the pretrained `word_vector`
        matrix); kept for interface compatibility.
    """
    def model_fn(features, labels, mode):
        review = features[REVIEW_KEY]
        sequence_length = tf.cast(features[SEQUENCE_LENGTH_KEY], tf.int32)
        # Look up the pretrained GloVe vector for each word id.
        # (A zero-initialized tf.Variable used to be created here and was
        # immediately overwritten by this lookup; it only added an unused
        # variable to the graph, so it has been removed.)
        data = tf.nn.embedding_lookup(word_vector, review)
        # Each RNN layer will consist of a LSTM cell
        rnn_layers = [tf.nn.rnn_cell.LSTMCell(size) for size in rnn_cell_sizes]
        # Construct the layers
        multi_rnn_cell = tf.nn.rnn_cell.MultiRNNCell(rnn_layers)
        # Runs the RNN model dynamically
        # more about it at:
        # https://www.tensorflow.org/api_docs/python/tf/nn/dynamic_rnn
        outputs, final_state = tf.nn.dynamic_rnn(cell=multi_rnn_cell,
                                                 inputs=data,
                                                 dtype=tf.float32)
        # Slice to keep only the last cell of the RNN
        last_activations = rnn_common.select_last_activations(outputs, sequence_length)
        # Construct dense layers on top of the last cell of the RNN
        for units in dnn_layer_sizes:
            last_activations = tf.layers.dense(
                last_activations, units, activation=tf.nn.relu)
        # Final dense layer for prediction
        predictions = tf.layers.dense(last_activations, label_dimension)
        predictions_softmax = tf.nn.softmax(predictions)
        loss = None
        train_op = None
        preds_op = {
            'prediction': predictions_softmax,
            'label': labels
        }
        eval_op = {
            "accuracy": tf.metrics.accuracy(
                tf.argmax(input=predictions_softmax, axis=1),
                tf.argmax(input=labels, axis=1))
        }
        # Loss is needed for both TRAIN and EVAL, but not PREDICT.
        if mode != tf.estimator.ModeKeys.PREDICT:
            loss = tf.losses.softmax_cross_entropy(labels, predictions)
        if mode == tf.estimator.ModeKeys.TRAIN:
            train_op = tf.contrib.layers.optimize_loss(
                loss,
                tf.contrib.framework.get_global_step(),
                optimizer=optimizer,
                learning_rate=learning_rate)
        return model_fn_lib.EstimatorSpec(mode,
                                          predictions=predictions_softmax,
                                          loss=loss,
                                          train_op=train_op,
                                          eval_metric_ops=eval_op)
    return model_fn
model_fn = get_model_fn(rnn_cell_sizes=[64], # size of the hidden layers
label_dimension=2, # since are just 2 classes
dnn_layer_sizes=[128, 64], # size of units in the dense layers on top of the RNN
optimizer='Adam',
learning_rate=0.001,
embed_dim=512)
```
## Create and Run Experiment
```
# create experiment
def generate_experiment_fn():
    """Create an experiment function for use with learn_runner.

    Returns:
        A function (run_config, hparams) -> Experiment that wraps the
        module-level `model_fn` and the train/test input functions and
        trains for `STEPS` steps. learn_runner uses it to build the
        Experiment that drives training and evaluation.
    """
    def _experiment_fn(run_config, hparams):
        # Build the Estimator from the module-level model_fn.
        estimator = tf.estimator.Estimator(model_fn=model_fn, config=run_config)
        experiment = tf.contrib.learn.Experiment(
            estimator,
            train_input_fn=train_input_fn,
            eval_input_fn=test_input_fn,
            train_steps=STEPS,
        )
        return experiment
    return _experiment_fn
# run experiment
learn_runner.run(generate_experiment_fn(), run_config=tf.contrib.learn.RunConfig(model_dir='testing2'))
```
## Making Predictions
Let's generate our own sentence to see how the model classifies them.
```
def generate_data_row(sentence, label):
    """Convert a tokenized sentence into a (sequence, label, length) row.

    Each word is mapped to its row index in `word_list`; positions past
    the end of the sentence stay 0 (padding).
    """
    # NOTE(review): this previously used max_seq_length (=10, set in the
    # embedding-lookup demo) while the training pipeline pads to
    # max_seq_len (=250); using the training length keeps prediction
    # inputs consistent with what the model was trained on.
    length = max_seq_len
    sequence = np.zeros((length), dtype='int32')
    for i, word in enumerate(sentence):
        sequence[i] = word_list.index(word)
    return sequence, label, length
data_sequences = [np.asarray(v, dtype=np.int32) for v in ids_matrix]
# generating labels
data_labels = [[1, 0] if i < POSITIVE_REVIEWS else [0, 1] for i in range(len(ids_matrix))]
# also creating a length column, this will be used by the Dynamic RNN
# see more about it here: https://www.tensorflow.org/api_docs/python/tf/nn/dynamic_rnn
data_length = [max_seq_len for i in range(len(ids_matrix))]
first_sentence[0] = word_list.index("i")
first_sentence[1] = word_list.index("thought")
first_sentence[2] = word_list.index("the")
first_sentence[3] = word_list.index("movie")
first_sentence[4] = word_list.index("was")
first_sentence[5] = word_list.index("incredible")
first_sentence[6] = word_list.index("and")
first_sentence[7] = word_list.index("inspiring")
# first_sentence[8] = 0
# first_sentence[9] = 0
print(first_sentence.shape)
print(first_sentence) # shows the row index for each word
preds = estimator.predict(input_fn=my_test_input_fn, as_iterable=True)
sentences = _get_csv_column(MY_TEST_INPUT, 'review')
print()
for p, s in zip(preds, sentences):
print('sentence:', s)
print('bad review:', p[0], 'good review:', p[1])
print('-' * 10)
```
| github_jupyter |
## Model testing and selection
> In this notebook we test various RNN models to predict a company's following day closing stock price.
At the end we select one model to train all of our stocks.
### 1) Import data_manager module and other libraries
The data_manager module has helper classes and methods for working with our stock data.
> The module has the following class that we will use in this notebook:
> ###### SimpleSequence -
Sequence class that creates input (x) and target (y) for RNN training or prediction,
based on given window size (x) and target (y) lengths.
The sequence is created from end of day normalized adjusted close stock prices.
> ###### MultiSequence -
Sequence class that creates input (x) and target (y) for RNN training or prediction,
based on given window size (x) and target (y) lengths.
The sequence is created from three features i) end of day normalized adjusted close stock prices
ii) log normal returns and iii) normalized MFI index.
We will also use a few other helper methods such as `'companies()'` and `'split_data()'` methods from data_manager module.
```
%load_ext autoreload
%aimport data_manager
%autoreload 1
from data_manager import *
```
### 2) Import company list
> Here we read a csv file and import a list of company trade symbols
```
#read list of companies from csv file
stocks = companies()
tickers = stocks.values.tolist()
#Select stock to perform tests
ticker = tickers[2][1]
print("Stock ticker selected for testing: {}".format(ticker))
```
### 3) RNN Models
> In this step we select four RNN models that we will train and evaluate how accurate they are on unseen data.
```
import numpy as np
import pandas as pd
from keras.models import Sequential
from keras.layers import Dense, LSTM, Dropout, Bidirectional
from keras.optimizers import RMSprop
def fixed_model(X, y, learn_rate):
    """
    RNN with a single 5-unit LSTM layer and one fully connected tanh output layer.
    Parameter
    -----------
    X: numpy array
        input sequence data.
    y: numpy array
        target sequence data.
    learn_rate: float
        Neural network learning rate.
    """
    net = Sequential()
    # Fixed hidden size of 5, independent of the input window length.
    net.add(LSTM(5, input_shape=X.shape[1:]))
    net.add(Dense(y.shape[1], activation='tanh'))
    # Minimize MSE with RMSprop at the requested learning rate.
    net.compile(loss='mean_squared_error', optimizer=RMSprop(lr=learn_rate))
    return net
def dynamic_model(X, y, learn_rate):
    """
    RNN with one LSTM layer whose output size equals the input sequence length,
    followed by one fully connected tanh output layer.
    Parameter
    -----------
    X: numpy array
        input sequence data.
    y: numpy array
        target sequence data.
    learn_rate: float
        Neural network learning rate.
    """
    net = Sequential()
    # LSTM width tracks the window size of the input sequences.
    net.add(LSTM(X.shape[1], input_shape=X.shape[1:]))
    net.add(Dense(y.shape[1], activation='tanh'))
    # Minimize MSE with RMSprop at the requested learning rate.
    net.compile(loss='mean_squared_error', optimizer=RMSprop(lr=learn_rate))
    return net
def bidirectional_model(X, y, learn_rate):
    """
    Bidirectional RNN with one LSTM layer (output based on input sequence length),
    one fully connected layer of the same size, and one fully connected tanh
    output layer.
    Parameter
    -----------
    X: numpy array
        input sequence data.
    y: numpy array
        target sequence data.
    learn_rate: float
        Neural network learning rate.
    """
    net = Sequential()
    # Forward and backward passes over the window, concatenated.
    net.add(Bidirectional(LSTM(X.shape[1], return_sequences=False),
                          input_shape=X.shape[1:]))
    net.add(Dense(X.shape[1]))
    net.add(Dense(y.shape[1], activation='tanh'))
    # Minimize MSE with RMSprop at the requested learning rate.
    net.compile(loss='mean_squared_error', optimizer=RMSprop(lr=learn_rate))
    return net
def stacked_model(X, y, learn_rate):
    """
    Stacked RNN with two LSTM layers (outputs of 10 and 5) and one fully
    connected tanh output layer.
    Parameter
    -----------
    X: numpy array
        input sequence data.
    y: numpy array
        target sequence data.
    learn_rate: float
        Neural network learning rate.
    """
    net = Sequential()
    # First layer must return full sequences so the second LSTM can consume them.
    net.add(LSTM(10, return_sequences=True, input_shape=X.shape[1:]))
    net.add(LSTM(5))
    net.add(Dense(y.shape[1], activation='tanh'))
    # Minimize MSE with RMSprop at the requested learning rate.
    net.compile(loss='mean_squared_error', optimizer=RMSprop(lr=learn_rate))
    return net
#Create list of our models for use by the testing function.
models =[]
models.append(("Fixed",fixed_model))
models.append(("Dynamic",dynamic_model))
models.append(("Bidirectional",bidirectional_model))
models.append(("Stacked",stacked_model))
```
### 4) Testing function
> Here we define a `'test_model()'` function to evaluate each RNN model.
```
from collections import OrderedDict
def test_model(ticker,epochs,models,seq,window_sizes):
    """
    Function to test the performance of our RNN models
    Parameter
    -----------
    ticker: str
        Company trade ticker.
    epochs: int
        Number of epochs to train RNN.
    models: list of RNN model functions
        Each item is a tuple where 1st item is string name of model
        and the 2nd is a model function that accepts X,y and learn_rate parameter.
    seq: tuple (name, sequence class)
        1st item is the string name of the sequence type; 2nd is a sequence
        class whose instances expose input X and target y sequence data.
    window_sizes: list
        A list of different window size (sequence length X input) to test.
    Returns:
    ---------
    Returns an ordered dictionary with the result of the model testing as
    seven lists; 'Window Size', 'Sequence Name', 'Model Name', 'Ticker',
    'Training Error', 'Testing Error' and 'Param Count'.
    """
    #test result data
    sizes = []
    #seq_name = []
    model_name = []
    train_errors = []
    test_errors = []
    param_count = []
    for window_size in window_sizes:
        print("\nWindow size: {}".format(window_size))
        print('----------------')
        for model_item in models:
            # Build a fresh sequence and train/test split for every model,
            # so each model trains from the same starting data.
            seq_obj = seq[1](ticker,window_size,1)
            X_train,y_train,X_test,y_test = split_data(seq_obj)
            # Fixed learning rate of 0.001 for all models under test.
            model = model_item[1](X_train,y_train,0.001)
            # fit model!
            model.fit(X_train, y_train, epochs=epochs, batch_size=50, verbose=0)
            # print out training and testing errors
            training_error = model.evaluate(X_train, y_train, verbose=0)
            testing_error = model.evaluate(X_test, y_test, verbose=0)
            msg = " > Model: {0:<15} Param count: {1:} \tTraining error: {2:.4f}\tTesting error: {3:.4f}"
            print(msg.format(model_item[0],model.count_params(),training_error,testing_error))
            #update result variables
            param_count.append(model.count_params())
            sizes.append(window_size)
            #seq_name.append(seq[0])
            model_name.append(model_item[0])
            # Round errors to 4 decimals for the results table.
            train_errors.append(float("{0:.4f}".format(training_error)))
            test_errors.append(float("{0:.4f}".format( testing_error)))
    # Assemble the columns in a fixed order for display/persistence.
    table= OrderedDict()
    table['Window Size'] = sizes
    table['Sequence Name'] = [seq[0] for _ in range(len(sizes))]
    table['Model Name'] = model_name
    table['Ticker'] = [ticker for _ in range(len(sizes))]
    table['Training Error'] = train_errors
    table['Testing Error'] = test_errors
    table['Param Count'] = param_count
    return table
def update_test_table(*argv):
    """Merge one or more test-result tables into ./data/model_test.csv.

    Each argument is a dict-of-lists as returned by `test_model()`. If
    results for the same (Ticker, Sequence Name) pair already exist in the
    CSV they are dropped and replaced by the new results.
    """
    file_path = "./data/model_test.csv"
    table = pd.read_csv(file_path)
    tickers = set(table['Ticker'].values.tolist())
    for item in argv:
        #first check if already exist
        check = item['Ticker'][0]
        if check in tickers:
            # Drop the stale rows for this ticker/sequence combination.
            idx = table[(table['Ticker']== check) & (table['Sequence Name']== item['Sequence Name'][0])].index
            table = table.drop(idx)
        # Append the current test results.
        # pd.concat replaces DataFrame.append, which is deprecated and was
        # removed in pandas 2.0.
        table = pd.concat([table, pd.DataFrame(item)])
    table = table.reset_index(drop=True)
    table.to_csv(file_path, index = False)
def get_test_table():
    """Load the persisted model-test results and return them as a DataFrame."""
    return pd.read_csv("./data/model_test.csv")
```
### 5) Perform model testing
> We test each model using a one feature input sequence and a three feature input sequence of different sequence size or window size.
> * The first test uses the `'SimpleSequence()'` class from the `data_manager` to evaluate how well it performs with the four RNN
models. The `'SimpleSequence()'` is a one feature sequence based on normalized stock prices.
> * In the second test we use the `'MultiSequence()'` class from the `data_manager`. The `'MultiSequence()'` is a three normalize feature sequence; closing stock prices, log normal daily returns and MFI index.
> * The goals of the testing are to 1) decide which input sequence is better, 2) select the best performing window size and 3) choose the best RNN model that best captures the target variable.
```
seed = 7
np.random.seed(seed)
#Model testing variables
epochs =100
window_sizes =[5,7,10,20]
print("*** Simple Sequence Model Test for {} ***".format(ticker))
print("=" * 45)
seq_name = ('Simple',SimpleSequence)
test_1 = test_model(ticker,epochs,models,seq_name,window_sizes)
update_test_table(test_1)
print("*** Multi Sequence Model Test for {} ***".format(ticker))
print("=" * 45)
seq_name = ('Multi',MultiSequence)
test_2 = test_model(ticker,epochs,models,seq_name,window_sizes)
update_test_table(test_2)
```
### 6) Evaluate and summarize test results
```
#update and get model testing table
#table = update_test_table(test_1,test_2)
table = get_test_table()
```
#### Summarize model testing by sequence
```
pd.pivot_table(table, values=['Training Error','Testing Error'], index=['Sequence Name']
,aggfunc={'Training Error':np.mean, 'Testing Error':np.mean} )
```
#### Summarize model testing by Ticker symbol and window size
```
pd.pivot_table(table, values=['Training Error','Testing Error'], index=['Ticker','Window Size']
,aggfunc={'Training Error':np.mean, 'Testing Error':np.mean} )
```
#### Summarize model testing by sequence and window size
```
pd.pivot_table(table, values=['Training Error','Testing Error'], index=['Sequence Name','Window Size']
,aggfunc={'Training Error':np.mean, 'Testing Error':np.mean} )
```
#### Summarize model testing by RNN model
```
pd.pivot_table(table, values=['Training Error','Testing Error'], index=['Model Name']
,aggfunc={'Training Error':np.mean, 'Testing Error':np.mean} )
```
#### Summarize model testing by sequence and RNN model
```
pd.pivot_table(table, values=['Training Error','Testing Error'], index=['Sequence Name' ,'Model Name']
,aggfunc={'Training Error':np.mean, 'Testing Error':np.mean} )
```
#### Summarize model testing by model parameter count
```
pd.pivot_table(table, values='Param Count', index=['Sequence Name','Model Name'], columns=['Window Size'])
```
### Testing observations
* The multi sequence input performed better than the simple sequence input. This is evident since the training and testing errors are both smaller for the multi sequence.
* Not one particular window size captured the target variable the best.
* The dynamic and the bidirectional models performed the best as they have smallest training and testing errors.
* The model parameter count between the different models is negligible and we can perform our training on a cpu.
* All the models can probably get an improvement by adding a dropout layer since the testing error was larger than the training in every case. Further testing is needed to check if a higher epoch count can decrease the variance between training and testing error.
### Conclusion and model selection
Based on the model testing results we arrive at the following conclusions:
* We will use the multi sequence input since it better captures the target variable.
* Since no window size outperformed we will pass a list of Window sizes to our final model and return the best performing model.
* We choose the bidirectional model since its the best performing model.
### 7) Live model testing
* In in this section we define a live model which is the bidirectional model but with a dropout layer.
* We test the live model with different dropout and learning rates to uncover the optimal rates.
* We use a window size of 10 at this point since we are only interested in finding the best learning and dropout rates.
* We also perform a test to gauge the optimal number of epochs
```
def live_model(X, y, learn_rate, dropout):
    """
    Bidirectional RNN with the following layers:
    1) one bidirectional LSTM layer (output size based on X input sequence length)
    2) a fully connected layer of the same size
    3) Dropout (based on given dropout rate)
    4) a fully connected tanh output layer
    Parameter
    -----------
    X: numpy array
        input sequence data.
    y: numpy array
        target sequence data.
    learn_rate: float
        Neural network learning rate.
    dropout: float
        Dropout rate.
    """
    net = Sequential()
    # Same architecture as bidirectional_model, with a dropout layer added
    # before the output to reduce overfitting.
    net.add(Bidirectional(LSTM(X.shape[1], return_sequences=False),
                          input_shape=X.shape[1:]))
    net.add(Dense(X.shape[1]))
    net.add(Dropout(dropout))
    net.add(Dense(y.shape[1], activation='tanh'))
    # Minimize MSE with RMSprop at the requested learning rate.
    net.compile(loss='mean_squared_error', optimizer=RMSprop(lr=learn_rate))
    return net
%matplotlib inline
import matplotlib.pyplot as plt
window_size = 10
dropouts = [0.0,0.25,0.4,0.50]
learn_rates = [0.01,0.001,0.0001]
batch_size = 50
epochs_live = 100
def test_live(X_train,y_train,X_test,y_test):
    """Grid-search the live model over the module-level `learn_rates` and
    `dropouts` lists, plotting a training-loss curve for each combination.

    Trains each candidate for `epochs_live` epochs with batch size
    `batch_size` and keeps the model with the lowest testing error.

    Returns
    -------
    tuple: (best_model, lowest_test_error, best_learn_rate, best_dropout_rate)
    """
    best_model = None
    # Start above any plausible MSE so the first candidate always wins.
    lowest_test_error = 2.0
    best_learn_rate = 0.0
    best_dropout_rate = 0.0
    for rate in learn_rates:
        print("\nLearn rate: {0:.4f}".format(rate))
        print('---------------------')
        lengend = []
        for dropout in dropouts:
            model = live_model(X_train,y_train,rate,dropout)
            history = model.fit(X_train, y_train, epochs=epochs_live, batch_size=batch_size, verbose=0)
            # print out training and testing errors
            training_error = model.evaluate(X_train, y_train, verbose=0)
            testing_error = model.evaluate(X_test, y_test, verbose=0)
            msg = " > Dropout: {0:.2f} Training error: {1:.4f}\tTesting error: {2:.4f}"
            print(msg.format(dropout, training_error,testing_error))
            #check if test error
            if lowest_test_error > testing_error:
                best_model = model
                lowest_test_error = testing_error
                best_learn_rate = rate
                best_dropout_rate = dropout
            #plot loss function
            plt.plot(history.history['loss'])
            lengend.append("Drop {0:.4f}".format(dropout))
        # One loss figure per learning rate, one curve per dropout value.
        plt.title("Learn rate {0:.4f}".format(rate))
        plt.xlabel('epochs')
        plt.ylabel('loss')
        plt.legend(lengend,loc='center left', bbox_to_anchor=(1, 0.5))
        plt.show()
    return (best_model,lowest_test_error,best_learn_rate,best_dropout_rate)
seq_obj = MultiSequence(ticker,window_size,1)
dataset = seq_obj.original_data
X_train,y_train,X_test,y_test = split_data(seq_obj)
print("*** Live Model Testing ***")
print("=" * 40)
results = test_live(X_train,y_train,X_test,y_test)
print("*** Best Live Model Summary***")
print("=" * 40)
print("Testing error: {0:.4f}".format(results[1]))
print("Best learning rate: {}".format(results[2]))
print("Best dropout rate: {}".format(results[3]))
```
### Learn rate and dropout testing results
> * Looking at testing results we can see that learn of 0.01 and 0.001 performed better than 0.0001.
> * The dropout rates of 0.0, 0.25 and 0.40 had the best results.
### Epoch testing
> We perform a test to try and find the optimal epoch count.
```
#get fourt tickers to perform out epoch test
ticker_epochs = [tickers[i][1] for i in range(4)]
window_size = 10
dropout_rate = 0.25
epochs_list = [50,100,200,500,1000]
batch_size = 50
learn_rate = 0.001
def test_epochs():
    """Search `epochs_list` for the epoch count with the lowest testing error.

    For each symbol in the module-level `ticker_epochs` list, trains a fresh
    model (via `live_model`) once per candidate epoch count, prints the
    training/testing errors for each run, and finally reports the best epoch
    per symbol.

    Relies on module-level globals: ticker_epochs, window_size, learn_rate,
    dropout_rate, epochs_list, batch_size.
    """
    for symbol in ticker_epochs:
        print("\nSymbol: {}".format(symbol))
        print('---------------------')
        # Prepare the sequence data for this symbol.
        seq_obj = MultiSequence(symbol, window_size, 1)
        X_train, y_train, X_test, y_test = split_data(seq_obj)
        # float('inf') guarantees the first candidate is always recorded;
        # the previous sentinel of 2.0 could miss every epoch if all
        # testing errors exceeded 2.0 and then report epoch 0.
        lowest_test_error = float('inf')
        best_epoch = 0
        for epoch in epochs_list:
            model = live_model(X_train, y_train, learn_rate, dropout_rate)
            model.fit(X_train, y_train, epochs=epoch, batch_size=batch_size, verbose=0)
            # Print out training and testing errors for this epoch count.
            training_error = model.evaluate(X_train, y_train, verbose=0)
            testing_error = model.evaluate(X_test, y_test, verbose=0)
            msg = " > Epoch: {0:} \tTraining error: {1:.4f}\tTesting error: {2:.4f}"
            print(msg.format(epoch, training_error, testing_error))
            if lowest_test_error > testing_error:
                lowest_test_error = testing_error
                best_epoch = epoch
        # Print the best epoch found for this symbol.
        print(" ==> Best epoch {0:} with testing error of {1:.4f}".format(best_epoch, lowest_test_error))
# Run the epoch search over the selected tickers.
print("*** Epoch Model Testing ***")
print("=" * 40)
test_epochs()
```
### Epoch testing conclusion
> Our epoch testing finds that there is no optimal epoch count but that we should try 100 and 200 and then return the model that performs the best.
### Best model selection
> * Here we put together everything we learn from our testing to select model for a given ticker.
> * To select the best model for a ticker we define a function that accepts a list of window sizes, drop out rates, learn rates
and epoch.
> * We graph the model performance versus the original dataset.
```
%matplotlib inline
import matplotlib.pyplot as plt
# Hyper-parameter grids for the final best-model selection.
ticker = tickers[0][1]
window_sizes = [5,7,10]
dropouts = [0.0,0.25,0.4]
learn_rates = [0.01,0.001]
epochs = [100,200,500]
batch_size = 50
def best_model(ticker, window_sizes, learn_rates, dropouts, epochs, batch_size):
    """Grid-search window size, learn rate, dropout and epoch count for `ticker`.

    Trains one model (via `live_model`) per hyper-parameter combination and
    keeps the model with the lowest testing error.

    Returns a dict with keys: ticker, model, test_error, learn_rate, dropout,
    epoch, train_error, window_size (the error values are pre-formatted
    4-decimal strings).
    """
    # Best-model tracking variables. Renamed the tracked model to `top_model`
    # so the local no longer shadows this function's own name.
    top_model = None
    # float('inf') guarantees the first run is always recorded; the previous
    # sentinel of 2.0 could fail to select any model at all.
    lowest_test_error = float('inf')
    best_training_error = 0.0
    best_learn_rate = 0.0
    best_dropout_rate = 0.0
    best_epoch = 0
    best_window_size = 0
    counter = 1  # running index for the printed progress lines
    for window_size in window_sizes:
        print("\nWindow size: {}".format(window_size))
        print('---------------------')
        # Prepare our sequence data once per window size.
        seq_obj = MultiSequence(ticker, window_size, 1)
        X_train, y_train, X_test, y_test = split_data(seq_obj)
        for rate in learn_rates:
            for dropout in dropouts:
                for epoch in epochs:
                    model = live_model(X_train, y_train, rate, dropout)
                    model.fit(X_train, y_train, epochs=epoch, batch_size=batch_size, verbose=0)
                    # Print out training and testing errors for this run.
                    training_error = model.evaluate(X_train, y_train, verbose=0)
                    testing_error = model.evaluate(X_test, y_test, verbose=0)
                    msg = " > Learn rate: {0:.4f} Dropout: {1:.2f}"
                    msg += " Epoch: {2:} Training error: {3:.4f} Testing error: {4:.4f}"
                    msg = str(counter) + " " + msg.format(rate, dropout, epoch, training_error, testing_error)
                    print(msg)
                    # Keep this model if it beats the best testing error so far.
                    if lowest_test_error > testing_error:
                        top_model = model
                        lowest_test_error = testing_error
                        best_learn_rate = rate
                        best_dropout_rate = dropout
                        best_epoch = epoch
                        best_training_error = training_error
                        best_window_size = window_size
                    # Increase our print counter.
                    counter += 1
    # Collect the winning configuration into a summary dict.
    best_dict = {}
    best_dict["ticker"] = ticker
    best_dict["model"] = top_model
    best_dict["test_error"] = "{0:.4f}".format(lowest_test_error)
    best_dict["learn_rate"] = best_learn_rate
    best_dict["dropout"] = best_dropout_rate
    best_dict["epoch"] = best_epoch
    best_dict["train_error"] = "{0:.4f}".format(best_training_error)
    best_dict["window_size"] = best_window_size
    return best_dict
# Run the full grid search and report the winning configuration.
print("*** Best Model Selection for {} ***".format(ticker))
print("=" * 40)
results = best_model(ticker, window_sizes, learn_rates, dropouts, epochs, batch_size)
print("*** Best Model Selected Summary for {} ***".format(results["ticker"]))
print("=" * 40)
print("Window size: {}".format(results["window_size"]))
print("Train error: {}".format(results["train_error"]))
print("Testing error: {}".format(results["test_error"]))
print("Learning rate: {}".format(results["learn_rate"]))
print("Dropout rate: {}".format(results["dropout"]))
print("Epochs: {}".format(results["epoch"]))
# Rebuild the sequence data with the winning window size and plot predictions.
seq_obj = MultiSequence(results["ticker"],results["window_size"],1)
dataset = seq_obj.original_data
X_train,y_train,X_test,y_test = split_data(seq_obj)
graph_prediction(results["model"], X_train,X_test,dataset,results["window_size"])
```
| github_jupyter |
```
# List all devices visible to TensorFlow.
from tensorflow.python.client import device_lib
# print(device_lib.list_local_devices())
# Check available GPU
from keras import backend as K
K.tensorflow_backend._get_available_gpus()
import os
# Make CUDA enumerate GPUs in PCI bus order so device ids are stable.
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID";
# The GPU id to use, usually either "0" or "1";
os.environ["CUDA_VISIBLE_DEVICES"]="0";
# Importing the libraries
import numpy as np
import pandas as pd
from keras.models import Sequential
from keras.layers import Dense, LSTM, Dropout, Reshape, Lambda, GRU
from keras.preprocessing.sequence import TimeseriesGenerator
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.activations import softmax
from keras.optimizers import SGD
import math
import pickle
# Load AMZN daily prices, indexed by trading date.
dataset = pd.read_csv("../data/dowjones/AMZN_2006-01-01_to_2018-01-01.csv", index_col='Date', parse_dates=['Date'])
dataset.head()
# Keep only the closing price, pivoted to one column per ticker name.
dataset = dataset[["Close", "Name"]]
dataset = dataset.pivot_table(values='Close', index=dataset.index, columns='Name', aggfunc='first')
dataset.head()
x_train = dataset.values[:750]
x_train.shape
# NOTE(review): rows 510-749 appear in both x_train and x_test, so the
# validation data overlaps the training data -- confirm this is intended.
x_test = dataset.values[510:1000]
x_test.shape
i = 0
timestep = 240  # look-back window length (rows) per generated sample
# Generators yield (window, next-value) pairs over the price series;
# the same array is passed as both data and targets.
train_gen = TimeseriesGenerator(x_train, x_train,
length=timestep, sampling_rate=1,
batch_size=510)
test_gen = TimeseriesGenerator(x_test, x_test,
length=timestep, sampling_rate=1,
batch_size=250)
print(f"x train shape: {x_train.shape}")
print(f"x test shape: {x_test.shape}")
# x_train = train_gen[0][0]
# y_train = train_gen[0][1]
# y_test = test_gen[0][0]
# y_test = test_gen[0][1]
# Reshaping X_train for efficient modelling
# X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], X_train.shape[2], 1))
# X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], X_test.shape[2], 1))
# expected input data shape: (batch_size, timesteps, data_dim)
regressor = Sequential()
# regressor.add(LSTM(units=25, input_shape=(timestep, 1), dropout=0.1))
# regressor.add(GRU(units=25, return_sequences=True, input_shape=(timestep, 1), dropout=0.3, recurrent_dropout=0.3))
# regressor.add(GRU(25, return_sequences=True, dropout=0.3))
# regressor.add(GRU(25, dropout=0.3, recurrent_dropout=0.3))
# Single LSTM layer over the 240-step window, then a scalar regression head.
regressor.add(LSTM(units=100, input_shape=(timestep, 1), dropout=0.1, recurrent_dropout=0.1))
# regressor.add(LSTM(25, return_sequences=True, dropout=0.1))
# regressor.add(LSTM(25, return_sequences=True, dropout=0.1))
# regressor.add(LSTM(50, dropout=0.3, recurrent_dropout=0.3))
# regressor.add(Dense(10,input_shape=(timestep, ), activation='relu'))
# regressor.add(Dense(100, activation='relu'))
# regressor.add(Dense(100, activation='relu'))
# regressor.add(Reshape((31, 2)))
# regressor.add(Lambda(lambda x: softmax(x, axis=-1)))
# regressor.add(Dense(2, activation='softmax'))
regressor.add(Dense(1, activation='relu'))
# Regression objective: MSE is both the loss and the only tracked metric.
regressor.compile(loss='mean_squared_error',
optimizer='rmsprop',
metrics=['mean_squared_error'])
# regressor.compile(loss='binary_crossentropy',
# optimizer='rmsprop',
# metrics=['accuracy'])
# regressor.fit(x, y, epochs=1000,batch_size=1000, validation_split=0.2, callbacks = [EarlyStopping(monitor='val_loss', mode='min', patience=20),
# ModelCheckpoint(filepath='../model/LSTM/best_model.h5', monitor='val_acc', save_best_only=True)])
# BUG FIX: the checkpoint previously monitored 'val_acc', but this model is
# compiled with mean_squared_error metrics only, so 'val_acc' never exists
# and the best model was never saved. Monitor 'val_loss' instead.
regressor.fit_generator(train_gen, steps_per_epoch=len(train_gen),
epochs=1000, validation_data=test_gen,
callbacks=[
EarlyStopping(monitor='val_loss',
mode='min', patience=10),
ModelCheckpoint(filepath="../model/mymodel.h5",
monitor='val_loss',
save_best_only=True)])
predict = regressor.predict_generator(test_gen)
predict.shape
predict[:10]
# Last 250 actual prices, for comparison against the generator's predictions.
result = x_test[-250:]
result.shape
result[:10]
```
| github_jupyter |
---
description: 'First things first: let''s start with a good model!'
---
# Models
Welcome to the "**Models**" tutorial of the "_From Zero to Hero_" series. In this notebook we will talk about the features offered by the `models` _Avalanche_ sub-module.
### Support for pytorch Modules
Every continual learning experiment needs a model to train incrementally. You can use any `torch.nn.Module`, even pretrained models. The `models` sub-module provides for you the most commonly used architectures in the CL literature.
You can use any model provided in the [Pytorch](https://pytorch.org/) official ecosystem models as well as the ones provided by [pytorchcv](https://pypi.org/project/pytorchcv/)!
```
!pip install git+https://github.com/ContinualAI/avalanche.git
from avalanche.models import SimpleCNN
from avalanche.models import SimpleMLP
from avalanche.models import SimpleMLP_TinyImageNet
from avalanche.models import MobilenetV1
model = SimpleCNN()
print(model)
```
## Dynamic Model Expansion
A continual learning model may change over time. As an example, a classifier may add new units for previously unseen classes, while progressive networks add a new set of units after each experience. Avalanche provides `DynamicModule`s to support these use cases. `DynamicModule`s are `torch.nn.Module`s that provide an additional method, `adaptation`, that is used to update the model's architecture. The method takes a single argument, the data from the current experience.
For example, an IncrementalClassifier updates the number of output units:
```
from avalanche.benchmarks import SplitMNIST
from avalanche.models import IncrementalClassifier
# 5-experience SplitMNIST benchmark; 784 = flattened 28x28 MNIST input.
benchmark = SplitMNIST(5, shuffle=False)
model = IncrementalClassifier(in_features=784)
print(model)
# Adapt the classifier after each experience so its output layer grows
# to cover the newly seen classes (no training happens here).
for exp in benchmark.train_stream:
    model.adaptation(exp.dataset)
    print(model)
```
As you can see, after each call to the `adaptation` method, the model adds 2 new units to account for the new classes. Notice that no learning occurs at this point since the method only modifies the model's architecture.
Keep in mind that when you use Avalanche strategies you don't have to call the adaptation yourself. Avalanche strategies automatically call the model's adaptation and update the optimizer to include the new parameters.
## Multi-Task models
Some models, such as multi-head classifiers, are designed to exploit task labels. In Avalanche, such models are implemented as `MultiTaskModule`s. These are dynamic models (since they need to be updated whenever they encounter a new task) that have an additional `task_labels` argument in their `forward` method. `task_labels` is a tensor with a task id for each sample.
```
from avalanche.benchmarks import SplitMNIST
from avalanche.models import MultiHeadClassifier
# Task-incremental variant: return_task_id=True exposes task labels.
benchmark = SplitMNIST(5, shuffle=False, return_task_id=True)
model = MultiHeadClassifier(in_features=784)
print(model)
# A new head is initialized whenever a new task is encountered.
for exp in benchmark.train_stream:
    model.adaptation(exp.dataset)
    print(model)
```
When you use a `MultiHeadClassifier`, a new head is initialized whenever a new task is encountered. Avalanche strategies automatically recognizes multi-task modules and provide the task labels to them.
### How to define a multi-task Module
If you want to define a custom multi-task module you need to override two methods: `adaptation` (if needed), and `forward_single_task`. The `forward` method of the base class will split the mini-batch by task-id and provide single task mini-batches to `forward_single_task`.
```
from avalanche.models import MultiTaskModule
class CustomMTModule(MultiTaskModule):
    """Skeleton for a custom multi-task module.

    Override `adaptation` (if needed) and `forward_single_task`; the base
    class's `forward` splits each mini-batch by task id and dispatches the
    single-task sub-batches to `forward_single_task`.
    """
    def __init__(self, in_features, initial_out_features=2):
        super().__init__()
    def adaptation(self, dataset):
        super().adaptation(dataset)
        # your adaptation goes here
    def forward_single_task(self, x, task_label):
        # your forward goes here.
        # task_label is a single integer
        # the mini-batch is split by task-id inside the forward method.
        pass
```
Alternatively, if you only want to convert a single-head model into a multi-head model, you can use the `as_multitask` wrapper, which converts the model for you.
```
from avalanche.models import as_multitask
model = SimpleCNN()
print(model)
# Convert the single-head model into a multi-task model; 'classifier'
# presumably names the head layer to be replaced -- see avalanche docs.
mt_model = as_multitask(model, 'classifier')
print(mt_model)
```
## 🤝 Run it on Google Colab
You can run _this chapter_ and play with it on Google Colaboratory: [](https://colab.research.google.com/github/ContinualAI/avalanche/blob/master/notebooks/from-zero-to-hero-tutorial/02_models.ipynb)
| github_jupyter |
# ML Architecture

In this section we will build the components related to the development environment. As shown in the figure we will work on:
1. Training the Model
2. Building Feature Extractor
3. Building APIs for connecting ML services to the world wide web.
# Environment Configuration
This module involves the list of things required to start our ML model deployment.
1. Github Account
2. Git bash terminal
3. Forking the repository from [link]
4. Creating a Virtual Environment
5. Installing Text Editor
Below are the series of steps that can be followed to configure them.
1. Create github account
2. Install Git bash terminal from [https://git-scm.com/downloads]
3. Go to the command prompt and configure name and email_id
* git config --global user.name "your name"
* git config -- global user.email youremailaddress@x.com
<br>
Check on cmd to verify config by typing:-
* git config user.name
* git config user.email
4. Fork a repository
5. Opening Pull request to your repo instead of the original repo
* git remote set-url origin [link]
6. Create a branch
* git checkout -b test-branch-2
7. Do commit
* git commit --allow-empty -m "opening project"
8. Pull request
* git push origin test-branch-2
9. Creating virtual env---
Go into the required folder
* python -m venv deploy
---Check through typing
* dir
10. Activate your Virtual Environment
* [env_name==deploy]\Scripts\activate
11. Deactivate your Virtual Environment
* deactivate
12. Installing requirement files
* pip install -r requirements.txt
13. Select any text editor
* Sublime
* Vim
* Emacs
* Pycharm
# Building Our Regression Package
## Directory

The above files can be categorized based on the task they intend to do.We can broadly divide our task in the following categories:
1. Package Building
2. Versioning and Logging
3. Preprocessing
4. Feature Engineering
5. Building ML Pipeline
6. Model training
7. Model Prediction
8. Utility Modules
## Saving the dataset for training and testing
1. Download the train.csv and test.csv files from the [Kaggle Competiton link](https://www.kaggle.com/c/house-prices-advanced-regression-techniques/data)
2. Save train.csv and test.csv files in the datasets folder of your directory
## Versioning and Logging of deployed model/package
Important for:
1. Reproducibility
Can store information about:
* Input data
* Time frame in which predictions were made
2. Clues for Debugging
3. Conduct audits to meet Regulatory Requirements on the predictions we are making.
**Version file**
<br>
The Version file contains the specified version in the format [Major.Minor.Patch] e.g.: 0.1.0.
<br>
**init.py**
<br>
To set the version by reading from a file
**Config/logging_config.py**
1. get_console_handler()<br>
for logging into a console
2. get_file_handler()<br>
for logging into a file<br>
3. Format to store the metadata which contains datetime , logging function etc
4. get_logger()<br>
For calling logger from different modules.
Check **data_management.py** file for usage
5. errors.py<br>
Custom errors to give us more specific erros at the time of logging.
Check **features.py** file for usage
## Creating and loading package
We will be taking top down approach where we will first install the built regression package and then will go in details on how can we build this package
1. requirements.txt<br>
List of libraries to be installed
2. setup .py<br>
List all the details about the package
**To build package**<br>
python packages\regression_model\setup.py sdist bdist_wheel
sdist - Source Distribution<br>
bdist_wheel - Wheel distribution
*Note-Modern pip uses wheel distribution*
Check the dist folder to find the created package

**To install package locally**<br>
pip install -e packages\regression_model

## Running training pipeline

**Running training pipeline**<br>
We can see the role of logger in the output

The pickle file is saved under the trained_models folder
## Preprocessing
**preprocessors.py**<br>
Lists all the preprocessing task involved in this model building exercise.
* Categorical Imputer
* Numerical Imputer
* Temporal Variable estimator
* RareLabel Categorical Encoder
* Categorical Encoder
* Drop Unnecessary Features
**validation.py**<br>
This module validate inputs and it's basically another layer of checking and safety to make sure that any values that come into our model all handled in a way that allows us to continue with prediction.
*Check predict module for usage*
## Feature Engineering
**1. features.py**<br>
List all the feature engineering work.Here we have just a single feature engineering task of log transformation.However this module can be far more complex than this example for eg:
* Accessing a database to pick precalclulated features.
* Third party API call to gather information .eg - Weather
* A separate model to generate features that will be an input feature to our current model.
The features can be a very complicated section of your application and it could indeed be imported as a totally separate package with its own versioning.
## ML Pipeline
One of the often reason for ML models to break in production is the reproducibility in offline and online environment.Hence, it is necessary to collate all the preprocessing tasks and create a pipeline that can be leverage both at the time of training and at the time of inference.
Current Pipeline does the following tasks:
1. Categorical Imputer
2. Numerical Imputer
3. Temporal Variable
4. Rare Label Encoder
5. Categorical Encoder
6. Log transform
7. Drop Features
7. MinMax Scalar
8. Model Selection
## Training
**train_pipeline.py**<br>
This module involves training the model leveraging all the modules defined above.
Major tasks performed in this module:
1. Load Dataset
2. Train-Test Split
3. Trasformation on Target Variable
4. Running Pipeline
5. Saving Pipeline
## Predict
## Utility modules
1. **Data Management.py**<br>
Lists all the utility functions required in model training.
2. **config.py**<br>
Configuration file with all the parameters
# Good practices
1. Use version control
2. Write tests ! Unit, Integration , Acceptance tests
3. Trunk based development and peer reviews
4. Understand your system dependencies
5. Use CI/CD
# REST API
Representational State Transfer(REST) Application Program Interface(API)
Serving our model using API has the following advantages:
1. Serve predictions on the fly to multiple clients ( websites,phone,API etc)
2. Separate model development from client facing layer
3. Combine multiple models at different API endpoints
3. Bring Scale .. by adding more instances of the API application behind any load balancer
We will build our API using the Flask microframework.
Alternatives to look for:
1. Django
2. Pyramid
3. Bottle
4. Tornado
5. API Star etc
Before going and building the API for our model, let us understand what an API is.
### API
An API is an application programming interface. It is a set of rules that allow programs to talk to each other. The developer creates the API on the server and allows the client to talk to it.
### REST
REST determines how the API looks like.It is a set of rules that developers follow when they create their API.One of these rules states that you should be able to get a piece of data (called a **resource**) when you link to a specific URL.
Each URL is called a **request** while the data sent back to you is called a **response**.
**JSON (JavaScript Object Notation)** a common format for sending and requesting data through a REST API.
### REQUEST
request is made up of four things:
1. **The endpoint** -The endpoint (or route) is the url you request for.
1. **path** - determines the resource you’re requesting for. Think of it like an automatic answering machine that asks you to press 1 for a service, press 2 for another service, 3 for yet another service and so on.
2. **The method** -The method is the type of request you send to the server. You can choose from these five types below:
1. **GET** - Read a resource on a server
2. **POST** - Creates a new resource on a server
3. **PUT and PATCH** - Update a resource on a server
4. **DELETE** - Delete a resource from a server.
3. **The headers** - Headers are used to provide information to both the client and server.eg authentication and providing information about the body content
4. **The data (or body)** - The data (sometimes called “body” or “message”) contains information you want to be sent to the server
# Building Our API Package

## requirements.txt
**Note- Comment Neural Network and other unrequired packeages
Lists the package to build our API Package.
Run the requirements file<br>
pip install -r packages\ml_api\requirements.txt
Ignore the neural network package and error for now
## run.py
It is the entry point to start flask.
**create_app()** is defined under **api\app.py** which creates the flask api and the blueprint.Right now the blueprint is creating multiple endpoint which is defined under **api\controller.py**.
## controller.py
Lists the different endpoint made in this api.
1. Health
2. Version
1. Model Version
2. API Version
3. Regression Prediction
1. Get the json data
2. Validate the input format of the data
3. Make preictions
4. Send predctions , version , errors as json back to client
4. NN Prediction
## validation.py
List the schema details of the data and the validation methods required at the time of running validations.
## Run the flask api
**To test the running of flask webapp**
- cd packages
- cd ml_api
- set FLASK_APP=run.py
- python run.py

Flask app is running at 127.0.0.1:5000
Check the **health endpoint**
http://127.0.0.1.5000/health


## config.py
Similar to the config file for logging format and setup code used in regression package.
It has:
1. Loggers with right handlers
2. Config objects to set particular flask properties.<br>
See usage in app.py<br>
flask_app.config.from_object(config_object)

## tests
1. **tests/conftest.py**
Creating test fixtures which can then later be passed in an argument in tests where they return values.
They can be used to test any endpoint.
2. **tests/test_validation.py**
Test file for the validation of validation.py file
3. **tests/test_controller.py**
Tests different configured endpoints
1. Health endpoint
2. Version endpoint
3. Regression endpoint
4. NN endpoint
Lets test our ***health*** endpoint
**Note-Comment all other tests for now except health endpoint**<br>
tests/test_controller.py

Run - pytest packages/ml_api/tests

Under test controller you can find :
- test_prediction_endpoint_predictions
Do make a note that all the heavy lifting has been excluded from api and is present in the regression package.So the model package contains all the things to test aswell.In that way we are ruling out a scenario where we update our model but fail to update our test data in the api.
# FLASK Crash Course
1. @ -Decorators in flask used for defining endpoint or root. Here we are definining a health endpoint and we can access that using http **GET**.

2. Blueprint - They are like a code template that record operations to execute when registered on the application.

3. Register the template to the application

# Continuous Integration and Continuous Deployment

It talks about automating the stages of development
CI/CD pipeline in case of Machine Learning Model Deployment

## Prerequisite -Creating a Github repo
Create a git repository
Link your local Deployment codes to git repository
Steps:
1. Goto github and create a repo
2. Got to git terminal - > Deploymnet folder
3. Make this folder a git repo by typing
- git init
4. Commit the repo
- git add .
- git commit -m "update"
4. Setup remote instance
- Copy the https link from github repo
- git remote
- git remote add origin [link]
- git push origin master
## Circle CI
We will be building our CI/CD pipeline on CircleCI - a CI/CD platform.
Features:
1. Hosted Platform i.e will rely on their servers
2. Easy github integration
3. Can take up 1 free project
Alternatives:
1. Jenkins
2. Travis CI
3. Bamboo
4. Gitlab CI
5. Team City
## Setup Circle CI
1. Login to Cicle CI using github account.
2. Add a project "Deployment" from your github repo.
# Reference
1. [Git CheatSheet](https://www.atlassian.com/git/tutorials/atlassian-git-cheatsheet)
2. [Git course](https://www.pluralsight.com/courses/code-school-git-real)
3. [Working with folks](https://stackoverflow.com/questions/25545613/how-can-i-push-to-my-fork-from-a-clone-of-the-original-repo)
4. [Testing](https://landing.google.com/sre/sre-book/chapters/testing-reliability/)
5. [Trunk based Development](https://trunkbaseddevelopment.com/)
6. Fluent Python
7. The Devops Handbook<BR>
**PACKAGING**
8. [Python Packaging](https://packaging.python.org/)
9. [Python Versioning](https://packaging.python.org/guides/single-sourcing-package-version/)
10. [ Python Logging](https://docs.python.org/3/library/logging.html)
9. [Python packaging and PyPI](https://www.youtube.com/watch?v=na0hQI5Ep5E)
10. [Setuptools documentation](https://setuptools.readthedocs.io/en/latest/)
11. [Wheel Documentation](https://wheel.readthedocs.io/en/stable/)
12. [Pytest Documentation](https://docs.pytest.org/en/latest/)<BR>
**REST API**
13. [REST API Principles](https://restfulapi.net/rest-architectural-constraints/)
14. [REST API Walkthrough](https://www.smashingmagazine.com/2018/01/understanding-using-rest-api/)
15. [Flask Tutorial](https://blog.miguelgrinberg.com/post/the-flask-mega-tutorial-part-i-hello-world)
16. [Web Frameworks](https://github.com/vinta/awesome-python#web-frameworks)
| github_jupyter |
# PRMT-2183 Is it worth using Attachment MIDs in the pipeline to categorise transfers?
## Hypothesis
**We believe that** we should use attachment MID data to more accurately classify transfers.
**We will know this to be true** when we see a significant number of transfers that are currently understood to be "pending" be re-categorised into either "pending integration" or "still transfering"
Alternatively: how many transfers actually get stuck waiting for COPC large message fragments?
## Approach
Take a sample of pending transfers that include some number of large attachment COPC fragments, all of which are acknowledged.
Measure how many of these gp2gp conversations appear to have sent all COPC messages that are referenced as attachment MIDS in the core EHR message.
See: https://gpitbjss.atlassian.net/wiki/spaces/TW/pages/2552529087/EHR+structure+in+GP2GP
As the data we are analysing is "in the past" if a conversation is still pending, then it was not a successful outcome for the patient.
To simplify this analysis we will use only transfers with no duplicate core EHR (error code 12).
So, using spine data we will calculate:
1. Number of Conversations
2. Number of Conversations without any error code 12
3. Number of above which have core extract received, but do not have final ack message (transfers that would not be categorised as anything else under our new status proposal).
4. Number of above that have COPC messages according to Attachment MIDs
5. Number of above where all COPCs sent are acknowledged
6. Number of the above for which the number of COPC sent is less than the number in Attachment MID
These numbers correspond roughly to this categorisation hierarchy:
```
Looking back at collection of transfers, we could categorise them as follows
-> A - Completed (successfully or otherwise)
-> B - Not Completed (aka "pending")
-> C - EHR was not sent or request was not acknowledged
-> D - EHR was sent (This is more or less our current level of determination)
These next categorisations are viable using the attachment dataset:
-> E - Those where all the attachment fragments were sent (sending complete)
-> F - Those where some attachment fragments were not sent (sending incomplete)
-> G - ???
```
Two ways of interpreting the impact of the enhancement:
1. We can now correctly categorise cases where some COPC were not sent and it got stuck - size of category F
This would be calculated by taking (6) as a percentage of (2).
2. By the process of elimination, we could potentially categorise anything where the EHR was sent, but the final ack is missing into "stuck" or "not stuck", depending on if we are waiting for the transfer to complete or not. - size of category D
This would only be feasible, if there is not some other significant variation within category D (category G). E.g what if attachment fragments are sent but not acked.
This would be calculated by taking (3) as a percentage of (2).
```
import pandas as pd
# Raw gp2gp spine data: one row per spine message, two months concatenated
gp2gp_spine_data_files = [
    "s3://prm-gp2gp-data-sandbox-dev/spine-gp2gp-data/Mar-2021.csv.gz",
    "s3://prm-gp2gp-data-sandbox-dev/spine-gp2gp-data/Apr-2021.csv.gz"
]
gp2gp_spine = pd.concat((
    pd.read_csv(f, parse_dates=["_time"])
    for f in gp2gp_spine_data_files
))
# Map raw GP2GP interaction IDs to short, readable names used throughout the analysis
interaction_name_mapping = {
    "urn:nhs:names:services:gp2gp/RCMR_IN010000UK05": "req start",
    "urn:nhs:names:services:gp2gp/RCMR_IN030000UK06": "req complete",
    "urn:nhs:names:services:gp2gp/COPC_IN000001UK01": "COPC",
    "urn:nhs:names:services:gp2gp/MCCI_IN010000UK13": "ack"
}
gp2gp_spine['interaction_name']=gp2gp_spine['interactionID'].replace(interaction_name_mapping)
# Drop exact duplicate rows
gp2gp_spine = gp2gp_spine.drop_duplicates()
```
Splunk query used to extract the attachment mid data:
```sql
index="spine2vfmmonitor" logReference="MPS0208" attachmentType="mid"
| table _time, attachmentID, conversationID
```
```
# Attachment MID data: rows of (_time, attachmentID, conversationID) from Splunk
attachment_mids_folder="s3://prm-gp2gp-data-sandbox-dev/43-PRMT-2167-attachment-mids/"
attachment_mids_files=["attachment_mids_april_2021.csv","attachment_mids_march_2021.csv"]
attachment_mids=pd.concat([pd.read_csv(attachment_mids_folder + file) for file in attachment_mids_files])
# 1. Filtering for conversations that have started within the dataset
# (i.e. conversations that contain a 'req start' message)
all_messages = gp2gp_spine.copy()
conversation_ids_with_req_start = all_messages.loc[all_messages['interaction_name']=='req start','conversationID'].unique()
messages_from_started_conversations = all_messages[all_messages["conversationID"].isin(conversation_ids_with_req_start)]
print(f"Total number of conversations: {conversation_ids_with_req_start.shape[0]}")
# 2. Filtering for conversations that do not have error code 12 (duplicate core EHR)
is_message_with_error_code_12 = messages_from_started_conversations['jdiEvent']=='12'
conversation_ids_with_error_code_12 = messages_from_started_conversations.loc[is_message_with_error_code_12,'conversationID'].unique()
conversation_ids_without_error_code_12 = list(set(messages_from_started_conversations['conversationID']) - set(conversation_ids_with_error_code_12))
messages_from_conversations_without_duplicate_ehr_bool = messages_from_started_conversations["conversationID"].isin(conversation_ids_without_error_code_12)
messages_from_conversations_without_duplicate_ehr = messages_from_started_conversations[messages_from_conversations_without_duplicate_ehr_bool]
print(f"Total number of conversations without error code 12: {len(conversation_ids_without_error_code_12)}")
# 3. Conversations that have core extract received, but do not have final ack message
# First filter for conversations containing a core extract ('req complete' message)
is_ehr_message = messages_from_conversations_without_duplicate_ehr['interaction_name']=='req complete'
conversation_ids_with_core_extract = messages_from_conversations_without_duplicate_ehr.loc[is_ehr_message,'conversationID'].unique()
is_message_in_conversation_with_ehr = messages_from_conversations_without_duplicate_ehr["conversationID"].isin(conversation_ids_with_core_extract)
messages_from_conversations_with_ehr = messages_from_conversations_without_duplicate_ehr[is_message_in_conversation_with_ehr]
# Second, filter for conversations whose 'req complete' GUID is never referenced
# by any message's messageRef, i.e. the core extract was never acknowledged
ids_of_req_complete_messages = messages_from_conversations_with_ehr.loc[messages_from_conversations_with_ehr['interaction_name']=='req complete','GUID'].unique()
is_message_ehr_ack = messages_from_conversations_with_ehr["messageRef"].isin(ids_of_req_complete_messages)
conversation_ids_with_ehr_ack = messages_from_conversations_with_ehr.loc[is_message_ehr_ack, "conversationID"]
conversation_ids_without_ehr_ack= list(set(messages_from_conversations_with_ehr['conversationID']) - set(conversation_ids_with_ehr_ack))
is_message_in_conversation_without_ehr_ack = messages_from_conversations_with_ehr["conversationID"].isin(conversation_ids_without_ehr_ack)
messages_from_conversations_without_ehr_ack = messages_from_conversations_with_ehr[is_message_in_conversation_without_ehr_ack]
print(f"Total number of conversations that have core extract received, but do not have final ack message: {len(conversation_ids_without_ehr_ack)}")
# 4. Number of the above that have COPC messages according to attachment MIDs.
# A conversation appearing in the attachment MID data should contain COPC
# fragment messages.
all_conversations_with_attachment_ids = attachment_mids["conversationID"].unique()
is_message_in_conversation_with_large_attachment = messages_from_conversations_without_ehr_ack["conversationID"].isin(all_conversations_with_attachment_ids)
messages_from_conversations_with_large_attachments = messages_from_conversations_without_ehr_ack[is_message_in_conversation_with_large_attachment]
count_of_conversations_that_should_have_copcs = messages_from_conversations_with_large_attachments["conversationID"].nunique()
# Include the value in the print call: a bare expression in the middle of a
# Jupyter cell is not displayed (only the last expression of a cell is).
print(f"Total number of conversations that have COPC messages according to attachment MID: {count_of_conversations_that_should_have_copcs}")
# Of those, the conversations that actually contain at least one COPC message
is_message_copc = messages_from_conversations_with_large_attachments['interaction_name']=='COPC'
conversations_with_copc = messages_from_conversations_with_large_attachments.loc[is_message_copc, 'conversationID'].unique()
is_message_in_conversation_with_copc = messages_from_conversations_with_large_attachments['conversationID'].isin(conversations_with_copc)
messages_from_conversations_with_copcs = messages_from_conversations_with_large_attachments[is_message_in_conversation_with_copc]
count_of_conversations_with_copcs = messages_from_conversations_with_copcs["conversationID"].nunique()
print(f"Total number of conversations that actually have any COPC messages: {count_of_conversations_with_copcs}")
print(f"Total number of conversations that expect to have COPC messages but don't: {count_of_conversations_that_should_have_copcs - count_of_conversations_with_copcs}")
# 5. Number of the above where all COPCs sent (by the sending system) are acknowledged.
# Label each message as coming from the Requester or the Sender: the requester
# is identified as the sender of the 'req start' message of the conversation.
is_req_started_message = messages_from_conversations_with_copcs['interaction_name']=='req start'
requester_lookup = messages_from_conversations_with_copcs.loc[is_req_started_message, ['messageSender', 'conversationID']].rename({"messageSender": "requester"}, axis=1)
messages_from_conversations_with_copcs = messages_from_conversations_with_copcs.merge(requester_lookup, on="conversationID", how="left")
messages_from_conversations_with_copcs["Message sender type"] = "Sender"
is_message_from_requester = messages_from_conversations_with_copcs["messageSender"] == messages_from_conversations_with_copcs["requester"]
messages_from_conversations_with_copcs.loc[is_message_from_requester, "Message sender type"] = "Requester"
# COPC messages originating from the sending system
is_copc_message_from_sender = (messages_from_conversations_with_copcs['interaction_name']=='COPC') & (messages_from_conversations_with_copcs['Message sender type'] == "Sender")
copc_message_guids = messages_from_conversations_with_copcs.loc[is_copc_message_from_sender,'GUID'].unique()
# GUIDs referenced by any ack; a COPC whose GUID is never referenced is unacknowledged
all_messagerefs = messages_from_conversations_with_copcs.loc[messages_from_conversations_with_copcs['interaction_name']=='ack','messageRef'].unique()
copcs_guids_without_ack = list(set(copc_message_guids) - set(all_messagerefs))
messages_from_conversations_missing_copc_ack = messages_from_conversations_with_copcs["GUID"].isin(copcs_guids_without_ack)
conversation_ids_copcs_without_ack = messages_from_conversations_with_copcs.loc[messages_from_conversations_missing_copc_ack, "conversationID"].unique()
is_message_a_sender_copc = (messages_from_conversations_with_copcs['Message sender type'] == "Sender") & (messages_from_conversations_with_copcs['interaction_name']=='COPC')
conversation_ids_with_acked_sender_copcs = messages_from_conversations_with_copcs.loc[is_message_a_sender_copc, "conversationID"].unique()
conversation_ids_copcs_with_ack = list(set(conversation_ids_with_acked_sender_copcs) - set(conversation_ids_copcs_without_ack))
is_message_in_conversation_with_sender_copcs_acked = messages_from_conversations_with_copcs["conversationID"].isin(conversation_ids_copcs_with_ack)
messages_from_conversations_with_copcs_acked = messages_from_conversations_with_copcs[is_message_in_conversation_with_sender_copcs_acked]
# Value folded into the print call: a bare expression mid-cell is never displayed
print(f"Total number of conversations where all COPCs sent are acknowledged: {messages_from_conversations_with_copcs_acked['conversationID'].nunique()}")
# 6. Number of the above for which the number of COPC messages sent is less
# than the number implied by the attachment MID data.
copc_expected = attachment_mids.drop("_time", axis=1).drop_duplicates().groupby("conversationID").agg("count").rename({"attachmentID": "Number of COPCs expected"}, axis=1).fillna(0)
is_message_acked_sender_copc = (messages_from_conversations_with_copcs_acked['interaction_name']=='COPC') & (messages_from_conversations_with_copcs_acked['Message sender type'] == "Sender")
copcs_seen = messages_from_conversations_with_copcs_acked.loc[is_message_acked_sender_copc, ["conversationID", "GUID"]].fillna(0)
copcs_seen = copcs_seen.drop_duplicates().groupby("conversationID").agg("count").rename({"GUID": "Number of COPCs seen"}, axis=1).fillna(0)
copc_comparison_table = copc_expected.merge(copcs_seen, left_index=True, right_index=True, how="right").fillna(0)
missing_copc_messages = (copc_comparison_table["Number of COPCs seen"] < copc_comparison_table["Number of COPCs expected"]).value_counts()
# Use .get(True, 0): indexing with [True] raises KeyError when no conversation
# is missing COPCs, and a bare expression mid-cell is not displayed anyway.
print(f"Number of the above for which the number of COPC sent is less than the number in Attachment MID: {missing_copc_messages.get(True, 0)}")
missing_copc_messages
```
## Findings: Impact of the enhancement
1. We can now correctly categorise cases where some COPC were not sent and it got stuck - size of category F
```
print("% of transfers that would be re-categorised as a consequence of using attachment MIDs data")
# .get(True, 0) avoids a KeyError when no transfer is missing COPC messages;
# the expression is last in the cell, so Jupyter displays its value.
(missing_copc_messages.get(True, 0) / len(conversation_ids_without_error_code_12)) * 100
```
2. By the process of elimination, we could potentially categorise anything where the EHR was sent, but the final ack is missing into "stuck" or "not stuck", depending on if we are waiting for the transfer to complete or not. - size of category D
This would only be feasible, if there is not some other significant variation within category D (category G). E.g what if attachment fragments are sent but not acked.
This would be calculated by taking (3) as a percentage of (2).
```
print(f"{(len(conversation_ids_without_ehr_ack) / len(conversation_ids_without_error_code_12)) * 100}%")
```
## Addendum
Context
We recently did analysis using the attachment MIDs data to identify whether it would help us distinguish transfers that have been fully transferred from transfers that have missing attachments. We identified a small subset of transfers that had not fully transferred.
Scope
Perform analysis on a sample of transfers (longer than 1 month, maybe 3?) to identify any patterns in these transfers
Are they specific to one supplier?
Are they across a small group of practices, or across many?
Anything else?
```
# Conversations flagged earlier as having fewer COPCs than the MID data implies
missing_copc_messages_bool = (copc_comparison_table["Number of COPCs seen"] < copc_comparison_table["Number of COPCs expected"])
conversations_with_missing_copcs=copc_comparison_table[missing_copc_messages_bool].index
# Load the transfer-level dataset covering the same period
transfer_file_location = "s3://prm-gp2gp-data-sandbox-dev/transfers-sample-6/"
transfer_files = [
    "2021-3-transfers.parquet",
    "2021-4-transfers.parquet",
]
transfer_input_files = [transfer_file_location + f for f in transfer_files]
transfers_raw = pd.concat((
    pd.read_parquet(f)
    for f in transfer_input_files
))
transfers=transfers_raw.copy().set_index("conversation_id")
# Transfer counts per (sending supplier, requesting supplier) pathway
transfers_by_supplier_pathway=transfers.groupby(by=["sending_supplier", "requesting_supplier"]).agg({"date_requested": "count"}).rename({"date_requested": "Number of Transfers"}, axis=1)
transfers_with_missing_copcs = transfers.loc[conversations_with_missing_copcs]
missing_copcs_by_supplier_pathway=transfers_with_missing_copcs.groupby(by=["sending_supplier", "requesting_supplier"]).agg({"date_requested": "count"}).rename({"date_requested": "Number of Transfers with Missing COPC"}, axis=1)
# Estimated % of transfers with missing COPCs, per supplier pathway
supplier_pathways_missing_copc_comparison_table=transfers_by_supplier_pathway.merge(missing_copcs_by_supplier_pathway, left_index=True, right_index=True, how="outer").fillna(0)
supplier_pathways_missing_copc_comparison_table["Estimated % Missing"] = supplier_pathways_missing_copc_comparison_table["Number of Transfers with Missing COPC"]/supplier_pathways_missing_copc_comparison_table["Number of Transfers"]*100
supplier_pathways_missing_copc_comparison_table.sort_values(by="Number of Transfers", ascending=False)
# check if there's anything going on with the statuses / failure reasons
transfers_with_missing_copcs.groupby(by=["status", "failure_reason"]).agg({"date_requested": "count"}).rename({"date_requested": "Number of Transfers with Missing COPC"}, axis=1)
# Distribution of per-practice occurrence counts (requester side, then sender side)
transfers_with_missing_copcs["requesting_practice_asid"].value_counts().value_counts()
transfers_with_missing_copcs["sending_practice_asid"].value_counts().value_counts()
```
## Addendum Findings
1. Missing COPCs appear to be far more likely when EMIS is the sender.
2. For the failure reasons, while the majority are transferred not integrated, there is also a large volume of technical failures with a final error.
3. There does not appear to be a practice-specific issue: no single practice has this issue more than twice as either a sender or requestor.
| github_jupyter |
```
# Dependencies
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import random
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from sklearn.compose import ColumnTransformer
from sklearn.ensemble import BaggingRegressor, BaggingClassifier, RandomForestRegressor, RandomForestClassifier, GradientBoostingRegressor, GradientBoostingClassifier
from xgboost import XGBRegressor, XGBClassifier
from sklearn.metrics import mean_squared_error, f1_score
# Generate a unique seed, derived deterministically from the author's name
my_code = "Soloviev"
seed_limit = 2 ** 32
my_seed = int.from_bytes(my_code.encode(), "little") % seed_limit
# Read the data from file
example_data = pd.read_csv("datasets/Fish.csv")
example_data.head()
# Size of the validation and test sets: 20% of the data each
val_test_size = round(0.2*len(example_data))
print(val_test_size)
# Create the train, validation and test splits
random_state = my_seed
train_val, test = train_test_split(example_data, test_size=val_test_size, random_state=random_state)
train, val = train_test_split(train_val, test_size=val_test_size, random_state=random_state)
print(len(train), len(val), len(test))
# Scale the numeric columns to the [0, 1] range.
# The scaler is fitted on the training set only (avoids data leakage).
num_columns = ['Weight', 'Length1', 'Length2', 'Length3', 'Height', 'Width']
ct = ColumnTransformer(transformers=[('numerical', MinMaxScaler(), num_columns)], remainder='passthrough')
ct.fit(train)
# Transform each split; ColumnTransformer returns arrays, so wrap in DataFrame
sc_train = pd.DataFrame(ct.transform(train))
sc_test = pd.DataFrame(ct.transform(test))
sc_val = pd.DataFrame(ct.transform(val))
# Restore the column names (scaled numerics first, passthrough 'Species' last)
column_names = num_columns + ['Species']
sc_train.columns = column_names
sc_test.columns = column_names
sc_val.columns = column_names
# Set dtypes explicitly; this matters for xgboost
types = {
    'Weight' : 'float64',
    'Length1' : 'float64',
    'Length2' : 'float64',
    'Length3' : 'float64',
    'Height' : 'float64',
    'Width' : 'float64',
    'Species' : 'category'
}
sc_train = sc_train.astype(types)
sc_test = sc_test.astype(types)
sc_val = sc_val.astype(types)
# Task 1 - compare different ensemble models on a regression problem
# https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.BaggingRegressor.html
# https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestRegressor.html
# https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.GradientBoostingRegressor.html
# https://xgboost.readthedocs.io/en/latest/python/python_api.html#module-xgboost.sklearn
# Pick 4 numeric columns: three of them are predictors, one is the dependent variable
n = 4
labels = random.sample(num_columns, n)
y_label = labels[0]
x_labels = labels[1:]
print(x_labels)
print(y_label)
# Select the required columns for each split
x_train = sc_train[x_labels]
x_test = sc_test[x_labels]
x_val = sc_val[x_labels]
y_train = sc_train[y_label]
y_test = sc_test[y_label]
y_val = sc_val[y_label]
# Fit four kinds of ensemble regressors, compare their validation scores, and
# evaluate the best one on the held-out test set.
score_list = []
r_model_list = [BaggingRegressor(), RandomForestRegressor(), GradientBoostingRegressor(), XGBRegressor()]
# Iterate the models directly instead of indexing via range(len(...))
for model in r_model_list:
    model.fit(x_train, y_train)
    score = model.score(x_val, y_val)  # R^2 on the validation set
    score_list.append(score)
    print(score)
best_model_r = r_model_list[score_list.index(max(score_list))]
print(best_model_r)
test_pred = best_model_r.predict(x_test)
print(mean_squared_error(y_test, test_pred))
# Task 2 - compare different ensemble models on a classification problem
# https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.BaggingClassifier.html
# https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html
# https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.GradientBoostingClassifier.html
# https://xgboost.readthedocs.io/en/latest/python/python_api.html#module-xgboost.sklearn
# Pick 2 numeric columns as features; the class label is always 'Species'
n = 2
x_labels = random.sample(num_columns, n)
y_label = 'Species'
print(x_labels)
print(y_label)
# Select the required columns for each split
x_train = sc_train[x_labels]
x_test = sc_test[x_labels]
x_val = sc_val[x_labels]
y_train = sc_train[y_label]
y_test = sc_test[y_label]
y_val = sc_val[y_label]
x_train
# Fit four kinds of ensemble classifiers, compare their validation accuracy,
# and evaluate the best one on the test set with a weighted F1 score.
# NOTE(review): the original (Russian) comment asked for models with different
# 'criterion'/'splitter' settings — those are decision-tree parameters and the
# comment looks copied from another lab; the code compares ensemble types.
score_list = []
c_model_list = [BaggingClassifier(), RandomForestClassifier(), GradientBoostingClassifier(), XGBClassifier()]
# Iterate the models directly instead of indexing via range(len(...))
for model in c_model_list:
    model.fit(x_train, y_train)
    score = model.score(x_val, y_val)  # mean accuracy on the validation set
    score_list.append(score)
    print(score)
best_model_c = c_model_list[score_list.index(max(score_list))]
print(best_model_c)
test_pred = best_model_c.predict(x_test)
f1 = f1_score(y_test, test_pred, average='weighted')
print(f1)
```
| github_jupyter |
# Custom finite difference coefficients in Devito
## Introduction
When taking the numerical derivative of a function in Devito, the default behaviour is for 'standard' finite difference weights (obtained via a Taylor series expansion about the point of differentiation) to be applied. Consider the following example for some field $u(\mathbf{x},t)$, where $\mathbf{x}=(x,y)$. Let us define a computational domain/grid and differentiate our field with respect to $x$.
```
import numpy as np
import sympy as sp
from devito import Grid, TimeFunction
# Create our grid (computational domain): square Lx-by-Ly domain with Nx-by-Ny
# points, giving a spacing dx = Lx/(Nx-1)
Lx = 10
Ly = Lx
Nx = 11
Ny = Nx
dx = Lx/(Nx-1)
dy = dx
grid = Grid(shape=(Nx,Ny), extent=(Lx,Ly))
# Define u(x,y,t) on this grid, 2nd order in both time and space
u = TimeFunction(name='u', grid=grid, time_order=2, space_order=2)
# Define symbol used later to substitute the Laplacian into PDE expressions
H = sp.symbols('H')
```
Now, lets look at the output of $\partial u/\partial x$:
```
print(u.dx.evaluate)
```
By default the 'standard' Taylor series expansion result, where `h_x` represents the $x$-direction grid spacing, is returned. However, there may be instances when a user wishes to use 'non-standard' weights when, for example, implementing a dispersion-relation-preserving (DRP) scheme. See e.g.
[1] Christopher K.W. Tam, Jay C. Webb (1993). ”Dispersion-Relation-Preserving Finite Difference Schemes for Computational Acoustics.” **J. Comput. Phys.**, 107(2), 262--281. https://doi.org/10.1006/jcph.1993.1142
for further details. The use of such modified weights is facilitated in Devito via the 'symbolic' finite difference coefficients functionality. Let us start by re-defining the function $u(\mathbf{x},t)$ in the following manner:
```
u = TimeFunction(name='u', grid=grid, time_order=2, space_order=2, coefficients='symbolic')
```
Note the addition of the `coefficients='symbolic'` keyword. Now, when printing $\partial u/\partial x$ we obtain:
```
print(u.dx.evaluate)
```
Owing to the addition of the `coefficients='symbolic'` keyword the weights have been replaced by sympy functions. Now, take for example the weight `W(x - h_x, 1, u(t, x, y), x)`, the notation is as follows:
* The first `x - h_x` refers to the spatial location of the weight w.r.t. the evaluation point `x`.
* The `1` refers to the order of the derivative.
* `u(t, x, y)` refers to the function with which the weight is associated.
* Finally, the `x` refers to the dimension along which the derivative is being taken.
Symbolic coefficients can then be manipulated using the Devito 'Coefficient' and 'Substitutions' objects. First, let us consider an example where we wish to replace the coefficients with a set of constants throughout the entire computational domain.
```
from devito import Coefficient, Substitutions # Import the Devito Coefficient and Substitutions objects
# Grab the grid spatial dimensions: Note x[0] will correspond to the x-direction and x[1] to y-direction
x = grid.dimensions
# Form a Coefficient object (custom weights for the 1st derivative of u along x)
# and then a replacement-rules object (to pass to a Devito equation):
u_x_coeffs = Coefficient(1, u, x[0], np.array([-0.6, 0.1, 0.6]))
coeffs = Substitutions(u_x_coeffs)
```
Devito Coefficient objects take arguments in the following order:
1. Derivative order (in the above example this is the first derivative)
2. Function to which the coefficients 'belong' (in the above example this is the time function `u`)
3. Dimension on which coefficients will be applied (in the above example this is the x-direction)
4. Coefficient data. Since, in the above example, the coefficients have been applied as a 1-d numpy array replacement will occur at the equation level. (Note that other options are in development and will be the subject of future notebooks).
Now, lets form a Devito equation, pass it the Substitutions object, and take a look at the output:
```
from devito import Eq
# Build an equation from u.dt + u.dx, substituting our custom x-derivative weights
eq = Eq(u.dt+u.dx, coefficients=coeffs)
print(eq.evaluate)
```
We see that in the above equation the standard weights for the first derivative of `u` in the $x$-direction have now been replaced with our user defined weights. Note that since no replacement rules were defined for the time derivative (`u.dt`) standard weights have replaced the symbolic weights.
Now, let us consider a more complete example.
## Example: Finite difference modeling for a large velocity-contrast acoustic wave model
It is advised to read through the 'Introduction to seismic modelling' notebook located in devito/examples/seismic/tutorials/01_modelling.ipynb before proceeding with this example since much introductory material will be omitted here. The example now considered is based on an example introduced in
[2] Yang Liu (2013). ”Globally optimal finite-difference schemes based on least squares.” **GEOPHYSICS**, 78(4), 113--132. https://doi.org/10.1190/geo2012-0480.1.
See figure 18 of [2] for further details. Note that here we will simply use Devito to 'reproduce' the simulations leading to two results presented in the aforementioned figure. No analysis of the results will be carried out. The domain under consideration has a spatial extent of $2km \times 2km$ and, letting $x$ be the horizontal coordinate and $z$ the depth, a velocity profile such that $v_1(x,z)=1500ms^{-1}$ for $z\leq1200m$ and $v_2(x,z)=4000ms^{-1}$ for $z>1200m$.
```
#NBVAL_IGNORE_OUTPUT
from examples.seismic import Model, plot_velocity
%matplotlib inline
# Define a physical size: 2 km x 2 km with 10 m grid spacing
Lx = 2000
Lz = Lx
h = 10
Nx = int(Lx/h)+1
Nz = Nx
shape = (Nx, Nz) # Number of grid points
spacing = (h, h) # Grid spacing in m. The domain size is now 2km by 2km
origin = (0., 0.)
# Define a velocity profile. The velocity is in km/s:
# 1.5 km/s in the first 121 cells in depth (z <= 1200 m), 4.0 km/s below
v = np.empty(shape, dtype=np.float32)
v[:, :121] = 1.5
v[:, 121:] = 4.0
# With the velocity and model size defined, we can create the seismic model that
# encapsulates these properties. We also define the size of the absorbing layer as 10 grid points
nbl = 10
model = Model(vp=v, origin=origin, shape=shape, spacing=spacing,
              space_order=20, nbl=nbl, bcs="damp")
plot_velocity(model)
```
The seismic wave source term will be modelled as a Ricker Wavelet with a peak-frequency of $25$Hz located at $(1000m,800m)$. Before applying the DRP scheme, we begin by generating a 'reference' solution using a spatially high-order standard finite difference scheme and time step well below the model's critical time-step. The scheme will be 2nd order in time.
```
from examples.seismic import TimeAxis
t0 = 0.   # Simulation starts at t=0
tn = 500. # Simulation lasts 0.5 seconds (500 ms)
dt = 1.0  # Time step of 1.0 ms (original comment said 0.2 ms, which did not match the value)
time_range = TimeAxis(start=t0, stop=tn, step=dt)
#NBVAL_IGNORE_OUTPUT
from examples.seismic import RickerSource
# Source peak frequency in kHz: 0.015 kHz = 15 Hz.
# NOTE(review): the surrounding text says 25 Hz (0.025 kHz) — confirm which is
# intended; the value is kept as-is since the final norm checks depend on it.
f0 = 0.015
src = RickerSource(name='src', grid=model.grid, f0=f0,
                   npoint=1, time_range=time_range)
# First, position source centrally in all dimensions, then set depth
src.coordinates.data[0, :] = np.array(model.domain_size) * .5
src.coordinates.data[0, -1] = 800. # Depth is 800m
# We can plot the time signature to see the wavelet
src.show()
```
Now let us define our wavefield and PDE:
```
# Define the wavefield with the size of the model and the time dimension
u = TimeFunction(name="u", grid=model.grid, time_order=2, space_order=20)
# We can now write the PDE (H stands in for the Laplacian, substituted below)
pde = model.m * u.dt2 - H + model.damp * u.dt
# This discrete PDE can be solved in a time-marching way updating u(t+dt) from the previous time step.
# Devito has a shortcut for u(t+dt), which is u.forward. We can then rewrite the PDE as
# a time-marching update equation known as a stencil using customized SymPy functions
from devito import solve
stencil = Eq(u.forward, solve(pde, u.forward).subs({H: u.laplace}))
# Finally we define the source injection (no receivers are used in this example)
src_term = src.inject(field=u.forward, expr=src * dt**2 / model.m)
```
Now, lets create the operator and execute the time marching scheme:
```
from devito import Operator
# Build the operator from the update stencil plus the source injection, then run it
op = Operator([stencil] + src_term, subs=model.spacing_map)
#NBVAL_IGNORE_OUTPUT
op(time=time_range.num-1, dt=dt)
```
And plot the result:
```
#import matplotlib
import matplotlib.pyplot as plt
from matplotlib import cm
# Physical plotting coordinates, extended to include the absorbing layer
Lx = 2000
Lz = 2000
abs_lay = nbl*h  # absorbing-layer thickness in metres
dx = h
dz = dx
X, Z = np.mgrid[-abs_lay: Lx+abs_lay+1e-10: dx, -abs_lay: Lz+abs_lay+1e-10: dz]
levels = 100
# Filled contour plot of a wavefield snapshot (titled as t=500 ms), depth axis inverted
fig = plt.figure(figsize=(14, 7))
ax1 = fig.add_subplot(111)
cont = ax1.contourf(X,Z,u.data[0,:,:], levels, cmap=cm.binary)
fig.colorbar(cont)
ax1.axis([0, Lx, 0, Lz])
ax1.set_xlabel('$x$')
ax1.set_ylabel('$z$')
ax1.set_title('$u(x,z,500)$')
plt.gca().invert_yaxis()
plt.show()
```
We will now reimplement the above model applying the DRP scheme presented in [2].
First, since we wish to apply different custom FD coefficients in the upper and lower layers we need to define these two 'subdomains' using the `Devito SubDomain` functionality:
```
from devito import SubDomain
# Define 'upper' and 'lower' SubDomains so different FD weights can be applied
# on either side of the velocity contrast.
class Upper(SubDomain):
    name = 'upper'
    def define(self, dimensions):
        x, z = dimensions
        # Span the entire x-dimension; in z select a thickness of 80+nbl grid
        # points from the start ('left') of the dimension.
        # NOTE(review): the original comment described this as "all but the
        # bottom 80 (+boundary layer) cells", which does not obviously match a
        # 'left' thickness of 80+nbl given the 121-cell slow layer — confirm
        # against Devito's SubDomain thickness semantics before changing.
        return {x: x, z: ('left', 80+nbl)}
class Lower(SubDomain):
    name = 'lower'
    def define(self, dimensions):
        x, z = dimensions
        # Span the entire x-dimension; in z select a thickness of 121+nbl grid
        # points from the end ('right') of the dimension.
        return {x: x, z: ('right', 121+nbl)}
# Create these subdomains (instances are passed to Model below):
ur = Upper()
lr = Lower()
```
We now create our model incorporating these subdomains:
```
#NBVAL_IGNORE_OUTPUT
# Our scheme will now be 10th order (or less) in space.
order = 10
# Re-create the model, this time passing our 'upper' and 'lower' subdomains:
model = Model(vp=v, origin=origin, shape=shape, spacing=spacing,
              space_order=order, nbl=nbl, subdomains=(ur,lr), bcs="damp")
```
And re-define model related objects. Note that now our wave-field will be defined with `coefficients='symbolic'`.
```
t0 = 0.   # Simulation starts at t=0
tn = 500. # Simulation lasts 0.5 seconds (500 ms); original comment said "1 second"
dt = 1.0  # Time step of 1.0ms
time_range = TimeAxis(start=t0, stop=tn, step=dt)
f0 = 0.025 # Source peak frequency is 25Hz (0.025 kHz)
src = RickerSource(name='src', grid=model.grid, f0=f0,
                   npoint=1, time_range=time_range)
# Position the source centrally, then set its depth
src.coordinates.data[0, :] = np.array(model.domain_size) * .5
src.coordinates.data[0, -1] = 800. # Depth is 800m
# New wave-field, defined with symbolic coefficients so DRP weights can be substituted
u_DRP = TimeFunction(name="u_DRP", grid=model.grid, time_order=2, space_order=order, coefficients='symbolic')
```
We now create a stencil for each of our 'Upper' and 'Lower' subdomains defining different custom FD weights within each of these subdomains.
```
# The underlying pde is the same in both subdomains
pde_DRP = model.m * u_DRP.dt2 - H + model.damp * u_DRP.dt
# Define our custom FD coefficients:
x, z = model.grid.dimensions
# Upper layer: 11-tap symmetric second-derivative weights
weights_u = np.array([ 2.00462e-03, -1.63274e-02, 7.72781e-02,
                      -3.15476e-01, 1.77768e+00, -3.05033e+00,
                       1.77768e+00, -3.15476e-01, 7.72781e-02,
                      -1.63274e-02, 2.00462e-03])
# Lower layer: shorter effective stencil (the two outermost taps on each side are zero)
weights_l = np.array([ 0. , 0. , 0.0274017,
                      -0.223818, 1.64875 , -2.90467,
                       1.64875 , -0.223818, 0.0274017,
                       0. , 0. ])
# Create the Devito Coefficient objects: 2nd derivatives of u_DRP in x and z,
# with the dimensionless weights scaled by 1/h^2:
ux_u_coeffs = Coefficient(2, u_DRP, x, weights_u/x.spacing**2)
uz_u_coeffs = Coefficient(2, u_DRP, z, weights_u/z.spacing**2)
ux_l_coeffs = Coefficient(2, u_DRP, x, weights_l/x.spacing**2)
uz_l_coeffs = Coefficient(2, u_DRP, z, weights_l/z.spacing**2)
# And the replacement rules:
coeffs_u = Substitutions(ux_u_coeffs,uz_u_coeffs)
coeffs_l = Substitutions(ux_l_coeffs,uz_l_coeffs)
# Create a stencil for each subdomain, substituting the Laplacian for H:
stencil_u = Eq(u_DRP.forward, solve(pde_DRP, u_DRP.forward).subs({H: u_DRP.laplace}),
               subdomain = model.grid.subdomains['upper'], coefficients=coeffs_u)
stencil_l = Eq(u_DRP.forward, solve(pde_DRP, u_DRP.forward).subs({H: u_DRP.laplace}),
               subdomain = model.grid.subdomains['lower'], coefficients=coeffs_l)
# Source term:
src_term = src.inject(field=u_DRP.forward, expr=src * dt**2 / model.m)
# Create the operator, incorporating both upper and lower stencils:
op = Operator([stencil_u, stencil_l] + src_term, subs=model.spacing_map)
```
And now execute the operator:
```
#NBVAL_IGNORE_OUTPUT
# Run the DRP operator over the full time range
op(time=time_range.num-1, dt=dt)
```
And plot the new results:
```
# Contour plot of the DRP wavefield snapshot, same layout as the earlier plot
fig = plt.figure(figsize=(14, 7))
ax1 = fig.add_subplot(111)
cont = ax1.contourf(X,Z,u_DRP.data[0,:,:], levels, cmap=cm.binary)
fig.colorbar(cont)
ax1.axis([0, Lx, 0, Lz])
ax1.set_xlabel('$x$')
ax1.set_ylabel('$z$')
ax1.set_title('$u_{DRP}(x,z,500)$')
plt.gca().invert_yaxis()
plt.show()
```
Finally, for comparison, lets plot the difference between the standard 20th order and optimized 10th order models:
```
# Plot the absolute difference between the 20th-order reference field u and the
# optimized 10th-order DRP field u_DRP
fig = plt.figure(figsize=(14, 7))
ax1 = fig.add_subplot(111)
cont = ax1.contourf(X,Z,abs(u_DRP.data[0,:,:]-u.data[0,:,:]), levels, cmap=cm.binary)
fig.colorbar(cont)
ax1.axis([0, Lx, 0, Lz])
ax1.set_xlabel('$x$')
ax1.set_ylabel('$z$')
plt.gca().invert_yaxis()
plt.show()
#NBVAL_IGNORE_OUTPUT
# Wavefield norm checks (regression values fixed by the setup above)
assert np.isclose(np.linalg.norm(u.data[-1]), 139.108, atol=0, rtol=1e-4)
assert np.isclose(np.linalg.norm(u_DRP.data[-1]), 83.636, atol=0, rtol=1e-4)
```
| github_jupyter |
```
import os
import pickle
from tqdm import tqdm, trange
import os
import numpy as np
from collections import defaultdict
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
%matplotlib inline
sns.set_style("darkgrid")
import matplotlib.cm as cm
```
## Now visualize the empirical game matrix
```
# Load the pickled cross-play evaluation results and group them by the
# (regime, regime) pair stored in the first two fields of each record.
# f = open('cross_play_evals/results_cross_play_20K_episodes_binding_for_payoff_matrix_1_2021_03_23_12_37_37.pickle', "rb")
# f = open('cross_play_evals/results_cross_play_20K_episodes_binding_prosocial_2021_05_15_09_10_05.pickle', "rb")
# with-block closes the file handle (the original open() was never closed)
with open('cross_play_evals/results_more_games_cross_play_20K_episodes_binding_prosocial_2021_05_20_01_53_31.pickle', 'rb') as f:
    binary_data = f.read() # 10 mins max
results = pickle.loads(binary_data) # 1 min
res_dict = defaultdict(list)
for res in tqdm(results):
    # res[0], res[1]: regime identifiers; res[3]: the evaluation payload
    res_dict[(res[0], res[1])].append(res[3])
k = list(res_dict.keys())
k
len(res_dict[k[0]])
res_dict[k[0]][0]
import datetime
from ray import tune
import os
import torch
import pickle
from tqdm import tqdm, trange
from tqdm.contrib.concurrent import process_map
import numpy as np
from collections import defaultdict
import multiprocessing
from marltoolbox.algos.alternating_offers.alt_offers_training import AltOffersTraining, run_episode
from marltoolbox.algos.alternating_offers.envs.alt_offers_env import AltOffersEnv
from marltoolbox.experiments.tune_class_api.alternating_offers.cond_params import cond_params_iter, cond_params_iter_pop_training, default_cond_params, round_cond_params
# Configuration for which training run to evaluate; the commented
# alternatives are earlier experiments kept for provenance.
# eval_episodes = 128
# checkpoint_dir_name = 'checkpoint_312' # for 20K episodes
# cond_params - changing agent parameters and not trial parameters here
cond_params_list = list(cond_params_iter())
# root_path = '/home/alex_grig_lyzhov/ray_results/cross_play_20K_episodes/2021_03_18/22_47_17'
# binding = False
# root_path = '/home/alex_grig_lyzhov/ray_results/cross_play_20K_episodes_binding/2021_03_20/20_17_24'
# binding = True
# root_path = '/home/alexander/ray_results/cross_play_20K_episodes_cheap/2021_03_24/05_30_06'
# binding = False
# root_path = '/home/alexander/ray_results/cross_play_20K_episodes_binding/2021_03_23/12_37_37'
# binding = True
root_path = '/home/alexander/ray_results/cross_play_20K_episodes_binding_prosocial/2021_05_15/09_10_05' # IMPORTANT!!!!!!!
binding = True
eval_episodes = 64
# checkpoint_dir_name = 'checkpoint_1'
checkpoint_dir_name = 'checkpoint_312' # for 20K episodes
# binding = True
# default_cond_params = default_cond_params
# root_path = '/home/alexander/ray_results/population_experiments/2021_04_20/14_01_26'
# cond_params_list = list(cond_params_iter_pop_training())
# root_path = '/home/alexander/ray_results/cross_play_20K_episodes_best_response_fixed_agent_0/2021_05_03/00_35_50'
# root_path = '/home/alexander/ray_results/cross_play_20K_episodes_best_response_fixed_agent_1/2021_05_03/09_05_24'
# cond_params_list = list(cond_params_iter())
# Walk every trial directory under root_path and load its config
# (params.pkl) plus both agents' model weights from the chosen checkpoint;
# remember the indices of trials that used the default condition params.
default_indices = []
trial_info = []
for i, trial_name in enumerate(os.listdir(root_path)):
    trial_path = os.path.join(root_path, trial_name)
    # print(trial_path)
    if os.path.isdir(trial_path):
        cur_trial_info = {}
        # with-block closes the params file (original leaked the handle
        # via pickle.load(open(...)))
        with open(os.path.join(trial_path, 'params.pkl'), 'rb') as pf:
            cur_trial_info['params'] = pickle.load(pf)
        model_path = os.path.join(trial_path, checkpoint_dir_name, 'model.pth')
        with open(model_path, 'rb') as f:
            state = torch.load(f)
        cur_trial_info['agents'] = []
        for agent_i in range(2):
            cur_trial_info['agents'].append(state['agent%s' % agent_i]['model_state']) # either a single model or a list of population models
        trial_info.append(cur_trial_info)
        # disable for population training
        if cur_trial_info['params']['cond_params'] == default_cond_params: # if custom params == default params
            default_indices.append(len(trial_info)-1) # correspond to default_vs_default plays
# Bucket the loaded trials by their training regime ('match_mode'); the
# default-parameter trials also get their own 'default_vs_default' bucket.
matches = {}
# matches['custom_vs_custom_1'] = [trial for trial in trial_info if (trial['params']['match_mode'] == 'custom_vs_custom_1')]
# matches['custom_vs_custom_2'] = [trial for trial in trial_info if (trial['params']['match_mode'] == 'custom_vs_custom_2')]
# matches['default_vs_custom'] = [trial for trial in trial_info if (trial['params']['match_mode'] == 'default_vs_custom')]
# matches['custom_vs_default'] = [trial for trial in trial_info if (trial['params']['match_mode'] == 'custom_vs_default')]
# disable for population training
matches['default_vs_default'] = [trial_info[default_i] for default_i in default_indices]
all_training_modes = list(set([trial['params']['match_mode'] for trial in trial_info]))
for mode in all_training_modes:
    matches[mode] = [trial for trial in trial_info if trial['params']['match_mode'] == mode]
# Inspection moved below the definition of `matches` (originally it ran
# first and raised a NameError when executed top-to-bottom).
len(matches['custom_vs_custom_1'])
# [m['params']['cond_params'] for m in matches['custom_vs_custom_1']]
# now sort training matches for various regimes in the order prescribed by cond_params_list:
# match_indices[regime][cond_params_i] -> list of trial indices (within
# matches[regime]) whose cond_params round-match that entry.
match_indices = defaultdict(list)
for match_regime, match_trials in matches.items():
    # disable for population training
    if match_regime == 'default_vs_default':
        # default trials are interchangeable, so every cond_params slot
        # maps to all of them
        match_indices['default_vs_default'] = [list(range(len(matches['default_vs_default'])))
                                               for cond_params_i in range(len(cond_params_list))]
    else:
        for cond_params_i, cond_params in enumerate(cond_params_list):
            # eq_list = [(i, trial) for (i, trial) in enumerate(match_trials) if round_cond_params(trial['params']['cond_params']) == cond_params]
            # print(cond_params)
            eq_list = [(i, trial) for (i, trial) in enumerate(match_trials) if round_cond_params(trial['params']['cond_params']) == round_cond_params(cond_params)]
            # print(cond_params)
            # print(len(eq_list))
            indices = [eq_trial[0] for eq_trial in eq_list]
            match_indices[match_regime].append(indices)
# Assemble the 5x5-run empirical game bimatrix: cell
# [run_i*8+ind_0, run_j*8+ind_1, :] holds the two players' payoffs when an
# agent of type ind_0 from run run_i meets an agent of type ind_1 from
# run run_j.
# matrices_list = []
cond_params_i_list = [0,] + list(range(7, 14)) # must be synced to corresponding loop in eval
matrix = np.zeros((len(cond_params_i_list)*5, len(cond_params_i_list)*5, 2))
for run_i in range(5):
    for run_j in range(5):
        # args_list = []
        # for agent_0_match_regime, agent_1_match_regime in (
        #     ('custom_vs_custom_1', 'custom_vs_custom_2'), # include custom_vs_custom_2 agent 1 which isn't playing with others really.
        #     # ('default_vs_default', 'default_vs_default'), # kinda mirrors custom_vs_custom_1, default_vs_default with 0?
        #     # ('custom_vs_custom_1', 'default_vs_default'), # 2.1
        #     # ('default_vs_default', 'custom_vs_custom_1'), # 2.2
        #     # ('default_vs_custom', 'custom_vs_custom_1'), # 3.1 # includes default_vs_custom, don't have all the cells for it
        #     # ('custom_vs_custom_1', 'default_vs_custom'), # 4.1 # this was disrupted by stop of computation!
        #     ):
        agent_0_match_regime = 'custom_vs_custom_1'
        agent_1_match_regime = 'custom_vs_custom_2'
        # len_sum replays the flat ordering used when the eval results were
        # produced so that offsets into cur_res_dict line up.
        len_sum = 0
        # cond_params_i_list = list(range(7, 14))
        mat0 = np.zeros((len(cond_params_i_list), len(cond_params_i_list)))
        mat1 = np.zeros((len(cond_params_i_list), len(cond_params_i_list)))
        # for ind_0, cond_params_i_0 in enumerate([0,] + list(range(7, 14))): # over all types of agents
        for ind_0, cond_params_i_0 in enumerate(cond_params_i_list): # over all types of agents
            for ind_1, cond_params_i_1 in enumerate(cond_params_i_list): # over all types of agents
                # trial_0_indices = match_indices[agent_0_match_regime][cond_params_i_0][2:3]
                # trial_1_indices = match_indices[agent_1_match_regime][cond_params_i_1][2:3]
                trial_0_indices = match_indices[agent_0_match_regime][cond_params_i_0]
                trial_1_indices = match_indices[agent_1_match_regime][cond_params_i_1]
                # print(trial_0_indices)
                # print(trial_1_indices)
                eval_trials_list = [(trial_0_i, trial_1_i) for trial_0_i in trial_0_indices for trial_1_i in trial_1_indices]
                len_sum += len(eval_trials_list)
                # print(trial_0_indices)
                # print(trial_1_indices)
                # if agent_0_match_regime == agent_1_match_regime: # removing this clause completely!
                #     eval_trials_list = [(trial_0_i, trial_0_i) for trial_0_i in trial_0_indices] # to match agents trained together
                # else:
                # print(agent_0_match_regime, agent_1_match_regime, cond_params_i_0, cond_params_i_1, len_sum, len(cur_res_dict))
                # r0 = np.concatenate([inner['player0_share_of_max'] for inner in cur_res_dict[len_sum-len(eval_trials_list):len_sum]]).mean()
                # r1 = np.concatenate([inner['player1_share_of_max'] for inner in cur_res_dict[len_sum-len(eval_trials_list):len_sum]]).mean()
                # print(len(cur_res_dict))
                # print(len_sum)
                # print(len(eval_trials_list))
                # print(len(cur_res_dict[len_sum-len(eval_trials_list):len_sum]))
                # print(agent_0_match_regime, agent_1_match_regime, cond_params_i_0, cond_params_i_1, r0, r1)
                # NOTE(review): the key here is ((regime0, regime1), 0) while
                # res_dict was filled with keys (res[0], res[1]) — presumably
                # res[0] is already that nested pair; verify against the eval
                # script that produced the pickle.
                cur_res_dict = res_dict[(agent_0_match_regime, agent_1_match_regime), 0]
                # print(eval_trials_list)
                # print(run_i, run_j)
                # print((trial_0_indices[run_i], trial_1_indices[run_j]))
                eval_trials_ind = eval_trials_list.index((trial_0_indices[run_i], trial_1_indices[run_j]))
                # r0 = cur_res_dict[len_sum - len(eval_trials_list) + eval_trials_ind]['player0_share_of_max'].mean()
                # r1 = cur_res_dict[len_sum - len(eval_trials_list) + eval_trials_ind]['player1_share_of_max'].mean()
                r0 = cur_res_dict[len_sum - len(eval_trials_list) + eval_trials_ind][0]
                r1 = cur_res_dict[len_sum - len(eval_trials_list) + eval_trials_ind][2]
                matrix[run_i*len(cond_params_i_list)+ind_0, run_j*len(cond_params_i_list)+ind_1, 0] = r0
                matrix[run_i*len(cond_params_i_list)+ind_0, run_j*len(cond_params_i_list)+ind_1, 1] = r1
                # mat0[ind_0, ind_1] = r0
                # mat1[ind_0, ind_1] = r1
        # mat0 = mat0[1:8, 1:8]
        # mat1 = mat1[1:8, 1:8]
        # matrices_list.append(np.dstack((mat0, mat1)))
        # for trial_0_i, trial_1_i in eval_trials_list:
        #     trial_0 = matches[agent_0_match_regime][trial_0_i]
        #     trial_1 = matches[agent_1_match_regime][trial_1_i]
        #     if 'default' in agent_0_match_regime.split('_vs_')[0]:
        #         agent_0_params = default_cond_params
        #     else:
        #         agent_0_params = trial_0['params']['cond_params']
        #     if 'default' in agent_1_match_regime.split('_vs_')[1]:
        #         agent_1_params = default_cond_params
        #     else:
        #         agent_1_params = trial_1['params']['cond_params']
        #     else:
        #     args_list.append(((agent_0_match_regime, agent_1_match_regime), 0,
        #                       (trial_0_i, trial_1_i), (agent_0_params, agent_1_params),))
# Drop the default-params row/column (index 0 of each 8-wide run block)
# so only the 7 custom-parameter agent types remain per run, then save.
matrix = np.delete(matrix, [8*i for i in range(5)], axis=0)
matrix = np.delete(matrix, [8*i for i in range(5)], axis=1)
matrix.shape
# pickle.dump(matrix, open('empirical_game_matrices_prosociality_coeff_0.3', 'wb'))
# with-block closes the output file (original leaked the handle)
with open('empirical_game_matrices_prosociality_coeff_1.0', 'wb') as out_f:
    pickle.dump(matrix, out_f)
matrix.shape
# Reload a previously saved empirical game matrix and inspect the
# run0-vs-run1 quadrant (rows 1:7 x cols 8:14).
# with-block closes the file (original leaked the handle)
with open('empirical_game_matrices_prosociality_coeff_0.3', 'rb') as in_f:
    m = pickle.load(in_f)
m.shape
np.diagonal(m[1:7, 8:14, :]).mean()  # agents matched against their own type
m[1:7, 8:14, :][np.where(~np.eye(6, dtype=bool))].mean()  # off-diagonal pairings
m[1:7, 8:14, :].shape
sns.heatmap(m[1:7, 8:14, 0])
sns.heatmap(m[1:7, 8:14, 1])
sns.heatmap(m[1:7, 8:14, :].sum(axis=2))
```
### Syncing to my plot values
```
# Sanity-check the assembled matrix against values previously read off a
# plot; each difference should be ~0 (within run-to-run std).
# NOTE(review): x and y come from an earlier plotting cell not shown here.
print(x[1:7], y[1:7]) # default vs custom, starts from 1
# [mat0[0, i] for i in range(1, mat0.shape[0])], [mat1[0, i] for i in range(1, mat1.shape[0])] # NO, doesn't match at all!
np.array([mat0[0, i] for i in range(1, mat0.shape[0])]) - np.array((0.651958, 0.57897437, 0.5209278, 0.4904595, 0.44816065, 0.3652433))
np.array([mat1[0, i] for i in range(1, mat1.shape[0])]) - np.array((0.2946116, 0.30257654, 0.31327945, 0.29468426, 0.29970247, 0.25992543))
for i in range(5):
    # per-run 7x7 payoff slices for player 0 and player 1
    mat0 = matrix[7*i:7*(i+1), 7*i:7*(i+1), 0]
    mat1 = matrix[7*i:7*(i+1), 7*i:7*(i+1), 1]
    print(np.array([mat0[0, i] for i in range(1, mat0.shape[0])]) - np.array((0.651958, 0.57897437, 0.5209278, 0.4904595, 0.44816065, 0.3652433)))
print(x[7:14], y[7:14]) # custom vs custom, starts from 0
np.array([mat0[i, i] for i in range(mat0.shape[0])]) - np.array((0.7376215, 0.6129167, 0.54170287, 0.4994951, 0.46781248, 0.4270572, 0.42730743))
np.array([mat1[i, i] for i in range(mat1.shape[0])]) - np.array((0.30001095, 0.39960298, 0.41811663, 0.42247146, 0.40760374, 0.40898776, 0.3982173))
# Oh OK. Actually it's all within the std now! Hurray!
# So it's closer to what I had on the plot for custom vs custom, but there's also much less variation between runs for custom vs custom. So it's OK.
```
## Bootstrap
```
# bootstrap (mat0, mat1) 20 times -> 20 replicate bimatrices
# for each replicate bimatrix, compute meta-policy for principal 0
# for each replicate map meta-policy for bootstrapped matrix to meta-policy for original matrix
# replicates_list = []
# for replicate_i in range(40):
#     x_indices = np.random.choice(list(range(mat0.shape[0])), size=mat0.shape[0], replace=True)
#     y_indices = np.random.choice(list(range(mat0.shape[1])), size=mat0.shape[1], replace=True)
#     cur_mat0 = mat0[x_indices, :][:, y_indices]
#     cur_mat1 = mat1[x_indices, :][:, y_indices]
#     replicates_list.append((cur_mat0, cur_mat1, x_indices, y_indices))
# pickle.dump(replicates_list, open('replicates_list.pickle', 'wb'))
# For each of the 5 runs, resample rows/columns of that run's 7x7 bimatrix
# with replacement 20 times, keeping the index vectors so a bootstrapped
# meta-policy can later be mapped back to the original agent ordering.
outer_list = []
for i in range(5):
    inner_list = []
    cur_mat = matrix[7*i:7*(i+1), 7*i:7*(i+1), :]
    for replicate_i in range(20):
        x_indices = np.random.choice(list(range(cur_mat.shape[0])), size=cur_mat.shape[0], replace=True)
        y_indices = np.random.choice(list(range(cur_mat.shape[1])), size=cur_mat.shape[1], replace=True)
        bootstrapped = cur_mat[x_indices, :, :][:, y_indices, :]
        inner_list.append((bootstrapped, x_indices, y_indices))
    outer_list.append(inner_list)
# with-block closes the output file (original leaked the handle)
with open('bootstrapped_replicates_prosociality_coeff_0.3', 'wb') as out_f:
    pickle.dump(outer_list, out_f)
```
Example of using it:
```
# Demonstration: map bootstrap-replicate meta-policies back onto the
# original (un-resampled) strategy indices by accumulating probability
# mass through the saved resampling index vectors.
# NOTE(review): the random dirichlet draws below are placeholders, as the
# inline comments say — not real meta-solutions.
replicates_list = pickle.load(open('replicates_list.pickle', 'rb'))
principal0_replicates = replicates_list[:20]
principal1_replicates = replicates_list[20:]
principal0_meta_policies = []
for (mat0, mat1, x_indices, y_indices) in principal0_replicates:
    # replace these with meta-solutions for bootstrapped matrices mat0 and mat1
    dist0 = np.random.dirichlet(np.ones(mat0.shape[0]),size=1)[0]
    dist1 = np.random.dirichlet(np.ones(mat0.shape[1]),size=1)[0]
    # fold the bootstrapped row-policy mass back onto original row indices
    dist0_orig_space = np.zeros_like(dist0)
    for i, val in enumerate(dist0):
        orig_x_index = x_indices[i]
        dist0_orig_space[orig_x_index] += val
    principal0_meta_policies.append(dist0_orig_space)
principal1_meta_policies = []
for (mat0, mat1, x_indices, y_indices) in principal1_replicates:
    # replace these with meta-solutions for bootstrapped matrices mat0 and mat1
    dist0 = np.random.dirichlet(np.ones(mat0.shape[0]),size=1)[0]
    dist1 = np.random.dirichlet(np.ones(mat0.shape[1]),size=1)[0]
    # same mapping for principal 1's column policy
    dist1_orig_space = np.zeros_like(dist1)
    for i, val in enumerate(dist1):
        orig_y_index = y_indices[i]
        dist1_orig_space[orig_y_index] += val
    principal1_meta_policies.append(dist1_orig_space)
```
| github_jupyter |
## 2. FeatureExtraction_ORB_Batch
## Run name
```
# Build a unique, timestamped run identifier used to name the feature files.
import time
project_name = 'Google_LandMark_Rec'
step_name = 'FeatureExtraction_ORB_Batch'
time_str = time.strftime("%Y%m%d_%H%M%S", time.localtime())
run_name = project_name + '_' + step_name + '_' + time_str
print('run_name: ' + run_name)
t0 = time.time()  # wall-clock start, reported at the end of the notebook
```
## Important params
```
batch_size = 50000  # images per pickled ORB-feature batch file
```
## Import PKGs
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
%matplotlib inline
from IPython.display import display
import os
import sys
import gc
import math
import shutil
import zipfile
import pickle
import h5py
import cv2
from PIL import Image
from tqdm import tqdm
import multiprocessing
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, accuracy_score
cpu_amount = multiprocessing.cpu_count()
print('cpu_amount: ', cpu_amount)
```
## Project folders
```
# Resolve all project input/output paths relative to the working directory.
cwd = os.getcwd()
feature_folder = os.path.join(cwd, 'feature')
input_folder = os.path.join(cwd, 'input')
output_folder = os.path.join(cwd, 'output')
model_folder = os.path.join(cwd, 'model')
org_train_folder = os.path.join(input_folder, 'org_train')
org_test_folder = os.path.join(input_folder, 'org_test')
train_folder = os.path.join(input_folder, 'data_train')
val_folder = os.path.join(input_folder, 'data_val')
test_folder = os.path.join(input_folder, 'data_test')
test_sub_folder = os.path.join(test_folder, 'test')
train_csv_file = os.path.join(input_folder, 'train.csv')
test_csv_file = os.path.join(input_folder, 'test.csv')
# NOTE(review): despite the name this is a csv file path, not a folder
sample_submission_folder = os.path.join(input_folder, 'sample_submission.csv')
```
## Preview csv
```
# Load the competition csvs and build an image-id -> landmark-id lookup.
train_csv = pd.read_csv(train_csv_file)
print('train_csv.shape is {0}.'.format(train_csv.shape))
display(train_csv.head(2))
test_csv = pd.read_csv(test_csv_file)
print('test_csv.shape is {0}.'.format(test_csv.shape))
display(test_csv.head(2))
train_id = train_csv['id']
train_landmark_id = train_csv['landmark_id']
print('len(train_landmark_id) = \t%s' % len(list(set(train_landmark_id))))
id_2_landmark_id_dict = dict(zip(train_id, train_landmark_id))
print('len(id_2_landmark_id_dict) = \t%d' % len(id_2_landmark_id_dict))
# spot-check the first two mappings
index = 0
print('id: %s, \tlandmark_id:%s' % (train_id[index], id_2_landmark_id_dict[train_id[index]]))
index = 1
print('id: %s, \tlandmark_id:%s' % (train_id[index], id_2_landmark_id_dict[train_id[index]]))
```
## FeatureExtraction_ORB
```
def image_detect_and_compute(image_file, clf):
    """Detect interest points in an image and return their descriptors.

    Args:
        image_file: path to the image on disk.
        clf: an OpenCV feature extractor (here an ORB instance) exposing
            ``detectAndCompute``.

    Returns:
        The descriptor array, or None if the detector found no keypoints
        (``detectAndCompute`` returns ``None`` descriptors in that case).

    Raises:
        FileNotFoundError: if the image cannot be read.
    """
    img = cv2.imread(image_file)
    if img is None:
        # cv2.imread signals failure by returning None instead of raising,
        # which previously surfaced as a cryptic cvtColor error.
        raise FileNotFoundError('cannot read image: %s' % image_file)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    kp, des = clf.detectAndCompute(img, None)
    return des
# Instantiate the ORB detector (up to 500 keypoints per image) and
# smoke-test the paths on the first few training images.
n_features = 500
clf = cv2.ORB_create(n_features)
org_train_images = os.listdir(org_train_folder)[:10]
print(len(org_train_images))
image_file = os.path.join(org_train_folder, org_train_images[0])
print(image_file)
```
## Official code
```
%%time
def dump_pickle_feature_batch(run_name, dataset_name, image_features, batch_num):
    """Pickle one batch of {image_id: descriptor} features into feature_folder.

    The file is named feature_<run>_<dataset>_b<batch>.pickle.
    """
    image_features_file = os.path.join(feature_folder, 'feature_%s_%s_b%s.pickle' % (run_name, dataset_name, batch_num))
    print('Dump: ', image_features_file, end=' ')
    print(len(image_features.keys()))
    # Use a context manager so the handle is closed promptly (the original
    # passed an anonymous open() straight to pickle.dump and leaked it).
    # The third argument (protocol) is kept as in the original call.
    with open(image_features_file, 'wb') as f:
        pickle.dump(image_features, f, True)
def load_pickle_feature_batch(run_name, dataset_name, batch_num):
    """Load one previously dumped batch of image features.

    Returns the {image_id: descriptor} dict stored by
    dump_pickle_feature_batch for the same (run, dataset, batch) triple.
    """
    image_features_file = os.path.join(feature_folder, 'feature_%s_%s_b%s.pickle' % (run_name, dataset_name, batch_num))
    # Context manager closes the handle (original leaked it via
    # pickle.load(open(...))).
    with open(image_features_file, 'rb') as f:
        image_features = pickle.load(f)
    print('Load: ', image_features_file, end=' ')
    print(len(image_features.keys()))
    return image_features
# dump_pickle_feature(run_name, image_features)
# image_features = load_pickle_feature(run_name)
def feature_extraction(folder, dataset_name, run_name=run_name, batch_size=500):
    """Extract ORB descriptors for every image in *folder*, in pickled batches.

    Iterates the directory in chunks of *batch_size* images, computes
    descriptors with the module-level `clf` detector, dumps each chunk via
    dump_pickle_feature_batch(), and frees the chunk between iterations.
    """
    image_names = os.listdir(folder)
    amount = len(image_names)
    # amount = 1000
    # Ceil-division: the original `int(amount / batch_size) + 1` dumped a
    # spurious empty batch whenever amount was an exact multiple of
    # batch_size (and one empty batch for an empty folder).
    batch_count = math.ceil(amount / batch_size)
    print('amount: %s, batch_count: %s' % (amount, batch_count))
    for i in range(batch_count):
        image_features = {}
        for j, image_name in enumerate(image_names[i*batch_size: (i+1)*batch_size]):
            image_id = image_name[:-4]  # strip the 4-char extension, e.g. '.jpg'
            image_file = os.path.join(folder, image_name)
            des = image_detect_and_compute(image_file, clf)
            image_features[image_id] = des
            if j < 3:
                # Peek at the first few descriptors as a sanity check.
                print(image_name, image_id, end=' ')
                if des is None:
                    # detectAndCompute returns None descriptors when no
                    # keypoints are found; the original crashed on des.shape.
                    print('(no keypoints)')
                else:
                    print(des.shape, end=' ')
                    print(des[0][:10])
            if (j+1) % 1000 == 0:
                # progress tick every 1000 images
                print(int((j+1)/1000), end=' ')
        dump_pickle_feature_batch(run_name, dataset_name, image_features, i)
        del image_features
        gc.collect()
# Run the extraction over train and test sets, then reload batch 1 of each
# and print a few descriptors as a sanity check.
feature_extraction(org_train_folder, 'train', run_name, batch_size)
image_features = load_pickle_feature_batch(run_name, 'train', 1)
print('*'*80)
print('len_image_features=', len(image_features.keys()))
for i, image_id in enumerate(list(image_features.keys())[:3]):
    print('image_id: %s,\t landmark_id:%s,\t feature_shape: ' % (image_id, id_2_landmark_id_dict[image_id]), image_features[image_id].shape, end=' ')
    print(image_features[image_id][0][:10])
feature_extraction(org_test_folder, 'test', run_name, batch_size)
image_features = load_pickle_feature_batch(run_name, 'test', 1)
print('*'*80)
print('len_image_features=', len(image_features.keys()))
for i, image_id in enumerate(list(image_features.keys())[:3]):
    print('image_id: %s,\t feature_shape: %s' % (image_id, image_features[image_id].shape), end=' ')
    print(image_features[image_id][0][:10])
print('Time cost: %.2f' % (time.time() - t0))
```
| github_jupyter |
```
import utils.cs_vqe_tools as c_tools
import utils.qonversion_tools as qonvert
import utils.bit_tools as bit
import utils.circuit_tools as circ
import utils.linalg_tools as la
import utils.plotting_tools as plot
import legacy.eigenstate_generator as eig_old
import cs_vqe_classes.cs_vqe as c
import cs_vqe_classes.eigenstate as eig
import cs_vqe_classes.cs_vqe_circuit as cs_circ
import ast
import numpy as np
import matplotlib.pyplot as plt
from copy import deepcopy
from openfermion.linalg import LinearQubitOperator, get_sparse_operator, get_ground_state
import itertools
from statistics import median, mean
from matplotlib.ticker import FormatStrFormatter
# Read the molecular Hamiltonian dictionary (a python literal on disk) and
# keep only species small enough (< 10 qubits) to diagonalise exactly.
f = open("hamiltonians/hamiltonians.txt","r")
hamiltonians = ast.literal_eval(f.read())
f.close()
small_ham_keys = []
for h in hamiltonians.keys():
    num_qubits = hamiltonians[h][1]
    if num_qubits < 10:
        small_ham_keys.append(h)
print(len(small_ham_keys))
small_ham_keys
# For each small molecule, compare the exact noncontextual ground-state
# energy against the energy obtained after restricting to the
# +1-eigenspace of A (both with and without the A-rotations), as
# generators are moved out of the noncontextual part one at a time.
nc_mols={}
for speciesname in small_ham_keys:
    print(speciesname)
    num_qubits = hamiltonians[speciesname][1] # number of qubits (all of these Hamiltonians have been tapered for molecular symmetries)
    hamiltonian = hamiltonians[speciesname][2] # full Hamiltonian
    terms_noncon = list(hamiltonians[speciesname][3].keys()) # noncontextual part of Hamiltonian, found by greedy DFS
    mol_rot = c.cs_vqe(hamiltonian, terms_noncon, num_qubits, rot_A=True)
    mol_unrot = c.cs_vqe(hamiltonian, terms_noncon, num_qubits, rot_A=False)
    G = mol_rot.generators()[0]
    A_rot = mol_rot.generators()[1]
    A_unrot = mol_unrot.generators()[1]
    # NOTE(review): the next two values are recomputed as matrices two
    # lines below and are otherwise unused.
    eig_rot_proj = la.eigenstate_projector(A_rot, num_qubits)
    eig_unrot_proj = la.eigenstate_projector(A_unrot, num_qubits)
    eig_rot_mat = np.matrix(la.eigenstate_projector(A_rot, num_qubits))
    eig_unrot_mat = np.matrix(la.eigenstate_projector(A_unrot, num_qubits))
    gs_true=[]
    gs_rot_proj=[]
    gs_unrot_proj=[]
    for index in range(num_qubits):
        # remove the last `index` generators (in reverse order) from the
        # noncontextual part
        removed_index = list(range(0, index))
        removed_index.reverse()
        removed_generators = [list(G.keys())[i] for i in removed_index]
        Z_indices = [g.find('Z') for g in removed_generators]
        #nc_proj = la.noncon_projector(initial_state, Z_indices, num_qubits)
        new_ham_rot_noncon, new_ham_rot_context = mol_rot.move_generator(removed_generators)
        new_ham_unrot_noncon, new_ham_unrot_context = mol_unrot.move_generator(removed_generators)
        new_ham_rot_noncon_q = qonvert.dict_to_WeightedPauliOperator(new_ham_rot_noncon)
        new_ham_unrot_noncon_q = qonvert.dict_to_WeightedPauliOperator(new_ham_unrot_noncon)
        ham_rot_mat = new_ham_rot_noncon_q.to_matrix()
        ham_unrot_mat = new_ham_unrot_noncon_q.to_matrix()
        gs_true.append(get_ground_state(ham_rot_mat)[0])
        # project the Hamiltonians into the +1-eigenspace of A: P H P^H
        ham_rot_proj = eig_rot_mat*ham_rot_mat*eig_rot_mat.H
        ham_unrot_proj = eig_unrot_mat*ham_unrot_mat*eig_unrot_mat.H
        gs_rot_proj.append(get_ground_state(ham_rot_proj)[0])
        gs_unrot_proj.append(get_ground_state(ham_unrot_proj)[0])
    nc_mols[speciesname] = {'num_qubits':num_qubits,
                            'gs_true':gs_true,
                            'gs_rot_proj':gs_rot_proj,
                            'gs_unrot_proj':gs_unrot_proj}
nc_mols
# Drop the one-atom outlier and plot, per molecule, exact vs
# +1-eigenspace-restricted noncontextual ground-state energies on a
# factor_int-sized subplot grid.
nc_mols.pop('Ne1_STO-3G_singlet')
factors = la.factor_int(len(nc_mols))
print(factors)
if factors[0] == 1:
    # single row of axes: grid positions are plain column indices
    grid_pos = range(factors[1])
else:
    grid_pos = list(itertools.product(range(factors[0]), range(factors[1])))
fig, axs = plt.subplots(nrows = factors[0], ncols = factors[1], figsize = (6*factors[1],6*factors[0]))
for index, h in enumerate(nc_mols.keys()):
    grid = grid_pos[index]
    results = nc_mols[h]
    num_qubits = results['num_qubits']
    X=list(range(1, num_qubits+1))
    Y1=results['gs_true']
    Y2=results['gs_rot_proj']
    Y3=results['gs_unrot_proj']
    axs[grid].set_title(str(h))
    l1 = axs[grid].plot(X, Y1, label='Exact noncontextual gs')
    l2 = axs[grid].plot(X, Y2, label='Restriction to +1-eigenspace rotated')
    l3 = axs[grid].plot(X, Y3, label='Restriction to +1-eigenspace unrotated')
    axs[grid].set_xticks(X)
    # axis labels only on the outer edge of the grid
    if factors[0] != 1:
        if grid[0] == 1:
            axs[grid].set_xlabel('Number of generators removed',fontsize=16)
        if grid[1] == 0:
            axs[grid].set_ylabel('Energy (Ha)',fontsize=18)
    else:
        axs[grid].set_xlabel('Number of generators removed',fontsize=16)
        if grid == 0:
            axs[grid].set_ylabel('Energy (Ha)',fontsize=18)
# one shared legend taken from the last axes drawn
handles, labels = axs[grid].get_legend_handles_labels()
fig.legend(handles,
           labels,
           loc="lower center",
           borderaxespad=0.1,
           ncol=2)
fig.suptitle('Noncontextual ground state energy - restriction to +1-eigenspace versus exact', fontsize=16)
#fig.savefig("plots/nc_+1_restriction_versus_exact", dpi=300)
# For each molecule: diagonalise each reduced contextual Hamiltonian
# exactly, then project its ground state into the +1-eigenspace of the
# (reduced) observable A and take the energy expectation there.
proj_energy = {}
for speciesname in small_ham_keys:
    num_qubits = hamiltonians[speciesname][1] # number of qubits (all of these Hamiltonians have been tapered for molecular symmetries)
    hamiltonian = hamiltonians[speciesname][2] # full Hamiltonian
    terms_noncon = list(hamiltonians[speciesname][3].keys()) # noncontextual part of Hamiltonian, found by greedy DFS
    mol = cs_circ.cs_vqe_circuit(hamiltonian, terms_noncon, num_qubits, list(range(num_qubits)))
    A = mol.A
    print(A)
    qubit_nums = range(1, num_qubits+1)
    gs_true = []
    gs_proj = []
    for n_q in qubit_nums:
        ham_red = mol.ham_reduced[n_q-1]
        ham_red_q = qonvert.dict_to_QubitOperator(ham_red)
        gs_red = get_ground_state(get_sparse_operator(ham_red_q, n_q).toarray())
        gs_true.append(gs_red[0])
        A_red = mol.reduce_anz_terms(A, n_q)
        eig_proj = la.eigenstate_projector(A_red, n_q)
        psi = gs_red[1]
        psi_proj = la.apply_projections(psi, [eig_proj])
        expct_proj = la.expectation(ham_red, psi_proj, n_q)
        gs_proj.append(expct_proj)
    proj_energy[speciesname] = {'qubit_nums':qubit_nums,
                                'gs_true':gs_true,
                                'gs_proj':gs_proj}
# Grid of per-molecule plots: exact contextual-subspace ground-state
# energy vs the energy after projection into the +1-eigenspace of A.
factors = la.factor_int(len(proj_energy))
if factors[0] == 1:
    grid_pos = range(factors[1])
else:
    grid_pos = list(itertools.product(range(factors[0]), range(factors[1])))
fig, axs = plt.subplots(nrows = factors[0], ncols = factors[1], figsize = (6*factors[1],6*factors[0]))
for index, speciesname in enumerate(small_ham_keys):
    grid = grid_pos[index]
    mol_results = proj_energy[speciesname]
    X = mol_results['qubit_nums']
    Y1 = mol_results['gs_true']
    Y2 = mol_results['gs_proj']
    # fixed-decimal ticks; energies differ in the 3rd decimal place
    axs[grid].get_yaxis().get_major_formatter().set_useOffset(False)
    axs[grid].yaxis.set_major_formatter(FormatStrFormatter('%.3f'))
    axs[grid].set_title(str(speciesname))
    l1 = axs[grid].plot(X, Y1, label='Exact ground state energy')
    l2 = axs[grid].plot(X, Y2, label='Energy after projection into +1-eigenspace of A')
    axs[grid].set_xticks(X)
    if factors[0] != 1:
        if grid[0] == 2:
            axs[grid].set_xlabel('Number of qubits simulated',fontsize=16)
        if grid[1] == 0:
            axs[grid].set_ylabel('Energy (Ha)',fontsize=18)
    else:
        axs[grid].set_xlabel('Number of generators removed',fontsize=16)
        if grid == 0:
            axs[grid].set_ylabel('Energy (Ha)',fontsize=18)
# shared legend from the last axes drawn
handles, labels = axs[grid].get_legend_handles_labels()
fig.legend(handles,
           labels,
           loc="lower center",
           borderaxespad=0.1,
           ncol=2)
fig.suptitle('Contextual subspace ground state energy - projection into +1-eigenspace versus exact', fontsize=16)
```
# With CS-VQE reduced Hamiltonians
```
# For each small molecule, build rotated / unrotated CS-VQE circuits and,
# for every number of simulated qubits, compare the exact reduced
# ground-state energy with the energy after projecting onto the
# +1-eigenspace of A — with and without the A rotations.
proj_energy = {}
for speciesname in small_ham_keys:
    num_qubits = hamiltonians[speciesname][1] # number of qubits (all of these Hamiltonians have been tapered for molecular symmetries)
    hamiltonian = hamiltonians[speciesname][2] # full Hamiltonian
    terms_noncon = list(hamiltonians[speciesname][3].keys()) # noncontextual part of Hamiltonian, found by greedy DFS
    order = list(range(num_qubits))
    mol_rot = cs_circ.cs_vqe_circuit(hamiltonian, terms_noncon, num_qubits, num_electrons=2, order=order, rot_A=True)
    mol_unrot = cs_circ.cs_vqe_circuit(hamiltonian, terms_noncon, num_qubits, num_electrons=2, order=order, rot_A=False)
    A_rot = mol_rot.A
    A_unrot = mol_unrot.A
    print(speciesname)
    print('Unrotated:')
    print(mol_unrot.G)
    print(A_unrot, mol_unrot.order)  # fixed: printed mol_rot.order under the 'Unrotated' header
    print('Rotated:')
    print(mol_rot.G)
    print(A_rot, mol_rot.order)  # fixed: printed mol_unrot.order under the 'Rotated' header
    print('----------------------------')
    qubit_nums = range(1, num_qubits+1)
    gs_rot_true = []
    gs_unrot_true = []
    gs_rot_proj = []
    gs_unrot_proj = []
    for n_q in qubit_nums:
        # exact ground state of the reduced Hamiltonian (rotated ansatz)
        ham_red_rot = mol_rot.ham_reduced[n_q-1]
        ham_red_rot_q = qonvert.dict_to_WeightedPauliOperator(ham_red_rot)
        ham_rot_mat = ham_red_rot_q.to_matrix()
        gs_rot_true.append(get_ground_state(ham_rot_mat)[0])
        # fixed: the original read mol_rot.ham_reduced here, which made the
        # 'unrotated' branch silently identical to the rotated one
        ham_red_unrot = mol_unrot.ham_reduced[n_q-1]
        ham_red_unrot_q = qonvert.dict_to_WeightedPauliOperator(ham_red_unrot)
        ham_unrot_mat = ham_red_unrot_q.to_matrix()
        gs_unrot_true.append(get_ground_state(ham_unrot_mat)[0])
        A_rot_red = mol_rot.reduce_anz_terms(A_rot, n_q)
        A_unrot_red = mol_unrot.reduce_anz_terms(A_unrot, n_q)
        eig_rot_mat = np.matrix(la.eigenstate_projector(A_rot_red, n_q))
        eig_unrot_mat = np.matrix(la.eigenstate_projector(A_unrot_red, n_q))
        # project each Hamiltonian into the +1-eigenspace: P H P^H
        # (fixed: the rotated line was missing the conjugate transpose .H,
        # unlike its unrotated twin on the next line)
        ham_rot_proj = eig_rot_mat*ham_rot_mat*eig_rot_mat.H
        ham_unrot_proj = eig_unrot_mat*ham_unrot_mat*eig_unrot_mat.H
        gs_rot_proj.append(get_ground_state(ham_rot_proj)[0])
        gs_unrot_proj.append(get_ground_state(ham_unrot_proj)[0])
    proj_energy[speciesname] = {'qubit_nums':list(qubit_nums),
                                'gs_rot_true':gs_rot_true,
                                'gs_unrot_true':gs_unrot_true,
                                'gs_rot_proj':gs_rot_proj,
                                'gs_unrot_proj':gs_unrot_proj,
                                'diff_rot':[a-b for a, b in zip(gs_rot_proj, gs_rot_true)],
                                'diff_unrot':[a-b for a, b in zip(gs_unrot_proj, gs_unrot_true)]}
# Grid of per-molecule plots of the projection error (projected minus
# exact energy), comparing the rotated and unrotated A variants.
factors = la.factor_int(len(proj_energy))
if factors[0] == 1:
    grid_pos = range(factors[1])
else:
    grid_pos = list(itertools.product(range(factors[0]), range(factors[1])))
fig, axs = plt.subplots(nrows = factors[0], ncols = factors[1], figsize = (6*factors[1],4*factors[0]))
for index, speciesname in enumerate(small_ham_keys):
    grid = grid_pos[index]
    mol_results = proj_energy[speciesname]
    X = mol_results['qubit_nums']
    Y1 = mol_results['diff_rot']
    Y2 = mol_results['diff_unrot']
    axs[grid].get_yaxis().get_major_formatter().set_useOffset(False)
    axs[grid].yaxis.set_major_formatter(FormatStrFormatter('%.3f'))
    axs[grid].set_title(str(speciesname))
    l1 = axs[grid].plot(X, Y1, label='with A rotations')
    l2 = axs[grid].plot(X, Y2, label='without A rotations')
    axs[grid].set_xticks(X)
    if factors[0] != 1:
        if grid[0] == 2:
            axs[grid].set_xlabel('Number of qubits simulated',fontsize=16)
        if grid[1] == 0:
            axs[grid].set_ylabel('Energy difference (Ha)',fontsize=14)
    else:
        axs[grid].set_xlabel('Number of generators removed',fontsize=16)
        if grid == 0:
            axs[grid].set_ylabel('Energy (Ha)',fontsize=18)
# shared legend from the last axes drawn
handles, labels = axs[grid].get_legend_handles_labels()
fig.legend(handles,
           labels,
           fontsize=15,
           loc='lower center',bbox_to_anchor=(0.5, 0.02),
           fancybox=True, shadow=True, ncol=2)
fig.suptitle('Contextual subspace ground state energy - difference between unrestricted and after projection onto +1-eigenspace of A',
             fontsize=20,
             y=0.95)
fig.savefig("plots/diff_gs_exact_versus_+1_restriction", dpi=300)
```
# Using generator removal tool
```
# For each small molecule, remove the noncontextual generators one at a
# time and compare exact vs +1-eigenspace-projected ground-state energies
# of the resulting noncontextual / contextual Hamiltonian parts, plus the
# combined CS-VQE reduced-Hamiltonian energy.
proj_energy = {}
for speciesname in small_ham_keys:
    num_qubits = hamiltonians[speciesname][1] # number of qubits (all of these Hamiltonians have been tapered for molecular symmetries)
    hamiltonian = hamiltonians[speciesname][2] # full Hamiltonian
    terms_noncon = list(hamiltonians[speciesname][3].keys()) # noncontextual part of Hamiltonian, found by greedy DFS
    mol = c.cs_vqe(hamiltonian, terms_noncon, num_qubits)#, rot_A=True)
    A = mol.generators()[1]
    G = mol.generators()[0]
    print(G, A)
    qubit_nums = range(0, num_qubits)
    gs_noncon_true = []
    gs_context_true = []
    gs_noncon_proj = []
    gs_context_proj = []
    combined_cs_vqe = []
    for n_q in qubit_nums:
        order = list(range(num_qubits))
        order.reverse()
        ham_red = mol.reduced_hamiltonian(order=order, num_sim_q=n_q+1)
        ham_mat = qonvert.dict_to_WeightedPauliOperator(ham_red).to_matrix()
        exact_combined = get_ground_state(ham_mat)[0]
        combined_cs_vqe.append(exact_combined)
        # move the last n_q generators (in reverse) out of the
        # noncontextual part
        removed_index = list(range(n_q))
        removed_index.reverse()
        removed_generators = [list(G.keys())[i] for i in removed_index]
        Z_indices = [g.find('Z') for g in removed_generators]
        print('removed:', removed_generators)
        #eig_ind = bit.unconstrain(initial_state, Z_indices)
        new_ham_noncon, new_ham_context = mol.move_generator(rem_gen=removed_generators)
        new_ham_noncon_q = qonvert.dict_to_WeightedPauliOperator(new_ham_noncon)
        new_ham_context_q = qonvert.dict_to_WeightedPauliOperator(new_ham_context)
        ham_noncon_mat = new_ham_noncon_q.to_matrix()
        ham_context_mat = new_ham_context_q.to_matrix()
        gs_noncon_true.append(get_ground_state(ham_noncon_mat)[0])
        gs_context_true.append(get_ground_state(ham_context_mat)[0])
        #A_red = mol.reduce_anz_terms(A, n_q)
        eig_mat = np.matrix(la.eigenstate_projector(A, num_qubits))
        # NOTE(review): other cells project as P*H*P.H; here P appears on
        # both sides without .H — equivalent only if the projector is
        # Hermitian. Confirm against la.eigenstate_projector.
        ham_noncon_proj = eig_mat*ham_noncon_mat*eig_mat
        ham_context_proj = eig_mat*ham_context_mat*eig_mat
        gs_noncon_proj.append(get_ground_state(ham_noncon_proj)[0])
        gs_context_proj.append(get_ground_state(ham_context_proj)[0])
    proj_energy[speciesname] = {'qubit_nums':list(qubit_nums),
                                'gs_noncon_true':gs_noncon_true,
                                'gs_noncon_proj':gs_noncon_proj,
                                'gs_context_true':gs_context_true,  # fixed: was gs_noncon_true (copy-paste)
                                'gs_context_proj':gs_context_proj,
                                'diff_context':[a-b for a, b in zip(gs_context_proj, gs_context_true)],
                                'diff_noncon':[a-b for a, b in zip(gs_noncon_proj, gs_noncon_true)],
                                'combined_cs_vqe':combined_cs_vqe,
                                'combined_proj':[a+b for a, b in zip(gs_noncon_proj, gs_context_proj)],
                                'combined_true':[a+b for a, b in zip(gs_noncon_true, gs_context_true)]}
# Factor the number of species into a near-square subplot grid.
factors = la.factor_int(len(proj_energy))
# A single-row grid is addressed with a flat index; otherwise with (row, col) pairs.
grid_pos = (range(factors[1])
            if factors[0] == 1
            else list(itertools.product(range(factors[0]), range(factors[1]))))
# One subplot per species, comparing CS-VQE, exact, and projected energies.
fig, axs = plt.subplots(nrows = factors[0], ncols = factors[1], figsize = (6*factors[1],6*factors[0]))
for index, speciesname in enumerate(small_ham_keys):
    grid = grid_pos[index]
    mol_results = proj_energy[speciesname]
    X = mol_results['qubit_nums']
    #Y1 = mol_results['gs_noncon_true']
    #Y2 = mol_results['gs_noncon_proj']
    #Y3 = mol_results['gs_context_true']
    #Y4 = mol_results['gs_context_proj']
    Y5 = mol_results['diff_noncon']
    Y6 = mol_results['diff_context']
    Y7 = mol_results['combined_cs_vqe']
    Y8 = mol_results['combined_true']
    Y9 = mol_results['combined_proj']
    # Fixed-point tick labels so tiny energy differences remain readable.
    axs[grid].get_yaxis().get_major_formatter().set_useOffset(False)
    axs[grid].yaxis.set_major_formatter(FormatStrFormatter('%.3f'))
    axs[grid].set_title(str(speciesname))
    #l1 = axs[grid].plot(X, Y1, label='Exact noncontextual ground state energy')
    #l2 = axs[grid].plot(X, Y2, label='Noncontextual energy after projection into +1-eigenspace of A')
    #l3 = axs[grid].plot(X, Y3, label='Exact contextual ground state energy')
    #l4 = axs[grid].plot(X, Y4, label='Contextual energy after projection into +1-eigenspace of A')
    #l5 = axs[grid].plot(X, Y5, label='Difference noncontextual')
    #l6 = axs[grid].plot(X, Y6, label='Difference contextual')
    l7 = axs[grid].plot(X, Y7, label='GS energy from CS-VQE code')
    l8 = axs[grid].plot(X, Y8, label='Combined exact')
    l9 = axs[grid].plot(X, Y9, label='Combined proj')
    axs[grid].set_xticks(X)
    # Only label the outer edge of the grid (bottom row / first column).
    if factors[0] != 1:
        if grid[0] == 2:
            axs[grid].set_xlabel('Number of qubits simulated',fontsize=16)
        if grid[1] == 0:
            axs[grid].set_ylabel('Energy (Ha)',fontsize=18)
    else:
        axs[grid].set_xlabel('Number of generators removed',fontsize=16)
        if grid == 0:
            axs[grid].set_ylabel('Energy (Ha)',fontsize=18)
# Use the handles of the last subplot for a single shared legend.
handles, labels = axs[grid].get_legend_handles_labels()
fig.legend(handles,
labels,
loc="lower center",
borderaxespad=0.1,
ncol=2)
fig.suptitle('Contextual subspace ground state energy - projection into +1-eigenspace versus exact', fontsize=16)
# Notebook inspection cells: show one stored Hamiltonian entry and the species keys.
hamiltonians['H3_STO-3G_singlet_1+']
small_ham_keys
```
| github_jupyter |
# Procedure e processi
Una procedura definisce uno "schema" di evoluzione *locale* di un processo computazionale. La procedura specifica in ogni fase del processo, come il processo stesso viene eseguito a partire dalla fase precedente. È interessante studiare il comportamento *globale* di un processo la cui evoluzione locale è stata specificata da una procedura. In generale, andare a studiare questo comportamento è molto complicato, ma in alcuni casi semplici si possono identificare degli schemi ricorrenti (in inglese **pattern**).
Proviamo ora a studiare la "forma" che prendono alcuni processi generati da procedure molto semplici. Analizzeremo anche brevemente la velocità con cui questi processi consumano le due risorse di calcolo fondamentali:
1. IL TEMPO
2. LA MEMORIA
Come nelle lezioni precedenti, le procedure che consideriamo sono molto semplici. Il loro ruolo è lo stesso ricoperto dai *test patterns* in [fotografia](http://www.bealecorner.org/red/test-patterns/): uno schema prototipale ipersemplificato, piuttosto che un caso d'uso reale.
## Ricorsione lineare e iterazione lineare
Si consideri la funzione fattoriale definita come segue: si definisce il *fattoriale* di un numero naturale $n$, indicato con $n!$, il prodotto dei numeri naturali uguali o minori a $n$, ovvero:
$$n! = \prod_{k=1}^n k = 1 \cdot 2 \cdot 3 \cdot \cdot \cdot (n-2) \cdot (n-1) \cdot n$$
Esistono molti modi di calcolare il fattoriale di un numero naturale. Un modo è di osservare che $n!$ è uguale ad $n$ volte $(n-1)!$ per qualsiasi intero $n$:
$$ n! = n \cdot [(n-1)\cdot(n-2)\cdots 3 \cdot 2 \cdot 1] = n \cdot (n-1)!$$
In questo modo possiamo calcolare $n!$ calcolando prima $(n-1)!$ e moltiplicando poi il risultato per $n$. Se aggiungiamo la definizione che $1!$ è uguale a 1, possiamo definire la procedura seguente:
```
def Fattoriale(n):
    """Return n! using the recursive rule n! = n * (n-1)!, with base case 1! = 1."""
    # Conditional-expression form of the same recursive definition.
    return 1 if n == 1 else n * Fattoriale(n - 1)

Fattoriale(5)
```
Se usiamo il *substitution model* per valutare la procedure per $5!$, otteniamo lo schema seguente:
```
Fattoriale(5)
5 * Fattoriale(4)
5 * (4 * Fattoriale(3))
5 * (4 * (3 * Fattoriale(2)))
5 * (4 * (3 * (2 * Fattoriale(1))))
5 * (4 * (3 * (2 * 1)))
5 * (4 * (3 * 2))
5 * (4 * 6)
5 * 24
120
```
Ora, questa procedura è corretta, in quanto produce il risultato richiesto (si può dimostrare formalmente la correttezza, ma non è lo scopo di questo capitolo).
Possiamo tuttavia calcolare il fattoriale di un numero usando un approccio diverso. Potremmo descrivere una regola per calcolare il fattoriale di un numero naturale $n$ specificando che prima moltiplichiamo 1 per 2, poi moltiplichiamo il risultato per 3, poi il risultato per 4, e cosi via, sino a raggiungere il valore di $n$.
Più formalmente, manteniamo una variabile per il prodotto corrente (chiamata in gergo un **accumulator**), insieme a una variabile che "tiene il conto" da 1 a $n$ (un **counter**). Possiamo descrivere il processo di calcolo dicendo che l'accumulator e il counter aggiornano il loro stato "contemporaneamente" ad ogni fase del processo di calcolo:
```
accumulator <- counter * accumulator
counter <- counter + 1
```
Chiaramente, sia l'accumulator che il counter sono inizializzati a 1.
Data questa regola per calcolare $n!$, possiamo scrivere le due procedure seguenti:
```
def FactorialIter(accumulator, counter, n):
    """Tail-recursive step: fold the counter into the accumulator until it passes n."""
    # Guard clause: once the counter exceeds n, the running product is complete.
    if counter > n:
        return accumulator
    return FactorialIter(counter * accumulator, counter + 1, n)

def FactorialI(n):
    """Return n! by driving the tail-recursive iteration from accumulator=1, counter=1."""
    return FactorialIter(1, 1, n)

FactorialI(5)
```
Come prima, se usiamo il *substitution model* possiamo visualizzare il processo di calcolare $5!$ usando la procedura `FactorialI`, come segue:
```
FactorialI(5)
FactorialIter(1, 1, 5)
FactorialIter(1, 2, 5)
FactorialIter(2, 3, 5)
FactorialIter(6, 4, 5)
FactorialIter(24, 5, 5)
FactorialIter(120, 6, 5)
120
```
Si provi a confrontare questo schema di calcolo rispetto al precedente.
A prima vista non sembrano troppo diversi: entrambi calcolano $5!$. Entrambe le procedure richiedono un numero di passi proporzionale al numero $n$. Entrambe le procedure eseguono la stessa sequenza di prodotti, ottenendo gli stessi prodotti parziali. Tuttavia, visualizzando l'evoluzione del processo di calcolo, otteniamo chiaramente una "forma" diversa per i due processi.
Si consideri il primo processo. Procedendo con il modello sostitutivo, otteniamo prima una sequenza di espansioni, seguita da una sequenza di contrazioni. Le espansioni si ottengono mentre il processo costruisce una sequenza di operazioni **deferred** (in questo caso la sequenza di moltiplicazioni). Le contrazioni avvengono quando le operazioni **deferred** sono effettivamente calcolate.
Questo tipo di processo, caratterizzato da una sequenza di operazioni deffered, viene chiamato **PROCESSO RICORSIVO**.
Questo processo richiede che l'interprete tenga memoria delle operazioni che deve eseguire dopo, durante la sequenza di contrazioni. Nel caso del calcolo di $n!$, la lunghezza della sequenza di operazioni deferred, e quindi la quantità di informazioni da mantenere in memoria, è lineare nell'input $n$. Si parla quindi di **PROCESSO RICORSIVO LINEARE**.
Al contrario, il secondo processo, quello della procedura `FactorialI(n)` non si espande e non si contrae. Ad ogni passo, gli unici valori di cui dobbiamo tener traccia sono le tre variabili `accumulator`, `counter`, e `n`. Questo viene chiamato un **PROCESSO ITERATIVO**. In generale, un processo iterativo può essere descritto da un numero finito di *variabili di stato*, insieme con una regola che descrive come le variabili di stato dovrebbero essere aggiornate quando il processo passa da uno stato all'altro, e dovrebbe avere un test d'arresto (i.e., un predicato condizionale `if`) che specifica sotto quali condizioni il processo dovrebbe terminare. Nel calcolare $n!$, il numero di passi richiesto cresce linearmente con $n$. Tale processo viene chiamato un **PROCESSO ITERATIVO LINEARE** (in inglese tali funzioni vengono chiamate **tail-recursive**).
### IMPORTANTE
Nel confrontare i due tipi di processi, si deve fare attenzione a non confondere il concetto di *processo ricorsivo* con quello di *procedura ricorsiva*.
Quando descriviamo una procedura come ricorsiva, ci stiamo riferendo al fatto di definire una funzione che richiama se stessa.
Tuttavia, quando descriviamo un processo che segue uno schema linearmente ricorsivo, stiamo parlando di come il processo evolve, non della sintassi che viene usata per scrivere la procedura. Può creare confusione il dire che una procedura ricorsiva, come ad esempio `FactorialI(n)`, genera un processo iterativo. In ogni caso il processo è iterativo: il suo stato è descritto completamente dalle sue tre variabili di stato, e l'interprete deve tener traccia solo di quelle tre variabili per poter eseguire il processo.
Il motivo principale che genera questa confusione è che molti linguaggi di programmazione comuni (C, Java, e lo stesso Python) sono implementati in modo tale che l'esecuzione di ogni chiamata ricorsiva consuma una quantità di memoria che cresce linearmente con il numero di chiamate ricorsive alla stessa procedura, anche se il processo stesso è iterativo.
L'implementazione di altri linguaggi di programmazione (per esempio, il LISP e Haskell) è in grado di distinguere i processi ricorsivi e i processi iterativi, che risultano in grado di ottimizzare il processo di calcolo usando una quantità di memoria che dipende solo dal numero di variabili di stato.
## Ricorsione ad albero
Un altro schema ricorrente di calcolo è la ricorsione ad albero. Si consideri per esempio la successione dei numeri di Fibonacci, in cui ogni numero è la somma dei due numeri precedenti:
$$0, 1, 1, 2, 3, 5, 8, 13, 21, ...$$

In generale, l'ennessimo numero di Fibonacci può essere definito dalla regola seguente:
$$Fib(n) = \left\{ \begin{array}{ll}
0 & \mbox{if } n = 0 \\
1 & \mbox{if } n = 1 \\
Fib(n-1) + Fib(n-2) & \mbox{altrimenti} \end{array} \right.$$
**ESERCIZIO 4.1**: Tradurre questa definizione in una procedura ricorsiva per calcolare l'ennesimo numero di Fibonacci.
```
# Solution: direct translation of the tree-recursive definition of Fib(n).
def Fib(n):
    """Return the n-th Fibonacci number (Fib(0)=0, Fib(1)=1)."""
    # The two base cases collapse to returning n itself.
    return n if n <= 1 else Fib(n - 1) + Fib(n - 2)

print(Fib(6))
```
Si consideri lo schema di calcolo di tale funzione (FARE ALLA LAVAGNA). Si noti che il processo stesso evolve come un albero: ad ogni nodo il processo si ramifica in due sottoprocessi, tranne che ai nodi foglia. Questo è dovuto al fatto che la procedura chiama se stessa due volte per ogni invocazione.
Questa procedura per calcolare i numeri di Fibonacci è un ottimo esempio di struttura ad albero, ma è un pessimo modo di calcolare i numeri di Fibonacci. Per rendersi conto di quanto sia inefficiente, si consideri che questo processo usa un numero di passi che cresce esponenzialmente con l'input. Tuttavia, lo spazio richiesto cresce solo linearmente con l'input, in quanto dobbiamo tener traccia dei nodi al di sotto del nodo corrente a qualsiasi momento del processo.
In generale, il numero di passi richiesto da un processo ricorsivo ad albero sarà proporzionale al numero di nodi dell'albero, mentre lo spazio richiesto sarà proporzionale alla profondità massima dell'albero.
Il calcolo dei numeri di Fibonacci può essere formulato anche come un processo iterativo. L'idea è di usare una coppia di numeri $a$ e $b$, inizializzati con $Fib(1)=1$ e $Fib(0)=0$, e di applicare ripetutamente le **trasformazioni simultanee**:
```
a <- a + b
b <- a
```
Non dovrebbe essere complicato realizzare che dopo aver applicato queste trasformazioni $n$ volte, $a$ e $b$ avranno i valori $Fib(n+1)$ e $Fib(n)$.
**ESERCIZIO 4.2**: Scrivere una procedura che calcoli i numeri di Fibonacci usando un processo iterativo.
```
# DA COMPLETARE
```
| github_jupyter |
```
import re
import numpy as np
from pyspark.sql import functions as F
from pyspark import SparkContext
from pyspark.sql import SparkSession
# Single-machine Spark context and session for this notebook.
sc = SparkContext("local")
spark = SparkSession.builder.getOrCreate()
```
# You need to load twitter data
```
# Load the tweet corpus (expects a 'text' column); infer column types from the data.
df = spark.read.csv("data\\tweets.csv",header=True,inferSchema=True )
# Remove the special chars. Only letters will remain.
df = df.withColumn("text_c", F.regexp_replace(F.col("text"), "[^a-zA-Z ]", ""));
df.show(20)
from pyspark.ml.feature import Tokenizer, CountVectorizer
from pyspark.ml.feature import StopWordsRemover
from pyspark.ml import Pipeline
from pyspark.ml.clustering import LDA
# Text preprocessing pipeline: tokenize -> remove stopwords -> bag-of-words counts.
tokenizer = Tokenizer(inputCol="text_c", outputCol="words")
remover = StopWordsRemover(inputCol=tokenizer.getOutputCol(), outputCol="filtered")
# countVectorizer = CountVectorizer(inputCol=remover.getOutputCol(), outputCol="features", vocabSize=500)
# minDF/maxDF drop terms that appear in too few or too many documents.
countVectorizer = CountVectorizer(inputCol=remover.getOutputCol(), outputCol="features", vocabSize=500,minDF=10, maxDF=20000)
pipeline = Pipeline(stages=[tokenizer,remover, countVectorizer])
data_model = pipeline.fit(df)
# stages[2] is the fitted CountVectorizer; vocabulary maps term index -> word.
vocabulary = data_model.stages[2].vocabulary
print(vocabulary[:100])
dataset = data_model.transform(df)
dataset.show(5)
# Fit a 5-topic LDA model on the term-count features.
lda = LDA(k=5, maxIter=10)
model = lda.fit(dataset)
model.topicsMatrix()
# Describe topics.
topics = model.describeTopics(3)
print("The topics described by their top-weighted terms:")
topics.show(truncate=False)
# Print most important topic per category
topics = model.describeTopics(10)
for r in topics.select("termIndices").collect():
    rez = []
    for l in r:
        for i in l:
            rez.append(vocabulary[i])
    print(rez[:10])
# Manually chosen labels for the five topics (last one left blank).
topic_name = ["peoples","children","vaccinated","fatal",""]
# Shows the result
transformed = model.transform(dataset)
transformed.select("text_c","topicDistribution").show(5)
from pyspark.sql.functions import udf
# UDF: index of the most probable topic in each row's distribution vector.
# NOTE(review): @udf defaults to StringType, so the returned int is stored as a
# string column — confirm downstream consumers expect that.
@udf
def vect_argmax(row):
    row_arr = row.toArray()
    max_pos = np.argmax(row_arr)
    return(int(max_pos))
transformed1 = transformed.withColumn("argmax",vect_argmax(F.col('topicDistribution')))
transformed1.select("text_c","argmax").show(5, truncate=False)
# Code for preprocessing the tweets. Not in Spark.
# import pandas as pd
# dataset = pd.read_csv("data\\en_tweets_with_phenotype_counts_new.csv")
# import preprocessor as p
# tw = []
# for s in df["content"].head(200000):
#     tw.append(p.clean(s))
# dft = pd.DataFrame(columns=["text"])
# dft["text"] = tw
# dft.to_csv("data\\tweets.csv",index=False)
```
| github_jupyter |
# Image recognition android application project
- Flower image classification with Resnet (20200823)
- 4685 training set with 5 class, 50 iterations, 32 batch
### Reference
- [Advanced Computer Vision with TensorFlow], https://stephan-osterburg.gitbook.io/coding/coding/ml-dl/tensorfow
## 1. Import Packages
```
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import tarfile
import glob
from six.moves import urllib
from glob import glob
import random
import shutil
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers.convolutional import Conv2D, MaxPooling2D
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import numpy as np
# import matplotlib
import matplotlib.pyplot as plt
```
## 2. Load and Explore the Flower Dataset
```
def load_data_files(base_dir):
    """Index the flower dataset under `base_dir`.

    Parameters
    ----------
    base_dir: directory that contains the "dataset/flower_photos" folder.

    Returns
    -------
    dict mapping each class name (sub-directory) to its list of .jpg paths.
    """
    folder_name = "dataset/flower_photos"
    RAW_DATASET = os.path.join(base_dir, folder_name)
    # BUG FIX: the class listing previously used os.path.join(os.getcwd(), folder_name),
    # ignoring base_dir and breaking the function whenever base_dir != cwd.
    sub_dir = os.listdir(RAW_DATASET)
    data_dic = {}
    for class_name in sub_dir:
        imgs = glob(os.path.join(RAW_DATASET,class_name,"*.jpg"))
        data_dic[class_name] = imgs
        print("Class: {}".format(class_name))
        print("# of images: {} \n".format(len(imgs)))
    return data_dic
# Build the class -> image-paths index from the notebook's working directory.
BASE_DIR = os.getcwd()
data_dic = load_data_files(BASE_DIR)
```
## 3. Split train and validation dataset
```
def copy_files_to_directory(files, directory):
    """Copy every path in *files* into *directory*, creating the directory on first use."""
    directory_exists = os.path.exists(directory)
    if not directory_exists:
        os.makedirs(directory)
        print("Created directory: {}".format(directory))
    for source_path in files:
        shutil.copy(source_path, directory)
    print("Copied {} files.\n".format(len(files)))
def train_validation_split(base_dir, data_dic, split_ratio=0.2):
    """Split each class's images into train/validation sets and copy them to disk.

    Parameters
    ----------
    base_dir: directory under which the "flower_dataset" tree is created.
    data_dic: dict mapping class name -> list of image paths (from load_data_files).
    split_ratio: fraction of each class reserved for validation.
    """
    FLOWER_DATASET = os.path.join(base_dir,"flower_dataset")
    if not os.path.exists(FLOWER_DATASET):
        os.makedirs(FLOWER_DATASET)
    for class_name, imgs in data_dic.items():
        # BUG FIX: shuffle a copy so the caller's lists inside data_dic
        # are not reordered as a side effect.
        shuffled = list(imgs)
        random.shuffle(shuffled)
        idx_split = int(len(shuffled) * split_ratio)
        validation = shuffled[:idx_split]
        train = shuffled[idx_split:]
        copy_files_to_directory(train, os.path.join(FLOWER_DATASET,"train",class_name))
        copy_files_to_directory(validation, os.path.join(FLOWER_DATASET,"validation",class_name))

# BASE_DIR = os.getcwd()
# train_validation_split(BASE_DIR, data_dic, split_ratio=0.2)
```
## 4. Image preprocessing
```
# Training hyper-parameters.
batch_size = 32
num_classes = 5
epochs = 50
preprocessing_image = tf.keras.preprocessing.image
# Augment training images (shear/zoom/flip) and rescale pixels to [0, 1].
train_datagen = preprocessing_image.ImageDataGenerator(
    rescale=1./255,
    shear_range=0.1,
    zoom_range=0.1,
    horizontal_flip=True)
# Validation images are only rescaled — no augmentation.
test_datagen = preprocessing_image.ImageDataGenerator(rescale=1./255)
BASE_DIR = os.getcwd()
# Stream images from disk in 32x32 batches with one-hot ('categorical') labels.
train_generator = train_datagen.flow_from_directory(
    os.path.join(BASE_DIR, "flower_dataset/train"),
    target_size=(32, 32),
    batch_size=batch_size,
    class_mode='categorical')
validation_generator = test_datagen.flow_from_directory(
    os.path.join(BASE_DIR, "flower_dataset/validation"),
    target_size=(32, 32),
    batch_size=batch_size,
    class_mode='categorical')
```
## 5. Resnet (Deep Residual Neural Network)
- Pre-activation Bottleneck Residual Block
```
# Short aliases into the tf.keras namespaces used by the model-building code.
models = tf.keras.models
layers = tf.keras.layers
initializers = tf.keras.initializers
regularizers = tf.keras.regularizers
losses = tf.keras.losses
optimizers = tf.keras.optimizers
metrics = tf.keras.metrics

def residual_block(input_tensor, filters, stage, reg=0.0, use_shortcuts=True):
    """Pre-activation bottleneck residual block (BN -> ReLU -> conv, three times).

    Parameters
    ----------
    input_tensor: input feature map.
    filters: [bottleneck, bottleneck, output] channel counts for the 1x1/3x3/1x1 convs.
    stage: integer used to build unique layer names.
    reg: L2 regularisation factor for the conv kernels.
    use_shortcuts: add the identity shortcut connection when True.
    """
    bn_name = 'bn' + str(stage)
    conv_name = 'conv' + str(stage)
    relu_name = 'relu' + str(stage)
    merge_name = 'merge' + str(stage)
    # 1x1 conv
    # batchnorm-relu-conv
    # from input_filters to bottleneck_filters
    if stage>1: # first activation is just after conv1
        x = layers.BatchNormalization(name=bn_name+'a')(input_tensor)
        x = layers.Activation('relu', name=relu_name+'a')(x)
    else:
        x = input_tensor
    x = layers.Convolution2D(
        filters[0], (1,1),
        kernel_regularizer=regularizers.l2(reg),
        use_bias=False,
        name=conv_name+'a'
    )(x)
    # 3x3 conv
    # batchnorm-relu-conv
    # from bottleneck_filters to bottleneck_filters
    x = layers.BatchNormalization(name=bn_name+'b')(x)
    x = layers.Activation('relu', name=relu_name+'b')(x)
    x = layers.Convolution2D(
        filters[1], (3,3),
        padding='same',
        kernel_regularizer=regularizers.l2(reg),
        use_bias = False,
        name=conv_name+'b'
    )(x)
    # 1x1 conv
    # batchnorm-relu-conv
    # from bottleneck_filters to input_filters
    x = layers.BatchNormalization(name=bn_name+'c')(x)
    x = layers.Activation('relu', name=relu_name+'c')(x)
    x = layers.Convolution2D(
        filters[2], (1,1),
        kernel_regularizer=regularizers.l2(reg),
        name=conv_name+'c'
    )(x)
    # merge output with input layer (residual connection)
    if use_shortcuts:
        x = layers.add([x, input_tensor], name=merge_name)
    return x
```
- Full Residual Network
```
def ResNetPreAct(input_shape=(32,32,3), nb_classes=5, num_stages=5,
                 use_final_conv=False, reg=0.0):
    """Build a pre-activation ResNet from stacked bottleneck residual blocks.

    Parameters
    ----------
    input_shape: (H, W, C) of the input images.
    nb_classes: number of softmax output classes.
    num_stages: number of residual blocks to stack.
    use_final_conv: add an extra 3x3 conv before the average pool when True.
    reg: L2 weight-regularisation factor.

    Returns
    -------
    An uncompiled tf.keras Model named 'rnpa'.
    """
    # Input
    img_input = layers.Input(shape=input_shape)
    #### Input stream ####
    # conv-BN-relu-(pool); the stride-2 conv halves the spatial size.
    x = layers.Convolution2D(
        128, (3,3), strides=(2, 2),
        padding='same',
        kernel_regularizer=regularizers.l2(reg),
        use_bias=False,
        name='conv0'
    )(img_input)
    x = layers.BatchNormalization(name='bn0')(x)
    x = layers.Activation('relu', name='relu0')(x)
    # x = layers.MaxPooling2D((3, 3), strides=(2, 2), padding='same', name='pool0')(x)
    #### Residual Blocks ####
    # 1x1 conv: batchnorm-relu-conv
    # 3x3 conv: batchnorm-relu-conv
    # 1x1 conv: batchnorm-relu-conv
    for stage in range(1,num_stages+1):
        x = residual_block(x, [32,32,128], stage=stage, reg=reg)
    #### Output stream ####
    # BN-relu-(conv)-avgPool-softmax
    x = layers.BatchNormalization(name='bnF')(x)
    x = layers.Activation('relu', name='reluF')(x)
    # Optional final conv layer
    if use_final_conv:
        x = layers.Convolution2D(
            64, (3,3),
            padding='same',
            kernel_regularizer=regularizers.l2(reg),
            name='convF'
        )(x)
    # BUG FIX: '/' yields a float in Python 3 (e.g. 16.0), but pooling sizes
    # must be integers — use floor division. conv0 halved the spatial size,
    # so pooling over input_shape[0] // 2 collapses the map to 1x1.
    pool_size = input_shape[0] // 2
    x = layers.AveragePooling2D((pool_size,pool_size),name='avg_pool')(x)
    x = layers.Flatten(name='flat')(x)
    x = layers.Dense(nb_classes, activation='softmax', name='fc10')(x)
    return models.Model(img_input, x, name='rnpa')
```
- Architecture
```
# Instantiate the default network and print its layer-by-layer summary.
model = ResNetPreAct()
model.summary()
```
- Training
```
def compile_model(model):
    """Compile `model` with cross-entropy loss, Adam (lr=1e-4), and top-1/top-k accuracy."""
    loss = losses.categorical_crossentropy
    optimizer = optimizers.Adam(lr=0.0001)
    metric = [metrics.categorical_accuracy, metrics.top_k_categorical_accuracy]
    model.compile(optimizer, loss, metric)
    return model

# Build the 5-class model with L2 regularisation and compile it.
model = ResNetPreAct(input_shape=(32, 32, 3), nb_classes=num_classes, num_stages=5,
                     use_final_conv=False, reg=0.005)
model = compile_model(model)
%%time
# Train for `epochs` epochs from the directory generators (cell timed via %%time).
hist50 = model.fit_generator(
    train_generator,
    steps_per_epoch = 4685//batch_size,
    epochs=epochs,
    validation_data=validation_generator,
    validation_steps=20)
```
## 6. Accuracy and Loss Analysis
```
def plot_accuracy_and_loss(history):
    """Plot train/validation accuracy (left) and loss (right) from a Keras History."""
    plt.figure(1, figsize= (15, 10))
    # plot train and test accuracy
    plt.subplot(221)
    plt.plot(history.history['categorical_accuracy'])
    plt.plot(history.history['val_categorical_accuracy'])
    # BUG FIX: the titles said 'SqueezeNet' (copied from another notebook);
    # this notebook trains a ResNet.
    plt.title('ResNet accuracy')
    plt.ylabel('accuracy')
    plt.xlabel('epoch')
    plt.legend(['train', 'test'], loc='upper left')
    # plot train and test loss
    plt.subplot(222)
    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.title('ResNet loss')
    plt.ylabel('loss')
    plt.xlabel('epoch')
    plt.legend(['train', 'test'], loc='upper right')
    plt.show()

plot_accuracy_and_loss(hist50)
print("-- Evaluate --")
# Accuracy over 5 generator batches each for train and validation.
scores_train = model.evaluate_generator(
    train_generator,
    steps = 5)
scores_val = model.evaluate_generator(
    validation_generator,
    steps = 5)
print("Train %s: %.2f%%" %(model.metrics_names[1], scores_train[1]*100))
print("Val %s: %.2f%%" %(model.metrics_names[1], scores_val[1]*100))
print("-- Predict --")
# Class-probability predictions over the same number of batches.
output_train = model.predict_generator(train_generator, steps=5)
output_val = model.predict_generator(validation_generator, steps=5)
# Print floats with three decimals for readability.
np.set_printoptions(formatter={'float': lambda x: "{0:0.3f}".format(x)})
print(train_generator.class_indices)
print(output_train)
print(validation_generator.class_indices)
print(output_val)
```
## 7. Save weights and model architecture
```
# save model architecture
model_json = model.to_json()
open('4_model.json', 'w').write(model_json)
# save model's learned weights
model.save_weights('4_weights.h5', overwrite=True)
# Load trained model
import tensorflow as tf
from tensorflow.keras.models import model_from_json
from tensorflow.keras.models import load_model
# Rebuild the architecture from the JSON file, then restore the trained weights.
json_file = open("4_model.json", "r")
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
# model weight load
loaded_model.load_weights("4_weights.h5")
print("Loaded model from disk")
```
| github_jupyter |
<a href="https://colab.research.google.com/github/mengwangk/dl-projects/blob/master/equities.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Equities
## Magic and Import
```
%reload_ext autoreload
%autoreload 2
%matplotlib inline
# To support both python 2 and python 3
from __future__ import division, print_function, unicode_literals
# Common imports
import numpy as np
import os
import pandas as pd
# import featuretools as ft
import matplotlib as mpl
import matplotlib.pyplot as plt
import logging
import json
import csv
from pathlib import Path
```
## Setup
```
# Settings for notebook
from IPython.core.interactiveshell import InteractiveShell
# Echo every expression in a cell, not just the last one.
InteractiveShell.ast_node_interactivity = "all"
# Show Python version
import platform
platform.python_version()
# to make this notebook's output stable across runs
np.random.seed(42)
# Larger default font sizes for plots.
mpl.rc('axes', labelsize=14)
mpl.rc('xtick', labelsize=12)
mpl.rc('ytick', labelsize=12)
# Ignore useless warnings (see SciPy issue #5998)
import warnings
warnings.filterwarnings(action="ignore", message="^internal gelsd")
from IPython.display import display
# Wider tables and rich schema output in pandas displays.
pd.options.display.max_columns = 50
pd.options.display.html.table_schema = True
```
## Global variables
```
# Component stocks URL
_URL_COMPONENT_STOCKS = 'https://www.investing.com/indices/ftse-malaysia-klci-components'
# Output locations for the scraped component list.
_DS_PATH = Path('datasets/equities')
_COMPONENT_STOCKS_JSON_FILE = _DS_PATH/'equities.json'
_COMPONENT_STOCKS_CSV_FILE = _DS_PATH/'equities.csv'
# Ensure the output directory exists before any pipeline opens a file in it.
os.makedirs(os.path.dirname(_COMPONENT_STOCKS_JSON_FILE), exist_ok=True)
# Import and install Scrapy
# Fall back to an in-notebook pip install if scrapy is not already available.
try:
    import scrapy
except:
    !pip install scrapy
    import scrapy
import scrapy.crawler as crawler
from scrapy.http import *
from scrapy.selector import Selector
from scrapy.crawler import CrawlerProcess
from multiprocessing import Process, Queue
from twisted.internet import reactor
```
## Scrap Component Stocks
### Helper Functions
```
class JsonWriterPipeline(object):
    """Scrapy pipeline that appends each scraped item as one JSON line."""

    def open_spider(self, spider):
        # Append mode so repeated crawls accumulate rather than overwrite.
        self.file = open(_COMPONENT_STOCKS_JSON_FILE, 'a', encoding='utf-8')

    def close_spider(self, spider):
        self.file.close()

    def process_item(self, item, spider):
        self.file.write(json.dumps(dict(item)) + "\n")
        return item
class CsvWriterPipeline(object):
    """CSV output writer: appends scraped items as `name,link` rows."""

    def open_spider(self, spider):
        # newline='' is required by the csv module so it controls line endings itself.
        self.file = open(_COMPONENT_STOCKS_CSV_FILE, 'a', encoding='utf-8', newline='')
        # BUG FIX: csv.writer was previously given `self` (the pipeline object)
        # instead of the file handle, which raised on the first writerow().
        self.writer = csv.writer(self.file)
        self.writer.writerow(['name', 'link'])

    def close_spider(self, spider):
        self.file.close()

    def process_item(self, item, spider):
        # BUG FIX: a JSON-encoded string was passed to writerow(), which emitted
        # one CSV column per character. Write the two declared columns instead.
        fields = dict(item)
        self.writer.writerow([fields.get('name'), fields.get('link')])
        return item
class ComponentStocksSpider(scrapy.Spider):
    """Spider that scrapes the KLCI component-stocks table and prints each name."""

    name = "component_stocks_spider"
    start_urls = [
        _URL_COMPONENT_STOCKS
    ]
    custom_settings = {
        'LOG_LEVEL': logging.WARNING,
        # 'ITEM_PIPELINES': {'__main__.CsvWriterPipeline': 1}, # Used for pipeline 1
    }

    def start_requests(self):
        """Issue one request per start URL, parsed by `parse`."""
        # self.driver = webdriver.Chrome(self._CHROME_DRIVER)
        for url in self.start_urls:
            yield scrapy.Request(url=url, callback=self.parse)

    def parse(self, response):
        """Extract the components table (element id 'cr1') row by row."""
        selector = Selector(response)
        table = selector.xpath('//*[@id="cr1"]')
        rows = table.xpath(".//tbody/tr")
        for row in rows:
            # The second cell of each row holds the stock's anchor text (its name).
            print(row.xpath('.//td[2]/a/text()').extract())
            # print(row.xpath('.//td[2]/a/@href').extract())
```
### Download Component Stocks
```
# Run the spider with a desktop user agent; start() blocks until the crawl finishes.
process = CrawlerProcess({
    'USER_AGENT': 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)'
})
process.crawl(ComponentStocksSpider)
process.start()
# Hard-coded KLCI component list (30 names) for comparison with the scraped output.
stocks = ['AMMB','Axiata','CIMB Group','Dialog','DiGi.Com','Genting','Genting Malaysia','Hap Seng Consolidated','Hartalega','Hong Leong Bank','Hong Leong Financial','IHH Healthcare','IOI Corp','Kuala Lumpur Kepong','Malayan Banking','Malaysia Airport','Maxis','MISC','Nestle','Petronas Chemicals','Petronas Dagangan','Petronas Gas','PPB','Press Metal Bhd','Public Bank','RHB Bank','Sime Darby','Sime Darby Plantation','Tenaga Nasional','Top Glove Corp']
len(stocks)
```
| github_jupyter |
```
# default_exp learner
```
# Learner
> This contains fastai2 Learner extensions.
```
#export
from fastai2.learner import *
from tsai.imports import *
#export
@patch
def save_all(self:Learner, path='export', dls_fname='dls', model_fname='model', learner_fname='learner'):
    "Save the dataloaders, the model (with optimizer state), and the exported learner under `path`."
    path = Path(path)
    if not os.path.exists(path): os.makedirs(path)
    # Save the dls
    torch.save(self.dls, path/f'{dls_fname}.pth')
    # Saves the model along with optimizer
    self.model_dir = path
    self.save(model_fname)
    # Export learn without the items and the optimizer state for inference
    self.export(path/f'{learner_fname}.pkl')
    print(f'Learner saved:')
    print(f"path = '{path}'")
    print(f"dls_fname = '{dls_fname}'")
    print(f"model_fname = '{model_fname}.pth'")
    print(f"learner_fname = '{learner_fname}.pkl'")

def load_learner_all(path='export', dls_fname='dls', model_fname='model', learner_fname='learner', cpu=True):
    "Counterpart of `save_all`: rebuild a Learner from its exported pickle, weights, and dataloaders."
    path = Path(path)
    learn = load_learner(path/f'{learner_fname}.pkl', cpu=cpu)
    learn.load(f'{model_fname}')
    # Reattach the saved dataloaders so the learner is fully usable again.
    dls = torch.load(path/f'{dls_fname}.pth')
    learn.dls = dls
    return learn
#export
@patch
@delegates(subplots)
def plot_metrics(self: Recorder, nrows=None, ncols=None, figsize=None, **kwargs):
    "Plot the recorded losses and metrics, one subplot per metric."
    metrics = np.stack(self.values)
    names = self.metric_names[1:-1]
    # Train and valid loss share the first axis, hence one fewer subplot than names.
    n = len(names) - 1
    # Derive a near-square grid for whichever of nrows/ncols was not given.
    if nrows is None and ncols is None:
        nrows = int(math.sqrt(n))
        ncols = int(np.ceil(n / nrows))
    elif nrows is None: nrows = int(np.ceil(n / ncols))
    elif ncols is None: ncols = int(np.ceil(n / nrows))
    figsize = figsize or (ncols * 6, nrows * 4)
    fig, axs = subplots(nrows, ncols, figsize=figsize, **kwargs)
    # Hide any surplus axes beyond the n actually used.
    axs = [ax if i < n else ax.set_axis_off() for i, ax in enumerate(axs.flatten())][:n]
    # i == 0 draws train loss on the first axis; subsequent i draw valid values.
    for i, (name, ax) in enumerate(zip(names, [axs[0]] + axs)):
        ax.plot(metrics[:, i], color='#1f77b4' if i == 0 else '#ff7f0e', label='valid' if i > 0 else 'train')
        ax.set_title(name if i > 1 else 'losses')
        ax.legend(loc='best')
    plt.show()
from tsai.data.all import *
from tsai.models.all import *
# End-to-end smoke test: train one epoch on OliveOil, save everything,
# reload it from disk, and plot the recorded metrics.
dsid = 'OliveOil'
X, y, splits = get_UCR_data(dsid, parent_dir='./data/UCR/', verbose=True, on_disk=True, return_split=False)
tfms = [None, [Categorize()]]
dsets = TSDatasets(X, y, tfms=tfms, splits=splits, inplace=True)
dls = TSDataLoaders.from_dsets(dsets.train, dsets.valid, bs=[64, 128])
model = InceptionTime(dls.vars, dls.c)
learn = Learner(dls, model, metrics=accuracy)
learn.fit_one_cycle(1, lr_max=1e-3)
learn.save_all()
del learn
learn = load_learner_all()
learn.recorder.plot_metrics()
#hide
# Regenerate the library scripts from the notebooks and beep when done.
out = create_scripts()
beep(out)
```
| github_jupyter |
```
import numpy as np
```
The OneR algorithm is quite simple but can be quite effective, showing the power of using even basic statistics in many applications.
The algorithm is:
* For each variable
* For each value of the variable
* The prediction based on this variable goes the most frequent class
* Compute the error of this prediction
* Sum the prediction errors for all values of the variable
* Use the variable with the lowest error
```
# Load our dataset
from sklearn.datasets import load_iris
#X, y = np.loadtxt("X_classification.txt"), np.loadtxt("y_classification.txt")
dataset = load_iris()
# Feature matrix and class labels for the iris samples.
X = dataset.data
y = dataset.target
print(dataset.DESCR)
n_samples, n_features = X.shape
```
Our attributes are continuous, while we want categorical features to use OneR. We will perform a *preprocessing* step called discretisation. At this stage, we will perform a simple procedure: compute the mean and determine whether a value is above or below the mean.
```
# Compute the mean for each attribute
attribute_means = X.mean(axis=0)
assert attribute_means.shape == (n_features,)
# Discretise for OneR: 1 if the value is at or above the attribute mean, else 0.
X_d = np.array(X >= attribute_means, dtype='int')
# Now, we split into a training and test set
# BUG FIX: sklearn.cross_validation was deprecated and removed in
# scikit-learn 0.20; train_test_split now lives in sklearn.model_selection.
from sklearn.model_selection import train_test_split
# Set the random state to the same number to get the same results as in the book
random_state = 14
X_train, X_test, y_train, y_test = train_test_split(X_d, y, random_state=random_state)
print("There are {} training samples".format(y_train.shape))
print("There are {} testing samples".format(y_test.shape))
from collections import defaultdict
from operator import itemgetter
def train(X, y_true, feature):
    """Fit OneR predictors for a single feature.

    For every distinct value taken by ``X[:, feature]``, record the most
    frequent class among the matching samples, and accumulate the number of
    samples that rule misclassifies.

    Parameters
    ----------
    X: array [n_samples, n_features]
        The dataset; one row per sample, one column per feature.
    y_true: array [n_samples,]
        Class labels aligned with the rows of X.
    feature: int
        Index of the feature to evaluate. 0 <= feature < n_features.

    Returns
    -------
    predictors: dict
        Maps each observed feature value to the class it predicts.
    total_error: int
        Total number of training samples these rules misclassify.
    """
    # Check that the feature index is valid for this dataset
    n_samples, n_features = X.shape
    assert 0 <= feature < n_features
    predictors = {}
    total_error = 0
    # Build one rule per distinct value this feature takes in the data
    for current_value in set(X[:, feature]):
        prediction, error = train_feature_value(X, y_true, feature, current_value)
        predictors[current_value] = prediction
        total_error += error
    return predictors, total_error
# Compute what our predictors say each sample is based on its value
#y_predicted = np.array([predictors[sample[feature]] for sample in X])
def train_feature_value(X, y_true, feature, value):
    """Return the majority-class prediction (and its error) for one feature value.

    Parameters
    ----------
    X: array [n_samples, n_features]
        The dataset; one row per sample, one column per feature.
    y_true: array [n_samples,]
        Class labels aligned with the rows of X.
    feature: int
        Index of the feature being tested.
    value:
        The feature value to condition on.

    Returns
    -------
    most_frequent_class:
        The most common class among samples with X[:, feature] == value.
    error: int
        Number of matching samples that do NOT belong to that class.
    """
    # Count how often each class occurs among samples with this feature value
    class_counts = defaultdict(int)
    for sample, y in zip(X, y_true):
        if sample[feature] == value:
            class_counts[y] += 1
    # Pick the class with the highest count (highest first; ties broken by
    # insertion order, since Python's sort is stable)
    sorted_class_counts = sorted(class_counts.items(), key=itemgetter(1), reverse=True)
    most_frequent_class = sorted_class_counts[0][0]
    # The error is the number of samples that have this feature value but do
    # not classify as the most frequent class.
    # FIX: removed the unused `n_samples = X.shape[1]` line — shape[1] is the
    # number of features, not samples, and the variable was never used.
    error = sum(class_count for class_value, class_count in class_counts.items()
                if class_value != most_frequent_class)
    return most_frequent_class, error
# Compute all of the predictors
all_predictors = {variable: train(X_train, y_train, variable) for variable in range(X_train.shape[1])}
errors = {variable: error for variable, (mapping, error) in all_predictors.items()}
# Now choose the best and save that as "model"
# Sort by error
best_variable, best_error = sorted(errors.items(), key=itemgetter(1))[0]
print("The best model is based on variable {0} and has error {1:.2f}".format(best_variable, best_error))
# Choose the best model
model = {'variable': best_variable,
'predictor': all_predictors[best_variable][0]}
print(model)
def predict(X_test, model):
    """Apply a trained OneR model to every row of X_test.

    ``model`` is a dict with keys 'variable' (the feature index the model
    uses) and 'predictor' (a mapping from feature value to predicted class).
    Returns a numpy array with one predicted class per sample.
    """
    feature = model['variable']
    lookup = model['predictor']
    predictions = []
    for sample in X_test:
        predictions.append(lookup[int(sample[feature])])
    return np.array(predictions)
y_predicted = predict(X_test, model)
print(y_predicted)
# Compute the accuracy by taking the mean of the amounts that y_predicted is equal to y_test
accuracy = np.mean(y_predicted == y_test) * 100
print("The test accuracy is {:.1f}%".format(accuracy))
from sklearn.metrics import classification_report
print(classification_report(y_test, y_predicted))
```
| github_jupyter |
# Variations of pace across a single narrative
Much of the analysis we've done on the "pace" dataset so far has inquired about historical trends: how does the average pace of narrative vary from 1700 to the present?
But there are also significant variations inside a single narrative. In general, fiction starts by surveying a long time span, and then slows down as the plot proceeds.
But that pattern may *itself* change across historical time. Let's figure out how.
```
# We start by importing various tools we may need later.
import os, sys, csv, math, random
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
%matplotlib inline
from scipy import stats
# Now we're going to load the raw data produced by Mercado, Lee, and Underwood,
# and transform it in three ways:
# 1) First, convert each book to a sequence of 16 numbers describing the
# pace in 16 250-word samples.
# 2) Sort those sequences into four groups: 18c novels, 19c novels, 20c novels,
# and biographies from any period. We will later average each group to produce
# a composite trend.
# 3) Calculate the difference between the pace of the first two and last two segments,
# and the pace of the 12 in the middle. This "ratio" will be saved for each title
# (but only fiction titles).
# Per-group lists of 16-segment pace sequences (one np.array per book)
eighteenth = []
nineteenth = []
twentieth = []
biography = []
# Per-title results, fiction only: bookends-minus-middle pace ratio and date
ratios = dict()
dates = dict()
# Segment indices counted as the "bookends" of each 16-segment book:
# the first two and the last two 250-word samples.
startandend = {0, 1, 14, 15}
with open('segleveldata.csv', encoding = 'utf-8') as f:
    reader = csv.DictReader(f)
    idx = 0
    titleset = set()
    for row in reader:
        title = row['title']
        if title not in titleset:
            # First row of a new book: reset the per-book accumulators.
            idx = 0
            titleset.add(title)
            segs = []
            openandclose = []
            middle = []
        else:
            idx += 1
        time = float(row['time'])
        segs.append(time)
        if idx in startandend:
            openandclose.append(time)
        else:
            middle.append(time)
        if idx == 15:
            # Sixteenth (last) segment of the book: finalize and classify it.
            date = int(row['date'])
            code = row['col']
            if code == 'red':
                # 'red' marks biographies, regardless of publication date.
                biography.append(np.array(segs))
            elif date < 1800:
                eighteenth.append(np.array(segs))
            elif date < 1900:
                nineteenth.append(np.array(segs))
            else:
                twentieth.append(np.array(segs))
            # Difference between the mean pace of the bookend segments and
            # the mean pace of the middle twelve segments.
            bookends = np.mean(openandclose)
            center = np.mean(middle)
            ratio = bookends - center
            if code != 'red':
                # Ratios and dates are recorded for fiction titles only.
                ratios[title] = ratio
                dates[title] = date
```
### Composite trends for each century:
Let's start with the eighteenth century.
```
timebreaks = [-2.81, -1.43, 0.365, 1.75, 3.7, 5.08, 7.650835]
timelabels = ['15 min', 'an hour', '6 hours', 'a day', 'a week', 'a month', 'a year']
the18trend = np.sum(eighteenth, axis = 0) / len(eighteenth)
plt.plot(the18trend)
plt.yticks(timebreaks, timelabels)
plt.show()
```
#### uncertainty
How much of that trend can be relied on? Are the little bumps meaningful? Here's a quick and dirty way to ask the question: let's take five random bootstrap samples and plot them all.
```
for i in range(5):
randomsample = [random.choice(eighteenth) for x in eighteenth]
sampletrend = np.sum(randomsample, axis = 0) / len(randomsample)
plt.plot(sampletrend)
plt.yticks(timebreaks, timelabels)
plt.show()
```
**Quick and dirty answer:** no, the little bumps at the bottom of the curve are not meaningful. Only the overall arc.
Let's now superimpose the arcs for all three centuries. 18c at the top, 19c in the middle, 20c in red at the bottom.
```
the19trend = np.sum(nineteenth, axis = 0) / len(nineteenth)
the20trend = np.sum(twentieth, axis = 0) / len(twentieth)
plt.plot(the18trend, c = 'b')
plt.plot(the19trend, c = 'k')
plt.plot(the20trend, c = 'r')
plt.yticks(timebreaks, timelabels)
plt.show()
```
It looks, from visual inspection, like the "bowl shape" of this trend gets much weaker in the twentieth century.
### Biography
The pattern in biography seems to be substantially different. It's not clear that there really is much of a trend, in fact.
```
for i in range(5):
randomsample = [random.choice(biography) for x in biography]
sampletrend = np.sum(randomsample, axis = 0) / len(randomsample)
plt.plot(sampletrend)
plt.yticks(timebreaks, timelabels)
plt.show()
```
### Specific books
Let's look at the difference between the pace of the first two + last two segments, and the pace of the middle. Books with a large number here cover much more time in their opening and closing 500 words than on average in the middle.
Books with a low or negative number are unusual in lacking, or reversing, the "bowl" shape.
```
tuplelist = []
for k, v in ratios.items():
tuplelist.append((v, k))
tuplelist.sort()
tuplelist
```
### Historical trend in the relation of ends to middle
If we plot the values above across a timeline, is there any trend? Yes, there is: a subtle one.
```
x = []
y = []
for title, ratio in ratios.items():
y.append(ratio)
x.append(dates[title])
plt.scatter(x, y)
z = np.polyfit(x, y, 1)
p = np.poly1d(z)
plt.plot(x,p(x),"r--")
stats.pearsonr(x, y)
```
### Interpretation
r = -0.29 and p = 0.017
That's not a terribly strong trend, but it is statistically significant. Effect size is medium. It's enough to discuss. The difference of pace between the ends and the middle goes down over time in a way that is clear, if not dramatic. It's also worth noting that the effect would probably be more dramatic if we stopped in the mid-20c. In that sense it may resemble the other historical curve we considered, which similarly bottomed out in mid-20c.
```
df = pd.DataFrame({'x': x, 'y': y})
df = df[df.x < 1975]
plt.scatter(df.x, df.y)
z = np.polyfit(df.x, df.y, 1)
p = np.poly1d(z)
plt.plot(df.x,p(df.x),"r--")
print(stats.pearsonr(df.x, df.y))
```
| github_jupyter |
# Analysis of *.mha* image headers
Content under Creative Commons license CC-BY-NC-SA 4.0
Code under GNU-GPL v3 License
By [Serena Bonaretti](https://sbonaretti.github.io/)
---
The aim of this notebook is to extract image spacing, size, and pixel type from a group of `.mha` images
This notebook can be attached to the *Material* paragraph of your paper
**What you do:**
- Add the path to the folder containing your `.mha` file in the code below (look for the arrow ->). Note that all the files of the folder will be read
**What the notebook does:**
- Gets the list of mha files in the directory
- For each file:
- Reads the image header and extracts spacing, size, and pixel(voxel) type
- Creates a dataframe (=table) with all the information of all images
- Queries the table to extract how many images have a certain spacing, size, and pixel type
- Prints out dependencies for reproducibility
To read .mha headers, it uses the python package `SimpleITK`
To create and query the dataframe, it uses the python package `pandas`
---
```
import os
import pandas as pd
import SimpleITK as sitk
```
## Read image headers and create the dataframe
Read the folder content
-> Add your folder path to the variable `folder`
```
# get files in folder
folder = "./data/images/mha"
# make sure there is "/" or "\" at the end of the folder name
if folder[-1] != os.sep:
folder = folder + os.sep
# get the folder content
folder_content = os.listdir(folder)
```
Extract image information from the header
```
# Per-image accumulators filled in by the loop below
file_names = []
spacing = []
size = []
pixel_type = []
# create the reader (reused for every file)
reader = sitk.ImageFileReader()
# Iterate the files directly rather than indexing with range(len(...))
for file_name in folder_content:
    # make sure you are loading an .mha image
    if os.path.splitext(file_name)[1] == ".mha":
        # print out name and record it
        print (file_name)
        file_names.append(file_name)
        # read only the header (no pixel data)
        reader.SetFileName(folder + file_name)
        reader.LoadPrivateTagsOn()
        reader.ReadImageInformation()
        # get spacing, rounded to 3 decimals so equal spacings group together;
        # a comprehension handles any dimensionality (the original hard-coded 3)
        spacing.append(tuple(round(s, 3) for s in reader.GetSpacing()))
        # get size
        size.append(reader.GetSize())
        # get pixel type
        pixel_type.append(sitk.GetPixelIDValueAsString(reader.GetPixelID()))
```
Create the dataframe:
```
# combine data in list of lists
data = [file_names, spacing, size, pixel_type]
# create dataframe
df = pd.DataFrame(data)
# transpose dataframe
df = df.T
# add column names
df.columns = ["file_name", "spacing", "size", "pixel_type"]
df
```
## Get number of images
The number of images coincides with the number of rows:
```
n_of_rows = df.shape[0]
print (n_of_rows)
```
## Get spacing
Show number of images with a certain spacing:
```
df.groupby('spacing')[["file_name"]].count() #[[]] is for a nice print out
```
## Get size
Show number of images with a certain size:
```
df.groupby('size')[["file_name"]].count()
```
## Get pixel type
Show number of images with a certain pixel type:
```
df.groupby('pixel_type')[["file_name"]].count()
```
---
## Dependencies
Dependencies keep track of the computational environment, so that we can make our workflows reproducible.
Here we use the package watermark. If you haven't installed it yet, go to your terminal, and type `pip install watermark`
```
%load_ext watermark
%watermark -v -m -p pandas,SimpleITK,watermark
```
| github_jupyter |
## The Basics
```
import graphcat
import graphcat.notebook
```
Next, let's reproduce the example workflow from above, starting with an (initially empty) computational graph:
```
graph = graphcat.StaticGraph()
```
Next, we will add tasks to the graph, identified using unique string names:
```
graph.add_task("A")
graph.add_task("B")
graph.add_task("C")
```
Note that a task name can be any hashable object, not just a string - we used strings in this case because they map well to our particular problem.
Now, we can define the links that determine which tasks depend on previous tasks:
```
graph.add_links(source="A", targets="C")
graph.add_links(source="B", targets="C")
```
There are two ways to think about links. One way is to picture data "flowing" through the links from the source tasks to the target tasks, which is why we sometimes call the sources "upstream" and the targets "downstream". Alternatively, you can say that the target of a link "depends on" the source - anytime the source changes, the target needs to change, along with all of *its* targets, and-so-on. Both viewpoints are completely valid, and you will find that both are useful, depending on the context.
Finally, because a picture is worth $1\times10^3$ words, let's see what the graph looks like so far:
```
graphcat.notebook.display(graph)
```
```
import logging
logging.basicConfig(level=logging.INFO)
logger = graphcat.Logger(graph)
```
By default, newly-created tasks are considered *unfinished*, because they haven't been executed yet. Let's finish task "A" by *updating* it:
```
graph.update("A")
graphcat.notebook.display(graph)
```
```
graph.update("C")
graphcat.notebook.display(graph)
```
```
graph.mark_unfinished("A")
graphcat.notebook.display(graph)
```
Notice that both "A" and "C" have become unfinished: because "A" is unfinished and "C" depends on "A", "C" becomes unfinished too. "B" is unaffected because it doesn't depend on "A". Let's update "C" again:
```
graph.update("C")
graphcat.notebook.display(graph)
```
This time "C" is executed, but only after "A". As expected, "B" isn't executed because it was already finished.
Hopefully, we've convinced you that Graphcat always knows which tasks to execute, and in what order. This is true no matter how complex your computational graph becomes. In the next section, we will explore how to configure the graph to perform real work.
## Task Functions
In the previous section, we learned how to represent our workflow using tasks and links, but the tasks themselves didn't actually do anything when executed. To rectify this, we will assign *task functions* that define what a task does when executed. A task function is simply a Python function (technically: a Python *callable*) that is called when a task is executed, returning a value that is stored as the *output* for the task. When downstream tasks are executed, their task functions have access to the outputs from their upstream dependencies. Thus, upstream task function *outputs* become downstream task function *inputs*.
Let's turn our current example into a simple calculator. Tasks "A" and "B" will have task functions that return numbers, and task "C" will return the sum of its inputs. First, we define the task functions for each task:
```
def task_a(graph, name, inputs):
    """Constant task function: always produces 2."""
    return 2

def task_b(graph, name, inputs):
    """Constant task function: always produces 3."""
    return 3

def add(graph, name, inputs):
    """Sum the outputs of every upstream input (each value is a callable)."""
    total = 0
    for value in inputs.values():
        total += value()
    return total
```
Note that every task function must accept three keyword arguments: `graph`, `name` and `inputs`. The `graph` argument is the graph that this task is a part of; `name` is the name of the task being executed, and is useful for logging or changing the function's behavior based on the task's identity; `inputs` is an object that behaves like a Python dict and contains the outputs from upstream tasks.
Don't worry too much about how `add()` is implemented, we'll discuss that in detail in a bit. Let's assign our task functions to each task in the graph:
```
graph.set_task("A", task_a)
graph.set_task("B", task_b)
graph.set_task("C", add)
graphcat.notebook.display(graph)
```
```
graph.update("C")
graphcat.notebook.display(graph)
```
```
print("Result:", graph.output("C"))
```
```
graph.set_task("A", graphcat.constant(4))
graph.set_task("B", graphcat.constant(5))
print("Result:", graph.output("C"))
```
```
graph.set_task("D", graphcat.constant(6))
```
```
graph.set_links(source="D", targets="C")
print("Result:", graph.output("C"))
graphcat.notebook.display(graph)
```
## Named Inputs
By now, you should have questions about the way inputs are passed to task functions. From the log message in the preceding example - `{None: 4, None: 5, None: 6}` - it's obvious that the results from "A", "B", and "D" are passed to "C" using something that looks like a dict, but what's with the key `None`, and why does it appear multiple times (something that can't happen with an actual dict)?
```
def greeting(graph, name, inputs):
    """Combine the 'greeting' and 'subject' inputs into a salutation string."""
    how = inputs.getone('greeting')
    who = inputs.getone('subject')
    return f"{how}, {who}!"
```
Note that the `greeting()` task function uses two inputs named `"greeting"` and `"subject"`. Each call to ``inputs.getone(<name>)`` will return the value of the named input. If there isn't an input with the given name, or there's more than one, the call will fail.
Now we can setup the parameter and greeting task functions for our existing graph:
```
graph.set_task("A", graphcat.constant("Hello"))
graph.set_task("B", graphcat.constant("World"))
graph.set_task("C", greeting)
```
```
graph.set_links(source="A", targets=("C", "greeting"))
graph.set_links(source="B", targets=("C", "subject"))
```
```
print("Result:", graph.output("C"))
graphcat.notebook.display(graph)
```
## Errors
What happens when things go wrong and your task function fails? Let's find out, using a special Graphcat helper function for generating task functions that throw exceptions:
```
graph.set_task("D", graphcat.raise_exception(RuntimeError("Whoops!")))
```
(In case you're wondering, we use this for testing and debugging)
```
try:
print("Result:", graph.output("C"))
except Exception as e:
print(f"Exception: {e!r}")
graphcat.notebook.display(graph)
```
```
try:
print("Result:", graph.output("C"))
except Exception as e:
print(f"Exception: {e!r}")
graphcat.notebook.display(graph)
```
Once, the error is cleared-up, things will return to normal:
```
graph.set_task("D", graphcat.constant(42))
print("Result:", graph.output("C"))
graphcat.notebook.display(graph)
```
| github_jupyter |
# `photoeccentric` Tutorial
In this tutorial, I will create a simulated transit based on a Kepler planet and demonstrate how to use `photoeccentric` to recover the planet's eccentricity using the photoeccentric effect [(Dawson & Johnson 2012)](https://arxiv.org/pdf/1203.5537.pdf).
The code I'm using to implement the photoeccentric effect is compiled into a package called `photoeccentric`, and can be viewed/downloaded here: https://github.com/ssagear/photoeccentric
I'll use `photoeccentric` to implement nested sampling with `dynesty`.
```
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from tqdm import tqdm
from astropy.table import Table
import astropy.units as u
import os
# Using `batman` to create & fit fake transit
import batman
# Using astropy BLS and scipy curve_fit to fit transit
from astropy.timeseries import BoxLeastSquares
# Using emcee & corner to find and plot (e, w) distribution with MCMC
import emcee
import corner
# Using dynesty to do the same with nested sampling
import dynesty
# And importing `photoeccentric`
import photoeccentric as ph
%load_ext autoreload
%autoreload 2
# pandas display option
pd.set_option('display.float_format', lambda x: '%.5f' % x)
nwalk = 64
nsteps = 1000
ndiscard = 500
arrlen = (nsteps-ndiscard)*nwalk
```
I'll define the conversions between solar mass -> kg and solar radius -> meters for convenience.
```
smass_kg = 1.9885e30 # Solar mass (kg)
srad_m = 696.34e6 # Solar radius (m)
```
## The Sample
I'm using the sample of "cool KOIs" from [Muirhead et al. 2013](https://iopscience.iop.org/article/10.1088/0067-0049/213/1/5), and their properites from spectroscopy published here.
I'm reading in several .csv files containing data for this sample. The data includes spectroscopy data from Muirhead et al. (2013), stellar and planet parameters from the Kepler archive, and distances/luminosities from Gaia.
```
muirhead_data = pd.read_csv("datafiles/Muirhead2013_isochrones/muirhead_data_incmissing.txt", sep=" ")
# ALL Kepler planets from exo archive
planets = pd.read_csv('datafiles/exoplanetarchive/cumulative_kois.csv')
# Take the Kepler planet archive entries for the planets in Muirhead et al. 2013 sample
spectplanets = pd.read_csv('datafiles/database/spectplanets.csv')
# Kepler-Gaia Data
kpgaia = Table.read('datafiles/Kepler-Gaia/kepler_dr2_4arcsec.fits', format='fits').to_pandas();
# Kepler-Gaia data for only the objects in our sample
muirhead_gaia = pd.read_csv("datafiles/database/muirhead_gaia.csv")
# Combined spectroscopy data + Gaia/Kepler data for our sample
muirhead_comb = pd.read_csv('datafiles/database/muirhead_comb.csv')
# Only targets from table above with published luminosities from Gaia
muirhead_comb_lums = pd.read_csv('datafiles/database/muirhead_comb_lums.csv')
```
# Defining a "test planet"
I'm going to pick a planet from our sample to test how well `photoeccentric` works. Here, I'm picking KOI 947 (Kepler-737 b), a super-Earth orbiting an M dwarf [Exoplanet Catalog Entry](https://exoplanets.nasa.gov/exoplanet-catalog/2457/kepler-1582-b/). It has an orbital period of about 5 days.
First, I'll use the spectroscopy data from Muirhead et al. 2013 and Gaia luminosities to constrain the mass and radius of the host star beyond the constraint published in the Exoplanet Archive. I'll do this by matching these data with stellar isochrones [MESA](https://iopscience.iop.org/article/10.3847/0004-637X/823/2/102) (check this citation) and using the masses/radii from the matching isochrones to constrain the stellar density.
```
# Kepler ID for Kepler-1582 b
kepid = 9710326
KOI = 947
kepname = spectplanets.loc[spectplanets['kepid'] == kepid].kepler_name.values[0]
koiname = spectplanets.loc[spectplanets['kepid'] == kepid].kepoi_name.values[0]
kp737b = muirhead_comb.loc[muirhead_comb['KIC'] == kepid]
```
I'll read in a file with MESA isochrones. I'll use `ph.fit_isochrone_lum()` to find the subset of stellar isochrones that are consistent with a certain stellar parameters form Kepler-737 (Teff, Mstar, Rstar, and Gaia luminosity).
```
# Read in MESA isochrones
isochrones = pd.read_csv('datafiles/Muirhead2013_isochrones/isochrones_sdss_spitzer_lowmass.dat', sep='\s\s+', engine='python')
```
Using `ph.fit_isochrone_lum()` to match isochrones to stellar data:
```
iso_lums = ph.fit_isochrone_lum(kp737b, isochrones, gaia_lum=True, source='Muirhead')
# Write to csv, then read back in (prevents python notebook from lagging)
iso_lums.to_csv("datafiles/isochrones/iso_lums_" + str(kepid) + ".csv")
isodf = pd.read_csv("datafiles/isochrones/iso_lums_" + str(kepid) + ".csv")
```
I'm determining the mass and radius constraints of this star based on the isochrones that were consistent with the data above.
```
mstar = isodf["mstar"].mean()
mstar_err = isodf["mstar"].std()
rstar = isodf["radius"].mean()
rstar_err = isodf["radius"].std()
```
Now, I'm using `ph.find_density_dist_symmetric()` to create a stellar density distribution from symmetric (Gaussian) distributions based on Mstar and Rstar from isochrones.
Note: this does not necessarily mean the resulting density distribution will appear symmetric.
```
rho_star, mass, radius = ph.find_density_dist_symmetric(mstar, mstar_err, rstar, rstar_err, arrlen)
plt.hist(rho_star, bins=20)
plt.xlabel('Stellar Density Histogram (kg m^-3)', fontsize=20)
```
# Creating a fake light curve based on a real planet
I'm pulling the planet parameters of Kepler-737 b from the exoplanet archive using `ph.planet_params_from_archive()`. This will give me the published period, Rp/Rs, and inclination constraints of this planet. (It will also return some other parameters, but we don't need those right now).
I'm calculating a/Rs using `ph.calc_a()`, instead of using the a/Rs constraint from the Exoplanet Archive. The reason is because a/Rs must be consistent with the density calculated above from spectroscopy/Gaia for the photoeccentric effect to work correctly, and the published a/Rs is often inconsistent. a/Rs depends on the orbital period, Mstar, and Rstar.
```
period, period_uerr, period_lerr, rprs, rprs_uerr, rprs_lerr, a_arc, a_uerr_arc, a_lerr_arc, i, e_arc, w_arc = ph.planet_params_from_archive(spectplanets, koiname)
# We calculate a_rs to ensure that it's consistent with the spec/Gaia stellar density.
a_rs = ph.calc_a(period*86400.0, mstar*smass_kg, rstar*srad_m)
a_rs_err = np.mean((a_uerr_arc, a_lerr_arc))
print('Stellar mass (Msun): ', mstar, 'Stellar radius (Rsun): ', rstar)
print('Period (Days): ', period, 'Rp/Rs: ', rprs)
print('a/Rs: ', a_rs)
print('i (deg): ', i)
```
Now, I'll create a fake transit using `batman`.
I'm creating a model with the period, Rp/Rs, a/Rs, and inclination specified by the Kepler catalog entry and the density constraints.
I'll create the transit model with an $e$ and $w$ of my choice. This will allow me to test whether `photoeccentric` accurately recovers the $(e,w)$ combination I have input. I'll start with $e = 0.0$ and $w = 90.0$ degrees.
## $e = 0.0$, $\omega = 90.0$
I need to define a cadence length (~30 minutes, in days) that matches the Kepler long-cadence integration time, so I can create a fake light curve that integrates over the same time as real Kepler light curves.
```
# 30 minute cadence
cadence = 0.02142857142857143
time = np.arange(-300, 300, cadence)
```
The function `ph.integrate_lcfitter()` evaluates flux at every minute, then sums over every 30 minutes to simulate the Kepler integration time.
```
# Define e and w, calculate flux from transit model
e = 0.0
w = 90.0
flux = ph.integratedlc(time, period, rprs, a_rs, 0.0, i, 90.0, 0)
# Adding some gaussian noise on the order of Kepler noise (by eyeball)
noise = np.random.normal(0,0.0005,len(time))
nflux = flux+noise
flux_err = np.array([0.0005]*len(nflux))
plt.errorbar(time, nflux, yerr=flux_err, fmt='o')
plt.xlabel('Time')
plt.ylabel('Flux')
plt.xlim(-0.5, 0.5)
plt.axvline(0.0, c='r', label='Transit midpoint')
plt.legend()
transitmpt = 0
midpoints = np.unique(np.sort(np.concatenate((np.arange(transitmpt, time[0], -period), np.arange(transitmpt, time[-1], period)))))
```
## Fitting the transit
`photoeccentric` includes functionality to fit with MCMC (`emcee`) or nested sampling (`dynesty`).
First, I'll fit the transit shape with `emcee`. $Rp/Rs$, $a/Rs$, $i$, and $w$ are allowed to vary as free parameters.
The transit fitter, `ph.planetlc_fitter`, fixes $e = 0.0$, even if the input eccentricity is not zero! This means that if e is not 0, the transit fitter will fit the "wrong" values for $a/Rs$ and $i$ -- but they will be wrong in such a way that reveals the eccentricity of the orbit. More on that in the next section.
I enter an initial guess based on what I estimate the fit parameters will be. For this one, I'll enter values close to the Kepler archive parameters.
##### Removing Out of Transit Data
```
# Per-transit accumulators for the detrended cutouts
ttime = []
tflux = []
tflux_err = []
# FIX: the original looped with `for i in range(len(midpoints))`, which
# clobbered the orbital-inclination variable `i` defined earlier (and printed
# later in this notebook). Iterate the midpoints directly instead.
for midpoint in midpoints:
    m, b, t1bjd, t1, fnorm, fe1 = ph.do_linfit(time, nflux, flux_err, midpoint, 11, 5)
    ttime.append(t1bjd)
    tflux.append(fnorm)
    tflux_err.append(fe1)
ttime = np.array(ttime).flatten()
tflux = np.array(tflux).flatten()
tflux_err = np.array(tflux_err).flatten()
# Replace NaNs: unit (normalized) flux for missing flux values, and the
# median error for missing flux errors.
tflux = np.nan_to_num(tflux, nan=1.0)
tflux_err = np.nan_to_num(tflux_err, nan=np.nanmedian(tflux_err))
plt.errorbar(ttime, tflux, yerr=tflux_err, fmt='o')
priortransform = [3., 27., 1., 0., 15., 64., 2., 88., 0.1, transitmpt]
nbuffer = 11
# FIX: the fitter's last return value was unpacked as `t0Dist` but used below
# as `t0Dists`, which raised a NameError; use one name consistently.
ms, bs, timesBJD, timesPhase, fluxNorm, fluxErrs, perDists, rpDists, arsDists, incDists, t0Dists = ph.fit_keplc_emcee(KOI, midpoints, ttime, tflux, tflux_err, 64, 3000, nbuffer, spectplanets, muirhead_comb)
perDists
# Save the posterior samples for each fitted parameter
np.savetxt('Speriods.csv', perDists, delimiter=',')
np.savetxt('Srprs.csv', rpDists, delimiter=',')
np.savetxt('Sars.csv', arsDists, delimiter=',')
np.savetxt('Sinc.csv', incDists, delimiter=',')
np.savetxt('St0.csv', t0Dists, delimiter=',')
# Point estimates: mode of each posterior distribution
per_f = ph.mode(perDists)
rprs_f = ph.mode(rpDists)
a_f = ph.mode(arsDists)
i_f = ph.mode(incDists)
t0_f = ph.mode(t0Dists)
```
Below, I print the original parameters and fit parameters, and overlay the fit light curve on the input light curve.
Because I input $e = 0.0$, the transit fitter should return the exact same parameters I input (because the transit fitter always requires $e = 0.0$).
```
# Create a light curve with the fit parameters
fit1 = ph.integratedlc_fitter(time1, per_f, rprs_f, a_f, i_f, t0_f)
plt.errorbar(time1, nflux1, yerr=fluxerr1, c='blue', alpha=0.5, label='Original LC')
plt.plot(time1, fit1, c='red', alpha=1.0, label='Fit LC')
#plt.xlim(-0.1, 0.1)
plt.legend()
print('Stellar mass (Msun): ', mstar, 'Stellar radius (Rsun): ', rstar)
print('\n')
print('Input params:')
print('Rp/Rs: ', rprs)
print('a/Rs: ', a_rs)
print('i (deg): ', i)
print('\n')
print('Fit params:')
print('Rp/Rs: ', rprs_f)
print('a/Rs: ', a_f)
print('i (deg): ', i_f)
```
### Determining T14 and T23
A crucial step to determining the $(e, w)$ distribution from the transit is calculating the total and full transit durations. T14 is the total transit duration (the time between first and fourth contact). T23 is the full transit duration (i.e. the time during which the entire planet disk is in front of the star, the time between second and third contact.)
Here, I'm using equations 14 and 15 from [this textbook](https://sites.astro.caltech.edu/~lah/review/transits_occultations.winn.pdf). We calculate T14 and T23 assuming the orbit must be circular, and using the fit parameters assuming the orbit is circular. (If the orbit is not circular, T14 and T23 will not be correct -- but this is what we want, because they will differ from the true T14 and T23 in a way that reveals the eccentricity of the orbit.)
```
T14dist = ph.get_T14(pdist, rdist, adist, idist)
T14errs = ph.get_sigmas(T14dist)
T23dist = ph.get_T23(pdist, rdist, adist, idist)
T23errs = ph.get_sigmas(T23dist)
```
# Get $g$
Finally, we can use all the values above to determine $\rho_{circ}$. $\rho_{circ}$ is what we would calculate the stellar density to be if we knew that the orbit was definitely perfectly circular. We will compare $\rho_{circ}$ to $\rho_{star}$ (the true, observed stellar density we calculated from spectroscopy/Gaia), and get $g(e, w)$:

which is also defined as 
Thus, if the orbit is circular $(e = 0)$, then $g$ should equal 1. If the orbit is not circular $(e != 0)$, then $\rho_{circ}$ should differ from $\rho_{star}$, and $g$ should be something other than 1. We can draw an $(e, w)$ distribution based on the value we calculate for $g(e,w)$!
`ph.get_g_distribution()` will help us determine the value of g. This function takes the observed $\rho_{star}$ as well as the fit (circular) transit parameters and calculated transit durations, and calculates $\rho_{circ}$ and $g(e,w)$ based on equations 6 and 7 in [Dawson & Johnson 2012](https://arxiv.org/pdf/1203.5537.pdf).
```
gs, rho_c = ph.get_g_distribution(rho_star, pdist, rdist, T14dist, T23dist)
g_mean = ph.mode(gs)
g_sigma = np.mean(np.abs(ph.get_sigmas(gs)))
```
Print $g$ and $\sigma_{g}$:
```
g_mean
g_sigma
```
The mean of $g$ is about 1.0, which means that $\rho_{circ}$ agrees with $\rho_{star}$ and the eccentricity of this transit must be zero, which is exactly what we input! We can take $g$ and $\sigma_{g}$ and use MCMC (`emcee`) to determine the surface of most likely $(e,w)$.
`photoeccentric` has the probability function for $(e,w)$ from $g$ built in to `ph.log_probability()`.
```
#Guesses
w_guess = 0.0
e_guess = 0.0
solnx = (w_guess, e_guess)
pos = solnx + 1e-4 * np.random.randn(32, 2)
nwalkers, ndim = pos.shape
sampler = emcee.EnsembleSampler(nwalkers, ndim, ph.log_probability, args=(g_mean, g_sigma), threads=4)
sampler.run_mcmc(pos, 5000, progress=True);
labels = ["w", "e"]
flat_samples = sampler.get_chain(discard=100, thin=15, flat=True)
fig = corner.corner(flat_samples, labels=labels, title_kwargs={"fontsize": 12}, truths=[w, e], plot_contours=True)
```
And here is the corner plot for the most likely values of $(e, w)$ that correspond to $g = 1$. The $e$ distribution peaks at 0!
# $e=0.3$, $w=90.0$
Let's do that again, but this time I'll do an eccentric orbit: e = 0.3 and w = 90.
```
time = np.arange(-25, 25, cadence)
# Calculate flux from transit model.
# BUG FIX: this section's narrative, the output directory name ('e_0.3'),
# the corner-plot truths ([90.0, 0.3]) and the next section's premise
# ("same eccentricity e = 0.3 ... instead of 90 deg") all assume
# e = 0.3, w = 90; the cell previously hard-coded 0.9 / -90.0.
e = 0.3
w = 90.0
# Pass the e/w variables instead of repeating literals, so the model
# always matches the values recorded above.
flux = ph.integratedlc(time, period, rprs, a_rs, e, i, w)
# Adding some gaussian noise
noise = np.random.normal(0,0.00006,len(time))
nflux = flux+noise
flux_err = np.array([0.00006]*len(nflux))
```
## Fitting the transit
Using astropy BLS:
```
periodPDF = ph.get_period_dist(time, nflux, 4, 6, arrlen)
print('Period fit: ', ph.mode(periodPDF))
pdist = periodPDF
```
Now, I'm fitting the transit shape with `emcee`. $Rp/Rs$, $a/Rs$, $i$, and $w$ are allowed to vary as free parameters.
The transit fitter, `ph.planetlc_fitter`, fixes $e = 0.0$, even if the input eccentricity is not zero! This means that if e != 0, the transit fitter will fit the wrong values for $a/Rs$ and $i$ -- but they will be wrong in such a way that reveals the eccentricity of the orbit. More on that in the next section.
I enter an initial guess based on what I estimate the fit parameters will be. For this one, I'll enter values pretty close to what I input.
```
ttimes = np.concatenate((-np.arange(0, time[-1], period)[1:], np.arange(0, time[-1], period)))
ttimes = np.sort(ttimes)
time1, nflux1, fluxerr1 = ph.get_transit_cutout_full(ttimes, 4, time, nflux, flux_err)
mid = ph.get_mid(time1)
ptime1 = ph.get_ptime(time1, mid, 29)
```
And fitting the transit using `ph.planetlc_fitter()`.
As explained above, because here the true eccentricity of the orbit is not zero, the transit fitter should fit the wrong values for $a/Rs$ and $i$ in a way that reveals the eccentricity of the orbit.
I enter an initial guess based on what I estimate the fit parameters will be: here, I'll try guesses for $a/Rs$ and $i$ that are slightly larger and smaller than the true values, respectively.
```
# Initial guess: period, rprs, a/Rs, i
p0 = [per_guess, rprs, 45, 89.0]
# BUG FIX: use the actual eccentricity variable instead of a hard-coded 0.3,
# so the output directory always matches the simulated transit.
dr = 'e_' + str(e) + '_w_' + str(w)
direct = 'plots_tutorial/' + dr + '/'
if not os.path.exists(direct):
    os.mkdir(direct)
# EMCEE Transit Model Fitting (fits period, Rp/Rs, a/Rs, i with e fixed to 0)
_, _, pdist, rdist, adist, idist = ph.mcmc_fitter(p0, time1, ptime1, nflux1, fluxerr1, nwalk, nsteps, ndiscard, e, w, direct)
# Point estimates: mode of each posterior distribution.
per_f = ph.mode(pdist)
rprs_f = ph.mode(rdist)
a_f = ph.mode(adist)
i_f = ph.mode(idist)
```
Below, I print the original parameters and fit parameters, and overlay the fit light curve on the input light curve.
Because the input eccentricity here is not zero, the fit $a/Rs$ and $i$ should differ from the values I input (the transit fitter always assumes $e = 0.0$, so the eccentricity is absorbed into those fit parameters).
```
# Create a light curve with the fit parameters
fit1 = ph.integratedlc_fitter(time1, per_f, rprs_f, a_f, i_f)
plt.errorbar(time1, nflux1, yerr=fluxerr1, c='blue', alpha=0.5, label='Original LC')
plt.plot(time1, fit1, c='red', alpha=1.0, label='Fit LC')
#plt.xlim(-0.1, 0.1)
plt.legend()
print('Stellar mass (Msun): ', mstar, 'Stellar radius (Rsun): ', rstar)
print('\n')
print('Input params:')
print('Rp/Rs: ', rprs)
print('a/Rs: ', a_rs)
print('i (deg): ', i)
print('\n')
print('Fit params:')
print('Rp/Rs: ', rprs_f)
print('a/Rs: ', a_f)
print('i (deg): ', i_f)
```
We need to calculate T14 and T23 again, and this time they should differ from the true transit durations because the equations we're using require $e = 0$, and we've input $e = 0.3$.
```
T14dist = ph.get_T14(pdist, rdist, adist, idist)
T14errs = ph.get_sigmas(T14dist)
T23dist = ph.get_T23(pdist, rdist, adist, idist)
T23errs = ph.get_sigmas(T23dist)
```
And now, we're getting $g$ in the same way as above, using `ph.get_g_distribution()`.
```
gs, rho_c = ph.get_g_distribution(rho_star, pdist, rdist, T14dist, T23dist)
g_mean = ph.mode(gs)
g_sigma = np.mean(np.abs(ph.get_sigmas(gs)))
```
Print $g$ and $\sigma_{g}$:
```
g_mean
g_sigma
```
The mean of $g$ is not 1 this time. It's about 1.3, which means that $\rho_{circ}$ differs from $\rho_{star}$ and the eccentricity of this transit must NOT be zero! This is good. Let's take $g$ and $\sigma_{g}$ and use MCMC again to see if the most likely $e$ is 0.3, as we input.
```
#Guesses
w_guess = 0.0
e_guess = 0.0
solnx = (w_guess, e_guess)
pos = solnx + 1e-4 * np.random.randn(32, 2)
nwalkers, ndim = pos.shape
sampler = emcee.EnsembleSampler(nwalkers, ndim, ph.log_probability, args=(g_mean, g_sigma), threads=4)
sampler.run_mcmc(pos, 5000, progress=True);
labels = ["w", "e"]
flat_samples = sampler.get_chain(discard=100, thin=15, flat=True)
fig = corner.corner(flat_samples, labels=labels, show_titles=True, title_kwargs={"fontsize": 12}, truths=[90.0, 0.3], quantiles=[0.16, 0.5, 0.84], plot_contours=True)
```
Here is the corner plot for the most likely values of $(e, w)$ that correspond to $g = 1.3$. This $e$ distribution peaks at about 0.3, which is exactly what we expect based on the transit we created!
# $e=0.3$, $w=-90.0$
Let's do one more, this time a planet with the same eccentricity $e = 0.3$, but where the longitude of periastron $\omega$ is -90 deg (instead of 90 deg). This means that this planet would be passing in front of the star (from our perspective) at the "long" side of its orbital ellipse, instead of the "short" side. A planet with $\omega = -90$ will produce the longest transit possible, while a planet with $\omega = +90$ will produce the shortest transit possible, assuming that all other parameters are the same.
`photoeccentric` should catch onto this, and produce an $(e, w)$ surface that corresponds to the most likely $w$.
```
time = np.arange(-25, 25, cadence)
# Calculate flux from transit model
e = 0.3
w = -90.0
flux = ph.integratedlc(time, period, rprs, a_rs, 0.3, i, -90.0)
# Adding some gaussian noise
noise = np.random.normal(0,0.00006,len(time))
nflux = flux+noise
flux_err = np.array([0.00006]*len(nflux))
plt.errorbar(time, nflux, yerr=flux_err)
plt.xlabel('Time')
plt.ylabel('Flux')
plt.xlim(-1, 1)
```
## Fitting the transit
Using astropy BLS:
```
periodPDF = ph.get_period_dist(time, nflux, 4, 6, arrlen)
print('Period fit: ', ph.mode(periodPDF))
pdist = periodPDF
```
And fitting the transit using `ph.planetlc_fitter()`.
The true eccentricity of this orbit is again not zero, so the transit fitter should fit the wrong values for $a/Rs$ and $i$ in a way that reveals the eccentricity of the orbit.
I enter an initial guess based on what I estimate the fit parameters will be: here, I'll try guesses for $a/Rs$ and $i$ that are slightly smaller than both of these true values.
```
ttimes = np.concatenate((-np.arange(0, time[-1], period)[1:], np.arange(0, time[-1], period)))
ttimes = np.sort(ttimes)
time1, nflux1, fluxerr1 = ph.get_transit_cutout_full(ttimes, 4, time, nflux, flux_err)
mid = ph.get_mid(time1)
ptime1 = ph.get_ptime(time1, mid, 29)
# Inital guess: period, rprs, a/Rs, i, w
p0 = [per_guess, rprs, 20, 89]
dr = 'e_' + str(e) + '_w_' + str(w)
direct = 'plots_tutorial/' + dr + '/'
if not os.path.exists(direct):
os.mkdir(direct)
# EMCEE Transit Model Fitting
_, _, pdist, rdist, adist, idist = ph.mcmc_fitter(p0, time1, ptime1, nflux1, fluxerr1, nwalk, nsteps, ndiscard, e, w, direct)
per_f = ph.mode(pdist)
rprs_f = ph.mode(rdist)
a_f = ph.mode(adist)
i_f = ph.mode(idist)
# Create a light curve with the fit parameters
fit1 = ph.integratedlc_fitter(time1, per_f, rprs_f, a_f, i_f)
```
Printing the original and fit parameters, and plotting the original and fit light curves:
```
plt.errorbar(time1, nflux1, yerr=fluxerr1, c='blue', alpha=0.5, label='Original LC')
plt.plot(time1, fit1, c='red', alpha=1.0, label='Fit LC')
#plt.xlim(-0.1, 0.1)
plt.legend()
print('Stellar mass (Msun): ', mstar, 'Stellar radius (Rsun): ', rstar)
print('\n')
print('Input params:')
print('Rp/Rs: ', rprs)
print('a/Rs: ', a_rs)
print('i (deg): ', i)
print('\n')
print('Fit params:')
print('Rp/Rs: ', rprs_f)
print('a/Rs: ', a_f)
print('i (deg): ', i_f)
```
We need to calculate T14 and T23 again, and this time they should differ from the true transit durations because the equations we're using require $e = 0$, and we've input $e = 0.3$.
How will `ph.get_T14()` and `ph.get_T23()` know that the planet is transiting at apoapse ($\omega = -90$) instead of periapse ($\omega = 90$)? Because the fit values for $a/Rs$ and $i$ compensate for the change in transit duration! All the information we need is encoded into the fit $a/Rs$ and $i$.
```
T14dist = ph.get_T14(pdist, rdist, adist, idist)
T14errs = ph.get_sigmas(T14dist)
T23dist = ph.get_T23(pdist, rdist, adist, idist)
T23errs = ph.get_sigmas(T23dist)
gs, rho_c = ph.get_g_distribution(rho_star, pdist, rdist, T14dist, T23dist)
g_mean = ph.mode(gs)
g_sigma = np.mean(np.abs(ph.get_sigmas(gs)))
```
Print $g$ and $\sigma_{g}$:
```
g_mean
g_sigma
```
The mean of $g$ this time is about 0.7. Again, this means that $\rho_{circ}$ differs from $\rho_{star}$ and the eccentricity must not be zero.
But why is g less than 1, when in the previous example (with the same eccentricity) g was greater than 1?
Let's take $g$ and $\sigma_{g}$ and use MCMC again to see what the most likely $(e, w)$ surface looks like.
```
#Guesses
w_guess = 0.0
e_guess = 0.0
solnx = (w_guess, e_guess)
pos = solnx + 1e-4 * np.random.randn(32, 2)
nwalkers, ndim = pos.shape
sampler = emcee.EnsembleSampler(nwalkers, ndim, ph.log_probability, args=(g_mean, g_sigma), threads=4)
sampler.run_mcmc(pos, 5000, progress=True);
labels = ["w", "e"]
flat_samples = sampler.get_chain(discard=100, thin=15, flat=True)
fig = corner.corner(flat_samples, labels=labels, show_titles=True, title_kwargs={"fontsize": 12}, truths=[-90.0, 0.3], quantiles=[0.16, 0.5, 0.84], plot_contours=True)
```
In this corner plot, $e$ peaks at about 0.3 again! The $\omega$ distribution differs though -- this time, where $e = 0.3$ on this distribution, $\omega$ can equal -90. (If you look back at the previous example, $e = 0.3$ and $\omega = -90$ was inconsistent with the probability distribution in the corner plot.) So `photoeccentric` was able to accurately determine $(e, w)$ for at least these three cases!
| github_jupyter |
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def random_binning(N, d, return_string=False):
    """Randomly split the integer total N across d bins.

    Each of the first d-1 bins receives a uniform random draw from the
    amount still unallocated; the final bin absorbs the remainder, so the
    bin values always sum to N.

    N: total to distribute (non-negative int)
    d: number of bins
    return_string: if True, the bin values are returned as strings
    Returns a list of length d.
    """
    import numpy as np
    remaining = N
    bins = []
    for _ in range(d - 1):
        drawn = np.random.randint(0, remaining + 1)
        bins.append(drawn)
        remaining -= drawn
    # Whatever was not handed out goes into the last bin.
    bins.append(remaining)
    if return_string:
        return [str(b) for b in bins]
    return bins
```
### Function to generate single term from a list of exponents. E.g. if we pass 1,2,0,3 it will generate: $x_1.x_2^2.x_4^3$
```
def gen_single_term(lst):
    """Build one multinomial term from a list of exponents.

    lst[i] is the exponent of variable x{i+1}. Zero exponents are skipped,
    an exponent of 1 is written bare ("x2"), and anything else uses caret
    notation ("x3^2"). Factors are joined with '.', e.g.
    [1, 1, 2, 0, 1] -> "x1.x2.x3^2.x5".
    """
    factors = []
    for pos, power in enumerate(lst, start=1):
        if power == 0:
            continue
        if power == 1:
            factors.append('x' + str(pos))
        else:
            factors.append('x' + str(pos) + '^' + str(power))
    return '.'.join(factors)
gen_single_term([1,1,2,0,1])
def count_symbol(lst):
    """Collapse repeated terms in lst into counted form.

    A term appearing v > 1 times becomes "v.term"; unique terms pass
    through unchanged.

    FIX: iterating set(lst) made the output order nondeterministic across
    interpreter runs (string hash randomization), and list.count inside the
    loop was O(n^2). Counter counts in one pass and preserves the order of
    first occurrence, so the output is deterministic.
    """
    from collections import Counter
    counted = Counter(lst)
    term_lst = []
    for sym, v in counted.items():
        if v != 1:
            term_lst.append(str(v) + '.' + sym)
        else:
            term_lst.append(sym)
    return term_lst
def gen_multinomial(n_features=5,max_power=5,max_terms=10,fixed_terms=None,coefficients=True,
                   prob_negative_sign=0.3):
    """
    Generates a multinomial expression string.

    n_features: Number of independent variables (x1..xn).
    max_power: Maximum total exponent per term. A random power up to this maximum is chosen for each term.
    max_terms: Maximum number of terms. A random number of terms (>= 2) will be chosen up to this maximum.
    fixed_terms: Attempt to generate exactly this many terms. Duplicate terms may still be
        merged by count_symbol, so the final count can be lower.
    coefficients (boolean): Adds (or does not) random integer coefficients (1-10) in front of the terms.
    prob_negative_sign: Probability that the sign joining a term is '-'.
        Each term's sign will be chosen randomly based on this probability.
    """
    import numpy as np
    eqn = ''
    eqn_terms = []
    if fixed_terms is not None:
        n_terms = fixed_terms
    else:
        n_terms = np.random.randint(2, max_terms + 1)
    for _ in range(n_terms):
        power = np.random.randint(1, max_power + 1)
        power_lst = random_binning(power, n_features)
        term = gen_single_term(power_lst)
        if coefficients:
            coeff = np.random.randint(1, 11)
            if coeff != 1:  # a coefficient of 1 is left implicit
                term = str(coeff) + '.' + term
        eqn_terms.append(term)
    eqn_terms = count_symbol(eqn_terms)
    for term_str in eqn_terms:
        eqn += term_str
        # BUG FIX: '-' must be drawn with probability prob_negative_sign.
        # Previously the probabilities were attached to ['+','-'] in that
        # order, so prob_negative_sign actually controlled the '+' sign.
        sign = np.random.choice(['-', '+'], p=[prob_negative_sign, 1 - prob_negative_sign])
        eqn = eqn + ' ' + sign + ' '
    return eqn[:-3]  # strip the trailing " <sign> "
gen_multinomial(coefficients=True)
def make_equation_system(n_samples=10,n_features=5,max_power=5,max_terms=10,
                        fixed_terms=None,coefficients=False,prob_negative_sign=0.3):
    """
    Build a system of equations: a list of n_samples random multinomial
    strings, each produced by gen_multinomial with the given settings.
    """
    return [gen_multinomial(n_features=n_features, max_power=max_power,
                            max_terms=max_terms, fixed_terms=fixed_terms,
                            coefficients=coefficients,
                            prob_negative_sign=prob_negative_sign)
            for _ in range(n_samples)]
make_equation_system(n_samples=10,n_features=4,max_power=3,max_terms=8,fixed_terms=4,
coefficients=True,prob_negative_sign=0.5)
```
### Evaluate a polynomial string
```
from sympy import *
def symbolize(s):
    """
    Convert an equation string into a SymPy expression object.

    '.' is treated as multiplication and '^' as exponentiation, so the
    generator's "2.x1^3" notation becomes the Python/SymPy "2*x1**3".
    """
    from sympy import sympify
    return sympify(s.replace('.', '*').replace('^', '**'))
def eval_multinomial(s,vals=None,symbolic_eval=False):
    """
    Evaluates the polynomial string s at the point vals.

    vals can be a simple list, dictionary, or tuple of values. Values can
    also be SymPy symbols (declare them beforehand) when symbolic_eval=True,
    in which case the count of supplied values is not checked against the
    number of variables.

    Returns the (possibly symbolic) SymPy result, or None if the number of
    supplied values does not match the number of variables.
    Raises TypeError when vals is not a list, dict or tuple.
    """
    from sympy import Symbol
    sym_s = symbolize(s)
    sym_set = sym_s.atoms(Symbol)
    # Sorted variable names give a deterministic pairing with list/tuple vals.
    # (Previously the loop variable shadowed the parameter `s`.)
    sym_lst = sorted(str(sym) for sym in sym_set)
    if symbolic_eval == False and len(sym_set) != len(vals):
        print("Length of the input values did not match number of variables and symbolic evaluation is not selected")
        return None
    if isinstance(vals, list):
        sub = list(zip(sym_lst, vals))
    elif isinstance(vals, dict):
        keys = sorted(vals.keys())
        sub = list(zip(sym_lst, [vals[k] for k in keys]))
    elif isinstance(vals, tuple):
        sub = list(zip(sym_lst, list(vals)))
    else:
        # BUG FIX: previously this case fell through and crashed with an
        # UnboundLocalError on `sub`; fail with a clear message instead.
        raise TypeError("vals must be a list, dict or tuple")
    return sym_s.subs(sub)
s1=gen_multinomial(fixed_terms=5,coefficients=True)
s1
eval_multinomial(s1,{'x1':2,'x3':1,'x4':0,'x2':3,'x5':1},symbolic_eval=True)
vals=(2,3,1)
eval_multinomial(s1,vals)
from sympy import symbols
x,y=symbols('x y')
vals=[2,x,2,2,y]
eval_multinomial(s1,vals,symbolic_eval=True)
```
### Pretty printing and LaTeX format output
```
def pretty_multinomial(n_features=5,max_power=5,max_terms=10,fixed_terms=None,coefficients=False,
                       prob_negative_sign=0.3):
    """
    Generate a random multinomial and return it as a pretty-printed SymPy expression.

    Parameters are forwarded to gen_multinomial. prob_negative_sign is a new,
    backward-compatible parameter (default matches gen_multinomial's own
    default) that was previously impossible to control from here.
    """
    from sympy import init_printing
    init_printing()  # enable rich rendering in notebook output
    s = gen_multinomial(n_features=n_features, max_power=max_power, max_terms=max_terms,
                        fixed_terms=fixed_terms, coefficients=coefficients,
                        prob_negative_sign=prob_negative_sign)
    return symbolize(s)
pretty_multinomial(coefficients=True,fixed_terms=4)
def latex_multinomial(n_features=5,max_power=5,max_terms=10,fixed_terms=None,coefficients=False,
                      prob_negative_sign=0.3):
    """
    Generate a random multinomial and return its LaTeX representation (a string).

    Parameters are forwarded to gen_multinomial. prob_negative_sign is a new,
    backward-compatible parameter (default matches gen_multinomial's own
    default) that was previously impossible to control from here.
    """
    from sympy import latex
    s = gen_multinomial(n_features=n_features, max_power=max_power, max_terms=max_terms,
                        fixed_terms=fixed_terms, coefficients=coefficients,
                        prob_negative_sign=prob_negative_sign)
    return latex(symbolize(s))
latex_multinomial(coefficients=True,fixed_terms=4)
lst=[]
for i in range(6):
lst.append(pretty_multinomial(coefficients=True))
lst
pretty_multinomial(n_features=2,max_power=4,coefficients=True)
```
| github_jupyter |
## Bidisagreements visualisation matrix
In this notebook I will demonstrate how to use the `agreements` package within the annotations library, which allows for the qualitative assessment of bidisagreements (cases of data instances with 1 disagreement). The lone class here is `BiDisagreements`.
```
import sys
from disagree import BiDisagreements
import pandas as pd
```
First we will create a dummy dataset of labels. Remember that current capabilities allow for labels of ascending integers starting at zero, with `None` used for missing labels. So if you have the possible labels `["cat", "dog", "giraffe", None]`, you will want to convert these to `[0, 1, 2, None]`.
The data set in this tutorial will have 15 instances of data, annotated by 3 annotators. The possible labels will be `[0, 1, 2, 3, None]`:
```
test_annotations = {"a": [None, None, None, None, None, 1, 3, 0, 1, 0, 0, 2, 2, None, 2],
"b": [0, None, 1, 0, 2, 2, 3, 2, None, None, None, None, None, None, None],
"c": [None, None, 1, 0, 2, 3, 3, None, 1, 0, 0, 2, 2, None, 3]}
df = pd.DataFrame(test_annotations)
labels = [0, 1, 2, 3] # Note that you don't need to specify the presence of None labels
```
Initialise the instance:
```
bidis = BiDisagreements(df, labels)
```
We can get a summary of the number of instances of data where no disagreements occurred, where 1 disagreement occurred (bidisagreement), where 2 disagreements occurred (tridisagreement), and where even more disagreements occurred:
```
bidis.agreements_summary()
```
This shows that there are 9 instances of data for which all annotators that labelled it agree. There are 2 instances whereby 2 of the annotators disagree on the label. There is 1 instance where 3 annotators disagree. There are no instances where more than 3 annotators disagree (there are only 3 annotators in this example anyway, so it would be very strange if this wasn't zero!).
If you want to just have a look at the bidisagreements visually, then you can return a matrix representing the disagreements, and plot it however you like. Element $(i, j)$ is the number of bidisagreements between label $i$ and label $j$.
```
mat = bidis.agreements_matrix()
mat_normalised = bidis.agreements_matrix(normalise=True)
```
As we've seen when using the `agreements_summary` method, there were two bidisagreements. This visualisation shows that one of these comes from a disagreement between labels 2 and 0, and the other comes from labels 2 and 3.
At this small scale, it's not very useful, but when you have 10s of thousands of labels, this can be really useful for quickly identifying where large disagreements are coming from. Once you can pinpoint where the disagreement comes from, you can go about modifying annotation schema and/or label types.
Addressing these issues is essential to building datasets robust to machine learning algorithms. If your annotations are fraught with disagreements, then any machine learning model will not be reliable.
| github_jupyter |
```
import sys
sys.path.append('../../../python/')
#get the f function from the paper
import dataPython as dp
import numpy as np
import scipy.interpolate as inter
data_bh = dp.getXYdata('../../../NGC5533_Bulge/NGC5533/NGC5533-blackhole_fmt.txt')
data_bulge2_7 = dp.getXYdata('../../../data/ourbulge2_7.txt')
data_bulge0_5 = dp.getXYdata('../../../data/ourbulge0_5.txt')
data_bulge1 = dp.getXYdata('../../../data/ourbulge1.txt')
data_bulge2 = dp.getXYdata('../../../data/ourbulge2.txt')
data_bulge3 = dp.getXYdata('../../../data/ourbulge3.txt')
data_bulge4 = dp.getXYdata('../../../data/ourbulge4.txt')
data_bulge6 = dp.getXYdata('../../../data/ourbulge6.txt')
data_disk = dp.getXYdata('../../../NGC5533_Bulge/NGC5533/NGC5533-disk_fmt.txt')
data_dm = dp.getXYdata('../../../NGC5533_Bulge/NGC5533/NGC5533-dm_fmt.txt')
# Black hole / bulge / disk / dark-matter rotation-curve components,
# each smoothed with a cubic interpolating spline.
def _component_spline(data, step=4, k=3):
    """Build a smoothing spline v(r) from a {'xx': r, 'yy': v} data dict.

    The (r, v) pairs are co-sorted by radius before fitting. (The previous
    copy-pasted code did `r = r[r.argsort()]` and then `v = v[r.argsort()]`,
    i.e. it argsorted the *already sorted* radii, so v was never permuted to
    match r when the input file was unsorted.) Every `step`-th point of the
    sorted data is used to fit an InterpolatedUnivariateSpline of degree k.
    """
    r = np.asarray(data['xx'])
    v = np.asarray(data['yy'])
    order = r.argsort()
    r, v = r[order], v[order]  # apply the SAME permutation to both arrays
    return inter.InterpolatedUnivariateSpline(r[::step], v[::step], k=k)

# Black hole keeps every data point (step=1); all other components use every 4th,
# matching the original idx%1==0 / idx%4==0 subsampling.
f_v_bh   = _component_spline(data_bh, step=1)
f_v_b2_7 = _component_spline(data_bulge2_7)
f_v_b0_5 = _component_spline(data_bulge0_5)
f_v_b1   = _component_spline(data_bulge1)
f_v_b2   = _component_spline(data_bulge2)
f_v_b3   = _component_spline(data_bulge3)
f_v_b4   = _component_spline(data_bulge4)
f_v_b6   = _component_spline(data_bulge6)
f_v_d    = _component_spline(data_disk)
f_v_dm   = _component_spline(data_dm)

def _total_curve(f_bulge):
    """Total rotation curve: quadrature sum of dark matter, disk, the given bulge model, and black hole."""
    return lambda x: np.sqrt(f_v_dm(x)**2 + f_v_d(x)**2 + f_bulge(x)**2 + f_v_bh(x)**2)

# One total curve per bulge Sersic index.
f_tot2_7 = _total_curve(f_v_b2_7)
f_tot0_5 = _total_curve(f_v_b0_5)
f_tot1   = _total_curve(f_v_b1)
f_tot2   = _total_curve(f_v_b2)
f_tot3   = _total_curve(f_v_b3)
f_tot4   = _total_curve(f_v_b4)
f_tot6   = _total_curve(f_v_b6)
data = dp.getXYdata_wXYerr('../../../NGC5533_Bulge/NGC5533/NGC5533-rot-data_fmt.txt')
r_dat = np.asarray(data['xx'])
v_dat = np.asarray(data['yy'])
v_err0 = np.asarray(data['ex'])
v_err1 = np.asarray(data['ey'])
import matplotlib as mpl
from matplotlib import pyplot as plt
import pylab
fig = plt.figure(figsize=(9.0,12.0)) #size of the plot
ax3 = plt.axes()
ax3.grid(True)
ax3.yaxis.grid(True,which='minor',linestyle='--')
xmax=19
x = np.arange(0,xmax,0.1)
xb = np.arange(0.5,xmax,0.1)
#functions
plt.errorbar(r_dat,v_dat,yerr=[v_err0,v_err1], marker='o', markersize=8, \
ecolor='gray',color='gray', linestyle='none', label='NGC 5533 (Noordermeer 2007)', linewidth=2)
plt.plot(xb, f_v_b2_7(xb), linestyle='solid', color='r', label='bulge, n = 2.7')
#plt.plot(xb, f_v_b_v0_5(xb), linestyle='solid', color='b', label='bulge, n = 0.5')
#plt.plot(xb, f_v_b_v1(xb), linestyle='solid', color='g', label='bulge, n = 1.0')
#plt.plot(xb, f_v_b_v2(xb), linestyle='solid', color='r', label='bulge, n = 2.0')
#plt.plot(xb, f_v_b_v3(xb), linestyle='solid', color='c', label='bulge, n = 3.0')
#plt.plot(xb, f_v_b_v4(xb), linestyle='solid', color='m', label='bulge, n = 4.0')
#plt.plot(xb, f_v_b_v6(xb), linestyle='solid', color='y', label='bulge, n = 6.0')
plt.plot(x, f_v_bh(x), color='darkviolet', linewidth=2, linestyle=':', label='central black hole')
plt.plot(xb, f_v_d(xb), color='gold', linewidth=2, linestyle='dashdot', label='disk')
plt.plot(xb, f_v_dm(xb), color='c', linewidth=2, linestyle='--', label='dark matter')
plt.plot(xb, f_tot2_7(xb), color='k', linewidth=3, linestyle='solid', label='total velocity, n = 2.7')
plt.plot(xb, f_tot0_5(xb), color='k', linewidth=1, linestyle=':', label='n = 0.5 - 6.0')
plt.plot(xb, f_tot1(xb), color='k', linewidth=1, linestyle=':')
plt.plot(xb, f_tot2(xb), color='k', linewidth=1, linestyle=':')
plt.plot(xb, f_tot3(xb), color='k', linewidth=1, linestyle=':')
plt.plot(xb, f_tot4(xb), color='k', linewidth=1, linestyle=':')
plt.plot(xb, f_tot6(xb), color='k', linewidth=1, linestyle=':')
plt.axis('tight'); #xmin, xmax, ymin, ymax
#or use: plt.xlim(10, 0) plt.ylim(1.2, -1.2);
#or to tighten the bounds: plt.axis('tight')
#or use: plt.axis('tight')
#plt.axis([0, 15000, 0.5e6, 2.5e6])
#labeling plots
ax3.set_ylim(-10,350)
ax3.set_xlim(0,xmax)
ax3.set_title('NGC 5533 Galaxy', fontsize=18) #labeling axes
ax3.set_xlabel('Radius (kpc)', fontsize=14)
ax3.set_ylabel('Rotation Curve (km/s)', fontsize=14)
ax3.xaxis.set_tick_params(labelsize=14)
ax3.yaxis.set_tick_params(labelsize=14);
plt.legend(loc=0,fontsize=14, bbox_to_anchor=(0.5, -0.1)); #adding legend
#plt.savefig('Plot3.eps')
#Removed vectorization - other steps seemed necessary for converting to useable types.
```
| github_jupyter |
# Artificial Intelligence Nanodegree
## Convolutional Neural Networks
---
In this notebook, we train an MLP to classify images from the MNIST database.
### 1. Load MNIST Database
```
from keras.datasets import mnist
# use Keras to import pre-shuffled MNIST database
(X_train, y_train), (X_test, y_test) = mnist.load_data()
print("The MNIST database has a training set of %d examples." % len(X_train))
print("The MNIST database has a test set of %d examples." % len(X_test))
```
### 2. Visualize the First Six Training Images
```
import matplotlib.pyplot as plt
%matplotlib inline
import matplotlib.cm as cm
import numpy as np
# plot first six training images
fig = plt.figure(figsize=(20,20))
for i in range(6):
ax = fig.add_subplot(1, 6, i+1, xticks=[], yticks=[])
ax.imshow(X_train[i], cmap='gray')
ax.set_title(str(y_train[i]))
```
### 3. View an Image in More Detail
```
def visualize_input(img, ax):
    """Show img in grayscale on the given axes and overlay each pixel's value.

    The text is white on dark pixels and black on bright ones, using
    img.max()/2.5 as the brightness threshold.
    """
    ax.imshow(img, cmap='gray')
    n_rows, n_cols = img.shape
    thresh = img.max() / 2.5
    for row in range(n_rows):
        for col in range(n_cols):
            value = img[row][col]
            ax.annotate(str(round(value, 2)), xy=(col, row),
                        horizontalalignment='center',
                        verticalalignment='center',
                        color='white' if value < thresh else 'black')
fig = plt.figure(figsize = (12,12))
ax = fig.add_subplot(111)
visualize_input(X_train[0], ax)
```
### 4. Rescale the Images by Dividing Every Pixel in Every Image by 255
```
# rescale [0,255] --> [0,1]
X_train = X_train.astype('float32')/255
X_test = X_test.astype('float32')/255
```
### 5. Encode Categorical Integer Labels Using a One-Hot Scheme
```
from keras.utils import np_utils
# print first ten (integer-valued) training labels
print('Integer-valued labels:')
print(y_train[:10])
# one-hot encode the labels
y_train = np_utils.to_categorical(y_train, 10)
y_test = np_utils.to_categorical(y_test, 10)
# print first ten (one-hot) training labels
print('One-hot labels:')
print(y_train[:10])
```
### 6. Define the Model Architecture
```
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
# define the model: a fully-connected MLP classifier
model = Sequential()
# flatten each image into a 1-D vector (input shape taken from the training data)
model.add(Flatten(input_shape=X_train.shape[1:]))
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.2))  # drop 20% of activations to reduce overfitting
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.2))
# softmax output: a probability distribution over the 10 digit classes
model.add(Dense(10, activation='softmax'))
# summarize the model (layer shapes and parameter counts)
model.summary()
```
### 7. Compile the Model
```
# compile the model
model.compile(loss='categorical_crossentropy', optimizer='rmsprop',
metrics=['accuracy'])
```
### 8. Calculate the Classification Accuracy on the Test Set (Before Training)
```
# evaluate test accuracy
score = model.evaluate(X_test, y_test, verbose=0)
accuracy = 100*score[1]
# print test accuracy
print('Test accuracy: %.4f%%' % accuracy)
```
### 9. Train the Model
```
from keras.callbacks import ModelCheckpoint
# train the model
checkpointer = ModelCheckpoint(filepath='mnist.model.best.hdf5',
verbose=1, save_best_only=True)
hist = model.fit(X_train, y_train, batch_size=128, epochs=10,
validation_split=0.2, callbacks=[checkpointer],
verbose=1, shuffle=True)
```
### 10. Load the Model with the Best Classification Accuracy on the Validation Set
```
# load the weights that yielded the best validation accuracy
model.load_weights('mnist.model.best.hdf5')
```
### 11. Calculate the Classification Accuracy on the Test Set
```
# evaluate test accuracy
score = model.evaluate(X_test, y_test, verbose=0)
accuracy = 100*score[1]
# print test accuracy
print('Test accuracy: %.4f%%' % accuracy)
```
| github_jupyter |
# Complex Formula 1 circuit
```
%matplotlib inline
import cvxpy as cvx
import dccp
import numpy as np
from numpy import linalg as LA
import matplotlib.pyplot as plt
```
## Defining the scene
```
# Anchor waypoints A-G that the closed track must pass through, in order.
a, b, c, d, e, f, g = np.array([0.5, 0.]), np.array([1., 0.5]), np.array([1., 1.5]), np.array([0., 1.5]), np.array([-0.35, 0.65]), np.array([-0.35, 0.35]), np.array([0., -0.5])
# Centers of the circular track sections.  Segment 3 is a straight line and
# has no center, so p3 is explicitly None.  (The original used the Jupyter
# last-output placeholder `_`, which raises NameError on a fresh kernel and
# holds an arbitrary value otherwise; p3 is never read downstream.)
p1, p2, p3, p4, p5, p6, p7 = np.array([1., 0.]), np.array([1., 1.]), None, np.array([0., 1.]), np.array([-0.83, 0.5]), np.array([0., 0.]), np.array([0., 0.])
obstacles = []  # no obstacles on this circuit
r1, r2 = 0.55, 0.45  # outer / inner corridor radii around each turn center
dx = 0.05  # maximum step length between consecutive trajectory points
n1, n2, n3, n4, n5, n6, n7 = 50, 80, 50, 50, 30, 30, 80  # points per segment
# One 2-D decision variable per track segment; segment k has n_k+1 points so
# that consecutive segments can share their junction waypoints.
x1 = cvx.Variable((n1+1, 2))
x2 = cvx.Variable((n2+1, 2))
x3 = cvx.Variable((n3+1, 2))
x4 = cvx.Variable((n4+1, 2))
x5 = cvx.Variable((n5+1, 2))
x6 = cvx.Variable((n6+1, 2))
x7 = cvx.Variable((n7+1, 2))
# Junction points B..G are free variables (A stays fixed).  NOTE: these
# rebind the numpy arrays b..g defined in the previous cell.
b = cvx.Variable(2)
c = cvx.Variable(2)
d = cvx.Variable(2)
e = cvx.Variable(2)
f = cvx.Variable(2)
g = cvx.Variable(2)
# Chain the segments into a closed loop: each starts where the previous one
# ends, and segment 7 returns to the start point A.
constr = [x1[0] == a, x1[n1] == b, x2[0] == b, x2[n2] == c, x3[0] == c, x3[n3] == d, x4[0] == d, x4[n4] == e, x5[0] == e, x5[n5] == f, x6[0] == f, x6[n6] == g, x7[0] == g, x7[n7] == a]
# Pin one coordinate of each junction so it stays on its road section.
constr.extend([b[0] == 1., c[0] == 1., d[0] == 0., e[1] == 0.65, f[1] == 0.35, g[1] == 0.])
# Displacement vectors between consecutive points of each segment.
v1 = x1[1:] - x1[:-1]
v2 = x2[1:] - x2[:-1]
v3 = x3[1:] - x3[:-1]
v4 = x4[1:] - x4[:-1]
v5 = x5[1:] - x5[:-1]
v6 = x6[1:] - x6[:-1]
v7 = x7[1:] - x7[:-1]
# Per-segment constraints: bounded step length plus an annular "road
# corridor" around each turn center.  The ">=" distance constraints are
# nonconvex, which is why the problem is solved with DCCP below.
for i in range(1, n1+1):
    constr.append(cvx.norm(v1[i-1]) <= dx)
    constr.append(cvx.norm(x1[i] - p1) <= r1)
    constr.append(cvx.norm(x1[i] - p1) >= r2)
constr.append(x2[:, 0] >= 1)
for i in range(1, n2+1):
    constr.append(cvx.norm(v2[i-1]) <= dx)
    constr.append(cvx.norm(x2[i] - p2) >= (1-r1))
    constr.append(cvx.norm(x2[i] - p2) <= (1-r2))
# Segment 3 is the straight top section: bound only the y coordinate.
for i in range(1, n3+1):
    constr.append(cvx.norm(v3[i-1]) <= dx)
    constr.append(x3[i,1] >= (2-r1))
    constr.append(x3[i,1] <= (2-r2))
for i in range(1, n4+1):
    constr.append(cvx.norm(v4[i-1]) <= dx)
    constr.append(cvx.norm(x4[i] - p4) >= (1-r1))
    constr.append(cvx.norm(x4[i] - p4) <= (1-r2))
for i in range(1, n5+1):
    constr.append(cvx.norm(v5[i-1]) <= dx)
    constr.append(cvx.norm(x5[i] - p5) <= r1)
    constr.append(cvx.norm(x5[i] - p5) >= r2)
for i in range(1, n6+1):
    constr.append(cvx.norm(v6[i-1]) <= dx)
    constr.append(cvx.norm(x6[i] - p6) >= (1-r1))
    constr.append(cvx.norm(x6[i] - p6) <= (1-r2))
constr.append(x7[:, 1] <= 0.)
for i in range(1, n7+1):
    constr.append(cvx.norm(v7[i-1]) <= dx)
    constr.append(cvx.norm(x7[i] - p7) >= (1-r1))
    constr.append(cvx.norm(x7[i] - p7) <= (1-r2))
# Minimize total path length (sum of per-segment displacement norms).
total_v = cvx.norm(v1, "fro") + cvx.norm(v2, "fro") + cvx.norm(v3, "fro") + cvx.norm(v4, "fro") + cvx.norm(v5, "fro") + cvx.norm(v6, "fro") + cvx.norm(v7, "fro")
prob = cvx.Problem(cvx.Minimize(total_v), constr)
prob.is_dcp()  # expected False: the annulus lower bounds are nonconvex
prob.solve(method="dccp");
# Stack the solved segments into one (sum(n_k)+7, 2) trajectory array.
traj = np.r_[x1.value, x2.value, x3.value, x4.value, x5.value, x6.value, x7.value]
traj
# Radius of the road's center line, halfway between outer (r1) and inner (r2).
r_center = (r1 + r2) / 2
# Angular samples for the curved road sections; road 3 is the straight top
# section and is parameterised by x-coordinates instead.
# NOTE: this rebinds x3 (previously the cvxpy variable) -- safe only because
# traj has already been computed above.
theta1 = np.linspace(np.pi/2, np.pi, 500)
theta2 = np.linspace(-np.pi/2, np.pi/2, 1000)
x3 = np.linspace(1., 0., 1000)
theta4 = np.linspace(np.pi/2, 5/4*np.pi, 750)
theta5 = np.linspace(-np.pi/9, np.pi/9, 500)
theta6 = np.linspace(-5/4*np.pi, 0., 1000)
# For each road section, sample the outer edge (top), center line and inner
# edge (bottom) as x/y point arrays.
road_top_x, road_top_y = 1 + r1 * np.cos(theta1), r1 * np.sin(theta1)
road_center_x, road_center_y = 1 + r_center * np.cos(theta1), r_center * np.sin(theta1)
road_bottom_x, road_bottom_y = 1 + r2 * np.cos(theta1), r2 * np.sin(theta1)
road2_top_x, road2_top_y = 1 + (1-r1) * np.cos(theta2), 1 + (1-r1) * np.sin(theta2)
road2_center_x, road2_center_y = 1 + (1-r_center) * np.cos(theta2), 1 + (1-r_center) * np.sin(theta2)
road2_bottom_x, road2_bottom_y = 1 + (1-r2) * np.cos(theta2),1 + (1-r2) * np.sin(theta2)
road3_top_y = np.ones(1000) * (2 - r1)
road3_center_y = np.ones(1000) * (2 - r_center)
road3_bottom_y = np.ones(1000) * (2 - r2)
road4_top_x, road4_top_y = 0. + (1-r1) * np.cos(theta4), 1 + (1-r1) * np.sin(theta4)
road4_center_x, road4_center_y = 0. + (1-r_center) * np.cos(theta4), 1 + (1-r_center) * np.sin(theta4)
road4_bottom_x, road4_bottom_y = 0. + (1-r2) * np.cos(theta4), 1 + (1-r2) * np.sin(theta4)
road5_top_x, road5_top_y = -0.83 + r1 * np.cos(theta5), 0.5 + r1 * np.sin(theta5)
road5_center_x, road5_center_y = -0.83 + r_center * np.cos(theta5), 0.5 + r_center * np.sin(theta5)
road5_bottom_x, road5_bottom_y = -0.83 + r2 * np.cos(theta5), 0.5 + r2 * np.sin(theta5)
road6_top_x, road6_top_y = (1-r1) * np.cos(theta6), (1-r1) * np.sin(theta6)
road6_center_x, road6_center_y = (1-r_center) * np.cos(theta6), (1-r_center) * np.sin(theta6)
road6_bottom_x, road6_bottom_y = (1-r2) * np.cos(theta6), (1-r2) * np.sin(theta6)
# Draw each road section: a wide grey center line plus red edge lines
# overlaid with short white dashes.
plt.plot(road_center_x, road_center_y, "grey", linewidth=7);
plt.plot(road_top_x, road_top_y, "r", linewidth=3.5);
plt.plot(road_top_x, road_top_y, "w--", linewidth=3.5, dashes=(1.5, 1.5));
plt.plot(road_bottom_x, road_bottom_y, "r", linewidth=3.5);
plt.plot(road_bottom_x, road_bottom_y, "w--", linewidth=3.5, dashes=(1.5, 1.5));
plt.plot(road2_center_x, road2_center_y, "grey", linewidth=7);
plt.plot(road2_top_x, road2_top_y, "r", linewidth=3.5);
plt.plot(road2_top_x, road2_top_y, "w--", linewidth=3.5, dashes=(1.5, 1.5));
plt.plot(road2_bottom_x, road2_bottom_y, "r", linewidth=3.5);
plt.plot(road2_bottom_x, road2_bottom_y, "w--", linewidth=3.5, dashes=(1.5, 1.5));
plt.plot(x3, road3_center_y, "grey", linewidth=7);
plt.plot(x3, road3_top_y, "r", linewidth=3.5);
plt.plot(x3, road3_top_y, "w--", linewidth=3.5, dashes=(1.5, 1.5));
plt.plot(x3, road3_bottom_y, "r", linewidth=3.5);
plt.plot(x3, road3_bottom_y, "w--", linewidth=3.5, dashes=(1.5, 1.5));
plt.plot(road4_center_x, road4_center_y, "grey", linewidth=7);
plt.plot(road4_top_x, road4_top_y, "r", linewidth=3.5);
plt.plot(road4_top_x, road4_top_y, "w--", linewidth=3.5, dashes=(1.5, 1.5));
plt.plot(road4_bottom_x, road4_bottom_y, "r", linewidth=3.5);
plt.plot(road4_bottom_x, road4_bottom_y, "w--", linewidth=3.5, dashes=(1.5, 1.5));
plt.plot(road5_center_x, road5_center_y, "grey", linewidth=7);
plt.plot(road5_top_x, road5_top_y, "r", linewidth=3.5);
plt.plot(road5_top_x, road5_top_y, "w--", linewidth=3.5, dashes=(1.5, 1.5));
plt.plot(road5_bottom_x, road5_bottom_y, "r", linewidth=3.5);
plt.plot(road5_bottom_x, road5_bottom_y, "w--", linewidth=3.5, dashes=(1.5, 1.5));
# BUG FIX: the grey center line of section 6 was plotted as
# plot(road6_center_x, road6_top_y, ...) -- x from the 1-r_center radius, y
# from the 1-r1 radius -- which draws a distorted curve instead of the road's
# center line.  Use the matching road6_center_y.
plt.plot(road6_center_x, road6_center_y, "grey", linewidth=7);
plt.plot(road6_top_x, road6_top_y, "r", linewidth=3.5);
plt.plot(road6_top_x, road6_top_y, "w--", linewidth=3.5, dashes=(1.5, 1.5));
plt.plot(road6_bottom_x, road6_bottom_y, "r", linewidth=3.5);
plt.plot(road6_bottom_x, road6_bottom_y, "w--", linewidth=3.5, dashes=(1.5, 1.5));
# Obstacles (none for this circuit) are drawn as orange discs.
for o, r in obstacles:
    circle = plt.Circle(o, r, color='orange')
    plt.gca().add_artist(circle)
# Optimized trajectory on top of the track, plus "grass" background styling.
plt.scatter(traj[:, 0], traj[:, 1], color="b", s=2, zorder=10)
plt.gca().set_facecolor((0.43, 0.98, 0.4))
plt.grid(True, color=(0.42, 0.87, 0.39), linewidth=20)
plt.axis('scaled')
plt.xlim(-1, 1.75);
plt.ylim(-0.75, 1.75);
plt.gca().set_xticklabels([])
plt.gca().set_yticklabels([])
plt.gca().tick_params(tick1On=False)
```
## Plotting the circuit
```
# Re-declare the circuit constants so the plotting cell below can run
# stand-alone (the optimization cell rebinds b..g as cvxpy variables).
a, b, c, d, e, f, g = np.array([0.5, 0.]), np.array([1., 0.5]), np.array([1., 1.5]), np.array([0., 1.5]), np.array([-0.35, 0.65]), np.array([-0.35, 0.35]), np.array([0., -0.5])
# Segment 3 is a straight line with no turn center, so p3 is explicitly None.
# (The original used the Jupyter last-output placeholder `_`, whose value is
# arbitrary; p3 is never read downstream.)
p1, p2, p3, p4, p5, p6, p7 = np.array([1., 0.]), np.array([1., 1.]), None, np.array([0., 1.]), np.array([-0.83, 0.5]), np.array([0., 0.]), np.array([0., 0.])
obstacles = []  # no obstacles on this circuit
r1, r2 = 0.55, 0.45  # outer / inner corridor radii
dx = 0.05  # maximum step length between consecutive trajectory points
n1, n2, n3, n4, n5, n6, n7 = 50, 80, 50, 50, 30, 30, 80  # points per segment
# Duplicate of the geometry computation above, kept so this cell can be run
# independently of the optimization cell.
r_center = (r1 + r2) / 2
theta1 = np.linspace(np.pi/2, np.pi, 500)
theta2 = np.linspace(-np.pi/2, np.pi/2, 1000)
x3 = np.linspace(1., 0., 1000)
theta4 = np.linspace(np.pi/2, 5/4*np.pi, 750)
theta5 = np.linspace(-np.pi/9, np.pi/9, 500)
theta6 = np.linspace(-5/4*np.pi, 0., 1000)
# Outer edge (top), center line and inner edge (bottom) of each road section.
road_top_x, road_top_y = 1 + r1 * np.cos(theta1), r1 * np.sin(theta1)
road_center_x, road_center_y = 1 + r_center * np.cos(theta1), r_center * np.sin(theta1)
road_bottom_x, road_bottom_y = 1 + r2 * np.cos(theta1), r2 * np.sin(theta1)
road2_top_x, road2_top_y = 1 + (1-r1) * np.cos(theta2), 1 + (1-r1) * np.sin(theta2)
road2_center_x, road2_center_y = 1 + (1-r_center) * np.cos(theta2), 1 + (1-r_center) * np.sin(theta2)
road2_bottom_x, road2_bottom_y = 1 + (1-r2) * np.cos(theta2),1 + (1-r2) * np.sin(theta2)
road3_top_y = np.ones(1000) * (2 - r1)
road3_center_y = np.ones(1000) * (2 - r_center)
road3_bottom_y = np.ones(1000) * (2 - r2)
road4_top_x, road4_top_y = 0. + (1-r1) * np.cos(theta4), 1 + (1-r1) * np.sin(theta4)
road4_center_x, road4_center_y = 0. + (1-r_center) * np.cos(theta4), 1 + (1-r_center) * np.sin(theta4)
road4_bottom_x, road4_bottom_y = 0. + (1-r2) * np.cos(theta4), 1 + (1-r2) * np.sin(theta4)
road5_top_x, road5_top_y = -0.83 + r1 * np.cos(theta5), 0.5 + r1 * np.sin(theta5)
road5_center_x, road5_center_y = -0.83 + r_center * np.cos(theta5), 0.5 + r_center * np.sin(theta5)
road5_bottom_x, road5_bottom_y = -0.83 + r2 * np.cos(theta5), 0.5 + r2 * np.sin(theta5)
road6_top_x, road6_top_y = (1-r1) * np.cos(theta6), (1-r1) * np.sin(theta6)
road6_center_x, road6_center_y = (1-r_center) * np.cos(theta6), (1-r_center) * np.sin(theta6)
road6_bottom_x, road6_bottom_y = (1-r2) * np.cos(theta6), (1-r2) * np.sin(theta6)
# Schematic of the circuit: only the road edges, colored by corridor type
# (orange: r1/r2 annulus, tomato: 1-r1/1-r2 annulus).
plt.plot(road_top_x, road_top_y, "orange", linewidth=2);
plt.plot(road_bottom_x, road_bottom_y, "orange", linewidth=2);
plt.plot(road2_top_x, road2_top_y, "tomato", linewidth=2);
plt.plot(road2_bottom_x, road2_bottom_y, "tomato", linewidth=2);
plt.plot(x3, road3_top_y, "orange", linewidth=2);
plt.plot(x3, road3_bottom_y, "orange", linewidth=2);
plt.plot(road4_top_x, road4_top_y, "tomato", linewidth=2);
plt.plot(road4_bottom_x, road4_bottom_y, "tomato", linewidth=2);
plt.plot(road5_top_x, road5_top_y, "orange", linewidth=2);
plt.plot(road5_bottom_x, road5_bottom_y, "orange", linewidth=2);
plt.plot(road6_top_x, road6_top_y, "tomato", linewidth=2);
plt.plot(road6_bottom_x, road6_bottom_y, "tomato", linewidth=2);
delta = 0.05  # label offset from each marker
# Waypoints A-F as blue crosses (G is not labelled here) and the turn
# centers as colored dots (p3 has no marker: segment 3 is straight).
plt.scatter(a[0], a[1], s=25, color="blue", marker="x")
plt.text(a[0]-2*delta, a[1]+delta, "A", fontsize=14)
plt.scatter(b[0], b[1], s=25, color="blue", marker="x")
plt.text(b[0]-2*delta, b[1]+delta, "B", fontsize=14)
plt.scatter(c[0], c[1], s=25, color="blue", marker="x")
plt.text(c[0]-2*delta, c[1]+delta, "C", fontsize=14)
plt.scatter(d[0], d[1], s=25, color="blue", marker="x")
plt.text(d[0]-2*delta, d[1]+delta, "D", fontsize=14)
plt.scatter(e[0], e[1], s=25, color="blue", marker="x")
plt.text(e[0]-2*delta, e[1]+delta, "E", fontsize=14)
plt.scatter(f[0], f[1], s=25, color="blue", marker="x")
plt.text(f[0]-2*delta, f[1]+delta, "F", fontsize=14)
plt.scatter(p1[0], p1[1], s=30, color="orange")
plt.text(p1[0]-2*delta, p1[1]+delta, "P1", fontsize=14)
plt.scatter(p2[0], p2[1], s=30, color="tomato")
plt.text(p2[0]-2*delta, p2[1]+delta, "P2", fontsize=14)
plt.scatter(p4[0], p4[1], s=30, color="tomato")
plt.text(p4[0]-2*delta, p4[1]+delta, "P4", fontsize=14)
plt.scatter(p5[0], p5[1], s=30, color="orange")
plt.text(p5[0]-2*delta, p5[1]+delta, "P5", fontsize=14)
plt.scatter(p6[0], p6[1], s=30, color="tomato")
plt.text(p6[0]-2*delta, p6[1]+delta, "P6", fontsize=14)
plt.grid(True)
plt.axis('scaled')
plt.xlim(-1, 1.75);
plt.ylim(-0.75, 1.75);
```
| github_jupyter |
## Balanced Accuracy
Average recall across classes
```
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import (
accuracy_score,
balanced_accuracy_score,
recall_score,
)
```
## Load data
```
# load data
data = pd.read_csv('../kdd2004.csv')
# remap target class to 0 and 1 (original labels are -1 / 1)
data['target'] = data['target'].map({-1:0, 1:1})
data.head()
# data size
data.shape
# imbalanced target: show the class proportions
data.target.value_counts() / len(data)
# separate dataset into train and test (70/30 split, fixed seed)
X_train, X_test, y_train, y_test = train_test_split(
    data.drop(labels=['target'], axis=1),  # drop the target
    data['target'],  # just the target
    test_size=0.3,
    random_state=0)
X_train.shape, X_test.shape
```
## Baseline: predict the majority class
```
# Baseline prediction: always predict the majority class (all zeros)
y_train_base = pd.Series(np.zeros(len(y_train)))
y_test_base = pd.Series(np.zeros(len(y_test)))
```
## Train ML models
### Random Forests
```
# Shallow random forest; predict_proba(...)[:, 1] gives the class-1 score
rf = RandomForestClassifier(n_estimators=100, random_state=39, max_depth=2, n_jobs=4)
rf.fit(X_train, y_train)
y_train_rf = rf.predict_proba(X_train)[:,1]
y_test_rf = rf.predict_proba(X_test)[:,1]
```
### Logistic Regression
```
# Logistic regression; max_iter raised so the solver converges on this data
logit = LogisticRegression(random_state=0, max_iter=1000)
logit.fit(X_train, y_train)
y_train_logit = logit.predict_proba(X_train)[:,1]
y_test_logit = logit.predict_proba(X_test)[:,1]
```
## Accuracy
```
# Plain accuracy rewards always predicting the majority class on imbalanced
# data, so the three numbers below look deceptively similar.
print('Accuracy Baseline test: ', accuracy_score(y_test, y_test_base))
print('Accuracy Random Forest test:', accuracy_score(y_test, rf.predict(X_test)))
print('Accuracy Logistic Regression test:', accuracy_score(y_test, logit.predict(X_test)))
```
Judging by the accuracy, it seems that the machine learning models add only a tiny bit of performance compared to the baseline.
## Balanced accuracy
- [balanced_accuracy_score](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.balanced_accuracy_score.html)
```
# Balanced accuracy = mean per-class recall; the all-zeros baseline drops to
# 0.5, exposing the gap the plain accuracy hid.
print('Balanced accuracy, Baseline test: ', balanced_accuracy_score(y_test, y_test_base))
print('Balanced accuracy, Random Forest test:', balanced_accuracy_score(y_test, rf.predict(X_test)))
print('Balanced accuracy, Regression test:', balanced_accuracy_score(y_test, logit.predict(X_test)))
```
## Recall per class
```
# with the below parameters in the recall_score function we can obtain the recall
# in each class: average=None returns one value per label in `labels` ([0, 1]).
# Baseline (always majority class): recall 1.0 on class 0, 0.0 on class 1.
print('Recall, class 0 and 1: ', recall_score(
    y_test, y_test_base, labels=[0,1], average=None))
# Random forest
print('Recall, class 0 and 1:', recall_score(
    y_test, rf.predict(X_test), labels=[0,1], average=None))
# Logistic regression
print('Recall, class 0 and 1:', recall_score(
    y_test, logit.predict(X_test), labels=[0,1], average=None))
```
| github_jupyter |
# Kernel SHAP explanation for multinomial logistic regression models
## Introduction
In a previous [example](kernel_shap_wine_intro.ipynb), we showed how the KernelSHAP algorithm can be applied to explain the output of an arbitrary classification model so long as the model outputs probabilities or operates in margin space. We also showcased the powerful visualisations in the `shap` library that can be used for model investigation. In this example we focus on understanding, in a simple setting, how conclusions drawn from the analysis of the KernelShap output relate to conclusions drawn from interpreting the model directly. To make this possible, we fit a logistic regression model on the Wine dataset.
```
import shap
shap.initjs()
import matplotlib.pyplot as plt
import numpy as np
from alibi.explainers import KernelShap
from scipy.special import logit
from sklearn.datasets import load_wine
from sklearn.metrics import confusion_matrix, plot_confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
```
## Data preparation: load and split Wine dataset
```
# Load the Wine dataset bundle and unpack the pieces used below.
wine = load_wine()
wine.keys()
data = wine.data
target = wine.target
target_names = wine.target_names
feature_names = wine.feature_names
```
Split data into testing and training sets and normalize it.
```
# 80/20 split with a fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(data,
                                                    target,
                                                    test_size=0.2,
                                                    random_state=0,
                                                    )
print("Training records: {}".format(X_train.shape[0]))
print("Testing records: {}".format(X_test.shape[0]))
# Standardize features using statistics of the training split only, so no
# information leaks from the test set.
scaler = StandardScaler().fit(X_train)
X_train_norm = scaler.transform(X_train)
X_test_norm = scaler.transform(X_test)
```
## Fitting a multinomial logistic regression classifier to the Wine dataset
### Training
```
# Multinomial (softmax) logistic regression on the standardized features.
classifier = LogisticRegression(multi_class='multinomial',
                                random_state=0,
                                )
classifier.fit(X_train_norm, y_train)
```
### Model assessment
```
# Assess the fitted model on the held-out test set.
y_pred = classifier.predict(X_test_norm)
cm = confusion_matrix(y_test, y_pred)
title = 'Confusion matrix for the logistic regression classifier'
# NOTE(review): plot_confusion_matrix was removed in scikit-learn 1.2; newer
# versions use ConfusionMatrixDisplay.from_estimator -- confirm the pinned
# sklearn version before upgrading.
disp = plot_confusion_matrix(classifier,
                             X_test_norm,
                             y_test,
                             display_labels=target_names,
                             cmap=plt.cm.Blues,
                             normalize=None,
                             )
disp.ax_.set_title(title)
```
## Interpreting the logistic regression model
<a id='src_2'></a>
One way to arrive at the multinomial logistic regression model is to consider modelling a categorical response
variable $y \sim \text{Cat} (y| \beta x)$ where $\beta$ is $K \times D$ matrix of distribution parameters with $K$ being the number of classes and $D$ the feature dimensionality. Because the probability of outcome $k$ being observed given $x$, $p_{k} = p(y=k|x, \mathbf{\beta})$, is bounded by $[0, 1]$, the logistic regression assumes that a linear relationship exists between the *logit* transformation of the output and the input. This can be formalised as follows:
\begin{equation}
\log \left( \frac{p_{k}}{1 - p_{k}} \right) = \beta_{0,k} + \beta_{1,k} x_{1} + \beta_{2,k} x_{2} + \cdots + \beta_{D,k} x_{D} = \mathbf{\beta}_k \cdot x
\end{equation}
The RHS is a function of the expected value of the categorical distribution (sometimes referred to as a _link function_ in the literature). The coefficients $\beta$ of the linear relations used to fit the logit transformation are estimated jointly given a set of training examples $\mathcal{D}= \{(x_i, y_i)\}_{i=1}^N$.
For each class, the vector of coefficients $\mathbb{\beta}_k$ can be used to interpret the model *globally*; in the absence of interaction terms, the coefficient of a predictor (i.e., independent variable) represents the *change in log odds* when the predictor changes by one unit while all other variables are kept at fixed values. Equivalently, the exponentiated coefficient is equivalent to a change in odds. Since the transformation from odds to outcome probabilities is monotonic, a change in odds also implies a change in the outcome probability in the same direction. Thus, the magnitudes of the feature coefficients measure the effect of a predictor on the output and thus one can globally interpret the logistic regression model.
However, the log odds ratios and odds ratios are known to be sensitive to *unobserved heterogenity*, that is, omission of a variable with good explanatory power from a logistic regression model *assumed true*. While we will not be concerned directly with this issue and refer the interested reader to [[2]](#References), we will be using the *estimated percentage unit effect* (or the *marginal effect*)
$$
\beta_{j,k} \times p_{i,k}(1 - p_{i, k})
$$
as a means of estimating the effect of a predictor $j$ on individual $i$ ($x_{i, j})$ with respect to predicting the $k^{th}$ class and thus *locally* interpret the model. The average marginal effect is more robust measure of effects in situations where effects are compared across different groups or models. Consider a logistic model where an independent variable $x_1$ is used to predict an outcome and a logistic model where $x_2$, known to be uncorrelated with $x_1$, is also included. Since the two models assign different probabilities to the different outcomes and since the distribution of the outcome across values of $x_1$ should be the same across the two models (due to the independence assumption), we expected the second model will scale the coeffcient of $\beta_1$. Hence, the log-odds and odds ratios are not robust to unobserved heterogeneity so directly comparing the two across models or groups can be misleading. As discussed in [[2]](#References), the marginal effect is generally robust to the effect.
The average marginal effect (AME) of a predictor
$$
\frac{1}{n} \sum_{i=1}^{n}\beta_{j,k} \times p_{i,k} (1 - p_{i,k})
$$
is equivalent to simply using $\beta_{j,k}$ to *globally* explain the model.
```
def issorted(arr, reverse=False):
    """Return True if *arr* is sorted ascending (descending when *reverse*)."""
    # A descending check is just an ascending check on the reversed array.
    seq = arr[::-1] if reverse else arr
    return np.all(seq[:-1] <= seq[1:])
def get_importance(class_idx, beta, feature_names, intercepts=None):
    """
    Return one class's coefficients sorted by absolute magnitude.

    Parameters
    ----------
    class_idx : int
        Row of `beta` (the class) whose coefficients are extracted.
    beta : np.ndarray
        Coefficient matrix of shape (n_classes, n_features).
    feature_names : list of str
        Names matching the columns of `beta`.
    intercepts : np.ndarray, optional
        Per-class intercepts; when given, the class bias is spliced into the
        ranking at the position its absolute value dictates, labelled 'bias'.

    Returns
    -------
    (np.ndarray, list of str)
        Sorted coefficient values and the corresponding feature names.
    """
    # Column indices of the class coefficients, largest |value| first.
    order = np.argsort(np.abs(beta[class_idx, :]))[::-1]
    names = [feature_names[idx] for idx in order]
    importances = beta[class_idx, order]
    if intercepts is not None:
        bias = intercepts[class_idx]
        # `importances` is descending in |value|; search its (ascending)
        # reverse and mirror the index to find where the bias slots in.
        insert_at = len(importances) - np.searchsorted(np.abs(importances)[::-1], np.abs(bias))
        importances = np.insert(importances, insert_at, bias.item())
        names.insert(np.where(importances == bias)[0][0], 'bias')
    return importances, names
def plot_importance(feat_imp, feat_names, **kwargs):
    """
    Create a horizontal barchart of feature effects, sorted by their magnitude.

    feat_imp: sequence of signed effect values (assumed pre-sorted).
    feat_names: labels for each bar, same order as feat_imp.
    Recognised kwargs: left_x / right_x (x-axis limits), eps_factor (text
    offset multiplier for negative bars), xlabel, ylabel, labels_fontsize,
    tick_labels_fontsize.  Returns (ax, fig).
    """
    left_x, right_x = kwargs.get("left_x"), kwargs.get("right_x")
    eps_factor = kwargs.get("eps_factor", 4.5)
    xlabel = kwargs.get("xlabel", None)
    ylabel = kwargs.get("ylabel", None)
    labels_fontsize = kwargs.get("labels_fontsize", 15)
    tick_labels_fontsize = kwargs.get("tick_labels_fontsize", 15)
    # plot one horizontal bar per feature
    fig, ax = plt.subplots(figsize=(10, 5))
    y_pos = np.arange(len(feat_imp))
    ax.barh(y_pos, feat_imp)
    # set labels
    ax.set_yticks(y_pos)
    ax.set_yticklabels(feat_names, fontsize=tick_labels_fontsize)
    ax.invert_yaxis()  # labels read top-to-bottom
    ax.set_xlabel(xlabel, fontsize=labels_fontsize)
    ax.set_ylabel(ylabel, fontsize=labels_fontsize)
    ax.set_xlim(left=left_x, right=right_x)
    # annotate each bar with its rounded value; negative bars need the text
    # pushed further left so it clears the bar end
    for i, v in enumerate(feat_imp):
        eps = 0.03
        if v < 0:
            eps = -eps_factor*eps
        ax.text(v + eps, i + .25, str(round(v, 3)))
    return ax, fig
```
We now retrieve the estimated coefficients, and plot them sorted by their magnitude.
```
# Coefficient matrix (n_classes x n_features) and per-class intercepts.
beta = classifier.coef_
intercepts = classifier.intercept_
# Coefficients with the intercept appended as a final column; used later by
# the marginal-effects computation.
all_coefs = np.concatenate((beta, intercepts[:, None]), axis=1)
class_idx = 0  # start with class 0; later cells reuse this variable
feat_imp, feat_names = get_importance(class_idx,
                                      beta,
                                      feature_names,
                                      )
_, class_0_fig = plot_importance(feat_imp,
                                 feat_names,
                                 left_x=-1.,
                                 right_x=1.25,
                                 xlabel = f"Feature effects (class {class_idx})",
                                 ylabel = "Features"
                                 )
```
Note that these effects are with respect to the model bias (displayed below).
```
# Per-class intercepts (the bias the feature effects are relative to);
# shown as the cell output.
classifier.intercept_
```
This plot shows that features such as `proline`, `flavanoids`, `od280/od315_of_diluted_wines`, `alcohol` increase the odds of *any* sample being classified as `class_0` whereas the `alcalinity_of_ash` decreases them.
```
# Coefficient ranking for class 1 (plotted in the next cell).
feat_imp, feat_names = get_importance(1, # class_idx
                                      beta,
                                      feature_names,
                                      )
```
The plot below shows that, however, `alcalinity_of_ash` increases the odds of a wine being in `class_1`. Predictors such as `proline`, `alcohol` or `ash`, which increase the odds of predicting a wine as a member of `class_0`, decrease the odds of predicting it as a member of `class_1`.
```
# Bar chart of class-1 effects, then compute the ranking for class 2.
_, class_1_fig = plot_importance(feat_imp,
                                 feat_names,
                                 left_x=-1.5,
                                 right_x=1,
                                 eps_factor = 5, # controls text distance from end of bar for negative examples
                                 xlabel = "Feature effects (class {})".format(1),
                                 ylabel = "Features"
                                 )
feat_imp, feat_names = get_importance(2, # class_idx
                                      beta,
                                      feature_names,
                                      )
```
Finally, for `class_2`, the `color_intensity`, `ash` are the features that increase the `class_2` odds.
```
# Bar chart of class-2 effects.
_, class_2_fig = plot_importance(feat_imp,
                                 feat_names,
                                 left_x=-1.25,
                                 right_x=1,
                                 xlabel = "Feature effects (class {})".format(2),
                                 ylabel = "Features"
                                 # eps_factor = 5.
                                 )
```
## Apply KernelSHAP to explain the model
<a id='src_1'></a>
Note that the *local accuracy* property of SHAP (eq. (5) in [[1]](#References)) requires
$$
f(x) = g(x') = \phi_0 + \sum_{j=1}^D \phi_j x_j'.
$$
Hence, sum of the feature importances, $\phi_j$, should be equal to the model output, $f(x)$. By passing `link='logit'` to the explainer, we ensure that $\phi_0$, the *base value* (see _**Local explanation**_ section [here](kernel_shap_wine_intro.ipynb)) will be calculated in the correct units. Note that here $x' \in \mathbb{R}^D$ represents a *simplified input* for which the shap value is computed. A simple example of a simplified input in the image domain, justified by the dimensionality of the input space, is a *superpixel mask*: we formulate the task of explaining the outcome of an image prediction task as determining the effects of each superpixel in a segmenented image upon the outcome. The interested reader is referred to [[1]](#References) for more details about simplified inputs.
```
pred_fcn = classifier.predict_proba
# link='logit' makes the explainer work in margin (log-odds) space, so the
# shap values sum to logit(p) rather than p.
lr_explainer = KernelShap(pred_fcn, link='logit')
lr_explainer.fit(X_train_norm)
# passing the logit link function to the explainer ensures the units are consistent ...
# (sanity check: the difference printed below should be ~0)
mean_scores_train = logit(pred_fcn(X_train_norm).mean(axis=0))
print(mean_scores_train - lr_explainer.expected_value)
# l1_reg=False: the feature space is small, so no regularisation is needed
lr_explanation = lr_explainer.explain(X_test_norm, l1_reg=False)
```
Because the dimensionality of the feature space is relatively small, we opted not to regularise the regression that computes the Shapley values. For more information about the regularisation options available for higher dimensional data see the introductory example [here](kernel_shap_wine_intro.ipynb).
### Locally explaining multi-output models with KernelShap
### Explaining the logistic regression model globally with KernelSHAP
#### Summary plots
To visualise the impact of the features on the decision scores associated with class `class_idx`, we can use a summary plot. In this plot, the features are sorted by the sum of their SHAP values magnitudes across all instances in `X_test_norm`. Therefore, the features with the highest impact on the decision score for class `class_idx` are displayed at the top of the plot.
```
# Summary plot for class `class_idx` (set to 0 in an earlier cell); features
# are ranked by the sum of |SHAP value| over X_test_norm.
shap.summary_plot(lr_explanation.shap_values[class_idx], X_test_norm, feature_names)
```
Because the logistic regression model uses a linear predictor function, the exact shap values for each class $k$ can be computed exactly according to ([[1]](#References))
$$
\phi_{i,j}(f, x_i) = \beta_{j,k}(x_{i,j} - \mathbb{E}_{\mathcal{D}}[x_{j}]).
$$
Here we introduced an additional index $i$ to emphasize that we compute a shap value for *each predictor* and *each instance* in a set to be explained.This allows us to check the accuracy of the SHAP estimate. Note that we have already applied the normalisation so the expectation is not subtracted below.
```
# Exact shap values for a linear model: beta_jk * x_ij.  The E[x_j] term is
# omitted because the StandardScaler already centred the data.
exact_shap = beta[:, None, :]*X_test_norm
feat_name = 'alcohol'
feat_idx = feature_names.index(feat_name)
x = np.linspace(-3, 4, 1000)  # identity reference line y = x
plt.scatter(exact_shap[class_idx,...][:, feat_idx], lr_explanation.shap_values[class_idx][:, feat_idx])
plt.plot(x, x, linestyle='dashed', color='red')
plt.xlabel(r'Exact $\phi_j$', fontsize=18)
plt.ylabel(r'Estimated $\phi_j$', fontsize=18)
plt.title(fr"Comparison of estimated and exact shap values for feature '{feat_name}'")
plt.grid(True)
```
The plot below shows that the exact shap values and the estimate values give rise to similar ranking of the features, and only the order of the `flavanoids` and `alcohol`features is swapped.
```
# Same summary plot, computed from the exact (analytic) shap values.
shap.summary_plot(exact_shap[class_idx, ...], X_test_norm, feature_names)
```
A similar plot can be created for the logistic regression model by plotting the marginal effects. Note that the plot labelling cannot be changed, so the x axis is incorrectly labeled as `SHAP value` below.
```
# Marginal effect of predictor j for instance i: beta_jk * p_ik * (1 - p_ik).
p = classifier.predict_proba(X_test_norm)
prb = p * (1. - p)
marg_effects = all_coefs[:, None, :] * prb.T[..., None]
# sanity check against a manual computation for class 0 / feature 0
assert (all_coefs[0, 0] * prb[:, 0] - marg_effects[0, :, 0]).sum() == 0.0
avg_marg_effects = np.mean(marg_effects, axis=1) # nb: ranking of the feature coefs should be preserved
mask = np.ones_like(X_test_norm) # the effect (positive vs negative) on the output depends on the sign of the input
mask[X_test_norm < 0] = -1
shap.summary_plot(marg_effects[class_idx, :, :-1]*mask, X_test_norm, feature_names) # exclude bias
```
As expected, the ranking of the marginal effects is the same as that provided the ranking the raw coefficients (see below). However, this effect measure allows us to assess the effects at instance level. Note that both the approximate computation and the exact method yield the same group of features as the most important, although their rankings are not identical. It is important to note that the exact effects ranking and absolute values is a function of the entire data (due to the dependence of the model coefficients) whereas the approximate computation is *local*: the explanation model is fitted locally around each instance. We also notice that the approximate and exact shap value computation both identify the same relationship between the feature value and the effect on the evidence of a sample belonging to `class_idx`.
```
# Re-display the class-0 feature-effect figure for side-by-side comparison
# with the SHAP summary plot above.
class_0_fig
```
Looking at the 6 most important features for this classification in `class_0`, we see that both the `KernelSHAP` method and the logistic regression rank the `proline` feature as the one with the most significant effect. While the order of the subsequent 5 features is permuted, the effects of these features are also very similar so, in effect, similar conclusions would be drawn from analysing either output.
### References
<a id='References'></a>
[[1]](#src_1) Lundberg, S.M. and Lee, S.I., 2017. A unified approach to interpreting model predictions. In Advances in neural information processing systems (pp. 4765-4774).
[[2]](#src_2) Mood, C., 2017. "Logistic regression: Uncovering unobserved heterogeneity."
| github_jupyter |
# Building a Search Engine from scratch
We want to build our own search engine from scratch by following this steps:
1. Build our own datasets retrieving almost 20000 html pages referring to a list of ranked animes of all time.
2. Preprocessing of the text
3. Building the search engine to compute synopsis related queries
4. Improving the search engine by computing the cosine similarity
## 0. Useful imports
```
import functions as f #import of all the functions we made for this homework.
from tqdm import tqdm
from bs4 import BeautifulSoup
import requests
import time
import nltk
import csv
from shutil import copyfile
import numpy as np
import pandas as pd
nltk.download('stopwords')
nltk.download('punkt')
```
## 1. Data collection
First we have to retrieve the urls with BeautifulSoup; this is an easy task. We give you the code, but we also provide the processed pages already, so it is not mandatory to run it.
```
# Scrape the "top anime" ranking: each results page lists 50 entries, so
# `limit=page*50` pages 0..382 cover the top ~19150 animes.
anime = []
for page in tqdm(range(0, 383)):
    url = 'https://myanimelist.net/topanime.php?limit=' + str(page * 50)
    response = requests.get(url)
    soup = BeautifulSoup(response.text, 'html.parser')
    for tag in soup.find_all('tr'):
        links = tag.find_all('a')
        for link in links:
            # Ranking-row anchors carry a string id; the length check skips
            # icon-only anchors whose text content is a single character.
            if type(link.get('id')) == str and len(link.contents[0]) > 1:
                anime.append(link.get('href'))
```
## Crawler
In this part we'll download the html pages corresponding to the urls. It can be helpful to use a time sleep to avoid being blocked for too much requests. We stored the anime's html in folders dividing them by the correspondent page.
Once we get all the urls in the first 400 pages of the list, we:
1. Download the html corresponding to each of the collected urls.
2. After we collect a single page, immediately save its html in a file. In this way, if your program stops, for any reason, we will not lose the data collected up to the stopping point.
3. Organize the entire set of downloaded html pages into folders. Each folder will contain the htmls of the animes in page 1, page 2, ... of the list of animes.
```
# Create one folder per ranking page: ./page_folders/page1 ... page383.
# (os.mkdir raises FileExistsError if a folder already exists from a
# previous run -- delete ./page_folders first when re-running.)
!mkdir "./page_folders/"
import os
for page in tqdm(range(1, 384)):
    folder = "page"+str(page)
    path = "./page_folders/"+folder
    os.mkdir(path)
for page in tqdm(range(0, 383)):
folder = "./page_folders/page"+str(page+1)
update_page = 50*page
for i in range(0,50): # 1 -> 50
url = f'{anime[update_page+i]}'
response = requests.get(url)
filename = r""+folder+"/anime_"+str(update_page+i+1)+".html"
with open(filename,'w', encoding='utf-8') as f:
f.write(response.text)
time.sleep(3)
```
## Parser
Once we have the html we want to collect some informations about the pages. We implemented a parsing function to collect for each anime the following informations:
1. Anime Name (to save as animeTitle): String
2. Anime Type (to save as animeType): String
3. Number of episode (to save as animeNumEpisode): Integer
4. Release and End Dates of anime (to save as releaseDate and endDate): convert both release and end date into datetime format.
5. Number of members (to save as animeNumMembers): Integer
6. Score (to save as animeScore): Float
7. Users (to save as animeUsers): Integer
8. Rank (to save as animeRank): Integer
9. Popularity (to save as animePopularity): Integer
10. Synopsis (to save as animeDescription): String
11. Related Anime (to save as animeRelated): Extract all the related animes, but only keep unique values and those that have a hyperlink associated to them. List of strings.
12. Characters (to save as animeCharacters): List of strings.
13. Voices (to save as animeVoices): List of strings
14. Staff (to save as animeStaff): Include the staff name and their responsibility/task in a list of lists.
For each anime, we create an ***anime_i.tsv*** file of this structure:
- animeTitle \t animeType \t ... \t animeStaff
If an information is missing, we just leave it as an empty string. Example:
- animeTitle \t animeType \t ... \t animeStaff
- Fullmetal Alchemist: Brotherhood \t TV \t ... \t [['Cook, Justin', 'Producer'], ['Irie, Yasuhiro', 'Director, Episode Director, Storyboard'], ['Yonai, Noritomo', 'Producer'], ['Mima, Masafumi', 'Sound Director']]
We create a folder to store all the tsv's
```
!mkdir "./tsv_files/"
import os
for page in tqdm(range(1, 384)):
folder = "page"+str(page)
path = "./tsv_files/"+folder
os.mkdir(path)
```
Then we parse the html and convert them into tsv's
```
# convert every stored html page into a tsv via the project parser
for page_idx in tqdm(range(0, 383)):
    tqdm.write(f' page{page_idx+1}')
    base = 50 * page_idx
    for offset in range(0, 50):
        anime_no = base + offset + 1
        f.html_parsing(f'tsv_files/page{page_idx+1}/anime_{anime_no}.tsv',
                       f'./page_folders/page{page_idx+1}/anime_{anime_no}.html')
# write the shared column header once, as its own file
with open('./tsv_files/header.tsv', 'wt') as out_file:
    anime_tsv = csv.writer(out_file, delimiter='\t')
    anime_tsv.writerow(['animeTitle', 'animeType', 'animeNumEpisode', 'animeRelDate', 'animeEndDate',
                        'animeMembers', 'animeScore', 'animeUsers', 'animeRank', 'animePopularity', 'animeSynopsis',
                        'animeRelated', 'animeCharacters', 'animeVoices', 'animeStaff'])
```
Once we collected all the tsv's file we have to clean them (replacing empty lists w/ empty strings)
```
# strip stray backslashes from every tsv in place
# BUG FIX: the original bound the open file to `f`, clobbering the
# `functions` module imported as `f` that later cells (f.D, f.vocab, ...)
# still need; it also closed files manually instead of using `with`.
for page_idx in tqdm(range(0, 383)):
    tqdm.write(f' page{page_idx+1}')
    update_page = 50 * page_idx
    for i in range(0, 50):
        tsv_path = f'./tsv_files/page{page_idx+1}/anime_{update_page+i+1}.tsv'
        with open(tsv_path, mode='r', encoding='utf-8') as tsv_file:
            txt = tsv_file.read().strip()
        with open(tsv_path, mode='w', encoding='utf-8') as tsv_file:
            tsv_file.write(txt.replace('\\', ''))
```
Last step is to merge all the tsv in a single file called _tsv_merged_
```
# copy every per-page tsv into the flat ./tsv_merged/ folder
for page_idx in tqdm(range(0, 383)):
    tqdm.write(f' page{page_idx+1}')
    update_page = 50 * page_idx
    for i in range(0, 50):
        copyfile(f'./tsv_files/page{page_idx+1}/anime_{update_page+i+1}.tsv',
                 f'./tsv_merged/anime_{update_page+i+1}.tsv')
# read back the column names.
# BUG FIX: the original used `f` for the handle (shadowing the `functions`
# module) and called f.close() after the `with` block had already closed it.
with open("./tsv_files/header.tsv", encoding='utf-8') as header_file:
    header = header_file.read().strip().split('\t')
```
## 2. Building the Search Engine
The fundamental steps to build a search engine for documents are:
1. Pre-processing of the documents, ending up with a dictionary with the document id as key and the list of tokens of the document as value.
2. Building the inverted index such that for each token we have all the documents in which it is contained. In the second part we'll consider as value a tuple of the document and the tf-idf of the token for that document.
3. Implementing a function that, given a query, retrieves the related documents.
The first step is to build a dictionary of dictionaries: for every anime we'll have a dictionary containing all the information retrieved earlier. The query will be focused only on the synopsis for now.
```
D = {}  # maps "anime_i" -> dict with every field parsed for that anime
for idx in tqdm(range(19126)):
    tsv_path = f"/content/drive/MyDrive/ADM-HW3/tsv_merged/anime_{idx+1}.tsv"
    D[f"anime_{idx+1}"] = f.Dict(tsv_path)
```
### Synopsis preprocessing
First, we must pre-process all the information collected for each anime by:
- Removing stopwords
- Removing punctuation
- Stemming
- Anything else we think it's needed
For this purpose, we use the nltk library.
Once we preprocessed with the _text_preprocessing_ function we collect all the unique tokens
```
# first we preprocess the synopsis by lowering the case removing stopwords, punctuation and numbers and finally stemming
all_tokens = []
seen = set()  # PERF FIX: O(1) membership test instead of the O(n) list scan
for key in tqdm(D):
    out, tkns = f.text_preprocessing(D[key]['animeSynopsis'], lower = True, numbers = True, stemming = True)
    for token in tkns:
        if token not in seen:  # keep unique tokens, preserving first-seen order
            seen.add(token)
            all_tokens.append(token)
# save all tokens in a txt file
# BUG FIX: use a dedicated handle — the original `f = open(...)` shadowed
# the `functions` module imported as `f`.
with open("/content/drive/MyDrive/ADM-HW3/tokens.txt", "w", encoding='utf-8') as token_file:
    for token in all_tokens:
        token_file.write(token + '\n')
```
We map each token to an integer in the following dictionary, this we'll be useful later
```
# we map every unique token to an integer and store it in a dictionary
# BUG FIX: range(1, len(all_tokens)) produced one id too few, so zip()
# silently dropped the *last* token from the vocabulary.
term_id = range(1, len(all_tokens) + 1)
vocab = {k: v for k, v in zip(all_tokens, term_id)}
```
Then we store in two arrays the tokens and the synopsis and we'll use them as input in the _inv_index_ function that will build the inverted index
```
# we store the tokens and the synopsis in two numpy arrays
tok = list(f.vocab.keys())          # vocabulary tokens, in insertion order
tok = np.array(tok)
syn = []                            # placeholders for animes with no synopsis
for key in tqdm(D):
    a, b = f.text_preprocessing(D[key]['animeSynopsis'], lower = True, numbers = True, stemming = True)
    # stemmed form of the site's "No synopsis information has been added" placeholder
    if a == 'synopsi inform ad titl help improv databas ad synopsi':
        syn.append([''])
        f.preproc_D[key]['animeSynopsis'] = [' ']
    else:
        # NOTE(review): only the placeholder branch appends to `syn`; animes
        # with a real synopsis are stored in f.preproc_D but never added to
        # `syn`, so `syn` can be shorter than D — confirm this is intentional.
        f.preproc_D[key]['animeSynopsis'] = b
syn = np.array(syn)
# we compute the inverted index
inv_index = f.inverted_index(tok, syn)
```
### Executing the queries
Given a query, that we let the user enter:
*saiyan race*
the Search Engine is supposed to return a list of documents.
**What documents do we want?**
Since we are dealing with conjunctive queries (AND), each of the returned documents should contain all the words in the query. The final output of the query must return, if present, the following information for each of the selected documents:
- **animeTitle**
- **animeDescription**
- **Url**
```
f.search()
```
## Cosine similarity and Tf-idf
At this point we would like to have a more powerful search engine that given a query will compute the cosine similarity and return the best ranked results by using a MinHeap
Our second Inverted Index is of this format:
{
term_id_1:[(document1, tfIdf_{term,document1}), (document2, tfIdf_{term,document2}), (document4, tfIdf_{term,document4}), ...],
term_id_2:[(document1, tfIdf_{term,document1}), (document3, tfIdf_{term,document3}), (document5, tfIdf_{term,document5}), (document6, tfIdf_{term,document6}), ...],
...}
Practically, for each word we want the list of documents in which it is contained in, and the relative *tfIdf* score.
**Tip**: to compute the tfidf you can also use the sci-kit library
```
# build {anime_id: preprocessed token list} for every synopsis
anime_id, syn = [], []
for key in tqdm(f.D):
    anime_id.append(key)
    _, tokens = f.text_preprocessing(f.D[key]['animeSynopsis'], lower = True, numbers = True, stemming = True)
    syn.append(tokens)
synopsis = dict(zip(anime_id, syn))
```
Then we compute the tf-idf as follows:
It is the combination of *Term Frequency (TF)* and *Inverse Data Frequency (IDF)*.
TF is the number of times a word *t* appears in a document *d* divided by the total number of words in the document. Every document has its own term frequency:
$tf_{t,d}=\frac{n_{t,d}}{\sum_{t'\in d} n_{t',d}}$
The IDF is a measure of how much information the word provides, i.e. if it's common or rare across all documents.
IDF is the log of the number of all documents *N* divided by the number of documents *d* that contain the word *t*. IDF determines the weight of rare words across all documents in the corpus:
$idf(t,D)=\log \left(\frac{N}{| \{ d\in D: t\in d \} | }\right)$
TF-IDF is given by the multiplication of TF and IDF:
$w_{t,d,D}=tf_{t,d} \times idf(t,D)$
```
tok = list(f.vocab.keys())
tok = np.array(tok)
for j in tqdm(range(0, len(tok))):
term_j, k = tok[j], 0
for i in f.inv_index[term_j]:
try:
doc_i = synopsis[i]
tfidf = ( doc_i.count(term_j) / len(doc_i) ) * ( np.log10( len(synopsis) / len(f.inv_index[term_j]) ))
f.inv_index[term_j][k] = (i, tfidf)
k += 1
except KeyError: pass
continue
```
### Execute the query
```
f.search_cosine()
```
## Conclusions
This is a naive search engine that could be improved, for example, by using hash functions for the queries, by changing the way a query is computed (e.g. adding filters), or by defining a more discriminative score.
| github_jupyter |
# Iterative Solvers 4 - Preconditioning
## The basic idea
For both the GMRES method and CG we have seen that the eigenvalue distribution is crucial for fast convergence. In both cases we would like the eigenvalues of the matrix be clustered close together and be well separated from zero. Unfortunately, in many applications the matrices that arise naturally are badly behaved in this respect. Preconditioning is a strategy to try to modify a linear system so that it becomes more amenable to iterative solvers. Preconditioning is one of the most active research areas and crucial for the solution of linear systems of equations with millions or even billions of unknowns.
Consider the linear system of equations
$$
Ax = b
$$
In its basic form the idea of preconditioning is to multiply the system with a matrix $P^{-1}$ that is some kind of approximation to $A^{-1}$, that is $P^{-1}\approx A^{-1}$ in some sense (making this precise is the difficult bit).
We obtain either the left-preconditioned system
$$
P^{-1}Ax = P^{-1}b
$$
or the right-preconditioned system
$$
AP^{-1}y = b,
$$
where in the latter case we then additionally need to solve $Px=y$.
Classes of preconditioners include
* SPAI (Sparse Approximate Inverses)
* Incomplete LU Decomposition
* Incomplete Cholesky Decomposition
* Splitting Preconditioners
* Algebraic Multigrid Methods
These are also known as **algebraic preconditioners**. They consider the matrix $A$ and try to find an approximation to $A$ that is easily invertible.
A different class of preconditioners are **analytic preconditioners**. These are preconditioners that are often constructed as problems to PDEs that are easier to solve than the original PDE but still approximate to some sense the underlying physics of the problem.
## Sparse Approximate Inverse
As example of the class of algebraic preconditioners we consider here the Sparse Approximate Inverse (SPAI). Incomplete LU decompositions will be discussed later on. We note that SPAI is a technique that works well in certain cases, but is not suitable in others. **There is no general preconditioning technique that always works well.**
We denote by $\|A\|_F$ the Frobenius norm of a matrix $A$ defined by
$$
\|A\|_F^2 := \sum_{i, j}|a_{ij}|^2.
$$
The idea of SPAI is now to try to find a matrix $M := P^{-1}$ such that
$$
F(M) := \|I - AM\|_F
$$
is small. The minimum of this function is obviously reached for $M = A^{-1}$. But this is usually not practical. Instead, we try to find a successive sequence of matrices $M_k$ that approaches the minimum of the function $F$.
There are many ways to define an approximate minimization procedure to minimize $F$. The following is a global minimum residual algorithm, described by Saad in [Iterative Methods for Sparse Linear Systems](https://www-users.cs.umn.edu/~saad/IterMethBook_2ndEd.pdf).
$$
\begin{align}
C_k &= A M_k\nonumber\\
G_k &= I - C_k\nonumber\\
\alpha_k &=\text{tr}(G_k^TAG_k) / \|AG_k\|_F^2\nonumber\\
M_{k+1} &= M_k + \alpha_k G_k\nonumber\\
\end{align}
$$
In each step of the algorithm the matrix $M_k$ becomes slightly denser. Hence, in practice this is often combined with a numerical drop strategy for entries of $M$.
The following code implements this method. As starting matrix $M_0$ we choose $M_0 = \frac{2}{\|AA^T\|_1}A$, which was recommended by Chow and Saad in [Approximate Inverse Preconditioners via Sparse-Sparse Iterations](https://dl.acm.org/doi/10.1137/S1064827594270415). Note that in the following code we did not implement dropping of values. For practical purposes this is essential and strategies are discussed in the paper by Chow and Saad.
```
def spai(A, m):
    """Perform m steps of the global minimal-residual SPAI iteration.

    Builds a sparse approximate inverse M of A by approximately minimising
    the Frobenius norm ||I - AM||_F (Saad's global MR algorithm), starting
    from the Chow & Saad guess M0 = 2/||A A^T||_1 * A.

    Parameters
    ----------
    A : square scipy sparse matrix.
    m : int, number of update steps (M densifies with each step).

    Returns
    -------
    Sparse matrix M approximating A^{-1}.
    """
    # NOTE: the original also imported scipy.sparse.diags here; it was unused.
    from scipy.sparse import identity
    from scipy.sparse.linalg import onenormest

    n = A.shape[0]
    ident = identity(n, format='csr')

    # Chow & Saad starting guess
    alpha = 2 / onenormest(A @ A.T)
    M = alpha * A

    for index in range(m):
        C = A @ M
        G = ident - C                 # current residual I - AM
        AG = A @ G
        # optimal step length: tr(G^T A G) / ||AG||_F^2
        trace = (G.T @ AG).diagonal().sum()
        alpha = trace / np.linalg.norm(AG.data)**2
        M = M + alpha * G
    return M
```
We run the code with the following matrix, which is a slightly shifted version of the discrete 3-point second order differential operator.
```
import numpy as np
from scipy.sparse import diags

# shifted 3-point second-difference operator: 2.001 on the main diagonal,
# -1 on the first super- and sub-diagonals (CSR, 1000 x 1000)
n = 1000
offsets = [0, 1, -1]
data = [2.001 * np.ones(n), -1. * np.ones(n - 1), -1. * np.ones(n - 1)]
A = diags(data, offsets=offsets, shape=(n, n), format='csr')
```
The condition number without the preconditioner is
```
np.linalg.cond(A.todense())
```
Let us now generate the preconditioner.
```
M = spai(A, 50)  # 50 SPAI steps; M approximates A^{-1}
%matplotlib inline
from matplotlib import pyplot as plt
# visualise the sparsity pattern of the preconditioner
fig = plt.figure(figsize=(8 ,8))
ax = fig.add_subplot(111)
ax.spy(M, markersize=1)
```
Let us check the condition number of the right-preconditioned system.
```
np.linalg.cond(A.todense() @ M.todense())
```
It has been reduced by a factor 100. This is a good sign. We are now running CG for the preconditioned and non-preconditioned system.
```
%matplotlib inline
from matplotlib import pyplot as plt
from scipy.sparse.linalg import cg
n = A.shape[0]
b = np.ones(n)
residuals = []
callback = lambda x: residuals.append(np.linalg.norm(A @ x - b))
x, _ = cg(A, b, callback=callback)
residuals_preconditioned = []
callback = lambda x: residuals_preconditioned.append(np.linalg.norm(A @ x - b) / np.linalg.norm(b))
x, _ = cg(A, b, M=M, callback=callback)
fig = plt.figure(figsize=(8, 8))
ax = fig.add_subplot(111)
ax.semilogy(residuals, 'k--')
ax.semilogy(residuals_preconditioned, 'b--')
ax.set_ylabel('relative residual')
fig.legend(['no preconditioning', 'SPAI'], loc='lower center', fancybox=True, shadow=True)
```
We can see a significant improvement in the number of iterations. However, the comparison is not quite fair. Setting up the preconditioner took time, which should be taken into account. Just considering the number of iterations is not sufficient. In practice, the overall computation time is the much more important measure. We have implemented here a very primitive variant of SPAI that should not be used in practice. For practical alternatives see the cited literature. For the type of matrix that $A$ is there are also much better preconditioners available, some of which we will encounter later.
## A note on preconditioned Conjugate Gradient
We have passed here the preconditioner as a matrix into the CG algorithm. This is possible if the preconditioner is also symmetric, positive definite. This is the case in our example. We are not going to discuss the details of preconditioned conjugate gradients in more details here and refer to the book by Saad.
| github_jupyter |
## In this note, we will discuss different numerical algorithm schemes for solving the shock-wave problem
In order to compare different types of algorithms, we will consider the following 1D advection equation,
$$ \partial_t u = - v \partial_x u $$
For a given initial condition $u(t=t_0, x)$, it's solution at later time $u(t, x)$ is known analytically,
$$ u(t, x) = u(t_0, x - vt) $$
Here, we first set up our initial condition $u(t=t_0, x)$ for this 1D advection problem. We choose $v = 0.5$.
```
%matplotlib inline
from numpy import *
from os import path
import matplotlib.pyplot as plt
#initial setup
v = 0.5                      # constant advection speed
x = linspace(-2, 5, 701)     # uniform spatial grid
dx = x[1] - x[0]
u0 = zeros(len(x))
idx = (x > -1) & (x < 0)     # support of the initial profile
u0[idx] = 1.0 # square
#u0[idx] = -x[idx] # triangle
plt.plot(x, u0, linewidth=2)
plt.xlim([-2, 2]);
plt.ylim([0, 1.2]);
# prepare the analytic solution
def get_analytic_solution(u0, delta_x, delta_t, n_step):
    """Exact advection solution: `u0` shifted right by v * n_step * delta_t.

    The shift is rounded down to an integer number of grid cells; cells that
    the profile has moved past are left at zero.  Uses the module-level
    advection speed `v`.

    BUG FIX: when the rounded shift is 0 the original slice `u0[:-0]` was
    empty and the assignment raised a broadcast error; return the unshifted
    profile in that case.
    """
    u_analytic = zeros(len(u0))
    x_shift = int(n_step*delta_t/delta_x*v)   # shift measured in grid cells
    if x_shift == 0:
        u_analytic[:] = u0
    else:
        u_analytic[x_shift:] = u0[:-x_shift]
    return(u_analytic)
```
### Forward-time centered-space (FTCS) method
The simplest numerical scheme would be
$$ u_{i}^{n+1} = u_{i}^n - \frac{v \Delta t}{2 \Delta x}(u_{i+1}^n - u_{i-1}^n). $$
This numerical scheme gives $ O((\Delta x)^2) $ in $x$ and $ O(\Delta t) $ in time.
```
def evolve_FTCS_method(u_init, delta_x, delta_t, n_step):
    """Evolve `u_init` for `n_step` steps of the FTCS scheme.

    Forward Euler in time, centered differences in space; the two domain
    edges fall back to one-sided differences.  Uses the module-level
    advection speed `v` and returns the final profile.
    """
    u_old = copy(u_init)
    u_new = zeros(len(u_init))
    last = len(u_old) - 1
    for _ in range(n_step):
        for j, u_j in enumerate(u_old):
            if j == 0:
                # forward difference at the left edge
                u_new[j] = u_j - v*delta_t/delta_x*(u_old[j+1] - u_j)
            elif j == last:
                # backward difference at the right edge
                u_new[j] = u_j - v*delta_t/delta_x*(u_j - u_old[j-1])
            else:
                u_new[j] = (u_j - v*delta_t/(2.*delta_x)
                            *(u_old[j+1] - u_old[j-1]))
        u_old = copy(u_new)
    return(u_new)
# numerical evolution
dt = 0.001 # make sure delta_t/delta_x < 1
nt = 200 # evolve 200 time step
u_analytic = get_analytic_solution(u0, dx, dt, nt)
u_now = evolve_FTCS_method(u0, dx, dt, nt)
# check numerical solution: analytic (blue line) vs FTCS result (red dots)
fig = plt.figure()
plt.plot(x, u_analytic, '-b', linewidth=2)
plt.plot(x, u_now, 'or')
plt.xlim([-2, 2])
plt.ylabel(r'$u$', fontsize=20)
plt.xlabel(r'$x$', fontsize=20);
```
### Upwind scheme
To fix this problem, we can try the first scheme for the spatial derivative,
$$ u_{i}^{n+1} = u_{i}^n - \frac{v \Delta t}{\Delta x}(u_{i}^n - u_{i-1}^n). $$
This numerical scheme gives $ O((\Delta x)) $ in $x$ and $ O(\Delta t) $ in time.
```
def evolve_upwind_method(u_init, delta_x, delta_t, n_step):
    """Evolve `u_init` for `n_step` steps of the first-order upwind scheme.

    Backward (upwind, for v > 0) difference everywhere except the left edge,
    which uses a forward difference.  Uses the module-level speed `v`.
    """
    u_old = copy(u_init)
    u_new = zeros(len(u_init))
    for _ in range(n_step):
        # forward difference at the left edge only
        u_new[0] = u_old[0] - v*delta_t/delta_x*(u_old[1] - u_old[0])
        for j in range(1, len(u_old)):
            u_new[j] = u_old[j] - v*delta_t/delta_x*(u_old[j] - u_old[j-1])
        u_old = copy(u_new)
    return(u_new)
# numerical evolution
dt = 0.001 # make sure delta_t/delta_x < 1
nt = 1000 # evolve 1000 time step
u_analytic = get_analytic_solution(u0, dx, dt, nt)
u_now = evolve_upwind_method(u0, dx, dt, nt)
# check numerical solution: analytic (blue line) vs upwind result (red dots)
fig = plt.figure()
plt.plot(x, u_analytic, '-b', linewidth=2)
plt.plot(x, u_now, 'or')
plt.xlim([-2, 2])
plt.ylabel(r'$u$', fontsize=20)
plt.xlabel(r'$x$', fontsize=20)
plt.ylim([-0.05, 1.2]);
```
The positivity is maintained in this up-wind scheme. However, as the system evolves in time, the shape of the solution gets more and more distorted. (try nt = 1000)
This is because the higher order correction to the spatial derivatives in this scheme is
$$ \frac{u_i^n - u_{i-1}^n}{\Delta x} = \partial_x u(t_n, x_i) - \frac{\Delta x}{2} \partial_x^2 u(t_n, x_i) + O((\Delta x)^2)$$
The term $\partial_x^2 u(t_n, x_i)$ generates too much numerical viscosity, which smear out the sharpness of the shape of the function.
Thus, we need better discretization scheme which ensure the positivity and achieve higher order accuracy in $\Delta x$.
## Finite Volume Methods
For a fluid cell $i$ the conservation law equation can be written as,
$$ \bar{u}_i^{n+1} = \bar{u}_i^n - \frac{1}{\Delta x} \int_{t_n}^{t_{n+1}} dt \bigg(v \bar{u}^n_{i+1/2} - v \bar{u}^n_{i-1/2} \bigg) $$
where $i\pm1/2$ are the cell boundaries.
Approximating the time integral by midpoint rule, we get
$$ \bar{u}_i^{n+1} = \bar{u}_i^n - \frac{v\Delta t}{\Delta x} \bigg( \bar{u}^{n+1/2}_{i+1/2} - \bar{u}^{n+1/2}_{i-1/2} \bigg) $$
In order to evaluate the values of $\bar{u}$ at the boundaries $i\pm1/2$, we can use a staggered method. In the first step, we evaluate $\bar{u}^{n+1}$ at $i+1/2$ and second step we jump half step back to $\bar{u}^{n+2}_{i}$.
$$ \bar{u}^{n+1}_{i+1/2} = \frac{\bar{u}^n_{i+1} + \bar{u}^n_i}{2} - \frac{v \Delta t}{\Delta x} \bigg( \bar{u}^{n+1/2}_{i+1} - \bar{u}^{n+1/2}_{i} \bigg) $$
Here we use $\bar{u}^{n}_{i+1/2} = (\bar{u}^n_{i+1} + \bar{u}^n_i)/2$ at the current time step $t_n$. The halfway time step quantities can be evaluated using the forward Euler method,
$$ \bar{u}^{n+1/2}_i = \bar{u}^n_i - \frac{\Delta t}{2} (\partial_x u)^n_i $$
This is a generalized Lax-Friedrichs method.
### generalized minmod flux limiter
To evaluate the spatial derivative, $(\partial_x u)^n_i$, we already know that the central difference scheme could be unstable when the gradient is large. But it is OK to use in the smooth region. So here we introduce a scheme which can switch between centeral difference scheme and first order scheme based on the local gradient:
$$ (\partial_x u)^n_i = \left\{ \begin{array}{cc} 0 & \mbox{if } \bar{u}_i^n < \bar{u}^n_{i\pm1} \mbox{ or } \bar{u}_i^n > \bar{u}^n_{i\pm1} \\ sign(\bar{u}^n_{i+1} - \bar{u}^n_i)min \left(\theta \frac{\vert \bar{u}_{i+1}^n - \bar{u}_{i}^n \vert}{\Delta x}, \frac{\vert \bar{u}_{i+1}^n - \bar{u}_{i-1}^n \vert}{2 \Delta x} , \theta \frac{\vert \bar{u}_{i}^n - \bar{u}_{i-1}^n \vert}{\Delta x} \right) & \mbox{otherwise} \end{array} \right.$$
The the parameter $\theta$ is called the flux limiter, $1 \le \theta < 2$ in general.
```
def minmod(u_left, u_mid, u_right, delta_x, theta=1.8):
    """Generalized minmod slope limiter.

    Returns 0 at a local extremum (both neighbours on the same side of
    `u_mid`); otherwise the smallest-magnitude candidate among the two
    theta-scaled one-sided slopes and the centered slope.
    """
    if (u_left - u_mid) * (u_right - u_mid) > 0:
        return(0)
    left_slope = (u_mid - u_left) / delta_x
    right_slope = (u_right - u_mid) / delta_x
    centered_slope = (u_right - u_left) / (2. * delta_x)
    candidates = [left_slope*theta, centered_slope, right_slope*theta]
    # increasing data -> clip from above; decreasing -> clip from below
    return(min(candidates) if u_right > u_mid else max(candidates))
```
### Generalized Lax-Friedrichs method
```
def evolve_LaxFriedrichs_method(u_init, delta_x, delta_t, n_step):
    """Evolve `u_init` with the staggered generalized Lax-Friedrichs scheme.

    Predictor: minmod-limited slopes advance each cell half a time step.
    Corrector: staggered update alternating between the i+1/2 and i-1/2
    grids on even/odd steps.  Uses the module-level speed `v`.

    BUG FIXES vs. the original:
    * the time loop now runs `n_step` times instead of reading the global `nt`;
    * the limited slope is stored per cell (`dudx[i]`) instead of overwriting
      a single scalar, so the predictor uses the *local* slope everywhere
      (matching the NT implementation below).
    """
    u_current = copy(u_init)
    u_next = zeros(len(u_init))
    dudx = zeros(len(u_init))
    for it in range(n_step):
        # limited slope in every cell (edges fall back to one-sided stencils)
        for i in range(0, len(u_current)):
            if i == 0:
                dudx[i] = minmod(u_current[i], u_current[i], u_current[i+1], delta_x)
            elif i == len(u_current)-1:
                dudx[i] = minmod(u_current[i-1], u_current[i], u_current[i], delta_x)
            else:
                dudx[i] = minmod(u_current[i-1], u_current[i], u_current[i+1], delta_x)
        u_half = u_current - delta_t/2.*dudx   # predictor (half time step)
        for i in range(0, len(u_current)):
            if it%2 == 0: # staggered evolution between i and i+1/2
                if i == len(u_current) - 1:
                    u_next[i] = ((u_current[i] + u_current[i-1])/2.
                                 - v*delta_t/delta_x*(u_half[i] - u_half[i-1]))
                else:
                    u_next[i] = ((u_current[i+1] + u_current[i])/2.
                                 - v*delta_t/delta_x*(u_half[i+1] - u_half[i]))
            else:
                if i == 0:
                    u_next[i] = ((u_current[i+1] + u_current[i])/2.
                                 - v*delta_t/delta_x*(u_half[i+1] - u_half[i]))
                else:
                    u_next[i] = ((u_current[i] + u_current[i-1])/2.
                                 - v*delta_t/delta_x*(u_half[i] - u_half[i-1]))
        u_current = copy(u_next)
    return(u_next)
# check numerical solution
dt = 0.001 # make sure delta_t/delta_x < 1
nt = 1000 # evolve 1000 time step
u_analytic = get_analytic_solution(u0, dx, dt, nt)
u_now = evolve_LaxFriedrichs_method(u0, dx, dt, nt)
# analytic (blue line) vs generalized Lax-Friedrichs result (red dots)
fig = plt.figure()
plt.plot(x, u_analytic, '-b', linewidth=2)
plt.plot(x, u_now, 'or')
plt.xlim([-2, 2]);
plt.ylabel(r'$u$', fontsize=20);
plt.xlabel(r'$x$', fontsize=20);
plt.ylim([-0.05, 1.2]);
```
We can see that the numerical viscosity is still quite large in this scheme. From the Taylor expansion, we find
$$ \frac{\bar{u}^n_{i+1} + \bar{u}^n_i}{2} = \bar{u}^n_{i+1/2} + \frac{(\Delta x)^2}{8} \partial_x^2 u^n_{i+1/2} $$
The numerical viscosity comes with $\mathcal{O}(\Delta x^2)$. We need higher order scheme.
### Second order Nessyahu-Tadmor (NT) Scheme
To improve the estimation of $\bar{u}^n_{i+1/2}$, we can use additional information from the first order derivative,
$$ \bar{u}^n_{i+1/2} = \bar{u}^n_{i} + \frac{\Delta x}{2} (\partial_x u)_i^n \quad \mbox{ or } \quad \bar{u}^n_{i+1/2} = \bar{u}^n_{i+1} - \frac{\Delta x}{2} (\partial_x u)_{i+1}^n$$
In this case, we add a correction term,
$$ \bar{u}^{n+1}_{i+1/2} = \frac{\bar{u}^n_{i+1} + \bar{u}^n_i}{2} - \frac{v \Delta t}{\Delta x} \bigg( \bar{u}^{n+1/2}_{i+1} - \bar{u}^{n+1/2}_{i} \bigg) - \frac{\Delta x}{4}\bigg( (\partial_x u)^n_{i+1} - (\partial_x u)^n_i \bigg) $$
The last term cancels the leading order numerical viscosity in the generalized Lax-Friedrichs method. This scheme is $\mathcal{O}(\Delta x^4)$ in the smooth region.
```
def evolve_NT_method(u_init, delta_x, delta_t, n_step):
    """Second-order Nessyahu-Tadmor scheme.

    Staggered Lax-Friedrichs predictor/corrector plus a slope-correction term
    that cancels the leading-order numerical viscosity.  Uses the
    module-level speed `v`.

    BUG FIX: the time loop now runs `n_step` times instead of silently
    reading the global `nt`.
    """
    u_current = copy(u_init)
    u_next = zeros(len(u_init))
    u_half = zeros(len(u_init))
    dudx = zeros(len(u_init))
    for it in range(n_step):
        # limited slope in every cell (edges fall back to one-sided stencils)
        for i in range(0, len(u_current)):
            if i == 0:
                dudx[i] = minmod(u_current[i], u_current[i], u_current[i+1], delta_x)
            elif i == len(u_current)-1:
                dudx[i] = minmod(u_current[i-1], u_current[i], u_current[i], delta_x)
            else:
                dudx[i] = minmod(u_current[i-1], u_current[i], u_current[i+1], delta_x)
        u_half = u_current - delta_t/2.*dudx   # predictor (half time step)
        for i in range(0, len(u_current)):
            if it%2 == 1: # staggered evolution between i and i+1/2
                if i == len(u_current) - 1:
                    u_next[i] = ((u_current[i] + u_current[i-1])/2.
                                 - v*delta_t/delta_x*(u_half[i] - u_half[i-1])
                                 - delta_x/4.*(dudx[i] - dudx[i-1]))
                else:
                    u_next[i] = ((u_current[i+1] + u_current[i])/2.
                                 - v*delta_t/delta_x*(u_half[i+1] - u_half[i])
                                 - delta_x/4.*(dudx[i+1] - dudx[i]))
            else:
                if i == 0:
                    u_next[i] = ((u_current[i+1] + u_current[i])/2.
                                 - v*delta_t/delta_x*(u_half[i+1] - u_half[i])
                                 - delta_x/4.*(dudx[i+1] - dudx[i]))
                else:
                    u_next[i] = ((u_current[i] + u_current[i-1])/2.
                                 - v*delta_t/delta_x*(u_half[i] - u_half[i-1])
                                 - delta_x/4.*(dudx[i] - dudx[i-1]))
        u_current = copy(u_next)
    return(u_next)
# check numerical solution
dt = 0.001 # make sure delta_t/delta_x < 1
nt = 1000 # evolve 1000 time step
u_analytic = get_analytic_solution(u0, dx, dt, nt)
u_now = evolve_NT_method(u0, dx, dt, nt)
# analytic (blue line) vs second-order NT result (red dots)
fig = plt.figure()
plt.plot(x, u_analytic, '-b', linewidth=2)
plt.plot(x, u_now, 'or')
plt.xlim([-2, 2]);
plt.ylabel(r'$u$', fontsize=20);
plt.xlabel(r'$x$', fontsize=20);
plt.ylim([-0.05, 1.2]);
```
The second order NT scheme works well in practice. However, one can not take $\Delta t$ to zero because the numerical viscosity behaves like $1/\Delta t$. It would be good to take the limit $\Delta t \rightarrow 0$ because one can then formulate the discretized problem as a set of coupled ordinary differential equations in time. Many techniques for the ordinary differential equations such as the Runge-Kutta methods become available to control the accuracy of the time evolution.
### The second order Kurganov-Tadmor (KT) algorithm
The idea of the KT algorithm is that the size of the cell which contains the discontinuity at the half integer point $x_{i+1/2}$ is controlled by the local propagation speed $c_{i+1/2}$. The cells containing the boundaries and the cells not containing the boundaries are independently evolved.
```
def evolve_KT_method(u_init, delta_x, delta_t, n_step):
    """Second-order Kurganov-Tadmor scheme with a two-stage (Heun) time step.

    Reconstructs left/right boundary values with the minmod limiter, forms
    the central flux H at each interface, and advances with a two-stage
    Runge-Kutta update.  Uses the module-level speed `v`.

    BUG FIX: the time loop now runs `n_step` times instead of silently
    reading the global `nt`.
    """
    u_current = copy(u_init)
    dudx = zeros(len(u_init))
    u_plus_half = zeros(len(u_init))
    u_minus_half = zeros(len(u_init))
    H = zeros(len(u_init))              # numerical flux at the cell interfaces
    u_half = copy(u_init)
    u_next = zeros(len(u_init))
    for it in range(n_step):
        for rk_step in range(2):
            for i in range(len(u_half)): # compute dudx using minmod flux limiter
                if i == 0:
                    dudx[i] = minmod(u_half[i], u_half[i], u_half[i+1], delta_x)
                elif i == len(u_current)-1:
                    dudx[i] = minmod(u_half[i-1], u_half[i], u_half[i], delta_x)
                else:
                    dudx[i] = minmod(u_half[i-1], u_half[i], u_half[i+1], delta_x)
            for i in range(len(u_half)): # reconstruct the interface values
                if i == len(u_half) - 1:
                    u_plus_half[i] = u_half[i] - delta_x/2.*dudx[i]
                else:
                    u_plus_half[i] = u_half[i+1] - delta_x/2.*dudx[i+1]
            u_minus_half = u_half + delta_x/2.*dudx
            c_i = v                     # local propagation speed (constant here)
            H = ((v*u_plus_half + v*u_minus_half)/2. - c_i/2.*(u_plus_half - u_minus_half))
            if rk_step == 0: # first RK stage: predictor stored in u_half
                for i in range(len(u_half)):
                    if i == 0:
                        u_half[i] = u_half[i] - delta_t/delta_x*(H[i+1] - H[i])
                    else:
                        u_half[i] = u_half[i] - delta_t/delta_x*(H[i] - H[i-1])
            else:            # second RK stage + Heun average with u_current
                for i in range(len(u_half)):
                    if i == 0:
                        u_next[i] = u_half[i] - delta_t/delta_x*(H[i+1] - H[i])
                    else:
                        u_next[i] = u_half[i] - delta_t/delta_x*(H[i] - H[i-1])
                u_next = 0.5*(u_current + u_next)
        u_current = copy(u_next) # update
        u_half = copy(u_next)
    return(u_next)
dt = 0.01 # make sure delta_t/delta_x < 1
nt = 100 # evolve 100 time step
u_analytic = get_analytic_solution(u0, dx, dt, nt)
u_now = evolve_KT_method(u0, dx, dt, nt)
# check numerical solution: analytic (blue line) vs KT result (red dots)
fig = plt.figure()
plt.plot(x, u_analytic, '-b', linewidth=2)
plt.plot(x, u_now, 'or')
plt.xlim([-2, 2])
plt.ylabel(r'$u$', fontsize=20)
plt.xlabel(r'$x$', fontsize=20)
plt.ylim([-0.05, 1.2]);
```
### SHASTA
```
def minmod_SHASTA(w0, w1, w2, w3, deltax):
    """SHASTA anti-diffusion limiter over four consecutive cell values.

    Returns 0 whenever consecutive slopes change sign; otherwise the
    smallest-magnitude candidate among the two outer one-sided slopes and
    one eighth of the central slope.
    """
    left = (w1 - w0)/deltax
    mid = (w2 - w1)/deltax
    right = (w3 - w2)/deltax
    if left*mid < 0 or mid*right < 0:
        return(0.0)
    candidates = [left, 1./8.*mid, right]
    return(min(candidates) if mid > 0 else max(candidates))
def evolve_SHASTA_method(u_init, delta_x, delta_t, n_step):
    """SHASTA flux-corrected transport scheme.

    Transport + diffusion stage (producing w) followed by a limited
    anti-diffusion stage.  Uses the module-level speed `v`.

    BUG FIX: the time loop now runs `n_step` times instead of silently
    reading the global `nt`.
    """
    epsilon = v*delta_t/delta_x
    u_current = copy(u_init)
    u_next = zeros(len(u_init))
    dudx = zeros(len(u_init))
    u_half = zeros(len(u_init))
    w_next = zeros(len(u_init))
    dwdx = zeros(len(u_init))
    for it in range(n_step):
        # limited slope for the half-step predictor
        for i in range(0, len(u_current)):
            if i == 0:
                dudx[i] = minmod(u_current[i], u_current[i], u_current[i+1], delta_x)
            elif i == len(u_current)-1:
                dudx[i] = minmod(u_current[i-1], u_current[i], u_current[i], delta_x)
            else:
                dudx[i] = minmod(u_current[i-1], u_current[i], u_current[i+1], delta_x)
        u_half = u_current - delta_t/2.*dudx
        # transport + diffusion stage
        for i in range(0, len(u_current)):
            if i == 0:
                w_next[i] = (u_current[i] - epsilon*(u_half[i+1] - u_half[i])
                             + 1./8.*(u_current[i+1] - 2.*u_current[i] + u_current[i]))
            elif i == len(u_current) - 1:
                w_next[i] = (u_current[i] - epsilon*(u_half[i] - u_half[i-1])
                             + 1./8.*(u_current[i] - 2.*u_current[i] + u_current[i-1]))
            else:
                w_next[i] = (u_current[i] - epsilon/2.*(u_half[i+1] - u_half[i-1])
                             + 1./8.*(u_current[i+1] - 2.*u_current[i] + u_current[i-1]))
        for i in range(0, len(u_current)): # compute dw^{i+1}/dx
            if i == 0:
                dwdx[i] = minmod_SHASTA(w_next[i], w_next[i], w_next[i+1],
                                        w_next[i+2], delta_x)
            elif i == len(u_current) - 2:
                dwdx[i] = minmod_SHASTA(w_next[i-1], w_next[i], w_next[i+1],
                                        w_next[i+1], delta_x)
            elif i == len(u_current) - 1:
                dwdx[i] = minmod_SHASTA(w_next[i-1], w_next[i], w_next[i],
                                        w_next[i], delta_x)
            else:
                dwdx[i] = minmod_SHASTA(w_next[i-1], w_next[i], w_next[i+1],
                                        w_next[i+2], delta_x)
        for i in range(0, len(u_current)): # anti-diffusion stage
            if i == len(u_current)-1:
                u_next[i] = w_next[i] - delta_x*(dwdx[i] - dwdx[i-1])
            else:
                u_next[i] = w_next[i] - delta_x*(dwdx[i+1] - dwdx[i])
        u_current = copy(u_next) # update
    return(u_next)
dt = 0.001 # make sure delta_t/delta_x < 1
nt = 100 # evolve 100 time step
u_analytic = get_analytic_solution(u0, dx, dt, nt)
u_now = evolve_SHASTA_method(u0, dx, dt, nt)
# check numerical solution: analytic (blue line) vs SHASTA result (red dots)
fig = plt.figure()
plt.plot(x, u_analytic, '-b', linewidth=2)
plt.plot(x, u_now, 'or')
plt.xlim([-2, 2]);
plt.ylabel(r'$u$', fontsize=20);
plt.xlabel(r'$x$', fontsize=20)
```
| github_jupyter |
## MODEL NORMALIZE: Normalize/Scale all the columns
### Notebook Plans:
- Run different types of models using only the default parameters
```
import pandas as pd
from sklearn.dummy import DummyRegressor
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import SGDRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.svm import SVR
from xgboost import XGBRegressor
from lightgbm import LGBMRegressor
import warnings
warnings.filterwarnings("ignore")
# load the competition train/test splits
train = pd.read_csv("../data/Train_maskedv2.csv")
test = pd.read_csv("../data/Test_maskedv2.csv")
train.info()
### Dropping columns "dw_12", "dw_13", "lan_13" as discussed in Basic_EDA notebook
# features / target split; `ward` is an identifier kept only for submission
X = train.drop(["dw_12", "dw_13", "lan_13", "target_pct_vunerable", "ward"], axis = 1)
y = train["target_pct_vunerable"]
test_like_train = test.drop(["dw_12", "dw_13", "lan_13", "ward"], axis = 1)
```
### SCALE THE COLUMNS
```
# Scaling utilities and CV-metric imports for this section.
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.metrics import mean_squared_error, make_scorer
import math as mt

ss = StandardScaler()
mm = MinMaxScaler()  # instantiated for comparison; not applied below

# Standardize the training features in place (zero mean, unit variance).
X = ss.fit_transform(X)
def RMSE_CV(model):
    """Print the 5-fold cross-validated RMSE of *model* on the global X, y.

    Computes sqrt(mean(per-fold MSE)), i.e. a pooled RMSE across folds.
    """
    mse_scorer = make_scorer(mean_squared_error)
    fold_mse = cross_val_score(model, X, y, cv=5, scoring=mse_scorer)
    cv_score = mt.sqrt(fold_mse.mean())
    print("RMSE for the CV:", cv_score)
def output_csv(model, file_name):
    """Fit *model* on the scaled training data and write test predictions to CSV.

    Parameters
    ----------
    model : a scikit-learn-compatible regressor (fit/predict interface)
    file_name : name of the CSV written under ../submission_files/

    Returns None (pandas `to_csv` with a path returns None).
    """
    model.fit(X, y)
    # BUG FIX: X was standardized with `ss` (StandardScaler) above, but the
    # original predicted on the raw test features. Apply the same fitted
    # scaler to the test set so train and test features share one scale.
    pred = model.predict(ss.transform(test_like_train))
    return pd.DataFrame({"ward": test["ward"], "target_pct_vunerable": pred}).to_csv("../submission_files/"+file_name, index = False)
# Instantiate each candidate regressor with its default hyperparameters.
lr = LinearRegression()
sgd = SGDRegressor()
xgb = XGBRegressor()
lgbm = LGBMRegressor()
svm = SVR()
gbr = GradientBoostingRegressor()
rf = RandomForestRegressor()
# Cross-validated RMSE for each model; %time also reports wall-clock cost,
# so speed and accuracy can be compared together.
%time RMSE_CV(lr)
%time RMSE_CV(sgd)
%time RMSE_CV(xgb)
%time RMSE_CV(lgbm)
%time RMSE_CV(svm)
%time RMSE_CV(gbr)
%time RMSE_CV(rf)
```
### TUNE MODEL "lgbm" to beat 6.0407
```
# Display the default LGBMRegressor configuration for reference.
lgbm
# Earlier tuning attempts, kept for the record:
##lgbm_tune = LGBMRegressor(silent = False, n_estimators = 200)
##lgbm_tune = LGBMRegressor(silent = False, n_estimators = 1000, learning_rate = 0.01)
##lgbm_tune = LGBMRegressor(silent = False, n_estimators = 1000, learning_rate = 0.01, colsample_bytree = 0.5)
# Final attempt: many slow-learning trees + column subsampling + L1 regularization.
lgbm_tune = LGBMRegressor(silent = False, n_estimators = 1000, learning_rate = 0.01, colsample_bytree = 0.5, reg_alpha = 1)
%time RMSE_CV(lgbm_tune)
```
### Conclusion:
- Model scores differ little from those in the MODEL_1 (unscaled) notebook
- This is expected for the tree-based ensemble models: they are largely insensitive to feature scaling, which is what we observe in this notebook
| github_jupyter |
```
# Project-local analysis helpers: RSA/factor utilities and the data loader.
from qmvpa import utils, factor, rsa
from data_loader_local import load_data
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from dep.utils import subset_units
from config import get_log_info
from itertools import product
# Notebook magics: inline figures; autosave every 5 seconds.
%matplotlib inline
%autosave 5
# Global seaborn styling for all figures in this notebook.
sns.set(style = 'white', context='poster', font_scale=1, rc={"lines.linewidth": 2})
# get roots: fixed cluster paths for logs, figures, and precomputed plot data.
log_root = '/tigress/qlu/logs/keras-resnet/log'
plt_root = '/tigress/qlu/logs/keras-resnet/plots'
pltdata_root = '/tigress/qlu/logs/keras-resnet/plots_data'
# define data name, model name for this analysis run
data_name = 'cifar100'
model_name = 'resnet18'
# load data; only the test labels and image metadata are kept here
# (the five leading underscores discard the train/test image arrays).
_, _, _, _, _, labels, data_info = load_data(data_name)
[n_classes, img_rows, img_cols, img_channels] = data_info
n_test_egs = len(labels)
# define params: which layers were logged, at which training epochs.
# NOTE(review): "subjs" presumably means independently trained model
# instances treated as subjects — confirm against get_log_info.
log_info_list = get_log_info(data_name, model_name)
n_subjs, layer_selected, n_layers, log_epochs, n_log_epochs = log_info_list
print('log epochs', log_epochs)
print('layer selected', layer_selected)
# collect precomputed RDMs: pre-allocate (n_layers x n_log_epochs) grids.
# Suffix meaning inferred from later usage — TODO confirm:
#   wRSM_* = within-subject RSM, iRSM_* = inter-subject RSM; the _n/_s
#   variants are whatever the upstream RSA script saved together per file.
wRSM_n_l2d = utils.list_2d(n_layers, n_log_epochs)
iRSM_n_l2d = utils.list_2d(n_layers, n_log_epochs)
wRSM_s_l2d = utils.list_2d(n_layers, n_log_epochs)
iRSM_s_l2d = utils.list_2d(n_layers, n_log_epochs)
# for each (layer, logged epoch) pair, load the precomputed RSA results
for (i_l, i_e) in product(range(n_layers), range(n_log_epochs)):
    l, e = layer_selected[i_l], log_epochs[i_e]
    # directory holding precomputed plot data for this epoch
    pltdata_dir = os.path.join(
        pltdata_root, data_name, model_name,
        'epoch_%.3d' % e, 'rsa')
    # RSA result path for layer l
    rsa_result_fname = 'wirsa_ns_l%.2d.npy' % (l)
    rsa_result_path = os.path.join(pltdata_dir, rsa_result_fname)
    # NOTE: the original also built plt_dir and an SRM result path here,
    # but neither value was ever read — removed as dead code.
    # unpack the four RSMs and store them in the grids
    wRSM_n, iRSM_n, wRSM_s, iRSM_s = np.load(rsa_result_path)
    wRSM_n_l2d[i_l][i_e] = wRSM_n
    iRSM_n_l2d[i_l][i_e] = iRSM_n
    wRSM_s_l2d[i_l][i_e] = wRSM_s
    iRSM_s_l2d[i_l][i_e] = iRSM_s
"""
collect correlations:
inter-subject RSM at time t vs. final within-subject RSM
"""
R_vals = np.zeros((n_layers, n_log_epochs))
P_vals = np.zeros((n_layers, n_log_epochs))
for (i_l, i_e) in product(range(n_layers), range(n_log_epochs)):
l, e = layer_selected[i_l], log_epochs[i_e]
R_vals[i_l, i_e], P_vals[i_l, i_e] = rsa.correlate_2RSMs(
iRSM_s_l2d[i_l][i_e], wRSM_n_l2d[i_l][-1])
if model_name == 'conv':
n_layers = 3
b_pals = sns.color_palette("Blues", n_colors = n_layers)
log_epochs_int = [int(log_epochs[i]) for i in range(len(log_epochs))]
f, ax = plt.subplots(1,1, figsize = (5, 2))
# plot a heatmap to get the color bar for neural network depth
temp_plt = sns.heatmap(
R_vals, cmap=sns.color_palette("Blues", n_colors = n_layers),
cbar_kws={'label': 'Layer depth', 'ticks' : [0, n_layers]},
ax = ax)
ax.cla()
for i_l in range(n_layers):
ax.plot(log_epochs_int, R_vals[i_l,:], color = b_pals[i_l], marker = '.')
title_text = """
%s, %s
""" % (model_name, data_name)
# title_text = """
# 2nd order correlation
# inter-subject RSM vs. final within-subject RSM
# model: %s, data: %s
# """ % (model_name, data_name)
ax.set_title(title_text)
ax.set_xlabel('Training epochs')
ax.set_ylabel('Linear corr.')
ax.set_ylim([0, 1.05]);
ax.set_xlim([0, log_epochs_int[-1]+1]);
sns.despine()
print(title_text)
temp_path = '/tigress/qlu/logs/temp/'
fname = 'deve_%s_%s_s.png' % (model_name, data_name)
f.savefig(os.path.join(temp_path, fname), bbox_inches='tight', dpi = 1000)
"""
collect correlations:
inter-subject RSM at time t vs. within-subject RSM at time t
"""
R_vals = np.zeros((n_layers, n_log_epochs))
P_vals = np.zeros((n_layers, n_log_epochs))
for (i_l, i_e) in product(range(n_layers), range(n_log_epochs)):
l, e = layer_selected[i_l], log_epochs[i_e]
R_vals[i_l, i_e], P_vals[i_l, i_e] = rsa.correlate_2RSMs(
iRSM_s_l2d[i_l][i_e], wRSM_n_l2d[i_l][i_e])
if model_name == 'conv':
n_layers = 3
b_pals = sns.color_palette("Blues", n_colors = n_layers)
log_epochs_int = [int(log_epochs[i]) for i in range(len(log_epochs))]
f, ax = plt.subplots(1,1, figsize = (5, 2))
temp_plt = sns.heatmap(
R_vals, cmap=sns.color_palette("Blues", n_colors = n_layers),
cbar_kws={'label': 'Layer depth', 'ticks' : [0, n_layers]},
ax = ax)
ax.cla()
for i_l in range(n_layers):
ax.plot(log_epochs_int, R_vals[i_l,:], color = b_pals[i_l], marker = '.')
title_text = """
%s, %s
""" % (model_name, data_name)
ax.set_title(title_text)
ax.set_xlabel('Training epochs')
ax.set_ylabel('Linear corr.')
ax.set_ylim([0, 1.05]);
ax.set_xlim([0, log_epochs_int[-1]+1]);
sns.despine()
```
| github_jupyter |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.