text stringlengths 2.5k 6.39M | kind stringclasses 3
values |
|---|---|
```
import numpy as np
import numpy.ma as ma
from astropy.table import QTable, Table
from astropy.io import fits
import sys
sys.path.insert(1,"/Users/kellydouglass/Documents/Research/Rotation_curves/RotationCurves/spirals/")
#sys.path.insert(1, '/home/kelly/Documents/RotationCurves/spirals/')
from DRP_rotation_curve import extract_data, extract_Pipe3d_data
from disk_mass import calc_mass_curve, fit_mass_curve
sys.path.insert(1, '/Users/kellydouglass/Documents/Research/Rotation_curves/Yifan_Zhang/RotationCurve/2D_RC/main/')
from rotation_curve_functions import disk_mass
import matplotlib.pyplot as plt
%matplotlib notebook
# Local data locations (machine-specific SDSS mirror); update per machine.
SDSS_FOLDER = '/Users/kellydouglass/Documents/Research/data/SDSS/'
# Pipe3d stellar-mass maps (DR15) and DAP velocity maps (DR16).
MASS_MAP_FOLDER = SDSS_FOLDER + 'dr15/manga/spectro/pipe3d/v2_4_3/2.4.3/'
VEL_MAP_FOLDER = SDSS_FOLDER + 'dr16/manga/spectro/analysis/v2_4_3/2.2.1/HYB10-GAU-MILESHC/'
MASS_CURVE_MASTER_FOLDER = '../spirals/Pipe3d-mass_curve_data_files/'
```
# Sample galaxy properties
```
#gal_ID = '7443-12705'
gal_ID = '8139-12703'
# MaNGA galaxies are identified as '<plate>-<IFU>'.
manga_plate, manga_IFU = gal_ID.split('-')
# Only the map mask and the r-band image are needed from the velocity-map
# file; the remaining returns are discarded.
_,_, map_mask, r_band,_,_,_,_,_,_,_ = extract_data(VEL_MAP_FOLDER, gal_ID)
# Stellar-mass surface-density map from Pipe3d.
sMass_density = extract_Pipe3d_data(MASS_MAP_FOLDER, gal_ID)
# r-band image with the velocity-map mask applied.
mr_band = ma.array(r_band, mask=map_mask)
'''
oneD_fit_file = '../spirals/DRPall-master_file_30.txt'
oneD_fit_parameters = QTable.read(oneD_fit_file, format='ascii.ecsv')
gal_oneD_fit_parameters_boolean = np.logical_and(oneD_fit_parameters['MaNGA_plate'] == int(manga_plate),
oneD_fit_parameters['MaNGA_IFU'] == int(manga_IFU))
gal_oneD_fit_parameters_row = oneD_fit_parameters[gal_oneD_fit_parameters_boolean]
'''
# Look up this galaxy's row in the master table of 2D map-fit results.
map_fit_file = '../spirals/DRP-master_file_vflag_BB_smooth1p85_mapFit_N2O2_HIdr2_morph_v6.txt'
map_fit_parameters = Table.read(map_fit_file, format='ascii.commented_header')
gal_map_fit_parameters_boolean = (map_fit_parameters['MaNGA_plate'] == int(manga_plate)) & \
(map_fit_parameters['MaNGA_IFU'] == int(manga_IFU))
gal_map_fit_parameters_row = map_fit_parameters[gal_map_fit_parameters_boolean]
H_0 = 100 # Hubble's Constant in units of h km/s/Mpc
c = 299792.458 # Speed of light in units of km/s
MANGA_FIBER_DIAMETER = 2*(1/60)*(1/60)*(np.pi/180) # angular fiber diameter (2") in radians
MANGA_SPAXEL_SIZE = 0.5*(1/60)*(1/60)*(np.pi/180) # spaxel size (0.5") in radians
#z = gal_oneD_fit_parameters_row['redshift'][0]
z = gal_map_fit_parameters_row['NSA_redshift'][0]
# Hubble-flow distance (cz = H0 * d, valid at low redshift).
dist_to_galaxy_Mpc = c*z/H_0
dist_to_galaxy_kpc = dist_to_galaxy_Mpc*1000
# Physical size (kpc) spanned by one spaxel at the galaxy's distance.
pix_scale_factor = dist_to_galaxy_kpc*np.tan(MANGA_SPAXEL_SIZE)
################################################################################
# Axis ratio (from photometry)
#-------------------------------------------------------------------------------
#axis_ratio = gal_oneD_fit_parameters_row['ba'][0]
axis_ratio = gal_map_fit_parameters_row['ba_map'][0]
################################################################################
################################################################################
# Center of galaxy
#
# Originally used photometric center, now using kinematic center
#-------------------------------------------------------------------------------
'''
center = np.unravel_index(ma.argmax(mr_band), mr_band.shape)
center_x = center[1]
center_y = center[0]
''';
center_x = gal_map_fit_parameters_row['x0_map'][0]
center_y = gal_map_fit_parameters_row['y0_map'][0]
################################################################################
################################################################################
# Systemic velocity (velocity at galaxy's center)
#-------------------------------------------------------------------------------
#v_sys = mHa_vel[center]
################################################################################
################################################################################
# Rotation angle (from photometry)
#-------------------------------------------------------------------------------
#phi = gal_oneD_fit_parameters_row['phi'][0].value
phi = gal_map_fit_parameters_row['phi_map'][0]
#phi_guess = find_phi(center, phi, mHa_vel)
################################################################################
################################################################################
# Maximum velocity
#-------------------------------------------------------------------------------
#v_max = gal_oneD_fit_parameters_row['avg_v_max'][0].value
v_max = gal_map_fit_parameters_row['Vmax_map'][0]
#v_max_index = np.unravel_index(ma.argmax(np.abs(mHa_vel)), mHa_vel.shape)
#v_max_guess = np.abs(mHa_vel[v_max_index]/np.sin(i_angle))
################################################################################
################################################################################
# Turn radius
#-------------------------------------------------------------------------------
#r_turn = gal_oneD_fit_parameters_row['avg_r_turn'][0].value
r_turn = gal_map_fit_parameters_row['Rturn_map'][0]
'''
r_turn_guess_spaxels,_ = deproject_spaxel(v_max_index,
center,
phi_guess,
i_angle)
r_turn_guess = 0.5*r_turn_guess_spaxels*pix_scale_factor
if r_turn_guess < 0.01:
r_turn_guess = 1.1*0.01
''';
################################################################################
################################################################################
# alpha
#-------------------------------------------------------------------------------
#alpha = gal_oneD_fit_parameters_row['avg_alpha'][0]
alpha = gal_map_fit_parameters_row['alpha_map'][0]
#alpha_guess = 2
################################################################################
map_shape = r_band.shape
```
# Test `calc_mass_curve`
```
# Build the galaxy's stellar-mass rotation curve from the Pipe3d
# stellar-mass surface-density map and the map-fit geometry loaded above.
mass_data_table = calc_mass_curve(sMass_density,
r_band,
map_mask,
center_x,
center_y,
axis_ratio,
phi,
z,
gal_ID)
```
# Test `fit_mass_curve`
```
from rotation_curve_functions import disk_vel
# Sample disk-velocity curve for a visual sanity check against the
# measured stellar rotation curve.
r_sample = np.linspace(0.01,15,100)
# disk_vel parameters to plot (see rotation_curve_functions for meaning).
test_params = [0.01, 2.5]
#v_sample = disk_vel(test_params, r_sample)
v_sample = disk_vel(r_sample, test_params[0], test_params[1])
plt.figure(figsize=(5,5))
plt.plot(mass_data_table['radius'], mass_data_table['star_vel'], '.')
plt.plot(r_sample, v_sample)
plt.xlabel('radius [kpc]')
plt.ylabel('$V_*$ [km/s]');
# Fit the disk-velocity model to the measured mass curve.
param_outputs = fit_mass_curve(mass_data_table, gal_ID)
mass_data_table
```
# Test `disk_mass`
```
# Disk mass (and its uncertainty) enclosed within the outermost measured
# radius of the mass curve.
M90_disk, M90_disk_err = disk_mass(param_outputs, mass_data_table['radius'][-1])
print(M90_disk, M90_disk_err)
```
| github_jupyter |
# Aspect-based sentiment classification via [PyABSA](https://github.com/yangheng95/PyABSA)
See [here](https://github.com/yangheng95/PyABSA/tree/release/demos/aspect_polarity_classification) for more usage examples.
```
!pip install pyabsa
```
## Find Available Checkpoints For **Current Version**
```
from pyabsa import available_checkpoints
# Query the remote registry for checkpoints compatible with this version.
checkpoint_map = available_checkpoints(from_local=False)
```
# Init a sentiment classifier from online checkpoint or local checkpoint
```
from pyabsa import APCCheckpointManager, ABSADatasetList
# Download (or load from local cache) the multilingual APC checkpoint.
sent_classifier = APCCheckpointManager.get_sentiment_classifier(checkpoint='multilingual',
auto_device=True, # Use CUDA if available
)
```
# Aspect Sentiment Inference
```
# Aspect terms are wrapped in [ASP]...[ASP]; an optional trailing
# '!sent! <label>' supplies the reference polarity for evaluation.
examples = [
'The [ASP]battery-life[ASP], and this [ASP]battery[ASP] is ok',
'The [ASP] battery-life [ASP] is bad',
'The [ASP] battery-life [ASP] is good',
'The [ASP] battery-life [ASP] ',
'Strong build though which really adds to its [ASP]durability[ASP] .', # !sent! Positive
'Strong [ASP]build[ASP] though which really adds to its durability . !sent! Positive',
'The [ASP]battery life[ASP] is excellent - 6-7 hours without charging . !sent! Positive',
'I have had my computer for 2 weeks already and it [ASP]works[ASP] perfectly . !sent! Positive',
'And I may be the only one but I am really liking [ASP]Windows 8[ASP] . !sent! Positive',
]
# Run single-example inference; results are also printed by the classifier.
for ex in examples:
result = sent_classifier.infer(ex, print_result=True)
```
# Batch Sentiment Inference
```
# Batch inference over a bundled inference dataset.
inference_sets = ABSADatasetList.Phone
results = sent_classifier.batch_infer(target_file=inference_sets,
print_result=True,
save_result=True,
ignore_error=False,
)
```
# Train a model (Fast-LCF) for Aspect-based sentiment classification
## We don't run training on Colab, as it is too slow
```
from pyabsa.functional import Trainer
from pyabsa.functional import APCConfigManager
from pyabsa.functional import ABSADatasetList
from pyabsa.functional import APCModelList
# Training configuration for the FAST-LCF-BERT model.
apc_config_english = APCConfigManager.get_apc_config_english()
apc_config_english.model = APCModelList.FAST_LCF_BERT
apc_config_english.num_epoch = 1
apc_config_english.evaluate_begin = 0
apc_config_english.pretrained_bert = 'microsoft/deberta-v3-base'
apc_config_english.similarity_threshold = 1
apc_config_english.max_seq_len = 80
apc_config_english.dropout = 0.5
apc_config_english.seed = 2672
apc_config_english.log_step = 50
apc_config_english.l2reg = 1e-8
apc_config_english.dynamic_truncate = True
apc_config_english.srd_alignment = True
Dataset = ABSADatasetList.Laptop14
# checkpoint_save_mode=0: do not save model checkpoints to disk.
sent_classifier = Trainer(config=apc_config_english,
dataset=Dataset,
checkpoint_save_mode=0,
auto_device=True
).load_trained_model()
```
# Train on your own dataset from a checkpoint
```
# Fine-tune FAST-LSA-T starting from a downloaded English checkpoint.
apc_config_english = APCConfigManager.get_apc_config_english()
apc_config_english.model = APCModelList.FAST_LSA_T
apc_config_english.evaluate_begin = 2
apc_config_english.similarity_threshold = 1
apc_config_english.max_seq_len = 80
apc_config_english.dropout = 0.5
apc_config_english.log_step = 5
apc_config_english.l2reg = 0.0001
apc_config_english.dynamic_truncate = True
apc_config_english.srd_alignment = True
checkpoint_path = APCCheckpointManager.get_checkpoint('english')
SemEval = ABSADatasetList.SemEval
sent_classifier = Trainer(config=apc_config_english,
dataset=SemEval,
from_checkpoint=checkpoint_path,
checkpoint_save_mode=1,
auto_device=True
).load_trained_model()
```
# We provide GloVe models and BERT-Baseline models, you can use them with some modification
## GloVe model training
```
from pyabsa import APCTrainer, APCConfigManager, GloVeAPCModelList, ABSADatasetList
# Put glove embedding under current path first if you dont want to download GloVe embedding
# GloVe-based (non-BERT) model training configuration.
apc_config_english = APCConfigManager.get_apc_config_glove()
apc_config_english.model = GloVeAPCModelList.TNet_LF
apc_config_english.num_epoch = 20
apc_config_english.cross_validate_fold = -1 # disable cross_validate, enable in {5, 10}
Dataset = ABSADatasetList.SemEval
sent_classifier = APCTrainer(config=apc_config_english, # set config=None will use the apc_config as well
dataset=Dataset, # train set and test set will be automatically detected
checkpoint_save_mode=1, # set model_path_to_save=None to avoid save model
auto_device=True # automatic choose CUDA or CPU
).train()
```
## BERT baseline model training
```
from pyabsa import APCTrainer, APCConfigManager, BERTBaselineAPCModelList, ABSADatasetList
# BERT-baseline (ASGCN) model training configuration.
apc_config_english = APCConfigManager.get_apc_config_bert_baseline()
apc_config_english.model = BERTBaselineAPCModelList.ASGCN_BERT
apc_config_english.num_epoch = 10
apc_config_english.evaluate_begin = 2
apc_config_english.max_seq_len = 100
apc_config_english.dropout = 0.5
apc_config_english.log_step = 5
apc_config_english.l2reg = 0.0005
apc_config_english.seed = 1
apc_config_english.use_syntax_based_SRD = True
apc_config_english.similarity_threshold = 1
apc_config_english.cross_validate_fold = -1 # disable cross_validate
laptop14 = ABSADatasetList.Laptop14
sent_classifier = APCTrainer(config=apc_config_english,
dataset=laptop14, # train set and test set will be automatically detected
checkpoint_save_mode=1, # None to avoid save model
auto_device=True # automatic choose CUDA or CPU
).train()
```
# Deployment Example
| github_jupyter |
```
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import math
import sys
import os
from pathlib import Path
from nilearn.plotting import plot_connectome
path_root = os.environ.get('DECIDENET_PATH')
path_code = os.path.join(path_root, 'code')
if path_code not in sys.path:
sys.path.append(path_code)
from dn_utils.plotting import aligned_imshow_cbar
from dn_utils.style import BLUE, ORANGE
%matplotlib inline
def distance(r1, r2):
    """Euclidean distance between two points in 3D space.

    Parameters
    ----------
    r1, r2 : sequence of float
        Coordinate triples (e.g. MNI x, y, z in mm).

    Returns
    -------
    float
        The Euclidean distance between the two points.
    """
    # math.dist replaces the hand-rolled sqrt/sum/zip chain and also
    # validates that both points have the same number of coordinates
    # instead of silently truncating to the shorter one.
    return math.dist(r1, r2)
def sphere_radius(volume):
    """Return the radius of a sphere with the given volume.

    Inverts V = (4/3) * pi * r**3 for r.
    """
    cubed_radius = volume * (3 / 4) / math.pi
    return cubed_radius ** (1 / 3)
# Output locations for the meta-analysis ROI atlas files.
path_derivatives = os.path.join(path_root, 'data/main_fmri_study/derivatives')
path_meta_roi = os.path.join(path_derivatives, 'ppi/parcellations/meta_roi')
roi_table_fname = os.path.join(path_meta_roi, 'meta_roi_table.csv')
roi_labels_fname = os.path.join(path_meta_roi, 'meta_roi_labels.txt')
roi_coords_fname = os.path.join(path_meta_roi, 'meta_roi_coords.txt')
# Create the output directory if it does not yet exist.
Path(path_meta_roi).mkdir(parents=True, exist_ok=True)
```
### Meta analysis data
Meta-analysis ALE data from Fouragnan et al., 2018 – "Separate representations of prediction error valence and surprise: Evidence from an fMRI meta analysis".
Table 2 data presenting ALE clusters for patterns:
- NEG > POS
- POS > NEG
```
# ALE clusters from Fouragnan et al. (2018), Table 2.
# Columns: abbrev, full name, hemisphere, x, y, z, cluster size, ALE score.
# NEG > POS pattern (tagged 'perr_dec' below):
ale_clusters_dec_data = [
['dMCC', 'Dorsomedial cingulate cortex', 'R', 2, 24, 36, 12712, 0.051],
['aINS', 'Anterior insula', 'R', 32, 24, -2, 6120, 0.062],
['aINS', 'Anterior insula', 'L', -32, 22, -4, 4880, 0.056],
['PAL', 'Pallidum', 'R', 12, 8, 4, 3360, 0.04],
['PAL', 'Pallidum', 'L', -14, 6, 2, 2520, 0.029],
['mFG1', 'Middle frontal gyrus 1', 'R', 38, 4, 32, 3152, 0.029],
['mFG2', 'Middle frontal gyrus 2', 'R', 30, 10, 56, 488, 0.021],
['mFG', 'Middle frontal gyrus', 'L', -28, 12, 60, 104, 0.019],
['IPL', 'Inferior parietal lobule', 'R', 40, -48, 42, 2416, 0.039],
['IPL', 'Inferior parietal lobule', 'L', -38, -48, 42, 2216, 0.043],
['mTG', 'Middle temporal gyrus', 'R', 60, -28, -6, 1192, 0.031],
['AMYG', 'Amygdala', 'R', 18, -6, -12, 704, 0.024],
['THA1', 'Thalamus 1', 'L', -12, -12, 10, 624, 0.025],
['THA2', 'Thalamus 2', 'L', -6, -26, 8, 280, 0.023],
['HAB', 'Habenula', 'R', 2, -20, -18, 312, 0.022],
['dlPFC', 'Dorsolateral prefrontal cortex', 'L', -44, 28, 32, 360, 0.020],
['dlPFC', 'Dorsolateral prefrontal cortex', 'R', 40, 34, 30, 344, 0.020],
['FUS', 'Fusiform area', 'L', -40, -62, -10, 272, 0.023],
['PREC', 'Precentral cortex', 'L', -52, 0, 34, 256, 0.021],
['dmOFC', 'Dorsomedial orbitofrontal cortex', 'R', 38, 58, -2, 192, 0.020],
['dmPFC', 'Dorsomedial prefrontal cortex', 'R', 20, 50, 4, 120, 0.018],
['STS', 'Superior temporal sulcus', 'R', 58, -42, 22, 120, 0.017],
]
# POS > NEG pattern (tagged 'perr_inc' below):
ale_clusters_inc_data = [
['vSTR', 'Ventral striatum', 'L', -12, 8, -4, 4880, 0.052],
['vSTR', 'Ventral striatum', 'R', 8, 8, -2, 2880, 0.038],
['vmPFC', 'Ventromedial prefrontal cortex', 'L', -2, 42, 0, 3416, 0.037],
['PCC', 'Posterior cingulate cortex 1', 'L', 0, -32, 36, 240, 0.016],
['PCC', 'Posterior cingulate cortex 2', 'L', 0, -36, 26, 88, 0.014],
['vlOFC', 'Ventrolateral orbitofrontal cortex', 'R', 32, 44, -10, 144, 0.015],
['dmPFC', 'Dorsomedial prefrontal cortex', 'L', -6, -56, 14, 96, 0.016],
['mPFC', 'Medial prefrontal cortex', 'L', -2, 46, 20, 88, 0.014],
]
# Tag each cluster with the network name used downstream for coloring.
for row in ale_clusters_dec_data:
row.append('perr_dec')
for row in ale_clusters_inc_data:
row.append('perr_inc')
columns = ['abbrev', 'name', 'hemisphere', 'x', 'y', 'z', 'cluster_size',
'ale_score', 'netName']
# Combine both cluster lists into a single ROI table.
df_rois = pd.DataFrame(
[{k: v for k, v in zip(columns, row)} for row in ale_clusters_dec_data] \
+ [{k: v for k, v in zip(columns, row)} for row in ale_clusters_inc_data])
# Radius a sphere with the reported cluster volume would have.
df_rois['approxRadius'] = df_rois['cluster_size'].map(sphere_radius)
# Actual sphere radius used for the atlas (fixed 4 mm for every ROI;
# shrunk further below for overlapping ROIs).
df_rois['radius(mm)'] = 4
df_rois
n_rois = len(df_rois)
# Pairwise Euclidean distances between ROI centers: fill the upper
# triangle (diagonal is zero), then mirror to make it symmetric.
distance_matrix = np.zeros((n_rois, n_rois))
for i in range(n_rois):
for j in range(i, n_rois):
distance_matrix[i, j] = distance(
df_rois.loc[i, ['x', 'y', 'z']],
df_rois.loc[j, ['x', 'y', 'z']]
)
distance_matrix = distance_matrix + distance_matrix.T
# ------------------------------------------------------------------
# Visualize the ROI-to-ROI distance matrix, annotating every cell and
# highlighting close pairs (< 10 mm) in bold white.
# ------------------------------------------------------------------
fig, ax = plt.subplots(figsize=(20, 20))
im = plt.imshow(distance_matrix, cmap='hot', clim=[10, 100])
aligned_imshow_cbar(ax, im)
for i in range(n_rois):
    for j in range(n_rois):
        if distance_matrix[i, j] < 10:
            annotation_color = 'w'
            font_weight = 'bold'
        else:
            annotation_color = 'k'
            font_weight = 'normal'
        # NOTE(fix): matplotlib's Text property is `fontweight` (all
        # lowercase); passing `fontWeight=` raises an AttributeError.
        text = ax.text(j, i, f'{distance_matrix[i, j]:2.1f}',
                       ha='center', va='center', color=annotation_color,
                       fontweight=font_weight)
ax.set_xticks(np.arange(n_rois))
ax.set_yticks(np.arange(n_rois))
ax.set_yticklabels(df_rois['abbrev'] + ' ' + df_rois['hemisphere'])
ax.set_xticklabels(df_rois['abbrev'] + ' ' + df_rois['hemisphere'])
plt.setp(ax.get_xticklabels(), rotation=45, ha="right", rotation_mode="anchor")
ax.set_title('Distance matrix for ROIs')
plt.show()
# Fix radius for overlapping ROIs
# (pairs flagged as close in the distance matrix above; shrink their
# spheres from 4 mm to 3 mm).
df_rois.loc[df_rois['abbrev'] == 'PAL', 'radius(mm)'] = 3
df_rois.loc[df_rois['abbrev'] == 'vSTR', 'radius(mm)'] = 3
# Save atlas
df_rois.to_csv(roi_table_fname, index=False)
# One '<abbrev>_<hemisphere>' label per line.
with open(roi_labels_fname, 'w') as f:
f.write('\n'.join(df_rois['abbrev'] + '_' + df_rois['hemisphere']))
# One 'x y z' coordinate triple per line.
with open(roi_coords_fname, 'w') as f:
f.write('\n'.join(df_rois[['x', 'y', 'z']].astype(str).agg(' '.join, axis=1)))
```
### Show custom ROIs in brain space
```
# Plot ROI centers on a glass brain: no edges (all-zero adjacency),
# nodes colored by network membership ('perr_dec' blue, others orange).
adjacency_matrix = np.zeros((n_rois, n_rois))
node_coords = np.array(df_rois.loc[:, ['x', 'y', 'z']])
node_color = df_rois['netName'].map(lambda net: BLUE if net == 'perr_dec' else ORANGE)
node_size = 55
plot_connectome(
adjacency_matrix=adjacency_matrix,
node_coords=node_coords,
node_color=list(node_color),
node_size=node_size,
title='meta ROIs',
display_mode='ortho'
)
plt.show()
```
| github_jupyter |
##### Copyright 2020 The TensorFlow Authors.
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# Tokenizing with TF Text
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/text/guide/tokenizers"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/text/blob/master/docs/guide/tokenizers.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/text/blob/master/docs/guide/tokenizers.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View on GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/text/docs/guide/tokenizers.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
</td>
<td>
<a href="https://tfhub.dev/google/zh_segmentation/1"><img src="https://www.tensorflow.org/images/hub_logo_32px.png" />See TF Hub models</a>
</td>
</table>
## Overview
Tokenization is the process of breaking up a string into tokens. Commonly, these tokens are words, numbers, and/or punctuation. The `tensorflow_text` package provides a number of tokenizers available for preprocessing text required by your text-based models. By performing the tokenization in the TensorFlow graph, you will not need to worry about differences between the training and inference workflows and managing preprocessing scripts.
This guide discusses the many tokenization options provided by TensorFlow Text, when you might want to use one option over another, and how these tokenizers are called from within your model.
## Setup
```
!pip install -q tensorflow-text
import requests
import tensorflow as tf
import tensorflow_text as tf_text
```
## Splitter API
The main interfaces are `Splitter` and `SplitterWithOffsets` which have single methods `split` and `split_with_offsets`. The `SplitterWithOffsets` variant (which extends `Splitter`) includes an option for getting byte offsets. This allows the caller to know which bytes in the original string the created token was created from.
The `Tokenizer` and `TokenizerWithOffsets` are specialized versions of the `Splitter` that provide the convenience methods `tokenize` and `tokenize_with_offsets` respectively.
Generally, for any N-dimensional input, the returned tokens are in a N+1-dimensional [RaggedTensor](https://www.tensorflow.org/guide/ragged_tensor) with the inner-most dimension of tokens mapping to the original individual strings.
```python
class Splitter {
@abstractmethod
def split(self, input)
}
class SplitterWithOffsets(Splitter) {
@abstractmethod
def split_with_offsets(self, input)
}
```
There is also a `Detokenizer` interface. Any tokenizer implementing this interface can accept a N-dimensional ragged tensor of tokens, and normally returns a N-1-dimensional tensor or ragged tensor that has the given tokens assembled together.
```python
class Detokenizer {
@abstractmethod
def detokenize(self, input)
}
```
## Tokenizers
Below is the suite of tokenizers provided by TensorFlow Text. String inputs are assumed to be UTF-8. Please review the [Unicode guide](https://www.tensorflow.org/text/guide/unicode) for converting strings to UTF-8.
### Whole word tokenizers
These tokenizers attempt to split a string by words, and is the most intuitive way to split text.
#### WhitespaceTokenizer
The `text.WhitespaceTokenizer` is the most basic tokenizer which splits strings on ICU defined whitespace characters (eg. space, tab, new line). This is often good for quickly building out prototype models.
```
# Split on ICU-defined whitespace; punctuation stays attached to words.
tokenizer = tf_text.WhitespaceTokenizer()
tokens = tokenizer.tokenize(["What you know you can't explain, but you feel it."])
print(tokens.to_list())
```
You may notice a shortcoming of this tokenizer: punctuation is included with the word to make up a token. To split the words and punctuation into separate tokens, the `UnicodeScriptTokenizer` should be used.
#### UnicodeScriptTokenizer
The `UnicodeScriptTokenizer` splits strings based on Unicode script boundaries. The script codes used correspond to International Components for Unicode (ICU) UScriptCode values. See: http://icu-project.org/apiref/icu4c/uscript_8h.html
In practice, this is similar to the `WhitespaceTokenizer` with the most apparent difference being that it will split punctuation (USCRIPT_COMMON) from language texts (eg. USCRIPT_LATIN, USCRIPT_CYRILLIC, etc) while also separating language texts from each other. Note that this will also split contraction words into separate tokens.
```
# Split on Unicode script boundaries, separating punctuation from words.
tokenizer = tf_text.UnicodeScriptTokenizer()
tokens = tokenizer.tokenize(["What you know you can't explain, but you feel it."])
print(tokens.to_list())
```
### Subword tokenizers
Subword tokenizers can be used with a smaller vocabulary, and allow the model to have some information about novel words from the subwords that make them up.
We briefly discuss the Subword tokenization options below, but the [Subword Tokenization tutorial](https://www.tensorflow.org/text/guide/subwords_tokenizer) goes more in depth and also explains how to generate the vocab files.
#### WordpieceTokenizer
WordPiece tokenization is a data-driven tokenization scheme which generates a set of sub-tokens. These sub tokens may correspond to linguistic morphemes, but this is often not the case.
The WordpieceTokenizer expects the input to already be split into tokens. Because of this prerequisite, you will often want to split using the `WhitespaceTokenizer` or `UnicodeScriptTokenizer` beforehand.
```
# Pre-split into whole-word tokens before wordpiece tokenization below.
tokenizer = tf_text.WhitespaceTokenizer()
tokens = tokenizer.tokenize(["What you know you can't explain, but you feel it."])
print(tokens.to_list())
```
After the string is split into tokens, the `WordpieceTokenizer` can be used to split into subtokens.
```
# Download the English wordpiece vocabulary used by the tokenizer.
url = "https://github.com/tensorflow/text/blob/master/tensorflow_text/python/ops/test_data/test_wp_en_vocab.txt?raw=true"
r = requests.get(url)
filepath = "vocab.txt"
# Use a context manager so the vocab file is flushed and closed before
# the tokenizer reads it (the original leaked the file handle).
with open(filepath, 'wb') as vocab_file:
    vocab_file.write(r.content)
# NOTE(fix): this cell demonstrates subword splitting, so it must build
# a WordpieceTokenizer from the vocab (the original mistakenly built a
# second UnicodeScriptTokenizer) and tokenize the whole-word `tokens`
# with it (the original called the whitespace `tokenizer` again).
subtokenizer = tf_text.WordpieceTokenizer(filepath, token_out_type=tf.string)
subtokens = subtokenizer.tokenize(tokens)
print(subtokens.to_list())
```
#### BertTokenizer
The BertTokenizer mirrors the original implementation of tokenization from the BERT paper. This is backed by the WordpieceTokenizer, but also performs additional tasks such as normalization and tokenizing to words first.
```
# BertTokenizer: normalization + word splitting + wordpiece lookup,
# using the vocab file downloaded above.
tokenizer = tf_text.BertTokenizer(filepath, token_out_type=tf.string, lower_case=True)
tokens = tokenizer.tokenize(["What you know you can't explain, but you feel it."])
print(tokens.to_list())
```
#### SentencepieceTokenizer
The SentencepieceTokenizer is a sub-token tokenizer that is highly configurable. This is backed by the Sentencepiece library. Like the BertTokenizer, it can include normalization and token splitting before splitting into sub-tokens.
```
# Load a pretrained sentencepiece model (raw bytes) and tokenize with it.
url = "https://github.com/tensorflow/text/blob/master/tensorflow_text/python/ops/test_data/test_oss_model.model?raw=true"
sp_model = requests.get(url).content
tokenizer = tf_text.SentencepieceTokenizer(sp_model, out_type=tf.string)
tokens = tokenizer.tokenize(["What you know you can't explain, but you feel it."])
print(tokens.to_list())
```
### Other splitters
#### UnicodeCharTokenizer
This splits a string into UTF-8 characters. It is useful for CJK languages that do not have spaces between words.
```
# Split into individual Unicode codepoints.
tokenizer = tf_text.UnicodeCharTokenizer()
tokens = tokenizer.tokenize(["What you know you can't explain, but you feel it."])
print(tokens.to_list())
```
The output is Unicode codepoints. This can also be useful for creating character ngrams, such as bigrams. The following converts the codepoints back into UTF-8 characters:
```
# Re-encode codepoints to UTF-8 strings, then join adjacent characters
# into bigrams.
characters = tf.strings.unicode_encode(tf.expand_dims(tokens, -1), "UTF-8")
bigrams = tf_text.ngrams(characters, 2, reduction_type=tf_text.Reduction.STRING_JOIN, string_separator='')
print(bigrams.to_list())
```
#### HubModuleTokenizer
This is a wrapper around models deployed to TF Hub to make the calls easier since TF Hub currently does not support ragged tensors. Having a model perform tokenization is particularly useful for CJK languages when you want to split into words, but do not have spaces to provide a heuristic guide. At this time, we have a single segmentation model for Chinese.
```
# Chinese word segmentation via a TF Hub segmentation model.
MODEL_HANDLE = "https://tfhub.dev/google/zh_segmentation/1"
segmenter = tf_text.HubModuleTokenizer(MODEL_HANDLE)
tokens = segmenter.tokenize(["新华社北京"])
print(tokens.to_list())
```
It may be difficult to view the results of the UTF-8 encoded byte strings. Decode the list values to make viewing easier.
```
def decode_list(x):
    """Recursively decode UTF-8 byte strings inside (nested) lists.

    Accepts either a byte string or an arbitrarily nested list of byte
    strings, and returns the same structure with every leaf decoded to
    ``str``.
    """
    # isinstance (rather than `type(x) is list`) also handles list
    # subclasses; a comprehension replaces the map/list round trip.
    if isinstance(x, list):
        return [decode_list(item) for item in x]
    return x.decode("UTF-8")
# Convert a ragged tensor of UTF-8 byte strings into nested str lists.
def decode_utf8_tensor(x):
return list(map(decode_list, x.to_list()))
print(decode_utf8_tensor(tokens))
```
#### SplitMergeTokenizer
The `SplitMergeTokenizer` & `SplitMergeFromLogitsTokenizer` have a targeted purpose of splitting a string based on provided values that indicate where the string should be split. This is useful when building your own segmentation models like the previous Segmentation example.
For the `SplitMergeTokenizer`, a value of 0 is used to indicate the start of a new string, and the value of 1 indicates the character is part of the current string.
```
strings = ["新华社北京"]
# Per-character labels: 0 starts a new token, 1 continues the current one.
labels = [[0, 1, 1, 0, 1]]
tokenizer = tf_text.SplitMergeTokenizer()
tokens = tokenizer.tokenize(strings, labels)
print(decode_utf8_tensor(tokens))
```
The `SplitMergeFromLogitsTokenizer` is similar, but it instead accepts logit value pairs from a neural network that predict if each character should be split into a new string or merged into the current one.
```
strings = [["新华社北京"]]
# Per-character [keep-together, split-here] logit pairs, as a model
# predicting segmentation boundaries would emit.
labels = [[[5.0, -3.2], [0.2, 12.0], [0.0, 11.0], [2.2, -1.0], [-3.0, 3.0]]]
tokenizer = tf_text.SplitMergeFromLogitsTokenizer()
# NOTE(fix): the original discarded the tokenize() result and printed
# the stale `tokens` left over from a previous cell.
tokens = tokenizer.tokenize(strings, labels)
print(decode_utf8_tensor(tokens))
```
#### RegexSplitter
The `RegexSplitter` is able to segment strings at arbitrary breakpoints defined by a provided regular expression.
```
# NOTE(fix): use a raw string for the regex — "\s" is an invalid escape
# sequence in a normal string literal (DeprecationWarning today, an
# error in future Python versions).
splitter = tf_text.RegexSplitter(r"\s")
tokens = splitter.split(["What you know you can't explain, but you feel it."])
print(tokens.to_list())
```
## Offsets
When tokenizing strings, it is often desired to know where in the original string the token originated from. For this reason, each tokenizer which implements `TokenizerWithOffsets` has a *tokenize_with_offsets* method that will return the byte offsets along with the tokens. The start_offsets lists the bytes in the original string each token starts at, and the end_offsets lists the bytes immediately after the point where each token ends. To rephrase, the start offsets are inclusive and the end offsets are exclusive.
```
# tokenize_with_offsets also returns byte offsets into the original
# string: starts are inclusive, ends are exclusive.
tokenizer = tf_text.UnicodeScriptTokenizer()
(tokens, start_offsets, end_offsets) = tokenizer.tokenize_with_offsets(['Everything not saved will be lost.'])
print(tokens.to_list())
print(start_offsets.to_list())
print(end_offsets.to_list())
```
## Detokenization
Tokenizers which implement the `Detokenizer` provide a `detokenize` method which attempts to combine the strings. This has the chance of being lossy, so the detokenized string may not always match exactly the original, pre-tokenized string.
```
# Round trip: tokenize to codepoints, then detokenize back to strings.
tokenizer = tf_text.UnicodeCharTokenizer()
tokens = tokenizer.tokenize(["What you know you can't explain, but you feel it."])
print(tokens.to_list())
strings = tokenizer.detokenize(tokens)
print(strings.numpy())
```
## TF Data
TF Data is a powerful API for creating an input pipeline for training models. Tokenizers work as expected with the API.
```
# Tokenizers compose with tf.data input pipelines via Dataset.map.
docs = tf.data.Dataset.from_tensor_slices([['Never tell me the odds.'], ["It's a trap!"]])
tokenizer = tf_text.WhitespaceTokenizer()
tokenized_docs = docs.map(lambda x: tokenizer.tokenize(x))
iterator = iter(tokenized_docs)
print(next(iterator).to_list())
print(next(iterator).to_list())
```
| github_jupyter |
```
import os
import glob
import numpy as np
import torch
import time
import cv2
#import json
import numpy as np
from PIL import Image
from torchvision.transforms import *
import torch.utils.data
import torch
import torch.nn as nn
from torch.autograd import Variable
from model import ConvColumn
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
### Loading model
# set run output folder
gpus = [0,0]  # DataParallel device ids (the same GPU listed twice)
print("=> active GPUs: {}".format(gpus))
model_name = "jester_conv_example"
# create model
model = ConvColumn(27)  # 27 output classes (see the `gestures` list below)
# multi GPU setting
model = torch.nn.DataParallel(model, device_ids= gpus).cuda()
# Restore trained weights from a hard-coded checkpoint path
# (machine-specific; update before running elsewhere).
if os.path.isfile("/home/hoanganh/Desktop/GulpIO-benchmarks-master/checkpoint.pth.tar"):
print("=> loading checkpoint")
checkpoint = torch.load("/home/hoanganh/Desktop/GulpIO-benchmarks-master/checkpoint.pth.tar")
start_epoch = checkpoint['epoch']
best_prec1 = checkpoint['best_prec1']
model.load_state_dict(checkpoint['state_dict'])
print("=> loaded checkpoint (epoch {})"
.format(checkpoint['epoch']))
else:
print("=> no checkpoint found ")
cudnn.benchmark = False
# declare initial parameters
IMG_EXTENSIONS = ['.jpg', '.JPG', '.jpeg', '.JPEG']
clip_size = 18
nclips = 1
step_size = 2
is_val=True
# declare transform (crop, mean, std) #
transform = Compose([
CenterCrop(84),
ToTensor(),
Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
###################################################
# root = './test_video/9223/%05d.jpg'%i % path of frames
# define function to load image from a path of image file
# define function to load image from a path of image file
def image_loader(image_path):
"""Load an image file and return it as an RGB PIL Image."""
image = Image.open(image_path).convert('RGB')
return image
# define function to take file name of frame in a forder
def get_frame_names(path):
    """Return the ordered list of frame file paths to feed the model.

    Collects image files under *path*, then pads (repeating the last
    frame) or subsamples (stride `step_size`, with a random temporal
    offset during training) so that `clip_size * nclips` frames result.

    Relies on the module-level settings IMG_EXTENSIONS, clip_size,
    nclips, step_size and is_val.
    """
    frame_names = []
    for ext in IMG_EXTENSIONS:
        frame_names.extend(glob.glob(os.path.join(path, "*" + ext)))
    frame_names = list(sorted(frame_names))
    num_frames = len(frame_names)
    if not frame_names:
        # Bug fix: the original raised IndexError (frame_names[-1]) when
        # the folder contained no images; return an empty list instead.
        return frame_names
    # set number of necessary frames
    if nclips > -1:
        num_frames_necessary = clip_size * nclips * step_size
    else:
        num_frames_necessary = num_frames
    # pick frames
    offset = 0
    if num_frames_necessary > num_frames:
        # Pad last frame if video is shorter than necessary
        frame_names += [frame_names[-1]] * \
            (num_frames_necessary - num_frames)
    elif num_frames_necessary < num_frames:
        # If there are more frames, then sample starting offset.
        diff = (num_frames - num_frames_necessary)
        # temporal augmentation: random start position only during training
        if not is_val:
            offset = np.random.randint(0, diff)
    frame_names = frame_names[offset:num_frames_necessary +
                              offset:step_size]
    return frame_names
### get image paths from an input folder
start = time.time()
img_paths = get_frame_names('./test_dataset/109621/')
### get frames from img_paths
imgs = []
for img_path in img_paths:
    img = image_loader(img_path)
    img = transform(img)
    # add a leading dim so each frame is (1, C, H, W)
    imgs.append(torch.unsqueeze(img, 0))
# print(imgs.shape)
## format data to torch
# stack to (T, C, H, W), reorder to (C, T, H, W), then add a batch
# dimension -> (1, C, T, H, W)
data = torch.cat(imgs)
data = data.permute(1, 0, 2, 3)
data = data.unsqueeze(0)
print(data.shape)
print(time.time() - start)
# The 27 Jester gesture class labels, index-aligned with the model's
# output logits (class index -> label string).
gestures = ['Swiping_Left', 'Swiping_Right', 'Swiping_Down', 'Swiping_Up',
            'Pushing_Hand_Away', 'Pulling_Hand_In', 'Sliding_Two_Fingers_Left',
            'Sliding_Two_Fingers_Right', 'Sliding_Two_Fingers_Down', 'Sliding_Two_Fingers_Up',
            'Pushing_Two_Fingers_Away', 'Pulling_Two_Fingers_In', 'Rolling_Hand_Forward',
            'Rolling_Hand_Backward', 'Turning_Hand_Clockwise', 'Turning_Hand_Counterclockwise',
            'Zooming_In_With_Full_Hand', 'Zooming_Out_With_Full_Hand',
            # Bug fix: this label had a stray space ('Zooming_In With_Two_Fingers'),
            # breaking the underscore convention used by every other label.
            'Zooming_In_With_Two_Fingers', 'Zooming_Out_With_Two_Fingers',
            'Thumb_Up', 'Thumb_Down', 'Shaking_Hand', 'Stop_Sign',
            'Drumming_Fingers', 'No_gesture', 'Doing_other_things']
# Run a single forward pass and report the predicted gesture label.
start = time.time()
input = data  # keep the name `input`: the visualization cell below reuses it
input_vars = Variable(input)
output = model(input_vars)
# index of the highest-scoring class per batch element
_, predicted = torch.max(output.data, 1)
print(time.time() - start)
predicted = predicted.cpu().numpy()
predicted = predicted[0]
# Bug fix: the original passed an undefined name `i` as the first format
# argument, so the gesture label was never actually printed.
print('predict_gesture: {}'.format(gestures[predicted]))
print(time.time() - start)
# Visualize the input clip: undo the Normalize transform (multiply by the
# per-channel std, add the mean) and display each of the 18 frames.
input = input.cpu().numpy()
m1 = np.array([0.485, 0.456, 0.406])  # normalization mean
s1 = np.array([0.229, 0.224, 0.225])  # normalization std
for t in range(18):
    cv2.imshow('input',np.array(255*(s1*np.transpose(np.array(input[0,:,t,:,:]),(1,2,0))+m1),dtype=np.uint8))
    cv2.waitKey(0)  # wait for a key press between frames
cv2.destroyAllWindows()
```
| github_jupyter |
```
import pandas as pd
import numpy as np
import pickle
import os
# Show all columns and up to 140 rows when displaying DataFrames.
# Use the fully-qualified option names: the abbreviated 'max_columns' /
# 'max_rows' forms were deprecated and removed in pandas 2.0.
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', 140)
# for visualising the matrices
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import warnings
warnings.filterwarnings('ignore')
import xGils.xT as xT
import xGils.xLoad as xLoad
```
# **Loading Synthetic Shot Data**
```
# Synthetic shot events, used later to augment the observed shot sample
df_synthetic = pd.read_csv('/Users/christian/Desktop/University/Birkbeck MSc Applied Statistics/Project/Data/Synthetic/Synthetic_Shots.csv')
```
# **Loading Wyscout Data (Flat CSV load)**
```
# Local repositories of analysis-ready data
cg_xT_repo = r'/Users/christian/Desktop/University/Birkbeck MSc Applied Statistics/Project/Data/Analysis Ready/Wyscout xT'
spadl_xT_repo = r'/Users/christian/Desktop/University/Birkbeck MSc Applied Statistics/Project/Data/Analysis Ready/Wyscout SPADL xT xG'
ref_repo = r'/Users/christian/Desktop/University/Birkbeck MSc Applied Statistics/Project/Data/Analysis Ready/Wyscout Reference'
wyscout_events = r'/Users/christian/Desktop/University/Birkbeck MSc Applied Statistics/Project/Data/Analysis Ready/Wyscout Fully Loaded Events'
# Reference tables (formations, player positions, teams) used in the merges below
df_ref = pd.read_csv(os.path.join(ref_repo, 'df_wyscout_formations.csv'))
df_players = pd.read_csv(os.path.join(ref_repo, 'player_positions.csv'))
df_teams = pd.read_csv(os.path.join(ref_repo, 'df_teams.csv'))
%%time
# this is the engineered master data that resulted from the earlier feature-engineering step
# NOTE(review): converters={'tags': eval} runs eval on file contents — fine
# for this trusted local file, unsafe for untrusted input (ast.literal_eval is safer)
df_wyscout = pd.read_csv(os.path.join(wyscout_events, 'Wyscout_Engineered_Events.csv'), converters={'tags': eval})
print (f'{len(df_wyscout)} rows in *df_wyscout*')
df_wyscout.head()
```
# **Loading SPADL pre-packed xT data for comparison with our model**
```
# Pre-computed xT values from the SPADL pipeline, used below to validate our model
df_spadl = pd.read_csv(os.path.join(spadl_xT_repo, 'SPADL_xT.csv'))
```
### **Wyscout Event / Tags**
#### **Wyscount Tags**
```
# Wyscout tag id -> human-readable meaning. Only a handful are used below
# (101 goal, 501-504 dribble/take-on, 701-703 duel outcome).
dic_tags = {
    101: 'Goal',
    102: 'Own goal',
    301: 'Assist',
    302: 'Key pass',
    1901: 'Counter attack',
    401: 'Left foot',
    402: 'Right foot',
    403: 'Head/body',
    1101: 'Direct',
    1102: 'Indirect',
    2001: 'Dangerous ball lost',
    2101: 'Blocked',
    801: 'High',
    802: 'Low',
    1401: 'Interception',
    1501: 'Clearance',
    201: 'Opportunity',
    1301: 'Feint',
    1302: 'Missed ball',
    501: 'Free space right',
    502: 'Free space left',
    503: 'Take on left',
    504: 'Take on right',
    1601: 'Sliding tackle',
    601: 'Anticipated',
    602: 'Anticipation',
    1701: 'Red card',
    1702: 'Yellow card',
    1703: 'Second yellow card',
    1201: 'Position: Goal low center',
    1202: 'Position: Goal low right',
    1203: 'Position: Goal center',
    1204: 'Position: Goal center left',
    1205: 'Position: Goal low left',
    1206: 'Position: Goal center right',
    1207: 'Position: Goal high center',
    1208: 'Position: Goal high left',
    1209: 'Position: Goal high right',
    1210: 'Position: Out low right',
    1211: 'Position: Out center left',
    1212: 'Position: Out low left',
    1213: 'Position: Out center right',
    1214: 'Position: Out high center',
    1215: 'Position: Out high left',
    1216: 'Position: Out high right',
    1217: 'Position: Post low right',
    1218: 'Position: Post center left',
    1219: 'Position: Post low left',
    1220: 'Position: Post center right',
    1221: 'Position: Post high center',
    1222: 'Position: Post high left',
    1223: 'Position: Post high right',
    901: 'Through',
    1001: 'Fairplay',
    701: 'Lost',
    702: 'Neutral',
    703: 'Won',
    1801: 'Accurate',
    1802: 'Not accurate'
}
```
### Wyscout **eventName** and **subEventName** counts.
```
# Frequency of the high-level and low-level Wyscout event types
df_wyscout.eventName.value_counts()
df_wyscout.subEventName.value_counts()
```
## Aim of the game:
**Want to produce an events dataset with a similar set of events as the Opta data**
> We just want to map successful and unsuccessful pass / dribble / shot events
> The tricky thing with Wyscout is getting the dribbles right - and we just need to sort out the tags in this situation
> Create a Christian-Event-Type column to add on to this dataset, and then push it through your xT library
> May need to tweak a few things like the coordinates...
### Transformation Functions
```
# adding a few more flags (general successFlag means whether the shot was accurate (i.e. on target), not necessarily if a goal happened)
df_wyscout['goalScoredFlag'] = df_wyscout.tags.apply(lambda x: 1 if 101 in x else 0)
# take on if 503 or 504
df_wyscout['takeOnFlag'] = df_wyscout.tags.apply(lambda x: 1 if 503 in x else (1 if 504 in x else 0))
# dribble if any of 501/502 (free space) or 503/504 (take on)
df_wyscout['dribbleFlag'] = df_wyscout.tags.apply(lambda x: 1 if 503 in x else (1 if 504 in x else (1 if 502 in x else (1 if 501 in x else 0))))
# dribble success: win if 703, neutral if 702, loss if 701 (we'll want to punish losses and do nothing with neutral encounters)
df_wyscout['dribbleSuccessFlag'] = df_wyscout.tags.apply(lambda x: 1 if 703 in x else (2 if 702 in x else 0))
# starting off with a blank slate of not relevant
df_wyscout['eventSubType'] = 'Not Relevant'
# passes are nice and easy
df_wyscout.loc[((df_wyscout['eventName'] == 'Pass') & (df_wyscout['successFlag'] == 1)),'eventSubType'] = 'Successful Pass'
df_wyscout.loc[((df_wyscout['eventName'] == 'Pass') & (df_wyscout['successFlag'] == 0)),'eventSubType'] = 'Failed Pass'
# shots require the new shot flag (we're classifying a failed shot as one that does not score a goal)
df_wyscout.loc[((df_wyscout['eventName'] == 'Shot') & (df_wyscout['goalScoredFlag'] == 1)),'eventSubType'] = 'Goal'
df_wyscout.loc[((df_wyscout['eventName'] == 'Shot') & (df_wyscout['goalScoredFlag'] == 0)),'eventSubType'] = 'Failed Shot'
# dribbles — any dribble tag; rows that are also take-ons get relabelled by the next group
# (the original comments on these two groups were swapped: this group tests dribbleFlag)
df_wyscout.loc[((df_wyscout['subEventName'] == 'Ground attacking duel') & (df_wyscout['dribbleFlag'] == 1) & (df_wyscout['dribbleSuccessFlag'] == 1)),'eventSubType'] = 'Successful Dribble'
df_wyscout.loc[((df_wyscout['subEventName'] == 'Ground attacking duel') & (df_wyscout['dribbleFlag'] == 1) & (df_wyscout['dribbleSuccessFlag'] == 2)),'eventSubType'] = 'Neutral Dribble'
df_wyscout.loc[((df_wyscout['subEventName'] == 'Ground attacking duel') & (df_wyscout['dribbleFlag'] == 1) & (df_wyscout['dribbleSuccessFlag'] == 0)),'eventSubType'] = 'Failed Dribble'
# take-ons (more aggressive dribbles where the attacking player is trying to beat his/her man)
df_wyscout.loc[((df_wyscout['subEventName'] == 'Ground attacking duel') & (df_wyscout['takeOnFlag'] == 1) & (df_wyscout['dribbleSuccessFlag'] == 1)),'eventSubType'] = 'Successful Take-On'
df_wyscout.loc[((df_wyscout['subEventName'] == 'Ground attacking duel') & (df_wyscout['takeOnFlag'] == 1) & (df_wyscout['dribbleSuccessFlag'] == 2)),'eventSubType'] = 'Neutral Take-On'
df_wyscout.loc[((df_wyscout['subEventName'] == 'Ground attacking duel') & (df_wyscout['takeOnFlag'] == 1) & (df_wyscout['dribbleSuccessFlag'] == 0)),'eventSubType'] = 'Failed Take-On'
# Quick check of how things look against the Opta data
df_wyscout.loc[df_wyscout['source'] == 'England'].eventSubType.value_counts()
# Quick check of what's being classed as irrelevant
df_wyscout_nr = df_wyscout.loc[df_wyscout['eventSubType'] == 'Not Relevant'].copy()
df_wyscout_nr.subEventName.value_counts()
```
### Looks like we've got about 90% of the right balance with the Opta data (haven't missed anything significant within the tags)
---
## Defining Success / Failure of the three high-level event types: passes (incl. crosses), dribbles, shots.
```
# Event-label groupings for the three high-level action families.
# Passes (incl. crosses):
wyscout_successful_pass_events = ['Successful Pass']
wyscout_failed_pass_events = ['Failed Pass']
# Dribbles (carries and take-ons):
wyscout_successful_dribble_events = ['Successful Dribble', 'Successful Take-On']
wyscout_failed_dribble_events = ['Failed Dribble', 'Failed Take-On']
# Shots:
wyscout_successful_shot_events = ['Goal']
wyscout_failed_shot_events = ['Failed Shot']
# Every label retained for the xT model, in the original concatenation order.
wyscout_events_relevant = (
    wyscout_successful_dribble_events
    + wyscout_successful_pass_events
    + wyscout_successful_shot_events
    + wyscout_failed_dribble_events
    + wyscout_failed_pass_events
    + wyscout_failed_shot_events
)
wyscout_events_relevant
```
## Transforming Co-Ords
> Wyscout has an inverted y-axis but otherwise the same transformation as Opta
```
%%time
# Convert Wyscout coordinates (inverted y-axis) into metres via the xLoad helper
df_wyscout = xLoad.wyscout_coords_in_metres(df_wyscout, 'start_x', 'end_x', 'start_y', 'end_y')
df_wyscout
```
---
# And now, fingers crossed, pushing this through the generalised xT model...
```
# xT grid resolution: N cells along the pitch length, M across the width.
N = 18
M = 12
```
## 1) Looking at some of the xT components first, before putting it all together
### Matrix Visualisation
#### xG **WITHOUT** synthetic shots/goals
```
# N x M grid of P(score | shot from cell) estimated from real shots only
xG = xT.p_score_if_shoot(df_wyscout, wyscout_successful_shot_events, wyscout_failed_shot_events, 'eventSubType', N, M, 105, 68)
fig = plt.figure(figsize=(18,12))
plt.imshow(xG, interpolation='nearest', cmap=cm.Greys_r)
```
#### xG **WITH** synthetic shots/goals
```
# Same xG grid, but with the shot sample augmented by the synthetic shots
xG = xT.p_score_if_shoot(df_wyscout, wyscout_successful_shot_events, wyscout_failed_shot_events, 'eventSubType', N, M, 105, 68, use_synthetic=1, df_synthetic=df_synthetic)
fig = plt.figure(figsize=(18,12))
plt.imshow(xG, interpolation='nearest', cmap=cm.Greys_r)
```
#### Probability of shooting
```
# Per-cell probabilities of shooting (pS) vs moving the ball (pM); plot pS
pS, pM = xT.p_shoot_or_move(df_wyscout, wyscout_successful_shot_events, wyscout_failed_shot_events, wyscout_successful_pass_events, wyscout_failed_pass_events, wyscout_successful_dribble_events, wyscout_failed_dribble_events, 'eventSubType', N, M, 105, 68)
fig = plt.figure(figsize=(18,12))
plt.imshow(pS, interpolation='nearest', cmap=cm.Greys_r)
```
#### Probability of Moving
```
# Heatmap of the move probability computed in the previous cell
fig = plt.figure(figsize=(18,12))
plt.imshow(pM, interpolation='nearest', cmap=cm.Greys_r)
```
## 2. Calculating, visualising, and analysing xT **with synthetic shot data**
```
# Solve for the xT surface iteratively (with synthetic shots), keeping the
# per-iteration heatmaps for inspection
xT_surface, heatmaps = xT.xT_surface(df_wyscout, wyscout_successful_shot_events, wyscout_failed_shot_events, wyscout_successful_pass_events, wyscout_failed_pass_events, wyscout_successful_dribble_events, wyscout_failed_dribble_events, 'eventSubType', N, M, 105, 68, 1, df_synthetic)
# dumping the Wyscout surface to file
#with open('/Users/christian/Desktop/University/Birkbeck MSc Applied Statistics/Project/Data/Analysis Ready/xT Surfaces/Wyscout_xT_Surface.pkl', 'wb') as f:
#pickle.dump(xT_surface, f)
# loading the Opta xT surface to check whether differences are because of the data or the model
#with open('/Users/christian/Desktop/University/Birkbeck MSc Applied Statistics/Project/Data/Analysis Ready/xT Surfaces/Original_Opta_xT_Surface.pkl', 'rb') as f:
#xT_surface = pickle.load(f)
# Smooth the grid and score every relevant event with its xT contribution
xT_interp = xT.bilinear_interp_xT(xT_surface)
df_wyscout['xT'] = xT.apply_xT(df_wyscout, xT_surface, wyscout_successful_pass_events, wyscout_failed_pass_events, wyscout_successful_dribble_events, wyscout_failed_dribble_events, N, M, 105, 68, 100, xT_mode = 3)
plt.figure(figsize=(18,12))
plt.imshow(xT_surface, interpolation='nearest', cmap=cm.coolwarm)
plt.figure(figsize=(18,12))
plt.imshow(xT_interp, interpolation='nearest', cmap=cm.coolwarm)
plt.axis('off')
#plt.savefig(f'bilinear_interp.png', dpi=300, transparent=True)
# Grid of the intermediate surfaces from the iterative solve
fig, axs = plt.subplots(5,6, figsize=(25, 25), facecolor='w', edgecolor='k')
fig.subplots_adjust(hspace = .01, wspace=.2)
axs = axs.ravel()
for i in range(len(heatmaps)):
    axs[i].imshow(heatmaps[i], interpolation='nearest', cmap=cm.coolwarm)
    axs[i].axes.get_xaxis().set_visible(False)
    axs[i].axes.get_yaxis().set_visible(False)
#fig.savefig(f'iterative_solve_xT.png', dpi=300, transparent=True)
```
---
3. Produce some aggregate metrics per player as part of preliminary analysis
(**Will go into this in far, far more detail in the Interim Report analysis**)
```
# Join reference info (shortName, minutesPlayed, ...) onto the events
df_wyscout_ref = df_wyscout.merge(df_ref, how='inner', on=['playerId','teamId','matchId'])
# Aggregate xT per player: first per match, then across matches
df_xT = df_wyscout_ref.groupby(['source','playerId','shortName','matchId'])\
    .agg({'xT':np.sum,'minutesPlayed':np.mean,'x1_m':'count'})\
    .reset_index().rename(columns={'x1_m':'numActions'})\
    .groupby(['source','playerId','shortName'])\
    .agg({'xT':np.sum,'minutesPlayed':np.sum,'numActions':np.sum,'matchId':'nunique'})\
    .reset_index()\
    .rename(columns={'matchId':'numMatches'})\
    .sort_values('xT', ascending=False)
df_xT['xT_per_90'] = (df_xT.xT / df_xT.minutesPlayed) * 90
# min mins filter
df_xT = df_xT.loc[(df_xT['minutesPlayed'] > 180)]
# Rank players within each competition by total and per-90 xT
df_xT['source_xT_rank'] = df_xT.sort_values('xT', ascending=False).groupby(['source']).cumcount() + 1
df_xT['source_xT_per_90_rank'] = df_xT.sort_values('xT_per_90', ascending=False).groupby(['source']).cumcount() + 1
# interesting, looks like there's disagreement over the number of minutes played... which obviously makes a difference
# when that's the denominator
df_xT.loc[df_xT['source_xT_per_90_rank'] <= 20].sort_values(['source','source_xT_per_90_rank'], ascending=[True, True])
df_xT.loc[df_xT['source_xT_rank'] <= 20].sort_values(['source','source_xT_rank'], ascending=[True, True])
```
## Checking our model is in agreement with the SPADL model on the same data
I.e. differences are entirely due to data preparation of the Wyscout data and subsequent xT modelling.
> **All looks fine!** Great validation of the model.
```
# Mirror the player-level aggregation on the SPADL xT output so the two
# models can be compared like-for-like
spadl_agg = df_spadl.merge(df_ref, how='inner', left_on=['player_id','game_id','team_id'], right_on=['playerId','matchId','teamId'])\
    .merge(df_players, how='inner', on='player_id')\
    .merge(df_teams, how='inner', left_on='team_id', right_on='teamId')\
    .groupby(['teamName','teamType','teamArea','player_id','player_name','game_id'])\
    .agg({'xT_value':np.sum,'minutesPlayed':np.mean,'start_x':'count'})\
    .reset_index().rename(columns={'start_x':'numActions'})\
    .groupby(['teamName','teamType','teamArea','player_id','player_name'])\
    .agg({'xT_value':np.sum,'minutesPlayed':np.sum,'numActions':np.sum,'game_id':'nunique'})\
    .reset_index()\
    .rename(columns={'game_id':'numMatches'})\
    .sort_values('xT_value', ascending=False)
spadl_agg.loc[spadl_agg['teamArea'] == 'England'].head(100)
```
---
# Outputting xT + events data to CSV
```
# Write the xT-enriched event data out as one full file plus per-competition slices
df_wyscout.to_csv(os.path.join(cg_xT_repo, 'Full_Wyscout_All_Comps_xT.csv'), index=None)
df_domestic = df_wyscout.loc[df_wyscout['source'].isin(['England','Italy','France','Spain','Germany'])].copy()
df_world_cup = df_wyscout.loc[df_wyscout['source'] == 'World_Cup'].copy()
df_euros = df_wyscout.loc[df_wyscout['source'] == 'European_Championship'].copy()
df_domestic.to_csv(os.path.join(cg_xT_repo, 'Wyscout_Domestic_Comps_xT.csv'), index=None)
df_world_cup.to_csv(os.path.join(cg_xT_repo, 'Wyscout_World_Cup_2018_xT.csv'), index=None)
df_euros.to_csv(os.path.join(cg_xT_repo, 'Wyscout_Euros_2016_xT.csv'), index=None)
```
| github_jupyter |
```
%pylab inline
import pandas as pd
rcParams['axes.spines.right'] = False
rcParams['axes.spines.top'] = False
# Spatial sample metadata (x/y positions, total_counts) and gene count matrix
sample_info = pd.read_csv('BC_sample_info.csv', index_col=0)
df = pd.read_table('data/Layer2_BC_count_matrix-1.tsv', index_col=0)
df = df.loc[sample_info.index]
df = df.T[df.sum(0) >= 3].T # Filter practically unobserved genes
import NaiveDE
# Variance-stabilise the counts, then regress out library-size effects
dfm = NaiveDE.stabilize(df.T).T
res = NaiveDE.regress_out(sample_info, dfm.T, 'np.log(total_counts)').T
import SpatialDE as sde
X = sample_info[['x', 'y']]
plt.scatter(X.x, X.y);
# Run SpatialDE on a random sample of 1000 genes
results = sde.run(X, res.sample(1000, axis=1))
from matplotlib.colors import LogNorm
# FSV vs p-value, coloured by the FSV standard error (2*sqrt(var))
plt.yscale('log')
plt.scatter(results.FSV, results.pval, c=2 * np.sqrt(results.s2_FSV.values), norm=LogNorm());
plt.colorbar()
plt.ylim(1e-13, 10);
plt.gca().invert_yaxis();
# NOTE(review): alpha_map is defined but only size_map is used below
alpha_map = {
    0.0: 1.,
    0.1: 0.5,
    1.0: 0.2
}
size_map = {
    0.0: 50,
    0.1: 25,
    1.0: 5
}
# Same scatter, binned by FSV standard error; smaller markers = larger error
plt.yscale('log')
for i, g in results.groupby(pd.cut(2 * np.sqrt(results.s2_FSV), [0, 1e-1, 1e0, np.inf])):
    plt.scatter(g.FSV, g.pval, label=i, c='k', s=size_map[i.left]);
plt.legend(scatterpoints=3)
plt.ylim(1e-13, 10);
plt.gca().invert_yaxis();
i.left
pd.cut(2 * np.sqrt(results.s2_FSV), [0, 1e-1, 1e0, np.inf])
def remake_matrices(result, X, exp_mat):
    """Rebuild the SpatialDE kernel factorisation for one result row.

    Returns the tuple (UTy, UT1, S, n) consumed by the likelihood objective.
    """
    lengthscale = result.l.values[0]
    gene = result.g.values[0]
    kernel = sde.base.SE_kernel(X, lengthscale)
    U, S = sde.base.factor(kernel)
    UT1 = sde.base.get_UT1(U)
    UTy = sde.base.get_UTy(U, exp_mat[gene])
    return UTy, UT1, S, X.shape[0]
logdeltas = np.linspace(-10, 20)
# Chained comparison in the query: FSV > 0.8 AND 1e-5 < pval < 0.8;
# sample one such "suspicious" gene for closer inspection
bad_gene = results.query('fraction_spatial_variance > 0.8 > pval > 1e-5').sample()
plt.yscale('log')
plt.scatter(results.fraction_spatial_variance, results.pval);
plt.scatter(bad_gene.fraction_spatial_variance, bad_gene.pval);
plt.ylim(1e-13, 1);
plt.gca().invert_yaxis();
plt.yscale('log')
plt.scatter(results.fraction_spatial_variance, results.qval, c=results.d2delta);
plt.ylim(1e-13, 10);
plt.colorbar(label=r'$\frac{\partial^2 LLR}{\partial \log(\delta)}$');
plt.gca().invert_yaxis();
plt.yscale('log')
plt.xscale('log')
plt.scatter(results.max_s2_t_hat * results.Gower, results.qval, c=results.d2delta);
plt.xlabel('Spatial variance')
plt.ylim(1e-13, 10);
plt.colorbar(label=r'$\frac{\partial^2 LLR}{\partial \log(\delta)}$');
plt.gca().invert_yaxis();
# Rebuild the likelihood matrices for the sampled gene and scan log(delta)
UTy, UT1, S, n = remake_matrices(bad_gene, X, res)
LL_obj = sde.base.make_objective(*(UTy, UT1, S, n))
lls = np.array([LL_obj(d) for d in logdeltas])
# NOTE(review): scipy.misc.derivative was removed in SciPy 1.10; newer
# environments need an alternative finite-difference helper
from scipy.misc import derivative
x0 = np.log(bad_gene.max_delta.values[0])
# Variance of log(delta) from the curvature at the optimum, then propagate
# it to s2_t_hat via the delta method (first derivative squared)
s2_delta = 1. / derivative(LL_obj, x0, n=2)
s2_t_hat = lambda d: sde.base.s2_t_hat(np.exp(d), UTy, S, n)
s2_s2_t_hat = np.abs(derivative(s2_t_hat, x0, n=1)) ** 2 * s2_delta
s2_s2_t_hat
def FSV(logdelta):
    """Fraction of spatial variance as a function of log(delta).

    Reads the globals UTy, S, n and bad_gene set up in the cells above.
    """
    s2_t = sde.base.s2_t_hat(np.exp(logdelta), UTy, S, n)
    g = bad_gene.Gower.values[0]
    return s2_t * g / (s2_t * g + np.exp(logdelta) * s2_t)
# Delta-method variance of the FSV estimate at the optimum
derivative(FSV, x0, n=1) ** 2 * s2_delta
bad_gene
# Log-likelihood profile over log(delta), with the reported optimum marked
plt.plot(logdeltas, lls);
plt.axvline(np.log(bad_gene.max_delta.values[0]), c='k');
lls
from scipy import stats
# QQ plot: observed LLR p-values against a chi2(1) null sample
deg_f = 1
chi2_samples = pd.Series(stats.chi2.rvs(df=1, size=results.LLR.shape[0]))
pval_samples = 1 - stats.chi2.cdf(chi2_samples.sort_values(), df=deg_f)
pval_observed = 1 - stats.chi2.cdf(results.LLR.sort_values(), df=deg_f)
plt.loglog()
plt.scatter(pval_samples, pval_observed, marker='o', c='k', rasterized=True);
plt.plot([1e-5, 1], [1e-5, 1], c='r');
plt.gca().invert_xaxis()
plt.gca().invert_yaxis()
plt.xlabel('Expected P-value')
plt.ylabel('Observed P-value');
plt.title('QQ Plot - Breast Cancer Tissue');
# Spatial vs total variance, first on raw counts (df) then on the
# stabilised/regressed values (res)
plt.loglog()
plt.scatter(df[results.g].var(), results.max_s2_t_hat * results.Gower, c=results.fraction_spatial_variance);
figsize(5, 4)
plt.yscale('log')
plt.scatter(res[results.g].var(), results.max_s2_t_hat * results.Gower, c=results.fraction_spatial_variance);
plt.xlabel('Total Variance');
plt.ylabel('Spatial Variance')
plt.colorbar(label='fraction spatial variance');
figsize(5, 4)
plt.yscale('log')
plt.scatter(res[results.g].var(), results.max_s2_t_hat * results.Gower, c=results.l);
plt.xlabel('Total Variance');
plt.ylabel('Spatial Variance')
plt.colorbar(label='lengthscale')
plt.yscale('log')
plt.xscale('log')
# Spatial variance against total (noise + spatial) variance
noise_var = results['max_s2_t_hat'] * results['max_delta']
plt.scatter(noise_var + results.max_s2_t_hat * results.Gower,
            results.max_s2_t_hat * results.Gower,
            c=results.fraction_spatial_variance);
```
| github_jupyter |
<h1> Text Classification using TensorFlow/Keras on AI Platform </h1>
This notebook illustrates:
<ol>
<li> Creating datasets for AI Platform using BigQuery
<li> Creating a text classification model using the Estimator API with a Keras model
<li> Training on Cloud AI Platform
<li> Rerun with pre-trained embedding
</ol>
```
# Ensure the right version of Tensorflow is installed.
!pip freeze | grep tensorflow==2.1
# change these to try this notebook out
BUCKET = 'cloud-training-demos-ml'
PROJECT = 'cloud-training-demos'
REGION = 'us-central1'
import os
# export as environment variables so the %%bash cells below can read them
os.environ['BUCKET'] = BUCKET
os.environ['PROJECT'] = PROJECT
os.environ['REGION'] = REGION
os.environ['TFVERSION'] = '2.1'
if 'COLAB_GPU' in os.environ:  # this is always set on Colab, the value is 0 or 1 depending on whether a GPU is attached
    from google.colab import auth
    auth.authenticate_user()
    # download "sidecar files" since on Colab, this notebook will be on Drive
    !rm -rf txtclsmodel
    !git clone --depth 1 https://github.com/GoogleCloudPlatform/training-data-analyst
    !mv training-data-analyst/courses/machine_learning/deepdive/09_sequence/txtclsmodel/ .
    !rm -rf training-data-analyst
# downgrade TensorFlow to the version this notebook has been tested with
!pip install --upgrade tensorflow==$TFVERSION
import tensorflow as tf
print(tf.__version__)
```
We will look at the titles of articles and figure out whether the article came from the New York Times, TechCrunch or GitHub.
We will use [hacker news](https://news.ycombinator.com/) as our data source. It is an aggregator that displays tech related headlines from various sources.
### Creating Dataset from BigQuery
Hacker news headlines are available as a BigQuery public dataset. The [dataset](https://bigquery.cloud.google.com/table/bigquery-public-data:hacker_news.stories?tab=details) contains all headlines from the sites inception in October 2006 until October 2015.
Here is a sample of the dataset:
```
%load_ext google.cloud.bigquery
%%bigquery --project $PROJECT
-- Sample 10 well-scored stories that have a non-empty URL
SELECT
url, title, score
FROM
`bigquery-public-data.hacker_news.stories`
WHERE
LENGTH(title) > 10
AND score > 10
AND LENGTH(url) > 0
LIMIT 10
```
Let's do some regular expression parsing in BigQuery to get the source of the newspaper article from the URL. For example, if the url is http://mobile.nytimes.com/...., I want to be left with <i>nytimes</i>
```
%%bigquery --project $PROJECT
-- Extract the source domain from each URL (e.g. nytimes from
-- mobile.nytimes.com) and count articles per .com source
SELECT
ARRAY_REVERSE(SPLIT(REGEXP_EXTRACT(url, '.*://(.[^/]+)/'), '.'))[OFFSET(1)] AS source,
COUNT(title) AS num_articles
FROM
`bigquery-public-data.hacker_news.stories`
WHERE
REGEXP_CONTAINS(REGEXP_EXTRACT(url, '.*://(.[^/]+)/'), '.com$')
AND LENGTH(title) > 10
GROUP BY
source
ORDER BY num_articles DESC
LIMIT 10
```
Now that we have good parsing of the URL to get the source, let's put together a dataset of source and titles. This will be our labeled dataset for AI Platform.
```
from google.cloud import bigquery
bq = bigquery.Client(project=PROJECT)
# Base query: cleaned (lower-cased, punctuation-stripped) titles labeled with
# their source domain, restricted to the three classes we want to predict
query="""
SELECT source, LOWER(REGEXP_REPLACE(title, '[^a-zA-Z0-9 $.-]', ' ')) AS title FROM
(SELECT
ARRAY_REVERSE(SPLIT(REGEXP_EXTRACT(url, '.*://(.[^/]+)/'), '.'))[OFFSET(1)] AS source,
title
FROM
`bigquery-public-data.hacker_news.stories`
WHERE
REGEXP_CONTAINS(REGEXP_EXTRACT(url, '.*://(.[^/]+)/'), '.com$')
AND LENGTH(title) > 10
)
WHERE (source = 'github' OR source = 'nytimes' OR source = 'techcrunch')
"""
df = bq.query(query + " LIMIT 5").to_dataframe()
df.head()
```
For ML training, we will need to split our dataset into training and evaluation datasets (and perhaps an independent test dataset if we are going to do model or feature selection based on the evaluation dataset).
A simple, repeatable way to do this is to use the hash of a well-distributed column in our data (See https://www.oreilly.com/learning/repeatable-sampling-of-data-sets-in-bigquery-for-machine-learning).
```
# Repeatable ~75/25 train/eval split via a deterministic hash of the title
traindf = bq.query(query + " AND ABS(MOD(FARM_FINGERPRINT(title), 4)) > 0").to_dataframe()
evaldf = bq.query(query + " AND ABS(MOD(FARM_FINGERPRINT(title), 4)) = 0").to_dataframe()
```
Below we can see that roughly 75% of the data is used for training, and 25% for evaluation.
We can also see that within each dataset, the classes are roughly balanced.
```
# Check the class balance within each split
traindf['source'].value_counts()
evaldf['source'].value_counts()
```
Finally we will save our data, which is currently in-memory, to disk.
```
import os, shutil
# Reset the local data directory and write train/eval as tab-separated files
DATADIR='data/txtcls'
shutil.rmtree(DATADIR, ignore_errors=True)
os.makedirs(DATADIR)
traindf.to_csv( os.path.join(DATADIR,'train.tsv'), header=False, index=False, encoding='utf-8', sep='\t')
evaldf.to_csv( os.path.join(DATADIR,'eval.tsv'), header=False, index=False, encoding='utf-8', sep='\t')
# quick sanity checks on the written files
!head -3 data/txtcls/train.tsv
!wc -l data/txtcls/*.tsv
```
### TensorFlow/Keras Code
Please explore the code in this <a href="txtclsmodel/trainer">directory</a>: `model.py` contains the TensorFlow model and `task.py` parses command line arguments and launches off the training job.
In particular look for the following:
1. [tf.keras.preprocessing.text.Tokenizer.fit_on_texts()](https://www.tensorflow.org/api_docs/python/tf/keras/preprocessing/text/Tokenizer#fit_on_texts) to generate a mapping from our word vocabulary to integers
2. [tf.keras.preprocessing.text.Tokenizer.texts_to_sequences()](https://www.tensorflow.org/api_docs/python/tf/keras/preprocessing/text/Tokenizer#texts_to_sequences) to encode our sentences into a sequence of their respective word-integers
3. [tf.keras.preprocessing.sequence.pad_sequences()](https://www.tensorflow.org/api_docs/python/tf/keras/preprocessing/sequence/pad_sequences) to pad all sequences to be the same length
The embedding layer in the keras model takes care of one-hot encoding these integers and learning a dense embedding representation from them.
Finally we pass the embedded text representation through a CNN model pictured below
<img src=images/txtcls_model.png width=25%>
### Run Locally (optional step)
Let's make sure the code compiles by running locally for a fraction of an epoch.
This may not work if you don't have all the packages installed locally for gcloud (such as in Colab).
This is an optional step; move on to training on the cloud.
```
%%bash
# Smoke-test the trainer locally for a tiny fraction of an epoch
pip install google-cloud-storage
rm -rf txtcls_trained
gcloud ai-platform local train \
    --module-name=trainer.task \
    --package-path=${PWD}/txtclsmodel/trainer \
    -- \
    --output_dir=${PWD}/txtcls_trained \
    --train_data_path=${PWD}/data/txtcls/train.tsv \
    --eval_data_path=${PWD}/data/txtcls/eval.tsv \
    --num_epochs=0.1
```
### Train on the Cloud
Let's first copy our training data to the cloud:
```
%%bash
# Stage the training data in the GCS bucket
gsutil cp data/txtcls/*.tsv gs://${BUCKET}/txtcls/
%%bash
# Submit the training job to AI Platform (single-GPU BASIC_GPU tier);
# the output dir is cleared first so the run starts from scratch
OUTDIR=gs://${BUCKET}/txtcls/trained_fromscratch
JOBNAME=txtcls_$(date -u +%y%m%d_%H%M%S)
gsutil -m rm -rf $OUTDIR
gcloud ai-platform jobs submit training $JOBNAME \
    --region=$REGION \
    --module-name=trainer.task \
    --package-path=${PWD}/txtclsmodel/trainer \
    --job-dir=$OUTDIR \
    --scale-tier=BASIC_GPU \
    --runtime-version 2.1 \
    --python-version 3.7 \
    -- \
    --output_dir=$OUTDIR \
    --train_data_path=gs://${BUCKET}/txtcls/train.tsv \
    --eval_data_path=gs://${BUCKET}/txtcls/eval.tsv \
    --num_epochs=5
```
Change the job name appropriately. View the job in the console, and wait until the job is complete.
```
# Check the status of the submitted training job (replace with your job name)
!gcloud ai-platform jobs describe txtcls_190209_224828
```
### Results
What accuracy did you get? You should see around 80%.
### Rerun with Pre-trained Embedding
We will use the popular GloVe embedding which is trained on Wikipedia as well as various news sources like the New York Times.
You can read more about Glove at the project homepage: https://nlp.stanford.edu/projects/glove/
You can download the embedding files directly from the stanford.edu site, but we've rehosted it in a GCS bucket for faster download speed.
```
# Copy the rehosted 200-d GloVe embedding file into our own bucket
!gsutil cp gs://cloud-training-demos/courses/machine_learning/deepdive/09_sequence/text_classification/glove.6B.200d.txt gs://$BUCKET/txtcls/
```
Once the embedding is downloaded re-run your cloud training job with the added command line argument:
` --embedding_path=gs://${BUCKET}/txtcls/glove.6B.200d.txt`
While the final accuracy may not change significantly, you should notice the model is able to converge to it much more quickly because it no longer has to learn an embedding from scratch.
#### References
- This implementation is based on code from: https://github.com/google/eng-edu/tree/master/ml/guides/text_classification.
- See the full text classification tutorial at: https://developers.google.com/machine-learning/guides/text-classification/
## Next step
Client-side tokenizing in Python is hugely problematic. See <a href="text_classification_native.ipynb">Text classification with native serving</a> for how to carry out the preprocessing in the serving function itself.
Copyright 2020 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License
| github_jupyter |
## Solving for thermal advection and diffusion in 2D using the Fenics library
John J. Armitage
Abstract:
This is an example notebook to explore whether we can make a submission process for a subsurface online reproducible journal. This notebook has been written to solve classic advection-diffusion. The notebook uses the packages fenics, mshr, and numpy.
Introduction:
This is an example of using the fenics library to solve for the thermal advection and diffusion in two dimensions. The equations are solved using the finite element technique within a cartesian box. The velocity of the material within the box is predefined as a linear function of space. The aim of this notebook is simply to explore how to deal with such a notebook and how to test it for reproducibility.
```
%matplotlib inline
# I need to list all the funtions I use rather than a *
from fenics import Constant,Point,plot,FunctionSpace,VectorFunctionSpace,Expression,interpolate,SubDomain, \
DirichletBC,near,TrialFunction,TestFunction,Expression,dx,dot,grad,lhs,rhs,solve,Function
from mshr import Rectangle,generate_mesh
import numpy as np
import matplotlib.pyplot as plt
```
conda install -c conda-forge fenics
conda install -c conda-forge mshr
I will set up the domain dimensions for the rectangular box and the model resolution.
```
# Domain dimensions in meters
lx = 1.5e5  # horizontal (x) extent of the box
lz = 1.0e5  # vertical (z) extent of the box
# number of elements along z-edge
numz = 32  # mesh resolution parameter passed to mshr.generate_mesh
```
Here I define the physical parameters that are used within the model and the time stepping for the numerical solution.
```
# Physical parameters
year = 365*24*60*60  # seconds in one (365-day) year
K = 1e-6 # thermal diffusivity (m^2/s, consistent with lengths in m, time in s)
# Time steps
num_steps = 20 # number of time steps
dtnom = 1e5*year # time step size: 1e5 years, expressed in seconds
dtc = Constant(dtnom)  # wrapped as a FEniCS Constant for use in the weak form
```
Now I am ready to define and solve the set of equations. The first step is to create the mesh using the library mshr and display the mesh.
```
# Create mesh and define function space
domain = Rectangle(Point(0,0), Point(lx,lz))  # rectangular domain [0,lx] x [0,lz]
mesh = generate_mesh(domain,numz)  # unstructured triangular mesh at resolution numz
p = plot(mesh)
plt.title("Mesh")
plt.show()
```
Then define the functions in which temperature and velocity will be stored.
```
# Define function space for temperature
V = FunctionSpace(mesh,'CG',1)  # continuous piecewise-linear Lagrange elements
# Define function space for velocity
# NOTE(review): W is defined but never used below (the velocity is a raw
# Expression) -- confirm whether it can be removed.
W = VectorFunctionSpace(mesh,'P',2)
```
Define the intial conditions
```
# Initial/boundary temperature profile: 1450 in the top 10 km (x[1] >= lz-10e3),
# 0 elsewhere
T_D = Expression('x[1]>=lz-10e3 ? 1450 : 0',lz=lz,degree=2)
# Interpolate the profile onto V to obtain the initial temperature field
T_n = interpolate(T_D, V)
```
Create the boundary conditions
```
# Boundary subdomains for the four edges of the box.
# NOTE(review): the `inside` tests ignore `on_boundary`, so they match any
# point on the corresponding coordinate line, not only boundary facets --
# harmless for a DirichletBC but worth confirming.
class East(SubDomain):
    def inside(self, x , on_boundary):
        return near(x[0], lx)
class West(SubDomain):
    def inside(self, x , on_boundary):
        return near(x[0], 0.0)
class North(SubDomain):
    def inside(self, x , on_boundary):
        return near(x[1], lz)
class South(SubDomain):
    def inside(self, x , on_boundary):
        return near(x[1], 0.0)
# Dirichlet condition pinning the top (north) edge to the profile T_D.
# East, West and South are defined but have no condition attached here.
Tbc = DirichletBC(V,T_D,North())
```
We then define the test and trial functions.
```
# Define variational problem
#T Temperature
T = TrialFunction(V)   # unknown temperature at the new time level
vt = TestFunction(V)   # test function for the weak form
```
The velocity of the material is defined as linarly increasing in the x direction and decreasing in the positive z direction.
```
# solid flow
# Prescribed velocity field (m/s): the x-component varies linearly with
# depth x[1], the z-component linearly with x[0]; magnitude scale 1e-2 m/yr.
u = Expression(('1e-2/year/lz*(lz-x[1])','-1e-2/year/lz*(lx-x[0])'),year=year,lz=lz,lx=lx,degree=2)
```
Finally we form the PDE to be solved.
```
# PDE for temperature
# Backward-Euler weak form of dT/dt + u.grad(T) = K*div(grad(T)):
# time derivative + advection + diffusion, integrated over the domain
Ft = ((T-T_n)/dtc)*vt*dx + dot(u,grad(T))*vt*dx + K*dot(grad(T),grad(vt))*dx
# Split into bilinear (lhs) and linear (rhs) parts for the linear solve
a1, L1 = lhs(Ft), rhs(Ft)
```
We now use a for loop to march forward in time to solve for temperature.
```
# Time-stepping
T = Function(V)  # solution container (rebinds T from TrialFunction to Function)
#phi = Function(V)
t = 0
for n in range(num_steps):
    # Update current time
    # NOTE(review): dtc is a FEniCS Constant, so t accumulates UFL objects
    # rather than plain floats; t is never used below, but `t += dtnom`
    # would be the conventional update -- confirm.
    t += dtc
    # Compute solution
    # solve(Ft == 0, T)
    solve(a1 == L1, T, Tbc)
    # Update previous solution
    T_n.assign(T)
```
The final temperature field is then plotted below.
```
p = plot(T)  # pseudocolour plot of the final temperature field
# set colormap
p.set_cmap("viridis")
p.set_clim(0,1450)  # match the initial-condition temperature range
# add a title to the plot
plt.title("Temperature")
# add a colorbar
plt.colorbar(p)
# inverted y-limits flip the axis so depth increases downwards
plt.ylim(100000,0)
plt.show()
```
| github_jupyter |
# Dynamic systems with unlimited memory
In this post we build on our preceding discussion of dynamic systems and discuss dynamic systems with unlimited memory. This type of dynamic system is used throughout the sciences and engineering, in particular in the area of *automatic control*. In machine learning such dynamic systems models are the bread and butter of so-called *Recurrent Neural Networks*.
As in our prior posts (e.g., on [Markov chains](https://jermwatt.github.io/control-notes/posts/markov_chains/Markov_chains.html), [recurrence relations](https://jermwatt.github.io/control-notes/posts/recurrence_relations/Recurrence_relations.html), and basic [dynamic systems with *limited* memory](https://jermwatt.github.io/control-notes/posts/dynamic_systems_limited_memory/dynamic_systems_limited_memory.html)) here we will deal with defining dynamic systems over a generic ordered input sequence $x_1,\,x_2,\,...,x_P$.
You can skip around this document to particular subsections via the hyperlinks below.
- [Computing a running sum the smart way](#running-sum)
- [A general definition](#definition)
- [A whole bunch of examples](#examples)
- [What does "unlimited" really mean?](#unlimited-meaning)
- [Deeper dynamic systems](#deeper-systems)
```
## this code cell will not be shown in the HTML version of this notebook
# imports from custom library for animations #
from library import exponential_average_animator
from library import history_animators
from library import plot_input_with_hidden_together
from library import plot_input_with_hidden_separate
from library import plot_hidden_histogram
# import standard libs
import numpy as np
import pandas as pd
from IPython.display import clear_output
# path to data
datapath = '../../datasets/plain_timeseries/'
# This is needed to compensate for matplotlib notebook's tendancy to blow up images when plotted inline
%matplotlib notebook
from matplotlib import rcParams
rcParams['figure.autolayout'] = True
%load_ext autoreload
%autoreload 2
```
<a id='running-sum'></a>
## Computing a running sum the smart way
Here we will tease out the basic idea behind a dynamic system with unlimited memory by exploring a super simple example: computing a *running sum* of input numbers *on the fly* - i.e., as they arrive. So suppose our input sequence arrives - in order - one element at a time. That is $x_1$ arrives first, then $x_2$, then $x_3$, and so on. To compute a "running sum" of these numbers we sum them all up when each new element arrives. That is, when the $p^{th}$ number arrives we want to compute the sum $h_p$ of the the numbers $x_1,\,x_2,\,...,x_p$.
A lazy approach to doing this would be to just sum up our numbers over and over again as each new element arrives, as shown below.
<center> <h3>The lazy way to compute a running sum</h3> </center>
\begin{array}
\
\text{sum of the first $1$ elements:} \,\,\,\,\,\,\,\,\,\,\,\,\, h_1 = x_1 \\
\text{sum of the first $2$ elements:} \,\,\,\,\,\,\,\,\,\,\,\,\, h_2 = x_1 + x_2 \\
\text{sum of the first $3$ elements:} \,\,\,\,\,\,\,\,\,\,\,\,\, h_3 = x_1 + x_2 + x_3 \\
\text{sum of the first $4$ elements:} \,\,\,\,\,\,\,\,\,\,\,\,\, h_4 = x_1 + x_2 + x_3 + x_4 \\
\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\, \vdots \,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\, \vdots \\
\text{sum of the first $p$ elements:} \,\,\,\,\,\,\,\,\,\,\,\,\, h_{p} = x_1 + x_2 + x_3 + x_4 + \cdots + x_p \\
\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\, \vdots \,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\, \vdots \\
\end{array}
But this is clearly very wasteful - both computationally and in terms of storage (to compute $h_p$ we need to store every single number from $x_1$ to $x_p$). For example, when computing the third sum $h_3 = x_1 + x_2 + x_3$ we waste computation, since we have already computed the sum $h_2 = x_1 + x_2$ previously. A more efficient *recursive* way of computing the sum $h_3$ would instead be $h_3 = h_2 + x_3$. This recursion then carries over to the next running sum $h_4$ as well: instead of computing $h_4 = x_1 + x_2 + x_3 + x_4$ we can instead re-use the work we did to compute $h_3 = x_1 + x_2 + x_3$ previously and compute the sum simply as $h_4 = h_3 + x_4$. This recursion holds at each subsequent level of computation, as shown below.
<center> <h3>The right way to compute a running sum</h3> </center>
\begin{array}
\
\text{sum of the first $1$ elements:} \,\,\,\,\,\,\,\,\,\,\,\,\, h_1 = x_1 \\
\text{sum of the first $2$ elements:} \,\,\,\,\,\,\,\,\,\,\,\,\, h_2 = h_1 + x_2 \\
\text{sum of the first $3$ elements:} \,\,\,\,\,\,\,\,\,\,\,\,\, h_3 = h_2 + x_3 \\
\text{sum of the first $4$ elements:} \,\,\,\,\,\,\,\,\,\,\,\,\, h_4 = h_3 + x_4 \\
\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\, \vdots \,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\, \vdots \\
\text{sum of the first $p$ elements:} \,\,\,\,\,\,\,\,\,\,\,\,\, h_{p} = h_{p-1} + x_p \\
\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\, \vdots \,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\, \vdots \\
\end{array}
As we can see our general recursion $h_p = h_{p-1} + x_p$ is a very efficient way to compute running sums, as we not only save immense computation but only *two* numbers need ever be stored in memory (as opposed to $p$ with the lazy way): $h_{p-1}$ and $x_p$.
This recursive manner of computing a running sum is a simple example of a *dynamic system with unlimited memory*. Here the memory of this system is deemed "unlimited" since - at every instance - the value $h_p$ captures something about (here, the *sum* of) *every value $x_1,\,x_2,\,...,x_p$* as
\begin{equation}
h_p = h_{p-1} + x_p = x_1 + x_2 + \cdots + x_p.
\end{equation}
In the jargon of dynamic systems and machine learning the update $h_p$ is often called a *hidden state* of the system. It might be more aptly referred to as a "summarizer" or "accumulator" since it *summarizes* some aspect of the input sequence $x_1,\,x_2,\,...,x_p$ - in this case by literally *summing* them up.
Note how this differs from a [dynamic system with *limited* memory](https://jermwatt.github.io/control-notes/posts/dynamic_systems_limited_memory/dynamic_systems_limited_memory.html) - here we have no "window" defining a subset of input values used in computing $h_p$.
<a id='definition'></a>
## A general definition
General dynamic systems with unlimited memory look something like the recursive formula for a running sum given in Equation (1), only instead of a simple sum any functional form can be used as
\begin{equation}
h_p = f\left(h_{p-1},x_p \right).
\end{equation}
Regardless of the function chosen (as we show [formally below](#unlimited-meaning)), the hidden state $h_p$ of such a system *always* summarizes the entire input sequence $x_1,\,x_2,\,...,x_p$. As we will see via a range of examples below, the facet(s) of the input sequence summarized in $h_p$ depends entirely on how the function $f$ is chosen (or - more generally - *learned*).
Notice strictly in terms of the formulae, comparing this to a general order $D=1$ [dynamic system with *limited* memory]((https://blog.dgsix.com/posts/dynamic_systems_limited_memory/dynamic_systems_limited_memory.html))
\begin{equation}
h_p = f\left(x_p\right)
\end{equation}
the only difference lies in the latter's lack of recursion on the hidden state $h_p$. It is this recursion that gives the former "unlimited" memory, and the latter a "limited" memory.
<a id='examples'></a>
## A whole bunch of examples
Here we describe a range of examples of dynamic systems with unlimited memory. Some of these - particularly the *exponential average*, *running maximum*, and the *running histogram* examples have natural analogs in the [limited memory case](https://jermwatt.github.io/control-notes/posts/dynamic_systems_limited_memory/dynamic_systems_limited_memory.html)
#### <span style="color:#a50e3e;">Example 1: </span> Running mean
Instead of computing a running sum, say we wanted to compute a running *mean* of our input numbers. To do this the "lazy way" we would literally average all $p$ numbers when the input $x_p$ arrives as
\begin{equation}
h_{p} = \frac{x_1 + x_2 + \cdots + x_{p-1} + x_p}{p}.
\end{equation}
This approach - of course - would suffer the same sort of computational and storage issues described with the running sum above. But with a little re-arranging of this formula
\begin{equation}
h_{p} = \frac{p-1}{p}\frac{x_1 + x_2 + \cdots + x_{p-1}}{p-1} + \frac{1}{p}x_p = \frac{p-1}{p}h_{p-1} + \frac{1}{p}x_p
\end{equation}
we can determine a hidden state recursion for the running mean as $h_p = \frac{p-1}{p}h_{p-1} + \frac{1}{p}x_p$.
Computing the running mean in this way solves both the computation and storage problems, and is another example of a dynamic system with unlimited memory. Here the hidden state $h_p$ *summarizes $x_1,\,x_2,\,...,x_p$ by (efficiently) computing its *mean*.
#### <span style="color:#a50e3e;">Example 2: </span> Exponential average
A basic but very popular generalization of the running mean $h_p = \frac{p-1}{p}h_{p-1} + \frac{1}{p}x_p$ is called the *exponential average*
\begin{equation}
h_p = \alpha h_{p-1} + (1 - \alpha) x_p
\end{equation}
$\,\,\,$ where $0 \leq \alpha \leq 1$. This is a popular time series *smoother* (analogous to the [moving average](https://jermwatt.github.io/control-notes/posts/moving_averages/Moving_averages.html) only with unlimited memory). It is also popularly used in [momentum accelerated gradient descent](https://jermwatt.github.io/machine_learning_refined/notes/3_First_order_methods/3_8_Momentum.html). It is called an 'exponential average' because if one "rolls back" the recursion on $h_p$ one can see that $h_p$ *summarizes* the input $x_1,\,x_2,\,...,x_p$ as an <a href="https://en.wikipedia.org/wiki/Exponential_smoothing" target="_blank">exponential average</a>.
Below we animate the production of an exponential average (in orange) for a time series input (in black). Here $\alpha$ has been set to $\alpha = 0.9$.
```
## This code cell will not be shown in the HTML version of this notebook
# load in data
csvname = datapath + 'ford_data.csv'  # daily closing prices
data = pd.read_csv(csvname)
x = np.array(data['Close']) # date: 1980 to 2017
# exponential average function
def exponential_average(x, alpha):
    """Return the exponential moving average of the 1-D sequence `x`.

    Implements the recursion h_p = alpha*h_{p-1} + (1 - alpha)*x_p with
    h_1 = x[0].  `alpha` in [0, 1] controls the smoothing: values closer
    to 1 give a smoother, slower-reacting average.

    Returns a numpy array the same length as `x`.

    BUG FIX: the loop previously read x[p] for p = 0..len(x)-2, which
    re-used x[0], never consumed the final sample, and lagged every
    output by one step relative to the stated recursion.
    """
    h = [x[0]]
    for p in range(1, len(x)):
        # fold the new sample into the previous hidden state
        h_p = alpha * h[-1] + (1 - alpha) * x[p]
        h.append(h_p)
    return np.array(h)
# produce moving average time series
alpha = 0.9  # smoothing factor: closer to 1 = smoother
h = exponential_average(x,alpha)
# run animator (renders the animation to an mp4 file)
demo = exponential_average_animator.Visualizer()
demo.animate_exponential_ave(x,h,savepath='videos/animation_1.mp4')
clear_output()
## This code cell will not be shown in the HTML version of this notebook
from IPython.display import HTML
# embed the rendered animation in the notebook
HTML("""
<video width="1000" height="400" controls loop>
<source src="videos/animation_1.mp4" type="video/mp4">
</video>
""")
```
#### <span style="color:#a50e3e;">Example 3. </span> The running Riemann sum
In the instance that $x_1,\,x_2,\,...,x_p$ are $p$ ordered evaluations of a function spaced $\frac{1}{T}$ apart, a slight adjustment to the running sum gives an approximation to the one-dimensional integral or 'area under the curve', known as a *Riemann sum*. As illustrated in the figure below the *Riemann sum* approximates the area under a curve by a series of equally spaced rectangles whose heights are equally spaced evaluations of the function.
<figure>
<p>
<img src= 'images/riemann.png' width="70%" height="70%" alt=""/>
</p>
<figcaption>
</em>
</figcaption>
</figure>
The Riemann sum of a function up to the $p^{th}$ evaluation $x_p$ is just the sum of the area of the rectangles defined by it and its predecessors, that is
\begin{equation}
h_{p} = \frac{1}{T}x_1 + \frac{1}{T}x_2 + \cdots + \frac{1}{T}x_{p-1} + \frac{1}{T}x_{p}
\end{equation}
which - like the running sum (here we are just multiplying the same step by $\frac{1}{T}$) - can be defined in terms of its predecessor simply as
\begin{equation}
\
h_{p} = \left(\frac{1}{T}x_1 + \frac{1}{T}x_2 + \cdots + \frac{1}{T}x_{p-1}\right) + \frac{1}{T}x_{p} \\
\,\,\,\,\,\,\, = h_{p-1} + \frac{1}{T}x_{p}.
\end{equation}
Here the state variable $h_{p}$ summarizes the input from $x_1$ through $x_{p}$ in that it is precisely the Riemann sum of the rectangles with these heights.
#### <span style="color:#a50e3e;">Example 4: </span> Running maximum
We can compute the maximum of an input series on the run effectively
\begin{equation}
h_p = \text{maximum}\left(h_{p-1},x_p\right).
\end{equation}
Here the hidden state $h_p$ *summarizes* the input $x_1,\,x_2,\,...,x_p$ by its *maximum value*.
Below we show an example with a input series (in blue), and its corresponding running maximum in dark orange.
```
## This code cell will not be shown in the HTML version of this notebook
# an example input sequence of ordered data
x = []
for t in range(50):
    x_t = 0.035*t*np.sin(0.5*t)  # sine wave with linearly growing amplitude
    x.append(x_t)
# running maximum recursion: h_p = max(h_{p-1}, x_p)
h = [x[0]]
for p in range(1,len(x)):
    # NOTE(review): 'ave' holds the running *maximum*, not an average --
    # the names look copy-pasted from the averaging example
    ave = h[-1]
    x_p = x[p]
    # make next element
    new_ave = np.maximum(ave,x_p)
    h.append(new_ave)
# animate the input together with its running maximum
plotter = plot_input_with_hidden_together.Plotter(hidden_name = 'running max')
animator = history_animators.Animator()
animator.animate_plot(plotter,x,h,num_frames = 100,savepath='videos/animation_2.mp4',fps=15)
## This code cell will not be shown in the HTML version of this notebook
from IPython.display import HTML
HTML("""
<video width="1000" height="400" controls loop>
<source src="videos/animation_2.mp4" type="video/mp4">
</video>
""")
```
#### <span style="color:#a50e3e;">Example 5. </span> Running count of zero-crossings
Many cheap guitar tuners work by feeding in an audio signal - which consists of a sine or sum of sine waves - and determining its pitch by counting the number of times the sine wave crosses zero over a short range of its input. The process of counting the number of zero crossings of a sine wave can be easily modeled as a dynamic system with unlimited memory. For a centered and digitized sine wave like the one shown below in blue, we simply scan through the input sequence two units at a time looking for places where $x_{p-1} < 0$ and $x_{p} > 0$, or vice versa. Hence a dynamic system can be formed where $h_{p}$ is a running count of the number of zero crossings of the series as
\begin{equation}
h_{p} = h_{p-1} + \mathcal{I}_{0}\left(x_{p},x_{p-1}\right)
\end{equation}
where $\mathcal{I}_{0}$ is a simple indicator function that equals $1$ if the two points $x_{p-1}$ and $x_{p}$ straddle $0$, and is equal to zero otherwise.
Below we show an example with a input series (in blue), and its corresponding running number of zero-crossings in dark orange.
```
## This code cell will not be shown in the HTML version of this notebook
# an example input sequence of ordered data
x = []
for t in range(50):
    x_t = np.sin(0.5*t)  # plain sine wave sampled at integer t
    x.append(x_t)
# zero-crossing indicator
def zero_cross_counter(x_t, x_t_minus_1):
    """Return 1 if two consecutive samples straddle (or touch) zero, else 0."""
    downward = x_t_minus_1 >= 0 and x_t <= 0
    upward = x_t_minus_1 <= 0 and x_t >= 0
    return 1 if (downward or upward) else 0
# accumulate the running count of zero crossings
# NOTE(review): h is seeded with x[0] (= sin(0) = 0 for this input) rather
# than 0; works here, but would offset the count if x[0] != 0 -- confirm.
h = [x[0]]
for t in range(1,len(x)):
    # current count and the two samples to test for a crossing
    h_t = h[-1]
    x_t_minus_1 = x[t-1]
    x_t = x[t]
    # increment the count when the pair straddles zero
    cross = zero_cross_counter(x_t,x_t_minus_1)
    h.append(h_t + cross)
# animate the input and the running crossing count in separate panels
plotter = plot_input_with_hidden_separate.Plotter()
animator = history_animators.Animator()
animator.animate_plot(plotter,x,h,num_frames = 100,savepath='videos/animation_3.mp4',fps=10)
## This code cell will not be shown in the HTML version of this notebook
from IPython.display import HTML
HTML("""
<video width="1000" height="400" controls loop>
<source src="videos/animation_3.mp4" type="video/mp4">
</video>
""")
```
#### <span style="color:#a50e3e;">Example 6. </span> Running normalized histogram
If one is willing to discretize the input (to create a set of bins) we can design $f$ to accumulate an approximate distribution or *histogram* of values in an input series. In particular, below we animate an example of a running *normalized histogram* (in orange) of an input series (in blue).
```
## This code cell will not be shown in the HTML version of this notebook
from collections import Counter  # NOTE(review): Counter is imported but unused here
import numpy as np
# an example input sequence of ordered data
x = []
for t in range(50):
    x_t = np.sin(t) + t*0.2  # sine wave riding on a linear upward trend
    x.append(x_t)
# exponentially-decayed running histogram update
def update_histogram(h_t, x_t, alpha):
    """Fold sample x_t into the running normalized histogram h_t.

    Every existing bin mass is decayed by (1 - alpha), then a mass of
    alpha is deposited in the bin containing x_t.  The sample is rounded
    to one decimal place (the bin width) and clipped into [0, 10] so it
    always lands on an existing bin key.  h_t is mutated in place and
    also returned.
    """
    decay = 1 - alpha
    for bin_key in h_t.keys():
        h_t[bin_key] = h_t[bin_key] * decay
    # snap the sample onto the bin grid (one decimal place)
    x_t = np.round(x_t, 1)
    # clip into the histogram's support [0, 10]
    if x_t < 0:
        x_t = 0
    if x_t > 10:
        x_t = 10
    # deposit the new mass in the matching bin
    h_t[x_t] += alpha
    return h_t
# initialize hidden (histogram) state
# bins: the distinct one-decimal values in [0, 10] (np.unique collapses the
# 10000 linspace points onto 101 unique rounded keys)
bins = np.unique(np.array([np.round(a,1) for a in np.linspace(0,10,10000)]))
h_t = {a:0 for a in bins}  # histogram: bin value -> accumulated mass
# update hidden state
import copy
h_all = [copy.deepcopy(h_t)]  # snapshot of the histogram after each step
n = 1  # NOTE(review): counter is incremented but never read
for x_t in x:
    alpha = 0.1  # decay/deposit rate for the running histogram
    h_t = update_histogram(h_t,x_t,alpha)
    h_all.append(copy.deepcopy(h_t))
    n+=1
# animate the input alongside the evolving histogram snapshots
animator = history_animators.Animator()
plotter = plot_hidden_histogram.Plotter()
animator.animate_plot(plotter,x,h_all,num_frames = 100,savepath='videos/animation_4.mp4',fps=10)
## This code cell will not be shown in the HTML version of this notebook
from IPython.display import HTML
HTML("""
<video width="1000" height="400" controls loop>
<source src="videos/animation_4.mp4" type="video/mp4">
</video>
""")
```
<a id='unlimited-meaning'></a>
## What does "unlimited" really mean?
In each of the examples above we saw how the state variable $h_{p}$ *provides a summary of all preceding input $x_1$ through $x_{p}$*. We can see that this is true *for every dynamic system with unlimited memory* by 'rolling back' the general update step. If we do so one time - plugging in the formula $h_{p} = f\left(h_{p-1},x_{p}\right)$ into the formula for $h_{p-1}$ we can see dependence on both $x_{p}$ and $x_{p-1}$
\begin{equation}
h_{p} = f\left(f\left(h_{p-2},x_{p-1}\right),x_{p}\right)
\end{equation}
Continuing in this fashion we can 'roll back' all the way to $h_1$
\begin{equation}
h_{p} = f\left(f\left(f\left(\cdots f\left(h_{1},x_{2}\right),x_3\right)\cdots,x_{p-1}\right),x_{p}\right)
\end{equation}
which exposes the fact that $h_{p}$ is dependent on all prior values $x_2$ to $x_{p}$, and $x_1$ as well if we simply set the initial condition $h_1 = x_1$. In general then, when we say that '$h_{p}$ provides a summary of all preceding input $x_1$ through $x_{p}$' we mean exactly the statement above. Another common way of saying this is that such a system has a complete 'memory' of all input preceding it.
How valuable is this "unlimited memory" summarizing ability of the hidden state $h_p$? As mentioned previously, this *completely* depends on the function $f$ chosen (or - in the case of Recurrent Neural Networks - *learned*). When $f$ is a simple sum, an average, etc., what is summarized about an input series is not all that distinctive, and thus the fact that these systems have "unlimited memory" is not very valuable. The more intricate the function $f$ the more interesting (and more useful) the summarizing variable of an unlimited memory dynamic system can be.
<a id='deeper-systems'></a>
## Deeper dynamic systems with unlimited memory
We can create 'deeper' versions of what we have seen above by stacking generic dynamic systems on top of one another in a recursive system. For example, stacking two generic dynamic systems (with unlimited memory) on top of one another gives a system
\begin{equation}
h_p^{\left(1\right)} = f^{\left(1\right)}\left( h_{p-1}^{\left(1\right)}, x^{\,}_p \right) \,\,\,\,\, \\
h_p^{\left(2\right)} = f^{\left(2\right)}\left(h_{p-1}^{\left(2\right)}, h_{p}^{\left(1\right)} \right).
\end{equation}
Here the first update $h_p^{\left(1\right)}$ looks exactly like what we have seen before (where $f^{\left(1\right)}$ is some function). We then feed the result of this update into another system defined by $h_p^{\left(2\right)}$ in a very similar manner. This sort of recipe can be extended as far as desired by adding additional layers / recursions that feed off the output preceding them. Thus in terms of modeling such 'deep systems' offer great flexibility.
The most practical example of such a system is the *running variance*. Suppose we compute the running mean of a list of numbers $x_1,\,x_2,\,...,x_p,...$ when the $p^{th}$ number $x_p$ arrives the *smart way* (as shown above)
\begin{equation}
h_{p}^{\text{ave}} = \frac{p-1}{p}h_{p-1}^{\text{ave}} + \frac{1}{p}x_p
\end{equation}
and then - on top of this - we would like to compute the *running variance* of our ever-increasing list of numbers. If we used the standard formula for variance we would compute
\begin{equation}
h_p^{\text{var}} = \frac{1}{p}\sum_{j=1}^p\left(x_j - h_p^{\text{ave}}\right)^2
\end{equation}
each time a new point $x_p$ arrived. However computing the running variance this way would be wasteful - both in terms of storage (we would need to store all of the previous input points) and computation (we're repeating the same sorts of computation over and over again) - in complete analogy to the use of the standard formula when computing the running mean (as we saw above).
With a healthy dose of algebraic manipulation, one can express the running variance as the following (much more efficient) recursion involving both the current point $x_p$ and the running average as
\begin{equation}
h_{p}^{\text{var}} = \frac{p-1}{p}h_{p-1}^{\text{var}} + \frac{1}{p}\left(x_p^{\,} - h_{p}^{\text{ave}}\right)\left(x_p^{\,} - h_{p-1}^{\text{ave}}\right).
\end{equation}
Together then computing the running variance requires a two-tiered calculation - where we first update our running average recursively and then pass the result onward as
\begin{equation}
h_{p}^{\text{ave}} = \frac{p-1}{p}h_{p-1}^{\text{ave}} + \frac{1}{p}x_p \,\,\,\,\,\, \,\,\,\,\,\, \,\,\,\,\,\, \,\,\,\,\,\, \,\,\,\,\,\, \,\,\,\,\,\, \,\,\,\,\,\, \,\,\,\,\,\, \\
h_{p}^{\text{var}} = \frac{p-1}{p}h_{p-1}^{\text{var}} + \frac{1}{p}\left(x_p^{\,} - h_{p}^{\text{ave}}\right)\left(x_p^{\,} - h_{p-1}^{\text{ave}}\right).
\end{equation}
| github_jupyter |
# Inventory Optimization Using (s, Q) and (R, S) Policies
In this notebook, we provide a framework for evaluating inventory management policies such as (s, Q) and (R, S) using a single-echelon supply chain simulator.
```
import math
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
plt.style.use('seaborn-white')
import pandas as pd
from matplotlib import rc, cm
plt.rcParams.update({'pdf.fonttype': 'truetype'})
```
# Simulator
We start with implementing a simulator of a traditional supply chain environment. We use it to analyze various scenarious in the next sections.
```
#
# Random generators
#
class Generator:
    """Abstract base class for the scalar random draws used by the simulator."""
    def next(self):
        """Return the next sample; implemented by concrete subclasses."""
        pass
class ConstantGenerator(Generator):
    """Degenerate generator that returns the same value on every draw."""

    def __init__(self, mu):
        # the single value this generator ever produces
        self.mu = mu

    def next(self):
        """Return the constant value `mu`."""
        return self.mu
class NormalGenerator(Generator):
    """Generator drawing samples from a normal distribution N(mu, sigma)."""

    def __init__(self, mu, sigma):
        self.mu = mu        # mean
        self.sigma = sigma  # standard deviation

    def next(self):
        """Draw one sample from N(mu, sigma)."""
        sample = np.random.normal(self.mu, self.sigma, 1)
        return sample[0]
class FoldedNormalGenerator(Generator):
    """Generator drawing |N(mu, sigma)| -- a folded normal distribution."""

    def __init__(self, mu, sigma):
        self.mu = mu        # mean of the underlying normal
        self.sigma = sigma  # standard deviation of the underlying normal

    def next(self):
        """Draw one normal sample and fold it to be non-negative."""
        sample = np.random.normal(self.mu, self.sigma, 1)[0]
        return abs(sample)
#
# Inventory policies
#
class InventoryPolicy:
    """Abstract ordering policy: decides how much to order each period."""
    def get_order_quantity(self, supply_chain):
        """Return the order quantity given the current supply-chain state."""
        pass
class ConstantInventoryPolicy(InventoryPolicy):
    """Policy that orders the same fixed quantity every period."""

    def __init__(self, q):
        self.q = q  # fixed per-period order quantity

    def get_order_quantity(self, supply_chain):
        """Return the fixed order quantity; supply_chain state is ignored.

        BUG FIX: previously returned the bare name `q`, which raised a
        NameError (or silently picked up an unrelated global `q`) instead
        of the instance attribute.
        """
        return self.q
class sQInventoryPolicy(InventoryPolicy):
    """(s, Q) policy: order a fixed quantity Q whenever net inventory
    (on-hand plus in-transit) falls to the reorder point s or below."""

    def __init__(self, s, Q):
        self.s = s  # reorder point
        self.Q = Q  # fixed order quantity

    def __str__(self):
        # BUG FIX: the closing parenthesis was missing from the format string
        return f'(s={self.s}, Q={self.Q})'

    def get_order_quantity(self, supply_chain):
        """Return Q if net inventory is at or below s, otherwise 0."""
        net_inventory = supply_chain.get_stock() + supply_chain.get_intransit()
        if net_inventory <= self.s:
            return self.Q
        else:
            return 0
class RSInventoryPolicy(InventoryPolicy):
    """(R, S) policy: every R periods, order up to the target level S."""

    def __init__(self, R, S):
        self.S = S  # order-up-to level
        self.R = R  # review period (order only when t is a multiple of R)

    def __str__(self):
        # BUG FIX: the closing parenthesis was missing from the format string
        return f'(R={self.R}, S={self.S})'

    def get_order_quantity(self, supply_chain):
        """At each review epoch, top net inventory (on-hand + in-transit)
        up to S; otherwise order nothing."""
        net_inventory = supply_chain.get_stock() + supply_chain.get_intransit()
        if supply_chain.t % self.R == 0 and net_inventory <= self.S:
            return self.S - net_inventory
        else:
            return 0
#
# Supply chain simulator
#
class SupplyChain:
    """Discrete-time single-echelon supply chain simulator.

    Each call to act() performs one step: ask the policy for an order,
    receive any orders whose lead time elapses this step, serve stochastic
    demand from stock (unmet demand is lost), and accrue costs.
    """
    def __init__(self, config):
        # per-step traces, all indexed by time step
        self.orders = []      # quantity ordered at each step
        self.stock = []       # on-hand stock (receipts added, demand subtracted in place)
        self.demand = []      # demand drawn at each step
        self.lead_times = []  # lead time drawn for each order
        self.t = 0            # current time step
        self.fixed_tx_cost = config['fixed_transaction_cost']        # cost per order placed
        self.variable_tx_cost = config['variable_transaction_cost']  # cost per unit ordered
        self.holding_cost = config['holding_cost']                   # cost per unit held per step
        self.demand_generator = config['demand_generator']           # Generator instance
        self.lead_time_generator = config['lead_time_generator']     # Generator instance
        self.policy = config['policy']                               # InventoryPolicy instance
        # auxiliary per-step traces for cost/service-level calculations
        self._filled_demand = []
        self._fixed_tx_costs = []
        self._variable_tx_costs = []
        self._holding_costs = []
    def get_stock(self):
        """Current on-hand stock (0 before the first step)."""
        if not self.stock:
            return 0
        else:
            return self.stock[-1]
    def get_intransit(self):
        """Total quantity ordered but not yet delivered at time t."""
        in_transit = 0
        for i in range(self.t):
            # order i arrives at step i + lead_times[i]; count it while
            # that arrival step has not yet passed
            if self.t <= i + self.lead_times[i]:
                in_transit += self.orders[i]
        return in_transit
    def process_demand(self):
        """Draw this step's demand and fill as much as stock allows (lost sales)."""
        demand = self.demand_generator.next()
        self.demand.append(demand)
        filled_demand = min(demand, self.stock[self.t])
        self.stock[self.t] -= filled_demand
        self._filled_demand.append(filled_demand)
    def place_order(self):
        """Ask the policy for an order quantity and draw its lead time."""
        order = self.policy.get_order_quantity(self)
        self.orders.append(order)
        lead_time = int(self.lead_time_generator.next())
        self.lead_times.append(lead_time)
    def procees_orders(self):
        """Receive every order whose lead time elapses exactly at step t."""
        # NOTE(review): method name is a typo for "process_orders"; kept
        # unchanged because external callers may rely on it.
        new_stock = 0
        for i in range(self.t + 1):
            if self.t == i + self.lead_times[i]:
                new_stock += self.orders[i]
        self.stock.append(self.get_stock() + new_stock)
    def update_costs(self):
        """Accrue fixed/variable ordering costs and holding cost for step t."""
        if self.orders[self.t] != 0:
            self._fixed_tx_costs.append(self.fixed_tx_cost)
            self._variable_tx_costs.append(self.orders[self.t] * self.variable_tx_cost)
        else:
            self._fixed_tx_costs.append(0)
            self._variable_tx_costs.append(0)
        self._holding_costs.append(self.stock[self.t] * self.holding_cost)
    def act(self):
        """Advance the simulation by one time step."""
        self.place_order()
        self.procees_orders()
        self.process_demand()
        self.update_costs()
        self.t += 1
def plot_supply_chain(supply_chain):
    """Plot the simulation traces: orders, lead times, stock (with the
    policy's control level), demand (filled vs lost) and cumulative costs."""
    fig, ax = plt.subplots(5, 1, figsize=(12, 12))
    x = np.arange(supply_chain.t)
    ax[0].plot(x, supply_chain.orders, label='Orders')
    ax[1].plot(x, supply_chain.lead_times, label='Lead time')
    ax[2].plot(x, supply_chain.stock, label='Stock')
    # overlay the policy's control level on the stock panel
    if isinstance(supply_chain.policy, sQInventoryPolicy):
        ax[2].axhline(supply_chain.policy.s, color='red', label='Safety stock')
    if isinstance(supply_chain.policy, RSInventoryPolicy):
        ax[2].axhline(supply_chain.policy.S, color='red', label='Up-to level')
    # demand panel: shade filled vs lost demand
    ax[3].plot(x, supply_chain.demand, label='Demand')
    ax[3].fill_between(x, np.zeros(len(x)), supply_chain._filled_demand, facecolor="#1DACD6", alpha=.5, label='Filled demand')
    ax[3].fill_between(x, supply_chain._filled_demand, supply_chain.demand, facecolor="#CC6666", alpha=.7, label='Lost demand')
    # cumulative cost components, stacked bottom-to-top
    fc = np.cumsum(supply_chain._fixed_tx_costs)
    vc = np.cumsum(supply_chain._variable_tx_costs)
    hc = np.cumsum(supply_chain._holding_costs)
    ax[4].fill_between(x, np.zeros(len(x)), fc, alpha=0.5, label='Fixed costs')
    ax[4].fill_between(x, fc, fc + vc, alpha=0.5, label='Variable costs')
    ax[4].fill_between(x, fc + vc, fc + vc + hc, alpha=0.5, label='Holding costs')
    for axi in ax:
        axi.grid(True)
        axi.legend()
##########################################################################

# Simulation configuration.
config = {
    'fixed_transaction_cost': 10,    # cost incurred per order placed
    'variable_transaction_cost': 1,  # cost per unit ordered
    'holding_cost': 0.1,             # cost per unit in stock per step
    # BUG FIX: `HalfNormalGenerator` is not defined anywhere in this
    # notebook; FoldedNormalGenerator implements |N(mu, sigma)|, which is
    # the intended half/folded-normal distribution.
    'demand_generator': FoldedNormalGenerator(5, 2),
    'lead_time_generator': FoldedNormalGenerator(0, 1),
    'policy': sQInventoryPolicy(5, 50)
}

# Run the simulator for 128 steps and plot the resulting traces.
simulator = SupplyChain(config)
for t in range(128):
    simulator.act()
plot_supply_chain(simulator)
#
# Helper functions for performance metrics calculation based on the simulation traces
#
def get_holding_costs(supply_chain):
    """Total holding cost accrued over the whole simulation."""
    per_step_costs = supply_chain._holding_costs
    return np.sum(per_step_costs)
def get_fixed_transaction_costs(supply_chain):
    """Total fixed (per-order) transaction cost over the simulation."""
    per_step_costs = supply_chain._fixed_tx_costs
    return np.sum(per_step_costs)
def get_fill_rate(supply_chain):
    """Fraction of total demand that was served from stock."""
    lost = np.sum(np.array(supply_chain.demand) - np.array(supply_chain._filled_demand))
    total = np.sum(supply_chain.demand)
    return 1 - lost / total
def get_cycle_service_level(supply_chain):
    """Cycle service level: fraction of replenishment cycles with no stock-out.

    A new cycle is opened whenever an order is placed (the final step also
    closes the last cycle).  Returns (n_cycles, n_stock_outs, service_level).
    """
    cycles = 0
    stock_outs = 0
    cycle_had_stock_out = False
    lost_demand = np.array(supply_chain.demand) - np.array(supply_chain._filled_demand)
    last_step = supply_chain.t - 1
    for step in range(supply_chain.t):
        # an order (or the end of the horizon) closes the previous cycle
        if supply_chain.orders[step] != 0 or step == last_step:
            cycles += 1
            if cycle_had_stock_out:
                stock_outs += 1
            cycle_had_stock_out = False
        if lost_demand[step] != 0:
            cycle_had_stock_out = True
    return cycles, stock_outs, 1 - stock_outs / cycles
```
# Scenario 1: Find Cost-Optimal Order Quantity for (s,Q) policy
We first analyze how order quantity Q in (s,Q) policy impacts the supply chain costs, and how the optimal value can be determined.
```
#
# This function performs multiple simulations
# for a 1D array of configurations
#
def evaluate_scenarios(n_simulations, n_sim_steps, parameter_grid, config_grid):
    """Run n_simulations independent replications of n_sim_steps steps for
    each configuration in config_grid and summarize the metrics.

    Returns a dict of 2 x len(config_grid) arrays (row 0 = mean across
    replications, row 1 = std) for holding costs, fixed costs, total
    costs, fill rates and cycle service levels.

    NOTE(review): `parameter_grid` is accepted but never used inside the
    function -- confirm whether it can be dropped or should label results.
    """
    trace_dims = (len(config_grid), n_simulations)
    holding_costs = np.zeros(trace_dims)
    fixed_costs = np.zeros(trace_dims)
    total_costs = np.zeros(trace_dims)
    fill_rates = np.zeros(trace_dims)
    service_levels = np.zeros(trace_dims)
    for i, config in enumerate(config_grid):
        for j in range(n_simulations):
            # fresh simulator for every replication
            simulator = SupplyChain(config)
            for t in range(n_sim_steps):
                simulator.act()
            holding_costs[i][j] = get_holding_costs(simulator)
            fixed_costs[i][j] = get_fixed_transaction_costs(simulator)
            total_costs[i][j] = holding_costs[i][j] + fixed_costs[i][j]
            fill_rates[i][j] = get_fill_rate(simulator)
            service_levels[i][j] = get_cycle_service_level(simulator)[-1]
    # stack mean (row 0) and std (row 1) across replications
    holding_cost_summary = np.vstack([np.mean(holding_costs, axis=1), np.std(holding_costs, axis=1)])
    fixed_cost_summary = np.vstack([np.mean(fixed_costs, axis=1), np.std(fixed_costs, axis=1)])
    total_cost_summary = np.vstack([np.mean(total_costs, axis=1), np.std(total_costs, axis=1)])
    fill_rate_summary = np.vstack([np.mean(fill_rates, axis=1), np.std(fill_rates, axis=1)])
    service_levels_summary = np.vstack([np.mean(service_levels, axis=1), np.std(service_levels, axis=1)])
    return {'holding_costs': holding_cost_summary,
            'fixed_costs': fixed_cost_summary,
            'total_costs': total_cost_summary,
            'fill_rates': fill_rate_summary,
            'service_levels': service_levels_summary}
#
# Visualize how the swept simulation parameter affects supply chain
# costs and service-level metrics (mean line with +/- 1 std band).
#
def plot_costs(parameter_grid, evaluation_results, mode):
    """Plot metric summaries from evaluate_scenarios against parameter_grid."""
    x = parameter_grid
    if mode == 'costs':
        # Costs share the top panel; fill rate gets its own smaller panel.
        metrics = [(evaluation_results['holding_costs'], 'Holding costs', 0),
                   (evaluation_results['fixed_costs'], 'Fixed costs', 0),
                   (evaluation_results['total_costs'], 'Total costs', 0),
                   (evaluation_results['fill_rates'], 'Fill rate', 1)]
        fig, ax = plt.subplots(2, 1, figsize=(12, 7), gridspec_kw={'height_ratios': [4, 1]})
        for summary, label, plot_id in metrics:
            mean, std = summary[0, :], summary[1, :]
            ax[plot_id].plot(x, mean, label=label)
            ax[plot_id].fill_between(x, mean - std, mean + std, alpha=0.3)
            ax[plot_id].grid(True)
            ax[plot_id].legend()
    if mode == 'service_levels':
        fig, ax = plt.subplots(1, 1, figsize=(12, 6))
        summary = evaluation_results['service_levels']
        mean, std = summary[0, :], summary[1, :]
        ax.plot(x, mean, label='Service level')
        ax.fill_between(x, mean - std, mean + std, alpha=0.3)
        ax.grid(True)
        ax.legend()
#####################################################################################################
# Scenario 1 sweep: vary the order quantity Q of the (s, Q) policy and
# evaluate costs/fill rate over n_simulations Monte Carlo runs each.
n_simulations = 64
parameter_grid = np.arange(10, 100, 5)
# One configuration per candidate order quantity; all else held fixed.
config_grid = [
    {
        'fixed_transaction_cost': 10,
        'variable_transaction_cost': 1,
        'holding_cost': 0.1,
        'demand_generator': FoldedNormalGenerator(5, 0.5),
        'lead_time_generator': FoldedNormalGenerator(0, 0.5),
        'policy': sQInventoryPolicy(10, order_quantity)
    }
    for order_quantity in parameter_grid ]
evaluation_results = evaluate_scenarios(n_simulations, 128, parameter_grid, config_grid)
plot_costs(parameter_grid, evaluation_results, mode='costs')
#
# This function performs multiple simulations for a 2D array of configurations
#
def evaluate_scenarios_2d(n_simulations, n_sim_steps, parameter_grid1, parameter_grid2, config_grid):
    """Simulate every configuration in a 2D grid and return mean summaries.

    For each cell of config_grid, runs n_simulations independent
    simulations of n_sim_steps periods each, then averages total costs,
    fill rates, and cycle service levels over the simulations.

    parameter_grid1/parameter_grid2 are accepted for interface symmetry
    with the plotting helpers; only config_grid drives the simulations.

    Returns a dict of 2D arrays (one averaged value per grid cell).
    """
    trace_dims = (config_grid.shape[0], config_grid.shape[1], n_simulations)
    total_costs = np.zeros(trace_dims)
    fill_rates = np.zeros(trace_dims)
    service_levels = np.zeros(trace_dims)
    for i, config_row in enumerate(config_grid):
        for j, config in enumerate(config_row):
            for k in range(n_simulations):
                simulator = SupplyChain(config)
                # Bug fix: honor the n_sim_steps argument. It was hard-coded
                # as range(128), silently ignoring the 512/1024 step counts
                # that callers pass in.
                for t in range(n_sim_steps):
                    simulator.act()
                holding_costs = get_holding_costs(simulator)
                fixed_costs = get_fixed_transaction_costs(simulator)
                fill_rates[i][j][k] = get_fill_rate(simulator)
                total_costs[i][j][k] = holding_costs + fixed_costs
                service_levels[i][j][k] = get_cycle_service_level(simulator)[-1]
    total_cost_summary = np.mean(total_costs, axis=2)
    fill_rate_summary = np.mean(fill_rates, axis=2)
    service_level_summary = np.mean(service_levels, axis=2)
    return {'total_costs': total_cost_summary,
            'fill_rates': fill_rate_summary,
            'service_levels': service_level_summary}
#
# This function visualizes the dependency between
# the simulation parameters and supply chain costs/SLAs
#
def plot_performance_2d(evaluation_results, parameter_grid1, parameter_grid2, metrics, levels=None, filename='sQ.pdf'):
    """Contour-plot selected metric surfaces over the 2D parameter grid.

    metrics: any subset of {'fill_rates', 'total_costs', 'service_levels'};
        panels are always drawn in that fixed order regardless of the
        order in the list (preserves the original behavior).
    levels: passed through to contourf.
    filename: output path for the saved figure. Generalized from the
        previously hard-coded 'sQ.pdf'; the default keeps old behavior.
    """
    fig, ax = plt.subplots(1, len(metrics), figsize=(16, 6))
    # Robustness: with a single metric, plt.subplots returns a bare Axes
    # object; normalize to a 1D array so ax[i] always works.
    ax = np.atleast_1d(ax)
    x, y = np.meshgrid(parameter_grid1, parameter_grid2)
    metrics_spec = []
    if 'fill_rates' in metrics:
        metrics_spec.append((evaluation_results['fill_rates'], 'Fill rate'))
    if 'total_costs' in metrics:
        metrics_spec.append((evaluation_results['total_costs'], 'Total costs'))
    if 'service_levels' in metrics:
        metrics_spec.append((evaluation_results['service_levels'], 'Service level'))
    for i, (metric, metric_name) in enumerate(metrics_spec):
        # Transpose so parameter_grid1 runs along the x axis of the plot.
        cs = ax[i].contourf(x, y, metric.T, cmap='viridis', levels=levels)
        fig.colorbar(cs, ax=ax[i])
        ax[i].set_title(metric_name)
    plt.savefig(filename)
#########################################################################################
# Scenario 1, 2D sweep: jointly vary order quantity (grid1) and safety
# stock / reorder point (grid2) and visualize fill rate and total costs.
n_simulations = 256
parameter_grid1 = np.arange(10, 100, 5)
parameter_grid2 = np.arange(1, 10, 2)
config_grid = np.empty((len(parameter_grid1), len(parameter_grid2)), dtype=object)
for i, order_quantity in enumerate(parameter_grid1):
    for j, safety_stock in enumerate(parameter_grid2):
        config_grid[i][j] = {
            'fixed_transaction_cost': 10,
            'variable_transaction_cost': 1,
            'holding_cost': 0.1,
            'demand_generator': FoldedNormalGenerator(5, 0.5),
            'lead_time_generator': FoldedNormalGenerator(0, 0.5),
            'policy': sQInventoryPolicy(safety_stock, order_quantity)
        }
evaluation_results = evaluate_scenarios_2d(n_simulations, 512, parameter_grid1, parameter_grid2, config_grid)
plot_performance_2d(evaluation_results, parameter_grid1, parameter_grid2, ['fill_rates', 'total_costs'])
```
# Scenario 2: Find Cost-Optimal Review Period R in (R,S) policy
The second scenario we consider is the optimization of the review period of the (R,S) policy. We perform the analysis in three steps:
* First, we do a test simulation to make sure that the set of parameters we have chosen is reasonable.
* Second, we analyze the dependency between the review period and costs.
* Third, we analyze the three-way dependency between review period, up-to level, and costs.
```
# Scenario 2 sanity check: one (R, S) policy simulation (review period 4,
# order-up-to level 26) to confirm the parameters are reasonable before
# sweeping.
config = {
    'fixed_transaction_cost': 10,
    'variable_transaction_cost': 1,
    'holding_cost': 0.5,
    'demand_generator': FoldedNormalGenerator(5, 1),
    'lead_time_generator': FoldedNormalGenerator(2, 1),
    'policy': RSInventoryPolicy(4, 26)
}
simulator = SupplyChain(config)
for t in range(128):
    simulator.act()
plot_supply_chain(simulator)
print( f'Fill rate = {get_fill_rate(simulator)}' )
# Scenario 2 sweep: vary the review period R with the up-to level scaled
# accordingly. Demand/lead-time std of 0.0 makes the runs deterministic.
n_simulations = 64
parameter_grid = np.arange(1, 10, 1)
config_grid = [
    {
        'fixed_transaction_cost': 10,
        'variable_transaction_cost': 1,
        'holding_cost': 0.5,
        'demand_generator': FoldedNormalGenerator(5, 0.0),
        'lead_time_generator': FoldedNormalGenerator(0, 0.0),
        'policy': RSInventoryPolicy(review_period, 10 + 4 * review_period)
    }
    for review_period in parameter_grid ]
evaluation_results = evaluate_scenarios(n_simulations, 128, parameter_grid, config_grid)
plot_costs(parameter_grid, evaluation_results, mode = 'costs')
# Scenario 2, 2D sweep: jointly vary review period (grid1) and up-to
# level (grid2) with deterministic demand/lead times.
n_simulations = 64
parameter_grid1 = np.arange(1, 10, 1)
parameter_grid2 = np.arange(5, 30, 2)
config_grid = np.empty((len(parameter_grid1), len(parameter_grid2)), dtype=object)
for i, review_period in enumerate(parameter_grid1):
    for j, up_to_level in enumerate(parameter_grid2):
        config_grid[i][j] = {
            'fixed_transaction_cost': 10,
            'variable_transaction_cost': 1,
            'holding_cost': 0.5,
            'demand_generator': FoldedNormalGenerator(5, 0),
            'lead_time_generator': FoldedNormalGenerator(0, 0),
            'policy': RSInventoryPolicy(review_period, up_to_level)
        }
evaluation_results = evaluate_scenarios_2d(n_simulations, 128, parameter_grid1, parameter_grid2, config_grid)
plot_performance_2d(evaluation_results, parameter_grid1, parameter_grid2, ['fill_rates', 'total_costs'])
```
# Scenario 3: Analyze the Dependency Between the Demand Variance, Safety Stock, and Service Level
We next analyze the dependency between the demand variance, safety stock, and service level in the (s,Q) policy:
* First, we do a test simulation to make sure that we have chosen a reasonable set of parameters.
* Second, we analyze the dependency between the safety stock level and cycle service level.
* Third, we analyze the three-way dependency between the demand variance, safety stock, and service level.
```
# Scenario 3 sanity check: one (s, Q) simulation with noisy demand and
# deterministic lead time, to validate parameters before sweeping.
config = {
    'fixed_transaction_cost': 10,
    'variable_transaction_cost': 1,
    'holding_cost': 0.5,
    'demand_generator': FoldedNormalGenerator(10, 3.0),
    'lead_time_generator': FoldedNormalGenerator(0, 0.0),
    'policy': sQInventoryPolicy(20, 20)
}
simulator = SupplyChain(config)
for t in range(256):
    simulator.act()
plot_supply_chain(simulator)
print( f'Fill rate = {get_fill_rate(simulator)}' )
# Fixed typo in the printed label ("servce" -> "service").
print( f'Cycle service level = {get_cycle_service_level(simulator)}' )
# Scenario 3 sweep: vary the safety stock (reorder point) s and inspect
# the resulting cycle service level under fixed demand variance.
n_simulations = 128
parameter_grid = np.arange(1, 20, 2)
config_grid = [
    {
        'fixed_transaction_cost': 10,
        'variable_transaction_cost': 1,
        'holding_cost': 0.5,
        'demand_generator': FoldedNormalGenerator(10, 3.0),
        'lead_time_generator': FoldedNormalGenerator(0, 0.0),
        'policy': sQInventoryPolicy(safety_stock, 20)
    }
    for safety_stock in parameter_grid ]
evaluation_results = evaluate_scenarios(n_simulations, 256, parameter_grid, config_grid)
plot_costs(parameter_grid, evaluation_results, mode = 'service_levels')
# Scenario 3, 2D sweep: jointly vary safety stock (grid1) and demand
# variance (grid2); visualize service level and fill rate surfaces.
n_simulations = 128
parameter_grid1 = np.arange(1, 16, 1) # safety stock
parameter_grid2 = np.arange(0, 5, 0.5) # demand variance
config_grid = np.empty((len(parameter_grid1), len(parameter_grid2)), dtype=object)
for i, safety_stock in enumerate(parameter_grid1):
    for j, demand_variance in enumerate(parameter_grid2):
        config_grid[i][j] = {
            'fixed_transaction_cost': 10,
            'variable_transaction_cost': 1,
            'holding_cost': 0.5,
            'demand_generator': FoldedNormalGenerator(10, demand_variance),
            'lead_time_generator': FoldedNormalGenerator(0, 0),
            'policy': sQInventoryPolicy(safety_stock, 20)
        }
evaluation_results = evaluate_scenarios_2d(n_simulations, 512, parameter_grid1, parameter_grid2, config_grid)
plot_performance_2d(evaluation_results, parameter_grid1, parameter_grid2, ['service_levels', 'fill_rates'], levels=16)
```
# Scenario 4: Analyze the Dependency Between the Lead Time Variance, Safety Stock, and Service Level
```
# Scenario 4 sanity check: one (s, Q) simulation with both demand and
# lead-time variability, to validate parameters before the 2D sweep.
config = {
    'fixed_transaction_cost': 10,
    'variable_transaction_cost': 1,
    'holding_cost': 0.5,
    'demand_generator': FoldedNormalGenerator(5, 3.0),
    'lead_time_generator': FoldedNormalGenerator(0, 4.0),
    'policy': sQInventoryPolicy(10, 20)
}
simulator = SupplyChain(config)
for t in range(256):
    simulator.act()
plot_supply_chain(simulator)
print( f'Fill rate = {get_fill_rate(simulator)}' )
# Fixed typo in the printed label ("servce" -> "service").
print( f'Cycle service level = {get_cycle_service_level(simulator)}' )
# Scenario 4, 2D sweep: jointly vary demand variance and lead-time
# variance under a fixed (s, Q) policy; visualize service level and
# fill rate surfaces.
n_simulations = 256
parameter_grid1 = np.arange(0, 4, 0.5) # demand variance
parameter_grid2 = np.arange(0, 4, 0.5) # lead time variance
config_grid = np.empty((len(parameter_grid1), len(parameter_grid2)), dtype=object)
for i, demand_variance in enumerate(parameter_grid1):
    for j, lead_time_variance in enumerate(parameter_grid2):
        config_grid[i][j] = {
            'fixed_transaction_cost': 10,
            'variable_transaction_cost': 1,
            'holding_cost': 0.5,
            'demand_generator': FoldedNormalGenerator(5, demand_variance),
            'lead_time_generator': FoldedNormalGenerator(0, lead_time_variance),
            'policy': sQInventoryPolicy(10, 20)
        }
evaluation_results = evaluate_scenarios_2d(n_simulations, 1024, parameter_grid1, parameter_grid2, config_grid)
plot_performance_2d(evaluation_results, parameter_grid1, parameter_grid2, ['service_levels', 'fill_rates'], levels=8)
```
| github_jupyter |
# STEP4 : Creating Facts & Dimensions
Start by connecting to the database by running the cells below. If you are coming back to this exercise, then uncomment and run the first cell to recreate the database. If you recently completed steps 1 and 2, then skip to the second cell.
```
#!PGPASSWORD=student createdb -h 127.0.0.1 -U student pagila
#!PGPASSWORD=student psql -q -h 127.0.0.1 -U student -d pagila -f Data/pagila-schema.sql
#!PGPASSWORD=student psql -q -h 127.0.0.1 -U student -d pagila -f Data/pagila-data.sql
# Load the ipython-sql extension and connect to the local pagila database.
%load_ext sql
DB_ENDPOINT = "127.0.0.1"
DB = 'pagila'
DB_USER = 'student'
DB_PASSWORD = 'student'
DB_PORT = '5433'
# postgresql://username:password@host:port/database
conn_string = "postgresql://{}:{}@{}:{}/{}" \
                        .format(DB_USER, DB_PASSWORD, DB_ENDPOINT, DB_PORT, DB)
print(conn_string)
# Register the connection string with the %sql magic.
%sql $conn_string
```
### Star Schema - Entity Relationship Diagram
<img src="pagila-star.png" width="50%"/>
#### Create the first dimension table
```
%%sql
-- Date dimension: one row per calendar date, keyed by a surrogate id,
-- with pre-computed attributes for slicing (year/quarter/month/day/week).
CREATE TABLE dimDate
(
  date_key SERIAL PRIMARY KEY,
  date date NOT NULL,
  year smallint NOT NULL,
  quarter smallint NOT NULL,
  month smallint NOT NULL,
  day smallint NOT NULL,
  week smallint NOT NULL,
  is_weekend boolean NOT NULL
);
```
To check your work, run the following query to see a table with the field names and data types. The output should match the table below.
```
%%sql
-- Verify the dimdate schema (Postgres folds unquoted identifiers to lower case).
SELECT column_name, data_type
FROM information_schema.columns
WHERE table_name = 'dimdate'
```
<div class="p-Widget jp-RenderedHTMLCommon jp-RenderedHTML jp-mod-trusted jp-OutputArea-output jp-OutputArea-executeResult" data-mime-type="text/html"><table>
<tbody><tr>
<th>column_name</th>
<th>data_type</th>
</tr>
<tr>
<td>date_key</td>
<td>integer</td>
</tr>
<tr>
<td>date</td>
<td>date</td>
</tr>
<tr>
<td>year</td>
<td>smallint</td>
</tr>
<tr>
<td>quarter</td>
<td>smallint</td>
</tr>
<tr>
<td>month</td>
<td>smallint</td>
</tr>
<tr>
<td>day</td>
<td>smallint</td>
</tr>
<tr>
<td>week</td>
<td>smallint</td>
</tr>
<tr>
<td>is_weekend</td>
<td>boolean</td>
</tr>
</tbody></table></div>
Run the cell below to create the rest of the dimension tables.
```
%%sql
-- Customer dimension; start_date/end_date support slowly-changing-dimension
-- style validity tracking.
CREATE TABLE dimCustomer
(
  customer_key SERIAL PRIMARY KEY,
  customer_id smallint NOT NULL,
  first_name varchar(45) NOT NULL,
  last_name varchar(45) NOT NULL,
  email varchar(50),
  address varchar(50) NOT NULL,
  address2 varchar(50),
  district varchar(20) NOT NULL,
  city varchar(50) NOT NULL,
  country varchar(50) NOT NULL,
  postal_code varchar(10),
  phone varchar(20) NOT NULL,
  active smallint NOT NULL,
  create_date timestamp NOT NULL,
  start_date date NOT NULL,
  end_date date NOT NULL
);
-- Movie dimension; release_year uses the pagila-defined "year" domain type.
CREATE TABLE dimMovie
(
  movie_key SERIAL PRIMARY KEY,
  film_id smallint NOT NULL,
  title varchar(255) NOT NULL,
  description text,
  release_year year,
  language varchar(20) NOT NULL,
  original_language varchar(20),
  rental_duration smallint NOT NULL,
  length smallint NOT NULL,
  rating varchar(5) NOT NULL,
  special_features varchar(60) NOT NULL
);
-- Store dimension, denormalizing address and manager attributes.
CREATE TABLE dimStore
(
  store_key SERIAL PRIMARY KEY,
  store_id smallint NOT NULL,
  address varchar(50) NOT NULL,
  address2 varchar(50),
  district varchar(20) NOT NULL,
  city varchar(50) NOT NULL,
  country varchar(50) NOT NULL,
  postal_code varchar(10),
  manager_first_name varchar(45) NOT NULL,
  manager_last_name varchar(45) NOT NULL,
  start_date date NOT NULL,
  end_date date NOT NULL
);
```
#### Create the fact table
**Note on REFERENCES constraints:**<br>
The demo video does not cover the REFERENCES constraint. When building a fact table, you use the REFERENCES constraint to identify which table and column a foreign key is connected to. This ensures that the fact table does not refer to items that do not appear in the respective dimension tables. You can read more [here](https://www.postgresql.org/docs/9.2/ddl-constraints.html). Here's an example of the syntax on a different schema:
```
CREATE TABLE orders (
order_id integer PRIMARY KEY,
product_no integer REFERENCES products (product_no),
quantity integer
);
```
```
%%sql
-- Fact table: one row per sale, linked to all four dimensions via
-- REFERENCES foreign keys so facts cannot point at missing dimension rows.
CREATE TABLE factSales
(
    sales_key SERIAL PRIMARY KEY,
    date_key integer REFERENCES dimDate(date_key),
    customer_key integer REFERENCES dimCustomer(customer_key),
    movie_key integer REFERENCES dimMovie(movie_key),
    store_key integer REFERENCES dimStore(store_key),
    sales_amount numeric NOT NULL
);
```
To check your work, run the following query to see a table with the field names and data types. The output should match the table below.
```
%%sql
-- Verify the factsales schema against the expected column list.
SELECT column_name, data_type
FROM information_schema.columns
WHERE table_name = 'factsales'
```
<div class="p-Widget jp-RenderedHTMLCommon jp-RenderedHTML jp-mod-trusted jp-OutputArea-output jp-OutputArea-executeResult" data-mime-type="text/html"><table>
<tbody><tr>
<th>column_name</th>
<th>data_type</th>
</tr>
<tr>
<td>sales_key</td>
<td>integer</td>
</tr>
<tr>
<td>date_key</td>
<td>integer</td>
</tr>
<tr>
<td>customer_key</td>
<td>integer</td>
</tr>
<tr>
<td>movie_key</td>
<td>integer</td>
</tr>
<tr>
<td>store_key</td>
<td>integer</td>
</tr>
<tr>
<td>sales_amount</td>
<td>numeric</td>
</tr>
</tbody></table></div>
If you need to delete the table and start over, use the DROP TABLE command: `DROP TABLE <table_name>`
| github_jupyter |
Back to **[Fan](https://fanwangecon.github.io/)**'s R4Econ Homepage **[Table of Content](https://fanwangecon.github.io/R4Econ/)**
# Panel Data Subsidy Optimization over Two Groups
There is some production function or other functional relationship between x and y. When x shifts, y shifts. We are interested in some function over the vector of y. We shift x to maximize some objective over the vector of y.
$$
\max_{\left\{S_i\right\}_{i \in \left\{1,2\right\}}}
O \left(
\left\{
Y_{it}(X_{it}, S_i)
\right\}^{i \in \left\{1,2\right\}}
_{t \in \left\{1,...,T\right\}}
\right)
$$
Specifically: there is a finite amount of subsidies, there are two individuals observed between month 0 and 24. What are the optimal nutritional subsidies to provide to the three individuals given some function that relates nutritional input to outcome (height), ignoring behavior responses.
This exercise assumes that individual specific coefficients are known ex-ante.
## Program
```
# Planner's objective function: constant elasticity of substitution (CES)
# aggregation over all individuals' outcomes vec.y.
f_planer_obj <- function(vec.y = c(80, 85, 90), param.ces = 0.5) {
    n <- length(vec.y)
    if (param.ces == 0) {
        # Cobb-Douglas limit of CES: the geometric mean of outcomes.
        return(prod(vec.y^(1/n)))
    }
    mean(vec.y^param.ces)^(1/param.ces)
}
```
## Use Program
### Load Data
```
# Library
library(tidyverse)
# Load Sample Data
# NOTE: hard-coded local path; adjust to your machine before running.
setwd('C:/Users/fan/R4Econ/_data/')
df <- read_csv('height_weight.csv')
```
### Data Selection
We only need information from several individuals
```
# Select 2 individuals, information from second year, four variables
# Keep Cebu survey rounds 14-24 for individuals 4 and 13, renamed to
# generic panel names: i = individual, t = month, y = height, x = protein.
df.i2t6xy <- df %>% filter(svymthRound <= 24 & svymthRound >= 14 &
                           S.country == 'Cebu' & (indi.id == 4 | indi.id == 13)) %>%
            select(i = indi.id, t = svymthRound, y = hgt, x =prot)
t(df.i2t6xy)
```
### Subsidy Group Index
Generate linear index for subsidy groups.
```
# Build a linear (1..G) subsidy-group index from the grouping variables
# (here each individual is their own subsidy group).
var.grp.idx <- 'subsidy.grp'
vars.subsidy.group <- c('i')
# group_indices() assigns one integer id per distinct combination of the
# grouping variables; !! / syms() inject the variable names programmatically.
df.i2t6xy <- df.i2t6xy %>% mutate(!!var.grp.idx := group_indices(., !!!syms(vars.subsidy.group)))
t(df.i2t6xy)
sca.subsidy.groups <- length(unique(df.i2t6xy[[var.grp.idx]]))
sca.subsidy.groups
```
## Input/Output Relationship and Prediction with Changes in X
Estimate a production function relationship between the input and output
Prediction can be re-done changing x (which is how we will change subsidy)
```
# Regression and Results
# Log-log production function with individual fixed effects:
# log(height) ~ log(protein) + individual dummies.
res.linfe <- lm(log(y) ~ log(x) + factor(i) , data=df.i2t6xy)
summary(res.linfe)
# Regression Prediction, with Changes in X
# Predict heights at observed inputs and at inputs shifted by +1 (a unit
# subsidy) to see the marginal effect on predicted height.
y.predict <- exp(predict(res.linfe))
y.predict.subsidy <- exp(predict(res.linfe, (df.i2t6xy %>% mutate(x = x + 1))))
cbind(y.predict, y.predict.subsidy - y.predict, df.i2t6xy)
```
## Dataframe with predicitons from different subsidies
Given fixed total subsidy available, distribute over the two subsidy groups. (2 individuals)
The policy is a vector determined by the proportion of the total subsidy allocated to each of the two individuals.
Results below show subsidy effects on height at individual months.
Then we try a vector of subsidies from 0 percent to 100 percent of total subsidy for the 1st of 2 individuals, and look at the resulting height differences between the two individuals.
It is clear that differential subsidies lead to more or less inequality.
Somewhat tricky to write a generic function, but it should have two sub functions
- *Function 1*
+ input:
* vector N: subsidy index group variable
* vector N: subsidy level for each index
+ output:
* vector: vector with subsidy
- *Function 2*
+ This function is specific to each production function
+ Takes in parameters required for that particular production function
+ Also takes in parameters required for Function 1
    + Function 1 is invoked inside Function 2
+ Inside Maximizer, Function 2 will executed with do.call
- *Function 3*
+ Wrapper function around Function 2
+ This is the function that gets fed into the optimizer
```
# Function 1
# Attach a per-row subsidy column: each row receives the fraction of the
# total subsidy assigned to its subsidy group (looked up via var.grp.idx).
f_subsidy_vec <- function(df, var.grp.idx, subsidy.total, vec.subsidy.frac) {
    # Example inputs for interactive testing:
    # var.grp.idx <- 'subsidy.grp'
    # subsidy_total <- 2
    # vec_frac <- c(0.1, 0.9)
    # df <- df.i2t6xy
    return(df %>% mutate(subsidy_grp = paste0(vec.subsidy.frac, collapse=','),
                         subsidy = subsidy.total*vec.subsidy.frac[df[[var.grp.idx]]]))
}
# Function 2
# Production-function-specific: add the predicted outcome (height) under
# the subsidized input, using the fitted log-log fixed-effects model.
f_subsidy_y_cd <- function(df, res.linfe,
                           var.grp.idx, subsidy.total, vec.subsidy.frac) {

    # Invoke Function 1
    df.wth.subsidy <- f_subsidy_vec(df, var.grp.idx, subsidy.total, vec.subsidy.frac)

    # Y with subsidy for linear regression model
    df.wth.subsidy %>% mutate(y_subsidy =
                                  exp(predict(res.linfe,
                                              (df.wth.subsidy %>%
                                                   mutate(x = x + subsidy)))))
}
# Test Function Invoke with do.call
# BUG FIX: the call targeted a non-existent "f_subsidy_y" (the function is
# named f_subsidy_y_cd), and the positional list passed var.grp.idx and
# res.linfe in the wrong order for the signature
# f_subsidy_y_cd(df, res.linfe, var.grp.idx, ...); name all arguments.
f_subsidy_y_params <- list(df = df.i2t6xy, res.linfe = res.linfe,
                           var.grp.idx = var.grp.idx,
                           subsidy.total = 2, vec.subsidy.frac = c(0.1, 0.9))
do.call("f_subsidy_y_cd", f_subsidy_y_params)
```
### Greatest Equality and Greatest Total Height Month 24
if we care only about month 24 height
- Which subsidy achieves the greatest level of total height for the two individuals?
+ 0.496 and 0.504 (See below)
- Which subsidy achieves the greatest level of equality (Leontief)?
+ 0.674 and 0.326 (See below)
```
# Function loop over subsidies
# Sweep the fraction of total subsidy assigned to the first group over
# [0, 1] and stack the predicted outcomes for every split.
vec.sca.subsidy.frac <- seq(0, 1, 0.001)
subsidy.total <- 100
# NOTE(review): f_subsidy_y_n2_wrapper (the "Function 3" wrapper described
# above) is not defined in this notebook section -- confirm it exists.
df.all.subsidy <- bind_rows(lapply(vec.sca.subsidy.frac, f_subsidy_y_n2_wrapper,
                                  df=df.i2t6xy, var.grp.idx=var.grp.idx, res.linfe=res.linfe,
                                  subsidy.total=subsidy.total))

# Summarize Results
# Month-24 heights by subsidy split, reshaped wide (one column per person).
df.t24.hgt <- df.all.subsidy %>% filter(t == 24) %>%
    group_by(subsidy_grp, subsidy.grp, i) %>% summarize(y_subsidy_mean = mean(y_subsidy)) %>%
    spread(i, y_subsidy_mean, sep='_') %>%
    mutate_at(vars(contains("i_")), as.numeric) %>%
    mutate(total.hgt = i_4 + i_13, min.hgt = min(i_4, i_13), mth = 't24')
# NOTE(review): min(i_4, i_13) yields the intended row-wise minimum only
# because the data is still grouped with one row per subsidy_grp; pmin()
# would be the robust row-wise form -- confirm before changing grouping.

# Which Subsidy Achieves Greatest Equality / Greatest Total Height
head(df.t24.hgt %>% arrange(desc(total.hgt)), 3)
head(df.t24.hgt %>% arrange(desc(min.hgt)), 3)

# Graphical Results
options(repr.plot.width = 6, repr.plot.height = 3.5)
title_line2 <- 'Planer Equality: Only minimum hgt of the two matter'
title_line3 <- 'Planer Efficiency: Only average/sum Height of the two matter'
title_x <- 'Fraction of Subsidy to the First Person'
title_y <- 'Height in cm'
caption <- 'Two individuals, Height at Month 24'
# Long-format plot of total vs minimum height across subsidy splits.
df.t24.hgt %>% gather(variable, value, -mth, -subsidy.grp, -subsidy_grp, -matches('i_')) %>%
    ggplot(aes(x=subsidy.grp, y=value)) +
    geom_line() + facet_wrap( ~ variable, scale='free') +
    labs(title = paste0('Equality Or Maximum Height Optimal Policy\n', title_line2,
                        '\n', title_line3 ),
         x = title_x, y = title_y, caption = caption) +
    theme_bw()
```
### Greatest Equality and Greatest Total Height Mth 14 vs Mth 24
if we care only about month 14 height
- Which subsidy achieves the greatest level of total height for the two individuals?
+ 0.526 and 0.474 (See below)
- Which subsidy achieves the greatest level of equality (Leontief)?
+ 0.687 and 0.313 (See below)
```
# Summarize Results
# Same summary as for month 24, but at month 14 (early-period outcomes).
df.t2t12.hgt <- df.all.subsidy %>% filter(t == 14) %>%
    group_by(subsidy_grp, subsidy.grp, i) %>% summarize(y_subsidy_mean = mean(y_subsidy)) %>%
    spread(i, y_subsidy_mean, sep='_') %>%
    mutate_at(vars(contains("i_")), as.numeric) %>%
    mutate(total.hgt = i_4 + i_13, min.hgt = min(i_4, i_13), mth = 't14')
# NOTE(review): as above, min() relies on the grouping leaving one row per
# group; pmin() would be the robust row-wise choice -- confirm.

# Which Subsidy Achieves Greatest Equality / Greatest Total Height
head(df.t2t12.hgt %>% arrange(desc(total.hgt)), 3)
head(df.t2t12.hgt %>% arrange(desc(min.hgt)), 3)

# Graphical Results
options(repr.plot.width = 6, repr.plot.height = 3.5)
# Overlay the month-14 and month-24 summaries, colored by month.
bind_rows(df.t24.hgt, df.t2t12.hgt) %>%
    gather(variable, value, -mth, -subsidy.grp, -subsidy_grp, -matches('i_')) %>%
    ggplot(aes(x=subsidy.grp, y=value, colour=mth)) +
    geom_line() + facet_wrap( ~ variable, scale='free') +
    labs(title = paste0('Equality Or Maximum Height Optimal Policy\n', title_line2,
                        '\n', title_line3 ),
         x = title_x, y = title_y, caption = caption) +
    theme_bw()
```
### Constant Elasticity of Substitution
Constant Elasticity Planer objective Function.
Take in as inputs
| github_jupyter |
# Data cleaning of zomato dataset
## Alert: Because this is cleaning part, you'll see lots of words like 'Removing','Replacing' and 'Splitting'.
```
import pandas as pd
# Raw scrape produced by the BeautifulSoup notebook.
df = pd.read_csv('zomato_vadodara.csv')#csv-file we achieved by using beautiful soup
df.head(3)
df.dtypes
# Columns that share the same count-cleaning steps below.
list_=['Rating_counts','Delivery_rating_counts']
```
### Removing '()' from Rating counts and Delivery Rating Counts
```
# Strip the wrapping parentheses, e.g. "(1,234)" -> "1,234".
for x in list_:
    df[x]=df[x].str.strip('()')
```
### Removing (Reviews) text which is right after some values in Delivery Rating Counts
```
df['Delivery_rating_counts']= df['Delivery_rating_counts'].str.split(' ',expand=True)[0]
```
### Removing unnecessary comma in rating and delivery rating counts like 1,000~1000
```
# Remove thousands separators so the counts can be cast to numbers.
for x in list_:
    df[x]=df[x].str.replace(',','')
df.head(3)
```
### We need to change 'K' into column of 1000s and them multiply that column with counts column
```
# Expand the "K" suffix: build a multiplier column (1000 where the value
# ends in K, else 1), strip the suffix, and scale the numeric part.
df['K']=df['Delivery_rating_counts'].str.extract(r'[\d\.]+([K]+)').fillna(1).replace('K',1000)
df['Delivery_rating_counts']=df['Delivery_rating_counts'].replace(r'[K]+$','',regex=True).astype(float)
df['Delivery_rating_counts']= df['Delivery_rating_counts']*df['K']
df.head(2) #Done
```
### Removing unncessary spaces
```
# Final numeric clean-up: keep the leading number of rating counts,
# drop commas from price, and cast rating counts to float.
df['Rating_counts']= df['Rating_counts'].str.split(' ',expand=True)[0]
df['Price_for_2']= df['Price_for_2'].str.replace(',','')
df['Rating_counts']=df['Rating_counts'].astype(float)
```
### Replacing '₹' with empty space
```
# Strip the rupee sign and cast price-for-two to int.
df['Price_for_2']=df['Price_for_2'].replace('₹','',regex=True).astype(int)
df['Area'].unique()
```
### We have some multiple values for same areas, we needs to fix this
```
# Normalize area names: take the token after the comma as the canonical
# area and overwrite the raw Area column with it.
df['area'] = df['Area'].str.split(',',expand=True)[1].str.replace(' ','')
area = df['area'].unique()
for i in area:
    index = df[df.loc[:,'area']==i].index
    df.loc[index,'Area']=i
df['Area'].value_counts()
df[df['Area']=='Sayajiganj'].index
# Merge the two spellings of the same neighborhood into one value.
df.loc[df[df['Area']=='Sayajiganj'].index,'Area']='Sayajigunj'
# The helper multiplier column from the "K" expansion is no longer needed.
df.drop('K',axis=1,inplace=True)
```
## Now the hardest part: changing the time range into a date-time pandas series
### To be honest, this took me half a day to figure out, because we have no year or date column and only a range of times (not a specific value of time). But eventually the hard work paid off.
### Splitting Open time column into opening hours and closing hours
```
# Split "Open_time" into the hours part and the "(...)" weekday part,
# then split hours into up to three comma-separated ranges and each
# range (joined by an en dash) into open/close times.
df[['time','week']] = df['Open_time'].str.split('(',1,expand=True)
df[['1st_time','2nd_time','3rd_time']] = df['time'].str.split(',',expand=True)
df[['1st_time_open','1st_time_close']]=df['1st_time'].str.split('–',expand=True)
df[['2nd_time_open','2nd_time_close']]=df['2nd_time'].str.split('–',expand=True)
```
### Removing empty spaces
```
# Remove all internal spaces from the four time columns.
df['1st_time_open'] = df['1st_time_open'].str.replace(' ','')
df['2nd_time_open'] = df['2nd_time_open'].str.replace(' ','')
df['1st_time_close'] = df['1st_time_close'].str.replace(' ','')
df['2nd_time_close'] = df['2nd_time_close'].str.replace(' ','')
```
### Computer's brain can't understand words, Therefore we need to fix words like 'noon' and 'midnight'.
```
# Map textual times to parseable am/pm values: "12noon" -> 12pm,
# "12midnight" -> 12am, and treat "24Hours" as opening at 12am.
df.loc[df[df.loc[:,'1st_time_open']=='12noon'].index,'1st_time_open']='12pm'
df.loc[df[df.loc[:,'1st_time_open']=='24Hours'].index,'1st_time_open']='12am'
df.loc[df[df.loc[:,'1st_time_open']=='12midnight'].index,'1st_time_open']='12am'
df.loc[df[df.loc[:,'1st_time_close']=='12midnight'].index,'1st_time_close']='12am'
# A handful of malformed rows are repaired individually by index.
df.loc[47,'1st_time_open']='12pm'
df.loc[51,'1st_time_open']='11pm'
df.loc[161,'1st_time_open']='6pm'
df.loc[322,'1st_time_open']='1pm'
df.loc[404,'1st_time_open']='9am'
df.loc[47,'2nd_time_open']='7pm'
df.loc[47,'2nd_time_close']='10:30pm'
df.loc[47,'1st_time_close']='3pm'
df.loc[51,'1st_time_close']='12pm'
df.loc[161,'1st_time_close']='11pm'
df.loc[322,'1st_time_close']='12am'
# NOTE(review): row 404 opens and closes at 9am -- verify against the source data.
df.loc[404,'1st_time_close']='9am'
df.loc[df[df.loc[:,'2nd_time_open']=='12midnight'].index,'2nd_time_open']='12am'
df.loc[df[df.loc[:,'2nd_time_open']=='12noon'].index,'2nd_time_open']='12pm'
df.loc[df[df.loc[:,'2nd_time_close']=='12midnight'].index,'2nd_time_close']='12am'
df.loc[df[df.loc[:,'2nd_time_close']=='12midnight...'].index,'2nd_time_close']='12am'
```
### Another Problem, we need to change the format 11am to 11:00am
```
# Normalize "11am" -> "11:00am" for each of the four time columns:
# extract the am/pm suffix, strip it, split hour/minute, default missing
# minutes to "00", and reassemble as hour:minute + suffix.
# NOTE(review): the class [am|pm]+ matches any run of the characters
# a, m, |, p. It works for the 'am'/'pm' suffixes present in this data,
# but (am|pm)$ would be the precise pattern -- confirm before changing.
df['1st_time_open_ampm'] = df['1st_time_open'].str.extract(r'[\d\.]+([am|pm]+)')
df['1st_time_open']=df['1st_time_open'].replace(r'[am|pm]+$','',regex=True)
df[['1st_time_open_hour','1st_time_open_minute']]=df['1st_time_open'].str.split(':',expand=True)
df['1st_time_open_minute'] =df['1st_time_open_minute'].fillna('00')
df['1st_time_open']=df['1st_time_open_hour']+':'+df['1st_time_open_minute']+df['1st_time_open_ampm']
df['1st_time_close_ampm'] = df['1st_time_close'].str.extract(r'[\d\.]+([am|pm]+)')
df['1st_time_close']=df['1st_time_close'].replace(r'[am|pm]+$','',regex=True)
df[['1st_time_close_hour','1st_time_close_minute']]=df['1st_time_close'].str.split(':',expand=True)
df['1st_time_close_minute'] =df['1st_time_close_minute'].fillna('00')
df['1st_time_close']=df['1st_time_close_hour']+':'+df['1st_time_close_minute']+df['1st_time_close_ampm']
df['2nd_time_open_ampm'] = df['2nd_time_open'].str.extract(r'[\d\.]+([am|pm]+)')
df['2nd_time_open']=df['2nd_time_open'].replace(r'[am|pm]+$','',regex=True)
df[['2nd_time_open_hour','2nd_time_open_minute']]=df['2nd_time_open'].str.split(':',expand=True)
df['2nd_time_open_minute'] =df['2nd_time_open_minute'].fillna('00')
df['2nd_time_open']=df['2nd_time_open_hour']+':'+df['2nd_time_open_minute']+df['2nd_time_open_ampm']
df['2nd_time_close_ampm'] = df['2nd_time_close'].str.extract(r'[\d\.]+([am|pm]+)')
df['2nd_time_close']=df['2nd_time_close'].replace(r'[am|pm]+$','',regex=True)
df[['2nd_time_close_hour','2nd_time_close_minute']]=df['2nd_time_close'].str.split(':',expand=True)
df['2nd_time_close_minute'] =df['2nd_time_close_minute'].fillna('00')
df['2nd_time_close']=df['2nd_time_close_hour']+':'+df['2nd_time_close_minute']+df['2nd_time_close_ampm']
```
### Done Now we have all hours in one pattern, then next problem is we don't have year and date in our time like column, After putting so much time, I realized that answer is simple af. Just add dummy year and date who cares! Although we will be using only hours.
```
# Prepend a fixed dummy date so the times can be parsed as full
# datetimes (only the time-of-day component is used downstream).
df['1st_time_open']='2020-10-15'+' '+df['1st_time_open']
df['1st_time_close']='2020-10-15'+' '+df['1st_time_close']
df['2nd_time_open']='2020-10-15'+' '+df['2nd_time_open']
df['2nd_time_close']='2020-10-15'+' '+df['2nd_time_close']
```
### Changing into date-time pandas series
```
# Parse the assembled strings into pandas datetime series.
df['1st_time_open']=pd.to_datetime(df['1st_time_open'])
df['1st_time_close']=pd.to_datetime(df['1st_time_close'])
df['2nd_time_open']=pd.to_datetime(df['2nd_time_open'])
df['2nd_time_close']=pd.to_datetime(df['2nd_time_close'])
df['1st_time_open'].head()
```
### Hell Yeah!
```
# Clean the weekday annotation captured from the "(...)" suffix.
df['week'] = df['week'].str.replace(' ','')
df['week'].value_counts().head(7)
```
### Keeping necessary columns
```
# Keep only the cleaned columns; all helper/intermediate columns are dropped.
df = df[['Restaurant','Cousines','Rating','Rating_counts','Delivery_rating','Delivery_rating_counts','Area','Adress','Price_for_2','1st_time_open','1st_time_close','2nd_time_open','2nd_time_close','week']]
df.head(3)
```
### converting neat and clean pandas dataframe into csv file
```
df.to_csv('Zomato_cleaned.csv',index=False)
```
### Check out next notebook, notebook of this csv file's analysis and insights
| github_jupyter |
This is a companion notebook for the book [Deep Learning with Python, Second Edition](https://www.manning.com/books/deep-learning-with-python-second-edition?a_aid=keras&a_bid=76564dff). For readability, it only contains runnable code blocks and section titles, and omits everything else in the book: text paragraphs, figures, and pseudocode.
**If you want to be able to follow what's going on, I recommend reading the notebook side by side with your copy of the book.**
This notebook was generated for TensorFlow 2.6.
## The Transformer architecture
### Understanding self-attention
#### Generalized self-attention: the query-key-value model
### Multi-Head attention
### The Transformer encoder
**Getting the data**
```
!curl -O https://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz
!tar -xf aclImdb_v1.tar.gz
!rm -r aclImdb/train/unsup
```
**Preparing the data**
```
import os, pathlib, shutil, random
from tensorflow import keras

batch_size = 32
base_dir = pathlib.Path("aclImdb")
val_dir = base_dir / "val"
train_dir = base_dir / "train"
# Carve a validation split out of the training directory: move the last
# 20% of each class's files (after a seeded shuffle, so the split is
# reproducible) into aclImdb/val.
for category in ("neg", "pos"):
    os.makedirs(val_dir / category)
    files = os.listdir(train_dir / category)
    random.Random(1337).shuffle(files)
    num_val_samples = int(0.2 * len(files))
    val_files = files[-num_val_samples:]
    for fname in val_files:
        shutil.move(train_dir / category / fname,
                    val_dir / category / fname)
# Directory names become the (neg/pos) labels.
train_ds = keras.utils.text_dataset_from_directory(
    "aclImdb/train", batch_size=batch_size
)
val_ds = keras.utils.text_dataset_from_directory(
    "aclImdb/val", batch_size=batch_size
)
test_ds = keras.utils.text_dataset_from_directory(
    "aclImdb/test", batch_size=batch_size
)
# Label-free view of the training texts, used below to adapt the vectorizer.
text_only_train_ds = train_ds.map(lambda x, y: x)
```
**Vectorizing the data**
```
from tensorflow.keras import layers

max_length = 600    # truncate/pad each review to 600 tokens
max_tokens = 20000  # vocabulary size
text_vectorization = layers.TextVectorization(
    max_tokens=max_tokens,
    output_mode="int",
    output_sequence_length=max_length,
)
# Build the vocabulary from the raw (label-free) training texts only.
text_vectorization.adapt(text_only_train_ds)
int_train_ds = train_ds.map(lambda x, y: (text_vectorization(x), y))
int_val_ds = val_ds.map(lambda x, y: (text_vectorization(x), y))
int_test_ds = test_ds.map(lambda x, y: (text_vectorization(x), y))
```
**Transformer encoder implemented as a subclassed Layer**
```
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
class TransformerEncoder(layers.Layer):
def __init__(self, embed_dim, dense_dim, num_heads, **kwargs):
super().__init__(**kwargs)
self.embed_dim = embed_dim
self.dense_dim = dense_dim
self.num_heads = num_heads
self.attention = layers.MultiHeadAttention(
num_heads=num_heads, key_dim=embed_dim)
self.dense_proj = keras.Sequential(
[layers.Dense(dense_dim, activation="relu"),
layers.Dense(embed_dim),]
)
self.layernorm_1 = layers.LayerNormalization()
self.layernorm_2 = layers.LayerNormalization()
def call(self, inputs, mask=None):
if mask is not None:
mask = mask[:, tf.newaxis, :]
attention_output = self.attention(
inputs, inputs, attention_mask=mask)
proj_input = self.layernorm_1(inputs + attention_output)
proj_output = self.dense_proj(proj_input)
return self.layernorm_2(proj_input + proj_output)
def get_config(self):
config = super().get_config()
config.update({
"embed_dim": self.embed_dim,
"num_heads": self.num_heads,
"dense_dim": self.dense_dim,
})
return config
```
**Text classification model that combines the Transformer encoder and a pooling layer**
```
vocab_size = 20000
embed_dim = 256
num_heads = 2
dense_dim = 32

# Embedding -> Transformer encoder -> global max pool -> dropout -> sigmoid.
inputs = keras.Input(shape=(None,), dtype="int64")
x = layers.Embedding(vocab_size, embed_dim)(inputs)
x = TransformerEncoder(embed_dim, dense_dim, num_heads)(x)
x = layers.GlobalMaxPooling1D()(x)
x = layers.Dropout(0.5)(x)
outputs = layers.Dense(1, activation="sigmoid")(x)
model = keras.Model(inputs, outputs)
model.compile(optimizer="rmsprop",
              loss="binary_crossentropy",
              metrics=["accuracy"])
model.summary()
```
**Training and evaluating the Transformer encoder based model**
```
# Checkpoint only the epoch with the best validation loss.
callbacks = [
    keras.callbacks.ModelCheckpoint("transformer_encoder.keras",
                                    save_best_only=True)
]
model.fit(int_train_ds, validation_data=int_val_ds, epochs=20, callbacks=callbacks)
# Reload the best checkpoint; the custom layer must be registered explicitly.
model = keras.models.load_model(
    "transformer_encoder.keras",
    custom_objects={"TransformerEncoder": TransformerEncoder})
print(f"Test acc: {model.evaluate(int_test_ds)[1]:.3f}")
```
#### Using positional encoding to reinject order information
**Implementing positional embedding as a subclassed layer**
```
class PositionalEmbedding(layers.Layer):
    """Sum of a learned token embedding and a learned position embedding."""

    def __init__(self, sequence_length, input_dim, output_dim, **kwargs):
        super().__init__(**kwargs)
        # One embedding table for token ids, one for positions
        # 0 .. sequence_length - 1.
        self.token_embeddings = layers.Embedding(
            input_dim=input_dim, output_dim=output_dim)
        self.position_embeddings = layers.Embedding(
            input_dim=sequence_length, output_dim=output_dim)
        # Stored for get_config().
        self.sequence_length = sequence_length
        self.input_dim = input_dim
        self.output_dim = output_dim

    def call(self, inputs):
        # Positions are generated dynamically from the actual sequence length.
        length = tf.shape(inputs)[-1]
        positions = tf.range(start=0, limit=length, delta=1)
        embedded_tokens = self.token_embeddings(inputs)
        embedded_positions = self.position_embeddings(positions)
        # Position embeddings broadcast across the batch dimension.
        return embedded_tokens + embedded_positions

    def compute_mask(self, inputs, mask=None):
        # Token id 0 is treated as padding; propagate a padding mask downstream.
        return tf.math.not_equal(inputs, 0)

    def get_config(self):
        # Expose constructor arguments for model (de)serialization.
        config = super().get_config()
        config.update({
            "output_dim": self.output_dim,
            "sequence_length": self.sequence_length,
            "input_dim": self.input_dim,
        })
        return config
```
#### Putting it all together: a text-classification Transformer
**Text classification model that combines positional embedding, the Transformer encoder, and a pooling layer**
```
vocab_size = 20000
sequence_length = 600
embed_dim = 256
num_heads = 2
dense_dim = 32
inputs = keras.Input(shape=(None,), dtype="int64")
x = PositionalEmbedding(sequence_length, vocab_size, embed_dim)(inputs)
x = TransformerEncoder(embed_dim, dense_dim, num_heads)(x)
x = layers.GlobalMaxPooling1D()(x)
x = layers.Dropout(0.5)(x)
outputs = layers.Dense(1, activation="sigmoid")(x)
model = keras.Model(inputs, outputs)
model.compile(optimizer="rmsprop",
loss="binary_crossentropy",
metrics=["accuracy"])
model.summary()
callbacks = [
keras.callbacks.ModelCheckpoint("full_transformer_encoder.keras",
save_best_only=True)
]
model.fit(int_train_ds, validation_data=int_val_ds, epochs=20, callbacks=callbacks)
model = keras.models.load_model(
"full_transformer_encoder.keras",
custom_objects={"TransformerEncoder": TransformerEncoder,
"PositionalEmbedding": PositionalEmbedding})
print(f"Test acc: {model.evaluate(int_test_ds)[1]:.3f}")
```
### When to use sequence models over bag-of-words models?
| github_jupyter |
<h1 align="center"> Building a statistical POS tagger from scratch </h1>
__Assumption__:
* We are considering the __tagged dataset / corpus as input__ and apply __supervised Machine Learning algorithm.__
__Fact__:
* This is __multi-class classification__ task.
### Import dependencies
```
import nltk
from nltk import word_tokenize
from nltk.corpus import brown as cb
from nltk.corpus import treebank as tb
import pprint
from sklearn.tree import DecisionTreeClassifier
from sklearn.feature_extraction import DictVectorizer
from sklearn.pipeline import Pipeline
```
### Explore dataset
#### Explore Brown Corpus
```
# Peek at the raw Brown corpus: words, tagged words, and one tagged sentence.
raw_text = nltk.Text(cb.words('ca01'))
print (raw_text)
print (cb.words()[0:20])
print (cb.tagged_words()[0:10])
tagged_sentences_brown_corpus = nltk.corpus.brown.tagged_sents()
pprint.pprint(tagged_sentences_brown_corpus[0])
```
#### Explore Penn-Treebank Corpus
```
# Peek at the Penn Treebank corpus: words and one tagged sentence.
raw_text = nltk.Text(tb.words()[0:10])
print (raw_text)
print (tb.words()[0:10])
tagged_sentences_treebank_corpus = nltk.corpus.treebank.tagged_sents()
pprint.pprint (tagged_sentences_treebank_corpus[0])
```
<h4 align="center"> We will be using Treebank corpus to build our own POS tagger </h4>
```
# Corpus size: number of tagged sentences and words in the Treebank sample.
print ("Tagged sentences: ", len(tagged_sentences_treebank_corpus))
print ("Tagged words:", len(nltk.corpus.treebank.tagged_words()))
```
### Generate features
#### Function for generating features from the tagged corpus
```
def features(sentence, index):
    """Build the feature dictionary for the word at ``index`` in
    ``sentence`` (a list of word strings).

    Features cover the word itself, casing, short prefixes/suffixes,
    the neighbouring words, and simple character properties.
    """
    word = sentence[index]
    first = index == 0
    last = index == len(sentence) - 1
    return {
        'word': word,
        'is_first': first,
        'is_last': last,
        'is_capitalized': word[0].upper() == word[0],
        'is_all_caps': word.upper() == word,
        'is_all_lower': word.lower() == word,
        'prefix-1': word[0],
        'prefix-2': word[:2],
        'prefix-3': word[:3],
        'suffix-1': word[-1],
        'suffix-2': word[-2:],
        'suffix-3': word[-3:],
        'prev_word': '' if first else sentence[index - 1],
        'next_word': '' if last else sentence[index + 1],
        'has_hyphen': '-' in word,
        'is_numeric': word.isdigit(),
        'capitals_inside': word[1:].lower() != word[1:],
    }


pprint.pprint(features(['This', 'is', 'a', 'sentence'], 0))
```
### Transform Dataset
### Extract words from tagged sentences using the 'untag' function
```
def untag(tagged_sentence):
    """Strip the POS tags from (word, tag) pairs, keeping only the words."""
    return [word for word, _tag in tagged_sentence]
```
### Transform the dataset into X, y pairs, where X = features and y = POS labels
```
def transform_to_dataset(tagged_sentences):
    """Flatten tagged sentences into parallel lists: X holds one feature
    dict per word and y holds the corresponding POS label.

    Fix: ``untag(tagged)`` was previously re-computed for every word,
    making each sentence quadratic in its length; it is now computed
    once per sentence.
    """
    X, y = [], []
    for tagged in tagged_sentences:
        words = untag(tagged)  # hoisted out of the inner loop
        for index in range(len(tagged)):
            X.append(features(words, index))
            y.append(tagged[index][1])
    return X, y
```
### Build training and testing dataset
```
# 75/25 train/test split over whole sentences (not individual words),
# so no sentence contributes words to both sides.
cutoff = int(.75 * len(tagged_sentences_treebank_corpus))
training_sentences = tagged_sentences_treebank_corpus[:cutoff]
test_sentences = tagged_sentences_treebank_corpus[cutoff:]
print (len(training_sentences))
print (len(test_sentences))
X, y = transform_to_dataset(training_sentences)
print(len(X))
print(len(y))
```
### Train model
### Initialize the classifier
```
# Vectorize the dict features into a dense matrix, then fit an
# entropy-criterion decision tree on the first 20K word samples.
clf = Pipeline([
    ('vectorizer', DictVectorizer(sparse=False)),
    ('classifier', DecisionTreeClassifier(criterion='entropy'))
])
# Use only the first 10K samples if you're running it multiple times. It takes a fair bit :)
clf.fit(X[:20000],y[:20000])
```
### Measure Accuracy
```
# Per-word tagging accuracy on the held-out sentences.
X_test, y_test = transform_to_dataset(test_sentences)
print ("Accuracy:{:.3%}".format(clf.score(X_test, y_test)))
```
### Generate POS tags for given sentence
```
def pos_tag(sentence):
    """Predict a POS tag for every word in ``sentence`` (a list of word
    strings) using the trained ``clf`` pipeline.

    Returns an iterator of (word, predicted_tag) pairs.

    Fix: removed the unused ``tagged_sentence = []`` accumulator left
    over from an earlier version.
    """
    tags = clf.predict([features(sentence, index) for index in range(len(sentence))])
    return zip(sentence, tags)
# Try the tagger on a few example sentences, printing word/tag columns.
POS_list = list(pos_tag(word_tokenize('This is my friend, John.')))
for t in POS_list:
    print(u"{:<16}{:>2}".format(str(t[0]),str(t[1])))
POS_list = list(pos_tag(word_tokenize("We will meet at eight o'clock on Thursday morning.")))
for t in POS_list:
    print(u"{:<16}{:>2}".format(str(t[0]),str(t[1])))
POS_list = list(pos_tag(word_tokenize('Alexander, the great...!')))
for t in POS_list:
    print(u"{:<16}{:>2}".format(str(t[0]),str(t[1])))
POS_list = list(pos_tag(word_tokenize('Alexander the Great, was a king of the ancient Greek kingdom of Macedon.')))
for t in POS_list:
    print(u"{:<16}{:>2}".format(str(t[0]),str(t[1])))
```
| github_jupyter |
# Exploratory Data Analysis of the Lalonde NSW dataset
```
import pandas as pd
import seaborn as sns
from pyuplift.datasets import load_lalonde_nsw
```
## Visualization setups
```
# Global seaborn theme and a wide default figure size, then load the
# Lalonde NSW dataset bundle (a dict-like object with data/feature_names/etc.).
sns.set(style="whitegrid")
sns.set(rc={'figure.figsize':(15, 5)})
df = load_lalonde_nsw(download_if_missing=True)
df.keys()
df['description']
```
## Utils
```
def generate_feature_repr(values):
    """Summarize a feature's distinct values as a two-column DataFrame:
    the (integer) value under 'Name' and its occurrence count under
    'Value', sorted by value."""
    names = sorted(set(values))
    counts = [values[values == name].shape[0] for name in names]
    return pd.DataFrame(data={
        'Name': [int(name) for name in names],
        'Value': counts,
    })
```
## Inspect the treatment variable
```
# Bar chart of group sizes: treatment vs. control.
df['treatment'][:20]
df_repr = generate_feature_repr(df['treatment'])
# Map the 0/1 indicator to readable group labels.
df_repr['Name'] = df_repr['Name'].apply(lambda x: 'Treatment' if x else 'Control')
ax = sns.barplot(x='Name', y='Value', data=df_repr)
ax.set(
    xlabel='',
    ylabel='Number of members',
    title='Number of members of treatment and control groups'
);
```
## Inspect the target variable
```
df['target'][:10]
target = df['target']
# Integer percentage of respondents with a non-zero target (earnings).
non_zero = df['target'][target != 0].shape[0]
print("{}% - non zero values". format(100 * non_zero // target.shape[0]))
ax = sns.distplot(df['target'])
ax.set(
    xlabel='Target value',
    ylabel=' ',
    title="Distribution of the target values"
);
```
## Inspect features
```
# Wrap the raw feature matrix in a labeled dataframe for inspection.
df_features = pd.DataFrame(data=df['data'], columns=df['feature_names'])
df_features.info()
df_features.head()
```
### Features description
* age - age in years.
* educ - years of schooling.
* black - indicator variable for blacks.
* hisp - indicator variable for Hispanics.
* married - indicator variable for marital status.
* nodegr - indicator variable for high school diploma.
* re75 - real earnings in 1975.
### Feature: age
How old are the responders?
```
# Exact value counts of age rendered as a bar chart.
df_repr = generate_feature_repr(df_features['age'].values)
ax = sns.barplot(x='Name', y='Value', data=df_repr)
ax.set(
    xlabel='Age in years',
    ylabel='',
    title='Distribution of the age feature'
);
```
### Feature: educ
How many years of schooling does the responder have?
```
df_repr = generate_feature_repr(df_features['educ'].values)
ax = sns.barplot(x='Name', y='Value', data=df_repr)
ax.set(
xlabel='',
ylabel='',
title='Distribution of the educ feature'
);
```
### Feature: black
Does the responder belong to the Black group?
```
df_repr = generate_feature_repr(df_features['black'].values)
df_repr['Name'] = df_repr['Name'].apply(lambda x: 'Blacks' if x else 'Not Blacks')
ax = sns.barplot(x='Name', y='Value', data=df_repr)
ax.set(
xlabel='',
ylabel='',
title='Distribution of the black feature'
);
```
### Feature: hisp
Does the responder belong to the Hispanic group?
```
df_repr = generate_feature_repr(df_features['hisp'].values)
df_repr['Name'] = df_repr['Name'].apply(lambda x: 'Hispanics' if x else 'Not hispanics')
ax = sns.barplot(x='Name', y='Value', data=df_repr)
ax.set(
xlabel='',
ylabel='',
title='Distribution of the hisp feature'
);
```
### Feature: married
What's the marital status of the responder?
```
df_repr = generate_feature_repr(df_features['married'].values)
df_repr['Name'] = df_repr['Name'].apply(lambda x: 'Married' if x else 'Not married')
ax = sns.barplot(x='Name', y='Value', data=df_repr)
ax.set(
xlabel='',
ylabel='',
title='Distribution of the married feature'
);
```
### Feature: nodegr
Does the responder have a high school diploma?
```
# Bar chart of the nodegr indicator.
# Fix: per the dataset codebook, nodegr == 1 flags *no* high-school
# degree, so the previous mapping (1 -> 'Diploma') was inverted.
df_repr = generate_feature_repr(df_features['nodegr'].values)
df_repr['Name'] = df_repr['Name'].apply(lambda x: 'No diploma' if x else 'Diploma')
ax = sns.barplot(x='Name', y='Value', data=df_repr)
ax.set(
    xlabel='',
    ylabel='',
    title='Distribution of the nodegr feature'
);
```
### Feature: re75
What were the responder's real earnings in 1975?
```
# Distribution of 1975 earnings; also report the share of exact zeros,
# which the density plot alone does not make obvious.
ax = sns.distplot(df_features['re75'])
ax.set(
    xlabel='Earnings in 1975',
    ylabel='',
    title="Distribution of the re75 feature"
);
zeros = 100 * df_features[df_features['re75'] == 0].shape[0] // df_features.shape[0]
print(f"{zeros}% of zero values")
```
| github_jupyter |
```
"""
Running this cell takes some time the first time, because the Numba JIT compilaton that compiles some
"""
from Py3DViewer import Trimesh, Quadmesh, Tetmesh, Hexmesh
from Py3DViewer import Viewer, laplacian_smoothing
import numpy as np
mesh = [Trimesh('data/bunny_tris.obj'), Quadmesh('data/bunny_quad.obj')]
"""
After running this cell, you will notice that the quad bunny is quite larger than the tri bunny,
and is also oriented differently.
"""
viewer = Viewer(mesh, width=1000, height=300)
viewer.show()
"""
Let's first scale the meshes to have a bounding box diagonal of 10, then update the viewer.
NOTE: The viewer update takes some time depending
on the quality of your connect, since the new geometry is streamed from the backend to your browser.
"""
mesh[0].vertices /= np.linalg.norm(mesh[0].bbox[1]-mesh[0].bbox[0])
mesh[0].vertices *= 10
mesh[1].vertices /= np.linalg.norm(mesh[1].bbox[1]-mesh[1].bbox[0])
mesh[1].vertices *=10
viewer.update_controls()
viewer.update()
"""
Let's rotate the quad bunny by using the transform_rotation method
"""
mesh[1].transform_rotation(-90, 'x')
viewer.update()
"""
Let's now do the same with a "reactive" approach.
When using a reactive viewer, the user can simply act on the mesh data however they choose, and the viewer
is asynchronously updated (with a minimal, if any, overhead on the computation).
Again, consider that the redraws are strongly bound by the fact that the Python and Javascript backend are
de-coupled in this context, since Binder runs in the cloud. We highly suggest trying the library locally to
properly experience the reactive redraws.
"""
mesh2 = [Trimesh('data/bunny_tris.obj'), Quadmesh('data/bunny_quad.obj')]
viewer2 = Viewer(mesh2, width=1000, height=500, reactive=True)
viewer2.show()
mesh2[0].vertices /= np.linalg.norm(mesh2[0].bbox[1]-mesh2[0].bbox[0])
mesh2[0].vertices *= 10
mesh2[1].vertices /= np.linalg.norm(mesh2[1].bbox[1]-mesh2[1].bbox[0])
mesh2[1].vertices *=10
viewer2.update_controls()
mesh2[1].transform_rotation(-90, 'x')
"""
Here's an example with the GUI, which only works with a single mesh per scene so far. We are working to support multiple meshes.
"""
mesh3 = Hexmesh('data/double_hinge_hex.mesh')
viewer3 = Viewer(mesh3, width=1000, height=300, with_gui=True)
viewer3.show()
"""
Py3DViewer have texture support for surface meshes.
These are some examples.
"""
spot = Trimesh('data/spot.obj', texture='data/textures/spot_texture.png')
blub = Trimesh('data/blub.obj', texture='data/textures/blub_texture.png')
cube_tris = Trimesh('data/cube_tris_mtl.obj', mtl='data/cube.mtl')
cube_quad = Quadmesh('data/cube_quad_mtl.obj', mtl='data/cube.mtl')
spot.show()
blub.show()
cube_tris.show()
cube_quad.show()
```
| github_jupyter |
1) Antes que nada, lee cuidadosamente estas instrucciones y las que aparecen en la hoja con la contraseña.
2) Cambia el nombre de este archivo sustituyendo "nombre.apellido" por los tuyos, tal como aparecen en tu dirección de correo electrónico de la UAM.
3) Este archivo debe quedar en la carpeta "ENTREGA..." que está en el escritorio de tu cuenta de examen. Lo mejor es que esté en esa carpeta desde el comienzo del examen.
4) <FONT color="red">El examen resuelto debe quedar en este único archivo. No se puede usar un archivo para cada pregunta.</FONT>
5) Recuerda que hay que deshabilitar el salvapantallas al comenzar el examen, tal como está indicado en la hoja con la contraseña.
CALIFICACIÓN:
COMENTARIOS:
1)
### Ejercicio 1
(3 puntos) Si un entero $n$ es un producto de dos primos relativamente próximos no es muy difícil factorizarlo: buscamos factores alrededor de $\sqrt{n}$ y como están en esa zona los encontramos. Pierre de Fermat (1601-1665) propuso un método simple para factorizar enteros que funciona bien en la situación indicada.
```
def fermat(n):
    # Fermat factorization: search upward from ceil(sqrt(n)) for an x such
    # that x^2 - n is a perfect square y^2; then n = (x+y)(x-y).
    # Fast when n = p*q with p and q close to each other.
    # NOTE(review): ceil, sqrt and is_square are Sage built-ins here, and
    # ^ is Sage exponentiation (not Python XOR).
    x = ceil(sqrt(n))
    while not is_square(x^2-n):
        x = x+1
    y = sqrt(x^2-n)
    return (x+y, x-y)
```
Ya en el siglo XX Kraitchik propuso una variante basada en intentar que un múltiplo $k\cdot n$ de $n$ fuera una diferencia de cuadrados. Concretamente,
1. Empezamos con $x$ el menor entero mayor que $\sqrt{n}$ y $k=1$.
2. Si $x^2-k\cdot n$ es un cuadrado, sea $y$ su raíz cuadrada. Si $x+y$ no es múltiplo de $n$ y $x-y$ tampoco, vemos que los factores primos de $n$ deben repartirse entre $x+y$ y $x-y$, y si $n$ sólo tiene dos factores primos $p$ y $q$ los habríamos encontrado: $p$ sería el máximo común divisor de $x+y$ y $n$ y $q$ el de $x-y$ y $n$.
3. Fijado $x$, $k$ debe incrementarse de unidad en unidad mientras sea posible que $x^2-k\cdot n$ sea un cuadrado, hasta que encontramos una factorización, y si no encontramos la factorización debemos incrementar $x$ en una unidad y repetir la búsqueda del $k$ adecuado.
Queremos estudiar si esta variante del método de Fermat es mejor, igual, o peor que el método original.
1. Programa una función $kraitchik(n)$ que implemente este método. Comprueba que funciona correctamente para productos de dos primos grandes pero no muy distantes.
2. Calcula el tiempo total que tarda cada uno de los métodos para factorizar todos los productos de dos primos, $p$ y $q$, en los rangos $10^8 \le p \le 10^8+10^4$, $p<q\le 10^8+10^4$.
3. Repite el cálculo pero ahora $p$ verifica $10^8 \le p \le 10^8+10^4$ y $q$ es un primo que difiere de $p$ en aproximadamente $10^7.$
4. ¿Cuáles son tus conclusiones después los cálculos en 2. y 3.?
```
def kraitchik(n):
    # Kraitchik's variant of Fermat: look for x and a multiplier k with
    # x^2 - k*n a perfect square y^2.  If neither x+y nor x-y is divisible
    # by n, the prime factors of n split between gcd(x+y, n) and
    # gcd(x-y, n).  (Sage code: ^ is exponentiation.)
    x = ceil(sqrt(n))
    while True:
        k = 1
        # For fixed x, try k = 1, 2, ... while x^2 - k*n stays non-negative;
        # then move on to the next x.
        while x^2 - k*n >= 0:
            if is_square(x^2-k*n):
                y = sqrt(x^2-k*n)
                if (x+y) % n != 0 and (x-y) % n != 0:
                    a = gcd(x+y, n)
                    b = gcd(x-y, n)
                    return a,b
            k = k+1
        x = x+1
%time kraitchik(nth_prime(1237)*nth_prime(2100))
%time fermat(nth_prime(1237)*nth_prime(2100))
%time factor(nth_prime(1237)*nth_prime(2100))
```
#### Apartado 2
```
def pruebaf(N1,N2):
for p in prime_range(N1,N2):
for q in prime_range(p,N2):
x = fermat(p*q)
%time pruebaf(10^8,10^8+10^4)
def pruebak(N1,N2):
for p in prime_range(N1,N2):
for q in prime_range(p,N2):
x = kraitchik(p*q)
%time pruebak(10^8,10^8+10^4)
```
#### Apartado 3
```
def pruebaf2(N1,N2):
for p in prime_range(N1,N2):
q = next_prime(p+10^7)
x = fermat(p*q)
%time pruebaf2(10^8,10^8+10^4)
def pruebak2(N1,N2):
for p in prime_range(N1,N2):
q = next_prime(p+10^7)
x = kraitchik(p*q)
%time pruebak2(10^8,10^8+10^4)
```
#### Apartado 4
El resultado de 2. nos dice que los tiempos de los dos algoritmos deben ser esencialmente iguales, en promedio, cuando se aplican a valores de $n$ iguales. Para comprobar esto bien habría que comparar uno a uno. Los resultados de 3. no se deben comparar con los de 2. porque se aplican a valores de $n$ bastante dfiferentes.
Los resultados de 3. prueban que cuando la diferencia entre $p$ y $q$ es grande *kraitchik* no es una mejora de Fermat.
### Ejercicio 2
(3 puntos) Comenzamos describiendo, mediante un ejemplo, un sistema criptográfico de clave pública. La clave pública es un entero primo muy grande y los mensajes son enteros $2\le m < p$. Supongamos que $A$ quiere enviar el mensaje $m:=11111$ a $B$ y la clave pública que han acordado es $p=32611$.
1. En primer lugar $A$ elige un entero aleatorio $a$, digamos entre $2$ y $p$, eleva $m$ al exponente $a$ módulo $p$ y envía el resultado $u$ a $B$. Supongamos que $a$ es $3589$, y entonces $u=15950$.
2. $B$ elige un entero aleatorio $b=4037$, entre $2$ y $p$, eleva $u$ a $b$, módulo $p$, y el resultado $v=15422$ se lo envía a $A$.
3. Ahora $A$ eleva, módulo $p$, $v$ a un exponente $a^{\prime}$, que ya no es aleatorio sino que $A$ lo ha calculado y resulta ser $a^{\prime}=15619$, y el resultado $w=27259$ se lo envía a $B$.
4. Cuando lo recibe, $B$ eleva $w$, módulo $p$, a un exponente $b^{\prime}$, que no es aleatorio sino que $B$ lo ha calculado y resulta ser $b^{\prime}=31883$, y obtiene como resultado el mensaje $11111.$
PREGUNTAS:
1. ¿Cuál es la relación entre $a$ y $a^{\prime}$, $b$ y $b^{\prime}$? Es decir, ¿cómo calculan $A$ y $B$ la segunda parte de su clave privada?
2. Programa una función $cifrar\_descifrar(m,p)$ que reciba como parámetros un mensaje $m$ y la clave pública $p$ y devuelva la tupla $(a,a^{\prime},b,b^{\prime},u,v,w,w^{b^{\prime}}\%p)$. Como $a$ y $b$ son aleatorios, cada vez que se ejecute con el mismo $m$ y el mismo $p$ la respuesta será, en parte, distinta.
3. Comprueba que el método funciona en varios ejemplos. ¿Qué codificación usarías para implementar en la práctica este sistema? Explica detalladamente.
```
power_mod(11111,3589,32611)
power_mod(15950,4037,32611)
power_mod(15422,15619,32611)
power_mod(27257,31883,32611)
```
El resumen del ejemplo es que $(((m^a)^b)^{a^\prime})^{b^\prime}=m$ en $\mathbb{Z}_p$. Como $(((m^a)^b)^{a^\prime})^{b^\prime}=
(((m^a)^{a^\prime})^b)^{b^\prime}$ debemos sospechar, gracias al teorema pequeño de Fermat, que $a^\prime$ es el inverso de $a$ módulo $p-1$ y $b^\prime$ el de $b$. Comprobamos
```
print xgcd(3589,32611-1); print (3589*15619)%32610
print xgcd(4037,32611-1); print (4037*-727)%32610;print 31883+727
def cifrar_descifrar(m,p):
    # Three-pass protocol demo over Z_p (Sage / Python 2 code):
    # a, b are random exponents coprime to p-1; a1, b1 are their inverses
    # mod p-1, so (((m^a)^b)^a1)^b1 = m by Fermat's little theorem.
    # Returns (a, a1, b, b1, u, v, w, z) where z should equal m.
    a,b = 2,2
    while gcd(a,p-1) != 1:
        a = randint(2,p)
        # NOTE(review): indentation was lost in this dump; a += 1 is placed
        # inside the loop (randomize then re-test coprimality).  If it were
        # outside, a would usually lose coprimality with p-1 and the ERROR
        # branch below would fire — confirm against the original notebook.
        a += 1
    a1 = xgcd(a,p-1)[1]%(p-1)
    if (a*a1)%(p-1) != 1:
        print "ERROR al calcular a1"
    while gcd(b,p-1) != 1:
        b= randint(2,p)
    b1 = xgcd(b,p-1)[1]%(p-1)
    if (b*b1)%(p-1) != 1:
        print "ERROR al calcular b1"
    u = power_mod(m,a,p)    # A -> B: m^a
    v = power_mod(u,b,p)    # B -> A: (m^a)^b
    w = power_mod(v,a1,p)   # A -> B: strips a, leaves m^b
    z = power_mod(w,b1,p)   # B: strips b, recovers m
    return a,a1,b,b1,u,v,w,z
cifrar_descifrar(11111,32611)
p = nth_prime(768245)
m = randint(2,p)
print m,p
cifrar_descifrar(m,p)
```
Podemos usar la misma codificación que para RSA, es decir, ...
### Ejercicio 3
Estudiamos un sistema criptográfico que tiene como clave secreta un entero $k$ grande, que no sea un cuadrado perfecto, y otro entero $d$. Funciona en la siguiente manera:
1. Los mensajes $m$ son enteros no negativos de $d$ dígitos.
2. Para encriptar calculamos la raíz cuadrada de $k$ con $d$ dígitos a la derecha del punto decimal y llamamos $\alpha$ a su parte decimal, que supondremos que no tiene un cero a la izquierda. Entonces sumamos el mensaje con $\alpha$ módulo $10^d$ y el resultado es el mensaje encriptado $c$.
3. Para desencriptar restamos $\alpha$ a $c$, módulo $10^d$ y recuperamos $m$.
4. Para mayor seguridad, $A$ y $B$, que comparten la clave secreta $(k,d)$ acuerdan, usar en sucesivos mensajes los trozos sucesivos de longitud $d$ de la parte decimal de $\sqrt{k}$ como alfas.
Supongamos que ahora el malo consigue el primer par $(m,c)$, lo que le permitiría calcular el primer valor de $\alpha$, módulo $10^d$, y también conocería $d$. Suponiendo que $k$ es grande, ¿podría el malo obtener $k$ a partir de la información que conoce? Si consiguiera el valor de $k$ a partir del primer par $(m,c)$ podría desencriptar toda la comunicación posterior que usara la misma clave secreta $k$.
1. (2 puntos) Programa una función que busque $k$ conocido $\alpha$, un entero de $d$ dígitos. Pruébala con $k=87$ y $d=8.$
2. (2 puntos) Programa una función que, para $k$ en un rango $[N1,N2]$ y $d$ fijado, calcule el $\alpha$ correspondiente a $k$ y trate de recuperar $k$ usando la función del apartado 1. ¿Parece que siempre se va a poder recuperar $k$ a partir de $\alpha$? Experimenta con diversos valores de los parámetros.
```
def clave(n,N):
    # Brute-force key recovery: return every k < N whose sqrt(k) has a
    # decimal part starting with the digits of n (Sage code).
    d = len(n.digits())           # number of digits of n (Sage Integer method)
    R = RealField(prec=10*d)      # ample precision for d decimal digits
    L = []
    for k in xsrange(1,N):
        x = R(sqrt(k))-floor(R(sqrt(k)))   # fractional part of sqrt(k)
        if str(x)[2:d+2]==str(n):          # skip '0.' and compare d decimals
            L.append(k)
    return L
%time clave(32737905,10**3)
%time clave(32737905,10**4)
%time clave(32737905,10**5)
def clave2(d,N1,N2,N3):
    # For each k in [N1,N2): derive the d-digit alpha from sqrt(k), then try
    # to recover k from alpha alone with clave(), printing alpha, the true k,
    # and the list of candidates found below N3 (Sage / Python 2 code).
    R = RealField(prec=10*d)
    for k in xsrange(N1,N2):
        x = R(sqrt(k))-floor(R(sqrt(k)))
        #print x
        C = str(x)[2:d+2]
        n = ZZ(C,base=10)
        # Skip alphas that are zero or have a leading zero (the notebook's
        # stated assumption about alpha).
        if n != 0 and C[0] != '0':
            L = clave(n,N3)
            print n,k,L
%time clave2(8,20,100,10**3)
```
Ciertamente parece que el malo podría salirse con la suya, aunque debemos hacer más pruebas.
```
%time clave2(12,500,530,10**4)
%time clave2(20,1237,1239,10**5)
%time clave2(20,5237,5239,10**5)
```
| github_jupyter |
#### New to Plotly?
Plotly's Python library is free and open source! [Get started](https://plot.ly/python/getting-started/) by downloading the client and [reading the primer](https://plot.ly/python/getting-started/).
<br>You can set up Plotly to work in [online](https://plot.ly/python/getting-started/#initialization-for-online-plotting) or [offline](https://plot.ly/python/getting-started/#initialization-for-offline-plotting) mode, or in [jupyter notebooks](https://plot.ly/python/getting-started/#start-plotting-online).
<br>We also have a quick-reference [cheatsheet](https://images.plot.ly/plotly-documentation/images/python_cheat_sheet.pdf) (new!) to help you get started!
#### United States Bubble Map
Note about `sizeref`:
To scale the bubble size, use the attribute sizeref. We recommend using the following formula to calculate a sizeref value:
`sizeref = 2. * max(array of size values) / (desired maximum marker size ** 2)`
Note that setting `sizeref` to a value greater than $1$, decreases the rendered marker sizes, while setting `sizeref` to less than $1$, increases the rendered marker sizes.
See https://plot.ly/python/reference/#scatter-marker-sizeref for more information. Additionally, we recommend setting the sizemode attribute: https://plot.ly/python/reference/#scatter-marker-sizemode to area.
```
import plotly.plotly as py
import pandas as pd

df = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/master/2014_us_cities.csv')
df.head()
# Hover text: city name plus population in millions.
df['text'] = df['name'] + '<br>Population ' + (df['pop']/1e6).astype(str)+' million'
# Row ranges of the (population-sorted) dataframe; one scattergeo trace per bucket.
# NOTE(review): df[3:10] excludes row 2, so the (0,2)/(3,10) boundaries skip
# one city entirely — confirm the intended bucket edges.
limits = [(0,2),(3,10),(11,20),(21,50),(50,3000)]
colors = ["rgb(0,116,217)","rgb(255,65,54)","rgb(133,20,75)","rgb(255,133,27)","lightgrey"]
cities = []
scale = 5000  # divisor turning population into a marker area
for i in range(len(limits)):
    lim = limits[i]
    df_sub = df[lim[0]:lim[1]]
    city = dict(
        type = 'scattergeo',
        locationmode = 'USA-states',
        lon = df_sub['lon'],
        lat = df_sub['lat'],
        text = df_sub['text'],
        marker = dict(
            size = df_sub['pop']/scale,
            # sizeref = 2. * max(df_sub['pop']/scale) / (25 ** 2),
            color = colors[i],
            line = dict(width=0.5, color='rgb(40,40,40)'),
            sizemode = 'area'
        ),
        name = '{0} - {1}'.format(lim[0],lim[1]) )
    cities.append(city)
layout = dict(
    title = '2014 US city populations<br>(Click legend to toggle traces)',
    showlegend = True,
    geo = dict(
        scope='usa',
        projection=dict( type='albers usa' ),
        showland = True,
        landcolor = 'rgb(217, 217, 217)',
        subunitwidth=1,
        countrywidth=1,
        subunitcolor="rgb(255, 255, 255)",
        countrycolor="rgb(255, 255, 255)"
    ),
)
fig = dict(data=cities, layout=layout)
py.iplot(fig, validate=False, filename='d3-bubble-map-populations')
```
#### Ebola Cases in West Africa
```
import plotly.plotly as py
import plotly.graph_objs as go
import pandas as pd
df = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/master/2014_ebola.csv')
df.head()
cases = []
colors = ['rgb(239,243,255)','rgb(189,215,231)','rgb(107,174,214)','rgb(33,113,181)']
months = {6:'June',7:'July',8:'Aug',9:'Sept'}
for i in range(6,10)[::-1]:
cases.append(go.Scattergeo(
lon = df[ df['Month'] == i ]['Lon'], #-(max(range(6,10))-i),
lat = df[ df['Month'] == i ]['Lat'],
text = df[ df['Month'] == i ]['Value'],
name = months[i],
marker = dict(
size = df[ df['Month'] == i ]['Value']/50,
color = colors[i-6],
line = dict(width = 0)
)
)
)
cases[0]['text'] = df[ df['Month'] == 9 ]['Value'].map('{:.0f}'.format).astype(str)+' '+\
df[ df['Month'] == 9 ]['Country']
cases[0]['mode'] = 'markers+text'
cases[0]['textposition'] = 'bottom center'
inset = [
go.Choropleth(
locationmode = 'country names',
locations = df[ df['Month'] == 9 ]['Country'],
z = df[ df['Month'] == 9 ]['Value'],
text = df[ df['Month'] == 9 ]['Country'],
colorscale = [[0,'rgb(0, 0, 0)'],[1,'rgb(0, 0, 0)']],
autocolorscale = False,
showscale = False,
geo = 'geo2'
),
go.Scattergeo(
lon = [21.0936],
lat = [7.1881],
text = ['Africa'],
mode = 'text',
showlegend = False,
geo = 'geo2'
)
]
layout = go.Layout(
title = 'Ebola cases reported by month in West Africa 2014<br> \
Source: <a href="https://data.hdx.rwlabs.org/dataset/rowca-ebola-cases">\
HDX</a>',
geo = dict(
resolution = 50,
scope = 'africa',
showframe = False,
showcoastlines = True,
showland = True,
landcolor = "rgb(229, 229, 229)",
countrycolor = "rgb(255, 255, 255)" ,
coastlinecolor = "rgb(255, 255, 255)",
projection = dict(
type = 'mercator'
),
lonaxis = dict( range= [ -15.0, -5.0 ] ),
lataxis = dict( range= [ 0.0, 12.0 ] ),
domain = dict(
x = [ 0, 1 ],
y = [ 0, 1 ]
)
),
geo2 = dict(
scope = 'africa',
showframe = False,
showland = True,
landcolor = "rgb(229, 229, 229)",
showcountries = False,
domain = dict(
x = [ 0, 0.6 ],
y = [ 0, 0.6 ]
),
bgcolor = 'rgba(255, 255, 255, 0.0)',
),
legend = dict(
traceorder = 'reversed'
)
)
fig = go.Figure(layout=layout, data=cases+inset)
py.iplot(fig, validate=False, filename='West Africa Ebola cases 2014')
```
#### Reference
See https://plot.ly/python/reference/#choropleth and https://plot.ly/python/reference/#scattergeo for more information and chart attribute options!
```
from IPython.display import display, HTML
# Inject the Plotly documentation fonts and notebook stylesheet.
display(HTML('<link href="//fonts.googleapis.com/css?family=Open+Sans:600,400,300,200|Inconsolata|Ubuntu+Mono:400,700" rel="stylesheet" type="text/css" />'))
display(HTML('<link rel="stylesheet" type="text/css" href="http://help.plot.ly/documentation/all_static/css/ipython-notebook-custom.css">'))
# Publish this notebook to the plot.ly docs site (IPython shell magic).
! pip install git+https://github.com/plotly/publisher.git --upgrade
import publisher
publisher.publish(
    'bubble-maps.ipynb', 'python/bubble-maps/', 'Bubble Maps',
    'How to make bubble maps in Python with Plotly.',
    title = 'Python Bubble Maps | Plotly',
    has_thumbnail='true', thumbnail='thumbnail/bubble-map.jpg',
    language='python',
    page_type='example_index',
    display_as='maps', order=3, # ipynb='~notebook_demo/1',
    uses_plotly_offline=False)
```
| github_jupyter |
# <a href="http://www.datascience-paris-saclay.fr">Paris Saclay Center for Data Science</a>
# <a href=https://www.ramp.studio/problems/air_passengers>RAMP</a> on predicting the number of air passengers
<i> Balázs Kégl (LAL/CNRS), Alex Gramfort (LTCI/Telecom ParisTech), Djalel Benbouzid (UPMC), Mehdi Cherti (LAL/CNRS) </i>
## Introduction
The data set was donated to us by an unnamed company handling flight ticket reservations. The data is thin, it contains
<ul>
<li> the date of departure
<li> the departure airport
<li> the arrival airport
<li> the mean and standard deviation of the number of weeks of the reservations made before the departure date
<li> a field called <code>log_PAX</code> which is related to the number of passengers (the actual number were changed for privacy reasons)
</ul>
The goal is to predict the <code>log_PAX</code> column. The prediction quality is measured by RMSE.
The data is obviously limited, but since date and location information is available, it can be joined to external data sets. <b>The challenge in this RAMP is to find good data that can be correlated to flight traffic</b>.
```
%matplotlib inline
# NOTE(review): `imp` is deprecated since Python 3.4; importlib would be the
# modern replacement. It is used below to load problem.py as a module.
import imp
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Show every column when displaying wide DataFrames.
pd.set_option('display.max_columns', None)
# !pip install -U seaborn # if you don't have it, or pip3 for python3
# optional
import seaborn as sns; sns.set()
```
## Fetch the data and load it in pandas
First we load `problem.py` that parameterizes the challenge. It contains some objects taken off the shelf from `ramp-workflow` (e.g., `Predictions` type, scores, and data reader).
```
# Load the challenge definition (Predictions type, scores, data readers,
# CV scheme) from problem.py as an anonymous module.
problem = imp.load_source('', 'problem.py')
```
`get_train_data` loads the training data and returns a `pandas` object (input) and a `np.array` object (output).
```
# Load the training set and inspect it: date range, airports, target and
# numeric-feature distributions, dtypes, and shape.
X_df, y_array = problem.get_train_data()
print(min(X_df['DateOfDeparture']))
print(max(X_df['DateOfDeparture']))
X_df.head()
X_df['Departure'].unique()
plt.hist(y_array, bins=50);
X_df.hist('std_wtd', bins=50);
X_df.hist('WeeksToDeparture', bins=50);
X_df.describe()
X_df.dtypes
X_df.shape
print(y_array.mean())
print(y_array.std())
```
## Preprocessing for prediction
Getting dates into numerical columns is a common operation when time series are analyzed with non-parametric predictors. The code below makes all possible choices: ordered columns for the year, month, day, weekday, week, and day in the year, and one-hot columns for year month, day, weekday, and week.
The departure and arrival airports are also converted into one-hot columns.
```
# Build the feature matrix: one-hot encode the airports, then expand the
# departure date into ordinal and one-hot calendar columns.
X_encoded = X_df
X_encoded = X_encoded.join(pd.get_dummies(X_encoded['Departure'], prefix='d'))
X_encoded = X_encoded.join(pd.get_dummies(X_encoded['Arrival'], prefix='a'))
X_encoded = X_encoded.drop('Departure', axis=1)
X_encoded = X_encoded.drop('Arrival', axis=1)
# following http://stackoverflow.com/questions/16453644/regression-with-date-variable-using-scikit-learn
X_encoded['DateOfDeparture'] = pd.to_datetime(X_encoded['DateOfDeparture'])
X_encoded['year'] = X_encoded['DateOfDeparture'].dt.year
X_encoded['month'] = X_encoded['DateOfDeparture'].dt.month
X_encoded['day'] = X_encoded['DateOfDeparture'].dt.day
X_encoded['weekday'] = X_encoded['DateOfDeparture'].dt.weekday
# Series.dt.week was deprecated and removed in pandas 2.0; use the ISO
# calendar week instead, cast back to a plain int64 column.
X_encoded['week'] = X_encoded['DateOfDeparture'].dt.isocalendar().week.astype(np.int64)
# Days since the Unix epoch: a single ordinal time axis.
X_encoded['n_days'] = X_encoded['DateOfDeparture'].apply(lambda date: (date - pd.to_datetime("1970-01-01")).days)
X_encoded = X_encoded.join(pd.get_dummies(X_encoded['year'], prefix='y'))
X_encoded = X_encoded.join(pd.get_dummies(X_encoded['month'], prefix='m'))
X_encoded = X_encoded.join(pd.get_dummies(X_encoded['day'], prefix='d'))
X_encoded = X_encoded.join(pd.get_dummies(X_encoded['weekday'], prefix='wd'))
X_encoded = X_encoded.join(pd.get_dummies(X_encoded['week'], prefix='w'))
X_encoded.tail(5)
```
### A linear regressor baseline
We drop the target column and the original data column.
```
# Drop the raw date column (its information now lives in the derived
# calendar features) and split the arrays for evaluation.
features = X_encoded.drop(['DateOfDeparture'], axis=1)
X_columns = features.columns
X_array = features.values
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
    X_array, y_array, test_size=0.2, random_state=0)
```
It gives us a pretty nice improvement above baseline
```
# Linear-regression baseline, scored by cross-validated RMSE.
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_val_score
reg = LinearRegression()
scores = cross_val_score(
    reg, X_train, y_train, cv=5, scoring='neg_mean_squared_error')
# sklearn reports *negative* MSE, hence the sign flip before the sqrt.
print("RMSE: {:.4f} +/- {:.4f}".format(
    np.mean(np.sqrt(-scores)), np.std(np.sqrt(-scores))))
```
Exercise: Visualize the coefficients, try to make sense of them.
## Random Forests
```
%%time
# Random-forest baseline with modest capacity; n_jobs=3 parallelises the CV.
from sklearn.ensemble import RandomForestRegressor
n_estimators = 10
max_depth = 10
max_features = 10
reg = RandomForestRegressor(
    n_estimators=n_estimators, max_depth=max_depth, max_features=max_features)
scores = cross_val_score(
    reg, X_train, y_train, cv=5, scoring='neg_mean_squared_error',n_jobs=3)
print("RMSE: {:.4f} +/- {:.4f}".format(
    np.mean(np.sqrt(-scores)), np.std(np.sqrt(-scores))))
```
## Variable importances
```
# Fit once on the training split and plot the 50 largest feature importances.
reg.fit(X_train, y_train)
len(X_columns)
plt.figure(figsize=(15, 5))
ordering = np.argsort(reg.feature_importances_)[::-1][:50]
importances = reg.feature_importances_[ordering]
feature_names = X_columns[ordering]
x = np.arange(len(feature_names))
plt.bar(x, importances)
plt.xticks(x + 0.5, feature_names, rotation=90, fontsize=15);
```
## Building predictive models
### The feature extractor
The feature extractor implements a single <code>transform</code> function. It receives the full pandas object X_df (without the labels). It should produce a numpy array representing the features extracted. If you want to use the (training) labels to save some state of the feature extractor, you can do it in the fit function.
The starting kit feature extractor shows you how to join your data to external data. You will have the possibility to submit a single external csv for each of your submissions (so if you have several data sets, you first have to do the join offline, and save it as a csv). In this case it is weather data, joined to the data on the <code>DateOfDeparture</code> and <code>Arrival</code> fields. Attention: when you join the data, make sure that the <b><font color=red>order</font> of the rows in the data frame does not change</b>.
```
import pandas as pd
import os
class FeatureExtractor(object):
    """Stateless feature builder for the starting kit.

    Left-joins the external weather CSV onto the flight table (keeping the
    original row order), then one-hot encodes the airports.
    """

    def __init__(self):
        pass

    def fit(self, X_df, y_array):
        # Nothing to learn; kept for the ramp-workflow interface.
        pass

    def transform(self, X_df):
        # Load the external weather table shipped next to this file and keep
        # only the columns we join on plus the temperature feature.
        here = os.path.dirname(__file__)
        weather = pd.read_csv(os.path.join(here, 'external_data.csv'))
        weather = weather[['Date', 'AirPort', 'Max TemperatureC']]
        weather = weather.rename(
            columns={'Date': 'DateOfDeparture', 'AirPort': 'Arrival'})
        # Left merge with sort=False so every original row is kept in place.
        merged = pd.merge(
            X_df, weather, how='left',
            left_on=['DateOfDeparture', 'Arrival'],
            right_on=['DateOfDeparture', 'Arrival'],
            sort=False)
        # One-hot encode both airports, then drop the raw categorical/date
        # columns so only numeric features remain.
        merged = merged.join(pd.get_dummies(merged['Departure'], prefix='d'))
        merged = merged.join(pd.get_dummies(merged['Arrival'], prefix='a'))
        merged = merged.drop(['Departure', 'Arrival', 'DateOfDeparture'], axis=1)
        return merged.values
# we need this because the global variable __file__ (the path of the current file)
# does not exist if we are in a notebook
__file__ = 'submissions/starting_kit/'
# Smoke-test the extractor on the training set.
fe = FeatureExtractor()
fe.fit(X_df, y_array)
X_array = fe.transform(X_df)
X_array.shape
```
### The regressor
The regressor should implement an sklearn-like regressor with `fit` and `predict` functions.
```
from sklearn.ensemble import RandomForestRegressor
from sklearn.base import BaseEstimator


class Regressor(BaseEstimator):
    """Thin sklearn-style wrapper around a fixed-hyperparameter random forest."""

    def __init__(self):
        # Starting-kit hyper-parameters: 20 trees, depth 50, 10 features per split.
        self.reg = RandomForestRegressor(
            max_features=10, max_depth=50, n_estimators=20)

    def fit(self, X, y):
        self.reg.fit(X, y)

    def predict(self, X):
        return self.reg.predict(X)
# Train on the full training set and score in-sample...
reg = Regressor()
reg.fit(X_array, y_array)
y_pred = reg.predict(X_array)
score_type = problem.score_types[0]
score = score_type(y_array, y_pred)
score
# ...then on the held-out test set.
X_test_df, y_test_array = problem.get_test_data()
X_test_array = fe.transform(X_test_df)
y_test_pred = reg.predict(X_test_array)
score_type(y_test_array, y_test_pred)
# Inspect the CV folds provided by the challenge definition.
cv = problem.get_cv(X_df, y_array)
for fold in cv:
    print(fold)
# Re-create the folds (presumably a generator that was consumed above --
# confirm in problem.py) and run a manual cross-validation loop that
# mirrors what ramp_test_submission does.
cv = problem.get_cv(X_df, y_array)
valid_scores = np.array([])
for fold in cv:
    train_is, valid_is = fold
    X_fold_train = X_df.iloc[train_is]
    y_fold_train = y_array[train_is]
    X_fold_valid = X_df.iloc[valid_is]
    y_fold_valid = y_array[valid_is]
    # Fit the feature extractor on the train fold only, then transform both.
    fe.fit(X_fold_train, y_fold_train)
    X_fold_train_array = fe.transform(X_fold_train)
    reg.fit(X_fold_train_array, y_fold_train)
    X_fold_valid_array = fe.transform(X_fold_valid)
    y_fold_valid_pred = reg.predict(X_fold_valid_array)
    valid_score = score_type(y_fold_valid, y_fold_valid_pred)
    valid_scores = np.append(valid_scores, valid_score)
print('validation scores =', valid_scores)
print('validation score mean =', valid_scores.mean())
print('validation score std =', valid_scores.std())
```
## Local testing (before submission)
It is <b><span style="color:red">important that you test your submission files before submitting them</span></b>. For this we provide a unit test. Note that the test runs on your files in [`submissions/starting_kit`](http://localhost:8890/tree/submissions/starting_kit).
First `pip install ramp-workflow` or install it from the [github repo](https://github.com/paris-saclay-cds/ramp-workflow). Make sure that the python files `feature_extractor.py`, `regressor.py`, and `external_data.csv` are in the [`submissions/starting_kit`](http://localhost:8890/tree/submissions/starting_kit) folder, and the data `train.csv.bz2` and `test.csv.bz2` are in [`data`](http://localhost:8890/tree/data). Then run
```ramp_test_submission```
If it runs and prints training and test errors on each fold, then you can submit the code.
```
!ramp_test_submission
```
Alternatively, load and execute `rampwf.utils.testing.py`, and call `assert_submission`. This may be useful if you would like to understand how we instantiate the workflow, the scores, the data connectors, and the cross validation scheme defined in [`problem.py`](problem.py), and how we insert and train/test your submission.
```
# %load https://raw.githubusercontent.com/paris-saclay-cds/ramp-workflow/master/rampwf/utils/testing.py
# assert_submission()
```
## Submitting to [ramp.studio](http://ramp.studio)
Once you have found a good model, you can submit it to [ramp.studio](http://www.ramp.studio). First, if it is your first time using RAMP, [sign up](http://www.ramp.studio/sign_up), otherwise [log in](http://www.ramp.studio/login). Then find an open event on the particular problem, for example, the event [DSSP 6](https://www.ramp.studio/events/air_passengers_dssp6) for this RAMP. Sign up for the event. Both signups are controlled by RAMP administrators, so there **can be a delay between asking for signup and being able to submit**.
Once your signup request is accepted, you can go to your [sandbox](http://www.ramp.studio/events/air_passengers_dssp6/sandbox) and copy-paste (or upload) [`feature_extractor.py`](http://localhost:8890/edit/submissions/starting_kit/feature_extractor.py), [`regressor.py`](http://localhost:8890/edit/submissions/starting_kit/regressor.py), and [`external_data.csv`](http://localhost:8890/edit/submissions/starting_kit/external_data.csv) from `submissions/starting_kit`. Save it, rename it, then submit it. The submission is trained and tested on our backend in the same way as `ramp_test_submission` does it locally. While your submission is waiting in the queue and being trained, you can find it in the "New submissions (pending training)" table in [my submissions](http://www.ramp.studio/events/air_passengers_dssp6/my_submissions). Once it is trained, you get a mail, and your submission shows up on the [public leaderboard](http://www.ramp.studio/events/air_passengers_dssp6/leaderboard).
If there is an error (despite having tested your submission locally with `ramp_test_submission`), it will show up in the "Failed submissions" table in [my submissions](http://www.ramp.studio/events/air_passengers_dssp6/my_submissions). You can click on the error to see part of the trace.
After submission, do not forget to give credits to the previous submissions you reused or integrated into your submission.
The data set we use at the backend is usually different from what you find in the starting kit, so the score may be different.
The usual way to work with RAMP is to explore solutions, add feature transformations, select models, perhaps do some AutoML/hyperopt, etc., _locally_, and checking them with `ramp_test_submission`. The script prints mean cross-validation scores
```
----------------------------
train rmse = 0.748 ± 0.0117
valid rmse = 0.858 ± 0.0111
test rmse = 0.881 ± 0.005
```
The official score in this RAMP (the first score column after "historical contributivity" on the [leaderboard](http://www.ramp.studio/events/air_passengers_dssp6/leaderboard)) is root mean squared error ("rmse"), so the line that is relevant in the output of `ramp_test_submission` is `valid rmse = 0.858 ± 0.0111`. When the score is good enough, you can submit it at the RAMP.
## More information
You can find more information in the [README](https://github.com/paris-saclay-cds/ramp-workflow/blob/master/README.md) of the [ramp-workflow library](https://github.com/paris-saclay-cds/ramp-workflow).
## Contact
Don't hesitate to [contact us](mailto:admin@ramp.studio?subject=air passengers notebook).
```
from problem import get_train_data
X_df, y = get_train_data()
from sklearn.ensemble import RandomForestRegressor
reg = RandomForestRegressor()
# NOTE(review): X_df still contains string columns here, so this fit is
# expected to fail -- presumably kept to motivate the encoding below; confirm.
reg.fit(X_df, y)
X_df.head()
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import StandardScaler
# NOTE(review): the `sparse` argument was renamed `sparse_output` in
# scikit-learn 1.2; this call targets older sklearn.
ohe = OneHotEncoder(sparse=False)
X_departure = X_df[['Departure']]
X_departure.head()
# One encoded column per distinct departure airport.
ohe.fit_transform(X_departure)
ohe.fit_transform(X_departure).shape
X_departure['Departure'].unique().shape
import numpy as np
import pandas as pd
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import OrdinalEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import FunctionTransformer
# Per-column preprocessing: one-hot encode the airports, standardise the
# numeric reservation statistics.
column_name_ohe = ['Departure', 'Arrival']
processing_ohe = OneHotEncoder()
column_name_numerical = ['WeeksToDeparture', 'std_wtd']
processing_numerical = StandardScaler()
def func_preprocessing_date(column):
    """Turn a single-column DataFrame of departure dates into a
    (n_samples, 2) integer array of [ISO weekday, ISO week number]."""
    column_date = pd.to_datetime(column.iloc[:, 0])
    weekday = column_date.dt.weekday.values[:, np.newaxis]
    # Series.dt.week was deprecated and removed in pandas 2.0; use the ISO
    # calendar week, cast to int64 so the output stays a plain integer array.
    weeknumber = column_date.dt.isocalendar().week.astype(np.int64).values[:, np.newaxis]
    return np.concatenate([weekday, weeknumber], axis=1)
# Wrap the date parser as a stateless transformer and assemble the full
# column-wise preprocessor.
column_date = ['DateOfDeparture']
processing_date = FunctionTransformer(
    func=func_preprocessing_date, validate=False
)
preprocessor = ColumnTransformer(
    [
        ("airports", processing_ohe, column_name_ohe),
        ("wtd", processing_numerical, column_name_numerical),
        ("date", processing_date, column_date)
    ]
)
preprocessor.fit_transform(X_df)
X_transform = preprocessor.fit_transform(X_df)
# In-sample fit/predict sanity check.
reg.fit(X_transform, y)
y_pred = reg.predict(X_transform)
y_pred
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
    X_df, y
)
# Fit the preprocessor on the training split only, then score held-out data.
X_train_transform = preprocessor.fit_transform(X_train)
reg.fit(X_train_transform, y_train)
X_test_trans = preprocessor.transform(X_test)
reg.score(X_test_trans, y_test)
import numpy as np
import pandas as pd
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import OrdinalEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import FunctionTransformer
from sklearn.ensemble import RandomForestRegressor
from sklearn.pipeline import make_pipeline
from sklearn.model_selection import cross_val_score
# Same preprocessing as above, rebuilt self-contained for the final pipeline.
column_name_ohe = ['Departure', 'Arrival']
processing_ohe = OneHotEncoder()
column_name_numerical = ['WeeksToDeparture', 'std_wtd']
processing_numerical = StandardScaler()
def func_preprocessing_date(column):
    """Turn a single-column DataFrame of departure dates into a
    (n_samples, 2) integer array of [ISO weekday, ISO week number]."""
    column_date = pd.to_datetime(column.iloc[:, 0])
    weekday = column_date.dt.weekday.values[:, np.newaxis]
    # Series.dt.week was deprecated and removed in pandas 2.0; use the ISO
    # calendar week, cast to int64 so the output stays a plain integer array.
    weeknumber = column_date.dt.isocalendar().week.astype(np.int64).values[:, np.newaxis]
    return np.concatenate([weekday, weeknumber], axis=1)
column_date = ['DateOfDeparture']
processing_date = FunctionTransformer(
    func=func_preprocessing_date, validate=False
)
preprocessor = ColumnTransformer(
    [
        ("airports", processing_ohe, column_name_ohe),
        ("wtd", processing_numerical, column_name_numerical),
        ("date", processing_date, column_date)
    ]
)
# End-to-end pipeline (preprocessing + random forest), scored by 3-fold CV RMSE.
reg = RandomForestRegressor()
pipe = make_pipeline(preprocessor, reg)
scores = cross_val_score(pipe, X_df, y, cv=3,
                         scoring='neg_mean_squared_error')
np.sqrt(-scores)
```
| github_jupyter |
<h1> Text generation using a GRU </h1>
```
import tensorflow as tf
import pandas as pd
import numpy as np
import codecs
import re
# Embedding width and GRU hidden-state size.
EMBED_DIMENSION = 50
HIDDEN_SIZE = 256
# Read the corpus, ignoring byte sequences that are not valid UTF-8.
with codecs.open('/tmp/kernel.txt', 'r', encoding='utf-8', errors='ignore') as kernel_file:
    raw_text = kernel_file.read()
# Split on C-style operators/punctuation while keeping the delimiters
# (capture groups make re.split return them, plus None for non-matches).
kernel_words = re.split('(\-\>)|([\-\>+\=\<\/\&\|\(\)\:\*])',raw_text)
kernel_words = [w for w in kernel_words if w is not None]
kernel_words = kernel_words[0:300000]
# NOTE(review): this set() keeps only *unique* tokens in arbitrary order, and
# X_train below is built from that set -- so the "sequence" being modelled is
# not the original token stream. Confirm this is intended.
kernel_words = set(kernel_words)
kword_to_int = dict((word, i) for i, word in enumerate(kernel_words))
int_to_kword = dict((i, word) for i, word in enumerate(kernel_words))
v_size = len(kword_to_int)
# Reserve an out-of-vocabulary token at the end of the vocabulary.
kword_to_int['<UNK>'] = v_size
int_to_kword[v_size] = '<UNK>'
v_size += 1
# Next-word targets: y is X shifted left by one, padded with <UNK>.
X_train = [kword_to_int[word] for word in kernel_words]
y_train = X_train[1:]
y_train.append(kword_to_int['<UNK>'])
X_train = np.asarray(X_train)
y_train = np.asarray(y_train)
# Add a sequence-length-1 axis: shapes become (n, 1).
X_train = np.expand_dims(X_train,axis=1)
y_train = np.expand_dims(y_train,axis=1)
print(X_train.shape, y_train.shape)
def estimator_spec_for_generation(flayer_op, lbls, md):
    """Build the tf.estimator.EstimatorSpec for PREDICT/TRAIN/EVAL modes.

    flayer_op: final-layer logits, reshaped below to (batch, v_size).
    lbls:      integer word labels (unused in PREDICT mode).
    md:        one of tf.estimator.ModeKeys.
    """
    preds_cls = tf.argmax(flayer_op, 1)
    if md == tf.estimator.ModeKeys.PREDICT:
        # Keep only the logits of the last time step and normalise them.
        prev_op = tf.reshape(flayer_op, [-1, 1, v_size])[:, -1, :]
        preds_op = tf.nn.softmax(prev_op)
        return tf.estimator.EstimatorSpec(
            mode=md,
            predictions={
                'preds_probs': preds_op
            })
    trng_loss = tf.losses.sparse_softmax_cross_entropy(labels=lbls, logits=flayer_op)
    if md == tf.estimator.ModeKeys.TRAIN:
        optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
        trng_op = optimizer.minimize(trng_loss, global_step=tf.train.get_global_step())
        return tf.estimator.EstimatorSpec(md, loss=trng_loss, train_op=trng_op)
    # EVAL mode. BUG FIX: the original returned train_op=trng_op here, but
    # trng_op is only defined inside the TRAIN branch (NameError at eval
    # time); report the accuracy metric instead.
    ev_met_ops = {'accy': tf.metrics.accuracy(labels=lbls, predictions=preds_cls)}
    return tf.estimator.EstimatorSpec(md, loss=trng_loss, eval_metric_ops=ev_met_ops)
def rnn_model_fn(features, labels, mode):
    """Model function: embedding lookup -> single GRU -> dense projection
    to vocabulary logits, delegated to estimator_spec_for_generation.

    Variable/layer names ("word_embeddings", "linear") are part of the
    checkpoint, so they must not be renamed.
    """
    # Trainable embedding table, initialised with truncated normals scaled
    # by 1/sqrt(dim).
    embedding = tf.Variable(tf.truncated_normal([v_size, EMBED_DIMENSION],
                                                stddev=1.0/np.sqrt(EMBED_DIMENSION)),
                            name="word_embeddings")
    word_emb = tf.nn.embedding_lookup(embedding, features['word'])
    rnn_cell = tf.nn.rnn_cell.GRUCell(HIDDEN_SIZE)
    outputs, _ = tf.nn.dynamic_rnn(rnn_cell, word_emb, dtype=tf.float32)
    # Flatten (batch, time, hidden) to (batch*time, hidden) before the
    # final projection to vocabulary logits.
    outputs = tf.reshape(outputs, [-1, HIDDEN_SIZE])
    flayer_op = tf.layers.dense(outputs, v_size, name="linear")
    return estimator_spec_for_generation(flayer_op, labels, mode)
# Estimator wiring: checkpoint to /tmp/models with frequent summaries/logging.
run_config = tf.contrib.learn.RunConfig()
run_config = run_config.replace(model_dir='/tmp/models/',save_summary_steps=10,log_step_count_steps=10)
generator = tf.estimator.Estimator(model_fn=rnn_model_fn,config=run_config)
train_input_fn = tf.estimator.inputs.numpy_input_fn(
    x={'word': X_train},
    y=y_train,
    batch_size=1024,
    num_epochs=None,
    shuffle=True)
generator.train(input_fn=train_input_fn, steps=300)
# Greedy generation: repeatedly predict from a sliding 60-token window and
# append the argmax word, maxlen times.
maxlen = 40
next_x = X_train[0:60]
text = "".join([int_to_kword[word] for word in next_x.flatten()])
for i in range(maxlen):
    test_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={'word': next_x},
        num_epochs=1,
        shuffle=False)
    predictions = generator.predict(input_fn=test_input_fn)
    predictions = list(predictions)
    # Take the most likely next word after the last position in the window.
    word = int_to_kword[np.argmax(predictions[-1]['preds_probs'])]
    text = text + word
    # Slide the window: append the chosen word, drop the oldest token.
    next_x = np.concatenate((next_x,[[kword_to_int[word]]]))
    next_x = next_x[1:]
print(text)
```
| github_jupyter |
# <font color = #254117>[EEP 147]: ESG Analysis Notebook - Final</font>
<div style="width:image width px; font-size:80%; text-align:center;"><img src="big_creek.jpg" alt="alternate text" width="500" height="height" style="padding-bottom:0.5em;" />Big Creek Hydroelectric Project - Southern California Edison</div>
This notebook can be utilized for analysis of the Electricity Strategy Game.
First on our agenda is to import **<font color = ##008700>dependencies</font>** -- packages in Python that add to the basic functions in Python.
```
from datascience import *
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
%matplotlib inline
import numpy as np
import pandas as pd
from ipywidgets import interact, interactive, Dropdown, IntSlider, BoundedFloatText
import ipywidgets as widgets
from functools import partial
from IPython.display import display
# Consistent styling for every chart in the notebook.
plt.style.use('fivethirtyeight')
plt.rcParams["figure.figsize"] = [10,6]
# Silence deprecation/future-warning chatter.
# NOTE(review): np.warnings was removed in NumPy 1.24+; the stdlib warnings
# module is the replacement if this breaks on newer NumPy.
import warnings
np.warnings.filterwarnings('ignore', category=np.VisibleDeprecationWarning)
warnings.simplefilter(action='ignore', category=FutureWarning)
```
The variable **current_period** should contain the current round.
The variable **pab_periods** should contain each of the periods for which there was or will be a pay-as-bid auction. This shouldn't change.
```
# Current round of the game (update each round; see the markdown above).
current_period = 1
# Rounds run as pay-as-bid auctions; per the markdown, this shouldn't change.
pab_periods = [1]
```
Next we import Demand (realized and forecasted), Bids, Porfolios, and the Auction results.
```
# Load demand (realized and forecasted), marginal-cost bids, and portfolios.
demand_table = Table.read_table('demand.csv')
bids_mc = Table.read_table('Bids/MC_bids.csv').sort('PORTFOLIO')
ESG = Table.read_table('ESGPorfolios.csv')
#auction_results = Table.read_table('portfolio_auction.csv')
```
In the following cell we will join the tables based on the column **Plant_ID**. We will incorporate the actual bids of the rounds completed.
```
def get_bids(section):
    """Return the bid table for `section`, joined with portfolio data.

    Starts from the marginal-cost bids and, for every period already
    played, overwrites the rows with the bids actually submitted by the
    section. The result is joined with the ESG portfolio table on
    Plant_ID.
    """
    bids_all = bids_mc.copy()
    if (current_period > 0) & (current_period < 8):
        bids_all_df = bids_all.sort("PLANT_ID").to_df()
        if current_period > 0:
            # The practice round (period 0) file may be missing for a
            # section; fall back to marginal-cost bids in that case.
            try:
                bids_practice = Table.read_table('Bids/' + section + '_bids_0.csv').sort('PORTFOLIO')
                bids_practice_df = bids_practice.sort("PLANT_ID").to_df()
                bids_all_df.loc[bids_all_df["PERIOD"] == 0] = bids_practice_df.loc[bids_practice_df["PERIOD"] == 0].values
            # BUG FIX: was a bare `except:`, which also swallows
            # KeyboardInterrupt/SystemExit; keep the deliberate
            # best-effort behaviour but only for ordinary errors.
            except Exception:
                pass
        if current_period > 1:
            # The latest bid file contains every completed period's bids.
            bids_actual = Table.read_table('Bids/' + section + '_bids_' + str(current_period - 1) + '.csv').sort('PORTFOLIO')
            bids_actual_df = bids_actual.sort("PLANT_ID").to_df()
            for period_i in range(1, current_period):
                bids_all_df.loc[bids_all_df["PERIOD"] == period_i] = bids_actual_df.loc[bids_actual_df["PERIOD"] == period_i].values
        bids_all = Table.from_df(bids_all_df)
    joined_table_all = bids_all.join("PLANT_ID", ESG, "Plant_ID").sort("PLANT_ID")
    return joined_table_all
```
Define helper functions
```
# One fixed color per portfolio so all plots share a consistent palette.
energy_colors_dict = {'Bay Views' : '#EC5F67', 'Beachfront' : '#F29056', 'Big Coal' : '#F9C863', 'Big Gas' : '#99C794',
                      'East Bay' : '#5FB3B3', 'Fossil Light' : '#6699CC', 'Old Timers' : '#C594C5'}
def demand_calc(hour, period, demand_sp):
    """Demand for (period, hour), modified by the slider value demand_sp.

    A value with magnitude <= 1 scales the forecast load fractionally;
    any larger magnitude replaces the load outright.
    """
    base_load = demand_table.where("round", period).where("hour", hour)["load"].item()
    if np.abs(demand_sp) <= 1:
        return base_load * (1 + demand_sp)
    return demand_sp
def price_calc(input_table, demand, hour, period):
    """Market-clearing price for (period, hour) at the given demand.

    Walks the bids from cheapest to most expensive, accumulating capacity
    until demand is met; returns the last accepted bid's price (0 if the
    table is empty).
    """
    bids = input_table.copy().where("PERIOD", period).sort("PRICE" + str(hour), descending = False)
    bid_prices = bids['PRICE' + str(hour)]
    capacities = bids['Capacity_MW']
    price = 0
    cumulative_capacity = 0
    for bid_price, capacity in zip(bid_prices, capacities):
        price = bid_price
        if cumulative_capacity + capacity >= demand:
            break
        cumulative_capacity += capacity
    return price
def find_x_pos(widths):
    """Given bar widths laid end to end, return the x coordinate of each
    bar's centre (used to position matplotlib bars)."""
    centers = []
    left_edge = 0
    for w in widths:
        centers.append(left_edge + w / 2)
        left_edge += w
    return centers
def price_line_plot(price):
    # Horizontal red line marking the clearing price on the current axes.
    plt.axhline(y=price, color='r', linewidth = 2)
def demand_plot(demand):
    # Vertical red line marking the demand level on the current axes.
    plt.axvline(x=demand, color='r', linewidth = 2)
def adjust_by_cp(input_table, hour, period, carbon_price):
    """Return a copy of the bid table with carbon costs folded in.

    Variable cost always absorbs the carbon price; the bid column for
    this hour is only adjusted for periods at or after the current one
    (or whenever current_period is 8).
    """
    adjusted = input_table.copy()
    carbon_cost = carbon_price * adjusted["Carbon_tonsperMWH"]
    adjusted["Var_Cost_USDperMWH"] += carbon_cost
    if (period >= current_period) | (current_period == 8):
        adjusted["PRICE" + str(hour)] += carbon_cost
    return adjusted
def user_defined_bids(input_table, hour, period, my_portfolio, def_my_bids, def_others_bids):
    """Overwrite bids for (period, hour) with user-entered values.

    Reads per-portfolio bid dicts from module globals named
    ``bids_<group>`` (group name lowercased, spaces removed); the flags
    ``def_my_bids`` / ``def_others_bids`` select whose rows are replaced.

    NOTE(review): relies on those globals existing and on each dict's
    value order matching the portfolio's row order in the table --
    confirm against the widget code that creates them.
    """
    joined_table = input_table.copy()
    joined_df = joined_table.to_df()
    if def_my_bids:
        joined_df.loc[(joined_df["Group"] == my_portfolio) &
                      (joined_df["PERIOD"] == period),
                      "PRICE" + str(hour)] = list(globals()["bids_" + my_portfolio.replace(" ", "").lower()].values())
    if def_others_bids:
        for group in set(joined_table['Group']):
            if group != my_portfolio:
                joined_df.loc[(joined_df["Group"] == group) &
                              (joined_df["PERIOD"] == period),
                              "PRICE" + str(hour)] = list(globals()["bids_" + group.replace(" ", "").lower()].values())
    joined_table = Table.from_df(joined_df)
    return(joined_table)
def profit_calc(input_table, hour, period, demand, price, my_portfolio):
    """Profit of my_portfolio at the clearing price for (period, hour).

    Pay-as-bid periods are delegated to profit_pab. Otherwise every
    dispatched MW is paid the uniform clearing price; plants bidding
    exactly the clearing price sell only a pro-rata share of capacity.
    """
    if period in pab_periods:
        return profit_pab(input_table, hour, period, demand, price, my_portfolio)
    table = input_table.copy()
    price_col = 'PRICE' + str(hour)
    # Market-wide capacity strictly below / exactly at the clearing price.
    below_all = table.where(price_col, are.below(price))
    at_all = table.where(price_col, are.equal_to(price))
    # Fraction of each marginal plant's capacity that is dispatched.
    marg_proportion = (demand - sum(below_all["Capacity_MW"])) / sum(at_all["Capacity_MW"])
    mine = table.where("Group", my_portfolio)
    below = mine.where(price_col, are.below(price))
    at = mine.where(price_col, are.equal_to(price))
    dispatched = sum(below["Capacity_MW"]) + sum(at["Capacity_MW"] * marg_proportion)
    revenue = dispatched * price
    cost = sum(c * cap for c, cap in zip(below["Var_Cost_USDperMWH"], below["Capacity_MW"]))
    cost += sum(c * cap * marg_proportion for c, cap in zip(at["Var_Cost_USDperMWH"], at["Capacity_MW"]))
    return revenue - cost
def profit_pab(input_table, hour, period, demand, price, my_portfolio):
    """Pay-as-bid profit for my_portfolio: accepted plants are paid their
    own bid price rather than the market clearing price; marginal plants
    (bid == clearing price) sell a pro-rata share of their capacity."""
    table = input_table.copy()
    price_col = 'PRICE' + str(hour)
    # Market-wide capacity strictly below / exactly at the clearing price.
    below_all = table.where(price_col, are.below(price))
    at_all = table.where(price_col, are.equal_to(price))
    # Fraction of each marginal plant's capacity that is dispatched.
    marg_proportion = (demand - sum(below_all["Capacity_MW"])) / sum(at_all["Capacity_MW"])
    mine = table.where("Group", my_portfolio)
    below = mine.where(price_col, are.below(price))
    at = mine.where(price_col, are.equal_to(price))
    revenue = sum(b * cap for b, cap in zip(below[price_col], below["Capacity_MW"]))
    revenue += sum(b * cap * marg_proportion for b, cap in zip(at[price_col], at["Capacity_MW"]))
    cost = sum(c * cap for c, cap in zip(below["Var_Cost_USDperMWH"], below["Capacity_MW"]))
    cost += sum(c * cap * marg_proportion for c, cap in zip(at["Var_Cost_USDperMWH"], at["Capacity_MW"]))
    return revenue - cost
def emissions_calc(input_table, hour, period, demand, price, my_portfolio):
    """Carbon emitted (tons) by my_portfolio's dispatched plants, with
    marginal plants (bid == clearing price) counted pro rata."""
    table = input_table.copy()
    price_col = 'PRICE' + str(hour)
    below_all = table.where(price_col, are.below(price))
    at_all = table.where(price_col, are.equal_to(price))
    # Fraction of each marginal plant's capacity that is dispatched.
    marg_proportion = (demand - sum(below_all["Capacity_MW"])) / sum(at_all["Capacity_MW"])
    mine = table.where("Group", my_portfolio)
    below = mine.where(price_col, are.below(price))
    at = mine.where(price_col, are.equal_to(price))
    emissions = sum(e * cap for e, cap in zip(below["Carbon_tonsperMWH"], below["Capacity_MW"]))
    emissions += sum(e * cap * marg_proportion for e, cap in zip(at["Carbon_tonsperMWH"], at["Capacity_MW"]))
    return emissions
def market_plot(input_table, hour, period, demand, price):
    """Plot the market supply stack: every portfolio's bids as bars in
    table order, the marginal-cost curve as a dashed step line, and red
    lines at the clearing price and the demand level."""
    sorted_joined_table = input_table.copy()
    width = sorted_joined_table.column("Capacity_MW")
    height = sorted_joined_table.column('PRICE' + str(hour))
    x_vals = find_x_pos(width)
    # Map each row to its portfolio color and build one legend entry per group.
    colors_mapped = list(pd.Series(sorted_joined_table['Group']).map(energy_colors_dict))
    sorted_joined_table = sorted_joined_table.with_column('Color', colors_mapped)
    group_colors = sorted_joined_table.group("Group", lambda x: x).select("Group", "Color")
    group_colors["Color"] = group_colors.apply(lambda x: x[0], "Color")
    #prepare the Marginal Cost to be a dashed line
    # Build a step function (two x/y points per plant) tracing marginal cost.
    num_plants = len(width)
    height_mc = sorted_joined_table.column("Var_Cost_USDperMWH")
    x_vec = np.zeros(num_plants * 2)
    h_vec = np.zeros(num_plants * 2)
    for i, (w, h) in enumerate(zip(width, height_mc)):
        h_vec[2*i] = h
        h_vec[2*i+1] = h
        if i == 0:
            x_vec[1] = w
        else:
            x_vec[2*i] = x_vec[2*i - 1]
            x_vec[2*i + 1] = x_vec[2*i] + w
    # Make the plot
    plt.figure(figsize=(9,6))
    # Solid bars: bids; faded bars: marginal cost of the same plants.
    plt.bar(x_vals, height, width=width, color=sorted_joined_table['Color'], edgecolor = "black")
    plt.bar(x_vals, height_mc, width=width, color=sorted_joined_table['Color'], edgecolor = "black", alpha=.2)
    line_mc = plt.plot(x_vec, h_vec, '--k', label='Marginal Cost', linewidth=2)
    patches = []
    for row in group_colors.rows:
        patches += [mpatches.Patch(color=row.item("Color"), label=row.item("Group"))]
    patches += line_mc
    plt.legend(handles=patches, bbox_to_anchor=(1.1,1))
    plt.title('Energy Market')
    plt.xlabel('Capacity_MW')
    plt.ylabel('Price')
    price_line_plot(price)
    demand_plot(demand)
    plt.show()
def portfolio_plot(input_table, hour, period, demand, price, my_portfolio):
    """Plot one portfolio's bid curve with plant labels, its marginal
    cost, the clearing price, and markers for the capacity it sells.

    NOTE(review): the pro-rata and cumulative-capacity logic below assumes
    the rows are ordered by bid price for this hour -- confirm at the
    call site.
    """
    sorted_joined_table = input_table.copy()
    your_source = sorted_joined_table.where("Group", my_portfolio)
    width_yours = your_source.column("Capacity_MW")
    height_yours = your_source.column('PRICE' + str(hour))
    new_x_yours = find_x_pos(width_yours)
    label_yours = your_source.column("PLANT")
    colors_mapped = list(pd.Series(sorted_joined_table['Group']).map(energy_colors_dict))
    sorted_joined_table = sorted_joined_table.with_column('Color', colors_mapped)
    group_colors = sorted_joined_table.group("Group", lambda x: x).select("Group", "Color")
    group_colors["Color"] = group_colors.apply(lambda x: x[0], "Color")
    #prepare the Marginal Cost to be a dashed line
    # Step function (two points per plant) tracing this portfolio's marginal cost.
    num_plants = len(width_yours)
    height_mc = your_source.column("Var_Cost_USDperMWH")
    x_vec = np.zeros(num_plants * 2)
    h_vec = np.zeros(num_plants * 2)
    for i, (w, h) in enumerate(zip(width_yours, height_mc)):
        h_vec[2*i] = h
        h_vec[2*i+1] = h
        if i == 0:
            x_vec[1] = w
        else:
            x_vec[2*i] = x_vec[2*i - 1]
            x_vec[2*i + 1] = x_vec[2*i] + w
    # Make the plot
    plt.figure(figsize=(11,6))
    # Solid bars: bids; faded bars: marginal cost of the same plants.
    plt.bar(new_x_yours, height_yours, width=width_yours,
            color = energy_colors_dict[my_portfolio], edgecolor = "black")
    plt.bar(new_x_yours, height_mc, width=width_yours,
            color = energy_colors_dict[my_portfolio], edgecolor = "black", alpha=.2)
    line_mc = plt.plot(x_vec, h_vec, '--k', label='Marginal Cost', linewidth=2)
    plt.title("Bids: " + my_portfolio)
    plt.xlabel('Capacity_MW')
    plt.ylabel('Price')
    # Label each bar with its plant name.
    for new_x_i, height_i, label_i in zip(new_x_yours, height_yours, label_yours):
        plt.text(new_x_i, height_i, label_i, ha='center', va='bottom', fontsize=8)
    price_line_plot(price)
    #the marginal plants should indicate how much capacity they produce
    # Plants bidding exactly the clearing price sell only a pro-rata share.
    nonmarg_capacity = sum(sorted_joined_table.where('PRICE' + str(hour), are.below(price))["Capacity_MW"])
    marg_capacity = sum(sorted_joined_table.where('PRICE' + str(hour), are.equal_to(price))["Capacity_MW"])
    marg_demand = demand - nonmarg_capacity
    marg_proportion = marg_demand / marg_capacity
    curr_capacity = 0
    for i, (w, h) in enumerate(zip(width_yours, height_yours)):
        if h == price:
            # Dashed line splitting a marginal plant at its dispatched share.
            x_val = curr_capacity + (w * marg_proportion)
            x_vec = [x_val, x_val]
            h_vec = [0, h]
            plt.plot(x_vec, h_vec, '--k', linewidth=1)
        elif h > price:
            break
        curr_capacity += w
    # Solid line after the last plant bidding at or below the clearing price.
    plt.axvline(x=curr_capacity, color='k', linewidth = 2)
    plt.show()
def total_profits(section, my_portfolio, carbon_price_vec):
    """Compute cumulative profit for one portfolio over all completed rounds.

    For each completed round: subtract fixed O&M, add hourly dispatch profits
    at the market clearing price, and accrue 5% interest per elapsed round.
    The period-1 capacity-auction "adjustment" is charged up front.

    Parameters
    ----------
    section : str
        Section code (e.g. 'W8') used to fetch that section's bids.
    my_portfolio : str
        Team portfolio name.
    carbon_price_vec : list
        Carbon price per round, indexed by round number - 1.

    Returns
    -------
    pandas.DataFrame
        One 'Round <i>' row per completed round plus a 'Total' row, rounded
        to whole dollars.
    """
    # Merge auction results with input table
    portfolio_profit_dict = {}
    if current_period > 1:
        joined_table = get_bids(section)
        # NOTE(review): the two lines below were commented out, leaving
        # `full_table` undefined (NameError on the next line whenever
        # current_period > 1). Restored so the period-1 "adjustment" column
        # is available — confirm `auction_results` is defined globally.
        auction_results_section = auction_results.where("world_id", section)
        full_table = joined_table.join("TEAM", auction_results_section, "team")
        portfolio_table = full_table.where("Group", my_portfolio)
        # The auction adjustment is charged once and accrues 5%/round interest.
        portfolio_profit = -portfolio_table.where("PERIOD", 1).to_df().loc[0, "adjustment"] * 1.05**(current_period-2)
        for period_i in range(1, current_period):
            portfolio_table_period = joined_table.where("PERIOD", period_i).where("Group", my_portfolio)
            # Fixed O&M costs are paid regardless of dispatch.
            portfolio_profit_period = -sum(portfolio_table_period["FixedCst_OandM_perRound"])
            carbon_price = carbon_price_vec[period_i - 1]
            for hour_i in range(1, 5):
                demand = demand_calc(hour_i, period_i, 0)
                joined_table = adjust_by_cp(joined_table, hour_i, period_i, carbon_price)
                sorted_joined_table = joined_table.where("PERIOD", period_i).sort("PRICE" + str(hour_i), descending = False)
                price = price_calc(sorted_joined_table, demand, hour_i, period_i)
                portfolio_profit_period += profit_calc(sorted_joined_table, hour_i, period_i, demand, price, my_portfolio)
            portfolio_profit_dict['Round ' + str(period_i)] = portfolio_profit_period
            # Earlier rounds' profits accrue 5% interest per elapsed round.
            portfolio_profit += portfolio_profit_period * 1.05**(current_period - period_i - 1)
    else:
        portfolio_profit = 0
    portfolio_profit_dict['Total'] = portfolio_profit
    output_df = pd.DataFrame.from_dict(portfolio_profit_dict,
                                       orient = 'index', columns = [my_portfolio + ' Profit']).round().astype(int)
    return output_df
def total_emissions(section, my_portfolio):
    """Tally per-round and cumulative CO2 emissions for one portfolio.

    Returns a one-column DataFrame (rounded to whole tons) with a
    'Round <i>' row for each completed round plus a 'Total' row.
    """
    emissions_by_round = {}
    if current_period > 1:
        bids = get_bids(section)
        running_total = 0
        for rnd in range(1, current_period):
            round_emissions = 0
            # Four hourly markets clear within each round.
            for hr in range(1, 5):
                hourly_demand = demand_calc(hr, rnd, 0)
                hourly_stack = bids.where("PERIOD", rnd).sort("PRICE" + str(hr), descending = False)
                clearing_price = price_calc(hourly_stack, hourly_demand, hr, rnd)
                round_emissions += emissions_calc(hourly_stack, hr, rnd,
                                                  hourly_demand, clearing_price, my_portfolio)
            emissions_by_round['Round ' + str(rnd)] = round_emissions
            running_total += round_emissions
    else:
        # No completed rounds yet.
        running_total = 0
    emissions_by_round['Total'] = running_total
    return pd.DataFrame.from_dict(
        emissions_by_round, orient = 'index',
        columns = [my_portfolio + ' Emissions']).round().astype(int)
```
Here is the main wrapper function
```
def all_output(section, hour, period, my_portfolio, demand_sp, carbon_p4, carbon_p5, carbon_p6, def_my_bids, def_others_bids):
    """Run the market for one hour of one round and print/plot everything.

    Prints demand, clearing price, and the selected portfolio's profit and
    emissions; draws the market-wide and portfolio bid-stack plots; then
    displays the portfolio's per-plant bids and dispatched output.

    Parameters mirror the interact() widgets: `demand_sp` adjusts demand
    (fractional change if |value| <= 1, absolute MWh if > 1), `carbon_pX`
    sets the carbon price for round X, and the `def_*_bids` flags enable the
    user-defined bid dictionaries.
    """
    print('')
    #print that the current period is a pay-as-bid auction if it is
    if period in pab_periods:
        print('\033[1mNote:\033[0;0m The current period is a pay-as-bid auction.')
        print('')
    #print demand
    demand = demand_calc(hour, period, demand_sp)
    print("Demand: " + str(round(demand, 2)))
    #print price
    joined_table = get_bids(section)
    # Carbon prices apply only in rounds 4-6; earlier rounds are zero.
    carbon_price_vec = [0, 0, 0, carbon_p4, carbon_p5, carbon_p6]
    carbon_price = carbon_price_vec[period - 1]
    joined_table = adjust_by_cp(joined_table, hour, period, carbon_price)
    joined_table = user_defined_bids(joined_table, hour, period, my_portfolio, def_my_bids, def_others_bids)
    sorted_joined_table = joined_table.where("PERIOD", period).sort("PRICE" + str(hour), descending = False)
    price = price_calc(sorted_joined_table, demand, hour, period)
    print("Price: $" + str(price))
    #print profits and emissions
    my_profit = profit_calc(sorted_joined_table, hour, period, demand, price, my_portfolio)
    print(my_portfolio + ' Profit: $' + str(round(my_profit, 2)))
    my_emissions = emissions_calc(sorted_joined_table, hour, period, demand, price, my_portfolio)
    print(my_portfolio + ' Emissions: ' + str(round(my_emissions, 2)) + ' Tons CO2')
    #produce plots
    market_plot(sorted_joined_table, hour, period, demand, price)
    portfolio_plot(sorted_joined_table, hour, period, demand, price, my_portfolio)
    #the marginal plants should indicate how much capacity they produce
    nonmarg_capacity = sum(sorted_joined_table.where('PRICE' + str(hour), are.below(price))["Capacity_MW"])
    marg_capacity = sum(sorted_joined_table.where('PRICE' + str(hour), are.equal_to(price))["Capacity_MW"])
    marg_demand = demand - nonmarg_capacity
    # Fraction of marginal (at-price) capacity actually dispatched.
    marg_proportion = marg_demand / marg_capacity
    #display information about plants
    display_bids = sorted_joined_table.where("Group", my_portfolio).to_df()
    display_bids.rename(columns = {'PLANT':'Plant', 'Var_Cost_USDperMWH':'Adjusted MC', 'PRICE' + str(hour):'Bid',
                                   'Capacity_MW':'Capacity'}, inplace = True)
    # Output: full capacity if bid below price, pro-rata share if at price,
    # zero if above price.
    display_bids['Output'] = np.where(display_bids['Bid'] < price, display_bids['Capacity'],
                                      np.where(display_bids['Bid'] == price, display_bids['Capacity'] * marg_proportion,
                                               0)).round(1)
    display_bids.set_index(keys = 'Plant', inplace = True)
    display_bids.index.name = None
    display(display_bids[['Adjusted MC', 'Bid', 'Capacity','Output']])
```
In the next cell, we can define the bids.
```
# User-editable bid dictionaries, one per portfolio: keys are plant
# identifiers, values are offer prices in USD/MWh. These take effect when
# the def_my_bids / def_others_bids widget flags are set to True.
bids_bigcoal = {'fourcorners' : 36.5, 'alamitos7' : 73.72, 'huntingtonbeach1_2' : 40.5, 'huntingtonbeach5' : 66.5,
                'redondo5_6' : 41.94, 'redondo7_8' : 41.94}
bids_biggas = {'elsegundo1_2' : 44.83, 'elsegundo3_4' : 41.22, 'longbeach' : 52.5, 'northisland' : 65.5,
               'encina' : 41.67, 'kearny' : 90.06, 'southbay' : 43.83}
bids_bayviews = {'morrobay1_2' : 38.78, 'morrobay3_4' : 36.61, 'mosslanding6' : 32.56, 'mosslanding7' : 32.56,
                 'oakland' : 61.17}
bids_beachfront = {'coolwater' : 42.39, 'etiwanda1_4' : 42.67, 'etiwanda5' : 62.89, 'ellwood' : 75.61, 'mandalay1_2' : 39.06,
                   'mandalay3' : 52.06, 'ormondbeach1' : 38.06, 'ormondbeach2' : 38.06}
bids_eastbay = {'pittsburgh1_4' : 40.94, 'pittsburgh5_6' : 36.61, 'pittsburgh7' : 59.72, 'contracosta4_5' : 58.28,
                'contracosta6_7' : 39.5, 'potrerohill' : 69.83}
bids_oldtimers = {'bigcreek' : 0, 'mohave1' : 34.5, 'mohave2' : 34.5, 'highgrove' : 49.61, 'sanbernadino' : 53.94}
bids_fossillight = {'humboldt' : 47.44, 'helms' : 0.5, 'hunterspoint1_2' : 49.17, 'hunterspoint4' : 75.89,
                    'diablocanyon1' : 11.5}
```
The next cell runs everything.
Assign **section** to the section code (including quotes) that corresponds to your own according to the following table.
| Code | Section Time |
|---------|---------------|
| "W8" | Wednesday 8am or Thursday 5pm |
| "W9" | Wednesday 9am |
| "R4" | Thursday 4pm |
**Widget Dictionary**:
**Section**: Section of the ESG game in which you are participating (defined by the table above).
**Hour**: Hour within the current round (ranges from 1 to 4).
**Period**: Round number (ranges from 0 to 6).
**my_portfolio**: Team portfolio of interest.
**demand_sp**: Adjustment to forecasted demand (or realized demand in past rounds). If value is between -1 and 1, gives a percentage change from forecasted demand. If value is greater than 1, gives a new value for demand in MWh.
For example, a value of 0.05 will assign demand to be (forecasted demand times 1.05). A value of 15000 will assign demand to be 15,000 MWh.
**carbon_pX**: Assigns a carbon price in period X.
**def_my_bids**: If TRUE, then allows you to alter the bids for the portfolio selected in **my_portfolio**. Alteration of bids can occur in the code in block 13 (above).
**def_others_bids**: If TRUE, then allows you to alter the bids of the portfolios not selected in **my_portfolio**. Alteration of bids can occur in the code in block 13 (above).
```
# Interactive dashboard: the widgets below drive all_output() for the chosen
# section, hour, round, portfolio, demand adjustment, and carbon prices.
interact(lambda section, hour, period, my_portfolio, demand_sp, carbon_p4, carbon_p5, carbon_p6, def_my_bids, def_others_bids:
             all_output(section = section,
                        hour = hour,
                        period = period,
                        my_portfolio = my_portfolio,
                        #demand_sp = 0 uses realized demand for past rounds, forecasted demand for future rounds
                        #abs(demand_sp) <= 1 will use a percent change in demand
                        # (e.g. demand_sp = -.03 will cause a 3% reduction in demand by multiplying demand by .97)
                        #demand_sp > 1 will give a new value for demand.
                        # (e.g. demand_sp = 10000 will give 10000 demand)
                        demand_sp = demand_sp,
                        #Changing the carbon price will automatically adjust MC.
                        carbon_p4 = carbon_p4,
                        carbon_p5 = carbon_p5,
                        carbon_p6 = carbon_p6,
                        def_my_bids = def_my_bids,
                        def_others_bids = def_others_bids),
         section = Dropdown(options=['W8','W9','R4']),
         hour = Dropdown(options=list(range(1,5))),
         # Default to the latest playable round (capped at 6).
         period = Dropdown(value = min(current_period, 6), options=list(range(0,7))),
         my_portfolio = Dropdown(options=np.unique(ESG["Group"])),
         demand_sp = BoundedFloatText(value=0, min = -1, max = np.sum(ESG['Capacity_MW']), step=0.001),
         carbon_p4 = BoundedFloatText(value=0, min = 0, max = 300, step=0.01),
         carbon_p5 = BoundedFloatText(value=0, min = 0, max = 300, step=0.01),
         carbon_p6 = BoundedFloatText(value=0, min = 0, max = 300, step=0.01),
         def_my_bids = Dropdown(options=[False, True]),
         def_others_bids = Dropdown(options=[False, True]))
print('')
```
Finally, let's predict emissions under competitive bidding with the given carbon price.
```
def predicted_emissions_456(section, D_R4_H1, D_R4_H2, D_R4_H3, D_R4_H4, D_R5_H1, D_R5_H2, D_R5_H3, D_R5_H4,
                            D_R6_H1, D_R6_H2, D_R6_H3, D_R6_H4, carbon_p4, carbon_p5, carbon_p6):
    """Predict CO2 emissions per portfolio for rounds 4-6 of one section.

    Demand adjustments (D_R<round>_H<hour>) and per-round carbon prices feed
    the hourly market clearing; emissions are accumulated per group per round.
    Returns a DataFrame with one column per round plus a 'Total' column.
    """
    demand_adjustments = [D_R4_H1, D_R4_H2, D_R4_H3, D_R4_H4, D_R5_H1, D_R5_H2, D_R5_H3, D_R5_H4,
                          D_R6_H1, D_R6_H2, D_R6_H3, D_R6_H4]
    carbon_prices = [0, 0, 0, carbon_p4, carbon_p5, carbon_p6]
    emissions_dict = {'Round 4':{}, 'Round 5':{}, 'Round 6':{}, 'Total':{}}
    bids = get_bids(section)
    grand_total = 0
    for rnd in range(4, 7):
        round_key = 'Round ' + str(rnd)
        round_total = 0
        cp = carbon_prices[rnd - 1]
        for hr in range(1, 5):
            # Completed rounds of a live game keep the bids as submitted;
            # future rounds get marginal costs adjusted for the carbon price.
            if rnd < current_period and current_period < 8:
                adjusted_bids = user_defined_bids(bids, hr, rnd,
                                                  my_portfolio = 'Big Coal',
                                                  def_my_bids = False, def_others_bids = False)
            else:
                adjusted_bids = adjust_by_cp(bids, hr, rnd, cp)
            sp = demand_adjustments[4*(rnd - 4) + (hr - 1)]
            hourly_demand = demand_calc(hr, rnd, sp)
            stack = adjusted_bids.where("PERIOD", rnd).sort("PRICE" + str(hr), descending = False)
            clearing_price = price_calc(stack, hourly_demand, hr, rnd)
            # Accumulate this hour's emissions for every portfolio.
            for group in np.unique(ESG["Group"]):
                hourly_emissions = emissions_calc(stack, hr, rnd, hourly_demand, clearing_price, group)
                round_total += hourly_emissions
                emissions_dict[round_key][group] = \
                    emissions_dict[round_key].get(group, 0) + hourly_emissions
                emissions_dict['Total'][group] = \
                    emissions_dict['Total'].get(group, 0) + hourly_emissions
        emissions_dict[round_key]['Total Emissions'] = round_total
        grand_total += round_total
    emissions_dict['Total']['Total Emissions'] = grand_total
    return pd.DataFrame(emissions_dict).round(2)
#function to get emissions across sections
def predicted_emissions_all_456(D_R4_H1, D_R4_H2, D_R4_H3, D_R4_H4, D_R5_H1, D_R5_H2, D_R5_H3, D_R5_H4,
                                D_R6_H1, D_R6_H2, D_R6_H3, D_R6_H4, carbon_p4, carbon_p5, carbon_p6):
    """Aggregate predicted round 4-6 emissions across every section.

    Displays one summary DataFrame (total emissions per section per round),
    then each section's per-portfolio breakdown.
    """
    #need two dictionaries: one for the dataframe for all sections, one to store individual section dataframes
    sections = ['W8', 'W9', 'R4']
    summary = {'Round 4':{}, 'Round 5':{}, 'Round 6':{}, 'Total':{}}
    section_frames = {}
    for sec in sections:
        # Arguments are forwarded positionally in signature order.
        sec_df = predicted_emissions_456(sec,
                                         D_R4_H1, D_R4_H2, D_R4_H3, D_R4_H4,
                                         D_R5_H1, D_R5_H2, D_R5_H3, D_R5_H4,
                                         D_R6_H1, D_R6_H2, D_R6_H3, D_R6_H4,
                                         carbon_p4, carbon_p5, carbon_p6)
        section_frames[sec] = sec_df
        # Pull each round's section-wide total into the summary table.
        for rnd in range(4, 7):
            summary['Round ' + str(rnd)][sec] = sec_df.loc['Total Emissions', 'Round ' + str(rnd)]
        summary['Total'][sec] = sec_df.loc['Total Emissions', 'Total']
    all_sections_df = pd.DataFrame(summary).round(2)
    all_sections_df.loc['Total Emissions', :] = all_sections_df.sum(axis = 0)
    all_sections_df.columns.name = 'All'
    display(all_sections_df)
    # Show each section's detailed table beneath the summary.
    for sec in sections:
        print('')
        sec_df = section_frames[sec]
        sec_df.columns.name = sec
        display(sec_df)
# Interactive emissions forecast: per-hour demand adjustments and per-round
# carbon prices drive predicted_emissions_all_456() across all sections.
interact(lambda D_R4_H1, D_R4_H2, D_R4_H3, D_R4_H4, D_R5_H1, D_R5_H2, D_R5_H3, D_R5_H4,
                D_R6_H1, D_R6_H2, D_R6_H3, D_R6_H4, carbon_p4, carbon_p5, carbon_p6:
             predicted_emissions_all_456(D_R4_H1 = D_R4_H1,
                                         D_R4_H2 = D_R4_H2,
                                         D_R4_H3 = D_R4_H3,
                                         D_R4_H4 = D_R4_H4,
                                         D_R5_H1 = D_R5_H1,
                                         D_R5_H2 = D_R5_H2,
                                         D_R5_H3 = D_R5_H3,
                                         D_R5_H4 = D_R5_H4,
                                         D_R6_H1 = D_R6_H1,
                                         D_R6_H2 = D_R6_H2,
                                         D_R6_H3 = D_R6_H3,
                                         D_R6_H4 = D_R6_H4,
                                         carbon_p4 = carbon_p4,
                                         carbon_p5 = carbon_p5,
                                         carbon_p6 = carbon_p6),
         # D_R<round>_H<hour>: demand adjustment for that round/hour
         # (fractional change if |value| <= 1, absolute MWh if > 1).
         D_R4_H1 = BoundedFloatText(value=0, min = -1, max = np.sum(ESG['Capacity_MW']), step=0.001),
         D_R4_H2 = BoundedFloatText(value=0, min = -1, max = np.sum(ESG['Capacity_MW']), step=0.001),
         D_R4_H3 = BoundedFloatText(value=0, min = -1, max = np.sum(ESG['Capacity_MW']), step=0.001),
         D_R4_H4 = BoundedFloatText(value=0, min = -1, max = np.sum(ESG['Capacity_MW']), step=0.001),
         D_R5_H1 = BoundedFloatText(value=0, min = -1, max = np.sum(ESG['Capacity_MW']), step=0.001),
         D_R5_H2 = BoundedFloatText(value=0, min = -1, max = np.sum(ESG['Capacity_MW']), step=0.001),
         D_R5_H3 = BoundedFloatText(value=0, min = -1, max = np.sum(ESG['Capacity_MW']), step=0.001),
         D_R5_H4 = BoundedFloatText(value=0, min = -1, max = np.sum(ESG['Capacity_MW']), step=0.001),
         D_R6_H1 = BoundedFloatText(value=0, min = -1, max = np.sum(ESG['Capacity_MW']), step=0.001),
         D_R6_H2 = BoundedFloatText(value=0, min = -1, max = np.sum(ESG['Capacity_MW']), step=0.001),
         D_R6_H3 = BoundedFloatText(value=0, min = -1, max = np.sum(ESG['Capacity_MW']), step=0.001),
         D_R6_H4 = BoundedFloatText(value=0, min = -1, max = np.sum(ESG['Capacity_MW']), step=0.001),
         carbon_p4 = BoundedFloatText(value=0, min = 0, max = 300, step=0.01),
         carbon_p5 = BoundedFloatText(value=0, min = 0, max = 300, step=0.01),
         carbon_p6 = BoundedFloatText(value=0, min = 0, max = 300, step=0.01))
print('')
```
Thanks for help from: Alec Kan, Alma Pineda, Aarish Irfan, Elaine Chien, Octavian Sima, and Eric Van Dusen.
| github_jupyter |
# Getting Data Ready
Forecasting is used in a variety of applications and business use cases. For example, retailers need to forecast the sales of their products to decide how much stock they need by location; manufacturers need to estimate the number of parts required at their factories to optimize their supply chain; businesses need to estimate their flexible workforce needs; utilities need to forecast electricity consumption in order to attain an efficient energy network; and enterprises need to estimate their cloud infrastructure needs.
<img src="https://amazon-forecast-samples.s3-us-west-2.amazonaws.com/common/images/forecast_overview_steps.png" width="98%">
In this notebook we will be walking through the first steps outlined in left-box above.
## Table Of Contents
* Step 1: [Setup Amazon Forecast](#setup)
* Step 2: [Prepare the Datasets](#DataPrep)
* Step 3: [Create the Dataset Group and Dataset](#DataSet)
* Step 4: [Create the Target Time Series Data Import Job](#DataImport)
* [Next Steps](#nextSteps)
For more information about the APIs, please check the [documentation](https://docs.aws.amazon.com/forecast/latest/dg/what-is-forecast.html)
## Step 1: Setup Amazon Forecast<a class="anchor" id="setup"></a>
This section sets up the permissions and relevant endpoints.
```
!pip install boto3 --upgrade!pip install pandas s3fs matplotlib ipywidgets
import sys
import os
import pandas as pd
# importing forecast notebook utility from notebooks/common directory
sys.path.insert( 0, os.path.abspath("../../common") )
import util
%reload_ext autoreload
import boto3
import s3fs
```
Configure the S3 bucket name and region name for this lesson.
- If you don't have an S3 bucket, create it first on S3.
- Although we have set the region to us-west-2 as a default value below, you can choose any of the regions that the service is available in.
```
# Region must be one where Amazon Forecast is available.
region = 'us-west-2'
# S3 bucket that will hold the training data (must already exist).
bucket_name = 'forecast-demo-uci-electricity'
# Connect API session
session = boto3.Session(region_name=region)
forecast = session.client(service_name='forecast')            # control plane: datasets, predictors
forecastquery = session.client(service_name='forecastquery')  # data plane: query generated forecasts
```
<b>Create IAM Role for Forecast</b> <br>
Like many AWS services, Forecast will need to assume an IAM role in order to interact with your S3 resources securely. In the sample notebooks, we use the get_or_create_iam_role() utility function to create an IAM role. Please refer to "notebooks/common/util/fcst_utils.py" for implementation.
```
# Create the role to provide to Amazon Forecast.
# Forecast will assume this IAM role to read training data from S3.
role_name = "ForecastNotebookRole-Basic"
print(f"Creating Role {role_name} ...")
role_arn = util.get_or_create_iam_role( role_name = role_name )
# echo user inputs without account
# (splitting on '/' drops the account-qualified prefix of the ARN)
print(f"Success! Created role arn = {role_arn.split('/')[1]}")
```
The last part of the setup process is to validate that your account can communicate with Amazon Forecast, the cell below does just that.
```
# check you can communicate with Forecast API session
# (any successful response confirms credentials and endpoint are valid)
forecast.list_predictors()
```
## Step 2: Prepare the Datasets<a class="anchor" id="DataPrep"></a>
For this exercise, we use the individual household electric power consumption dataset. (Dua, D. and Karra Taniskidou, E. (2017). UCI Machine Learning Repository [http://archive.ics.uci.edu/ml]. Irvine, CA: University of California, School of Information and Computer Science.) We aggregate the usage data hourly.
To begin, use Pandas to read the CSV and to show a sample of the data.
```
# The raw file is headerless, so column names are supplied explicitly;
# dtype=object keeps every field as a string (no numeric coercion).
df = pd.read_csv("../../common/data/item-demand-time.csv", dtype = object, names=['timestamp','value','item'])
df.head(3)
```
Notice in the output above there are 3 columns of data:
1. The Timestamp
1. A Value
1. An Item ID
These are the 3 key required pieces of information to generate a forecast with Amazon Forecast. More can be added but these 3 must always remain present.
The dataset happens to span January 01, 2014 to December 31, 2014. We are only going to use January to October to train Amazon Forecast.
You may notice a variable named `df` this is a popular convention when using Pandas if you are using the library's dataframe object, it is similar to a table in a database. You can learn more here: https://pandas.pydata.org/pandas-docs/stable/getting_started/10min.html
```
# Select January to October for one dataframe.
# String comparison works here because timestamps are ISO-formatted.
jan_to_oct = df[(df['timestamp'] >= '2014-01-01') & (df['timestamp'] < '2014-11-01')]
# Sanity-check the selected time span.
print(f"min timestamp = {jan_to_oct.timestamp.min()}")
print(f"max timestamp = {jan_to_oct.timestamp.max()}")
# save an item_id for querying later
item_id = "client_12"
```
Now export them to CSV files and place them into your `data` folder.
```
# Write the training slice headerless (Forecast imports expect no header row).
jan_to_oct.to_csv("data/item-demand-time-train.csv", header=False, index=False)
```
We will now export a second dataset to CSV this time including November 1st. This extra day will be used to validate our forecast.
```
# Validation set includes one extra day (Nov 1) beyond the training window,
# used later to check the forecast against held-out data.
validation = df[(df['timestamp'] >= '2014-01-01') & (df['timestamp'] < '2014-11-02')]
validation.to_csv("data/item-demand-time-validation.csv", header=False, index=False)
```
At this time the data is ready to be sent to S3 where Forecast will use it later. The following cells will upload the data to S3.
```
# Upload the training CSV to S3; `key` is reused later to build the S3 URI.
key="elec_data/item-demand-time-train.csv"
boto3.Session().resource('s3').Bucket(bucket_name).Object(key).upload_file("data/item-demand-time-train.csv")
```
## Step 3: Create the Dataset Group and Dataset <a class="anchor" id="DataSet"></a>
In Amazon Forecast, a dataset is a collection of file(s) that contain data relevant to a forecasting task. A dataset must conform to a schema provided by Amazon Forecast. Since data files are imported headerless, it is important to define a schema for your data.
More details about `Domain` and dataset type can be found on the [documentation](https://docs.aws.amazon.com/forecast/latest/dg/howitworks-domains-ds-types.html) . For this example, we are using [CUSTOM](https://docs.aws.amazon.com/forecast/latest/dg/custom-domain.html) domain with 3 required attributes `timestamp`, `target_value` and `item_id`.
Next, you need to make some choices.
<ol>
<li><b>How many time units do you want to forecast?</b>. For example, if your time unit is Hour, then if you want to forecast out 1 week, that would be 24*7 = 168 hours, so answer = 168. </li>
<li><b>What is the time granularity for your data?</b>. For example, if your time unit is Hour, answer = "H". </li>
<li><b>Think of a name you want to give this project (Dataset Group name)</b>, so all files will have the same names. You should also use this same name for your Forecast DatasetGroup name, to set yourself up for reproducibility. </li>
</ol>
```
# what is your forecast horizon in number time units you've selected?
# e.g. if you're forecasting in months, how many months out do you want a forecast?
FORECAST_LENGTH = 24   # 24 hours = one day ahead at hourly granularity
# What is your forecast time unit granularity?
# Choices are: ^Y|M|W|D|H|30min|15min|10min|5min|1min$
DATASET_FREQUENCY = "H"
# NOTE(review): the AWS docs list the supported format as
# "yyyy-MM-dd HH:mm:ss" (capital HH = 24-hour clock); confirm that
# lowercase "hh" is accepted before relying on this value.
TIMESTAMP_FORMAT = "yyyy-MM-dd hh:mm:ss"
# What name do you want to give this project?
# We will use this same name for your Forecast Dataset Group name.
PROJECT = 'util_power_demo'
DATA_VERSION = 1
```
### Create the Dataset Group
In this task, we define a container name or Dataset Group name, which will be used to keep track of Dataset import files, schema, and all Forecast results which go together.
```
# A dataset group is the container tying together datasets, imports, and
# predictors for one project. It is created empty here; the dataset is
# attached later via update_dataset_group.
dataset_group = f"{PROJECT}_{DATA_VERSION}"
print(f"Dataset Group Name = {dataset_group}")
dataset_arns = []
create_dataset_group_response = \
    forecast.create_dataset_group(Domain="CUSTOM",
                                  DatasetGroupName=dataset_group,
                                  DatasetArns=dataset_arns)
dataset_group_arn = create_dataset_group_response['DatasetGroupArn']
# Echo the new group's metadata to confirm creation.
forecast.describe_dataset_group(DatasetGroupArn=dataset_group_arn)
```
### Create the Schema
```
# Specify the schema of your dataset here. Make sure the order of columns matches the raw data files.
ts_schema ={
"Attributes":[
{
"AttributeName":"timestamp",
"AttributeType":"timestamp"
},
{
"AttributeName":"target_value",
"AttributeType":"float"
},
{
"AttributeName":"item_id",
"AttributeType":"string"
}
]
}
```
### Create the Dataset
```
# Create the target time series dataset using the schema defined above.
ts_dataset_name = f"{PROJECT}_{DATA_VERSION}"
print(ts_dataset_name)
response = \
    forecast.create_dataset(Domain="CUSTOM",
                            DatasetType='TARGET_TIME_SERIES',
                            DatasetName=ts_dataset_name,
                            DataFrequency=DATASET_FREQUENCY,
                            Schema=ts_schema
                            )
ts_dataset_arn = response['DatasetArn']
# Echo the dataset metadata to confirm creation.
forecast.describe_dataset(DatasetArn=ts_dataset_arn)
```
### Update the dataset group with the datasets we created
You can have multiple datasets under the same dataset group. Update it with the datasets we created before.
```
# Attach the target time series dataset to the dataset group.
dataset_arns = []
dataset_arns.append(ts_dataset_arn)
forecast.update_dataset_group(DatasetGroupArn=dataset_group_arn, DatasetArns=dataset_arns)
```
### Step 4: Create a Target Time Series Dataset Import Job <a class="anchor" id="DataImport"></a>
Now that Forecast knows how to understand the CSV we are providing, the next step is to import the data from S3 into Amazon Forecast.
```
# Recall path to your data
ts_s3_data_path = "s3://"+bucket_name+"/"+key
print(f"S3 URI for your data file = {ts_s3_data_path}")
# Start the import job: Forecast assumes role_arn to read the S3 object and
# parses timestamps using TIMESTAMP_FORMAT.
ts_dataset_import_job_response = \
    forecast.create_dataset_import_job(DatasetImportJobName=dataset_group,
                                       DatasetArn=ts_dataset_arn,
                                       DataSource= {
                                           "S3Config" : {
                                               "Path": ts_s3_data_path,
                                               "RoleArn": role_arn
                                           }
                                       },
                                       TimestampFormat=TIMESTAMP_FORMAT)
ts_dataset_import_job_arn=ts_dataset_import_job_response['DatasetImportJobArn']
# Display the job ARN (last expression echoes in the notebook).
ts_dataset_import_job_arn
```
### Stop the data import
Possibly during fine-tuning development, you'll accidentally upload data before you're ready. If you don't want to wait for the data upload and processing, there is a handy "Stop API" call.
```
# StopResource
# Stop the in-flight import job; then delete the stopped job so the same
# job name can be reused for a fresh import.
stop_ts_dataset_import_job_arn = forecast.stop_resource(ResourceArn=ts_dataset_import_job_arn)
# Delete the target time series dataset import job
# (util.wait_till_delete retries/polls until deletion completes).
util.wait_till_delete(lambda: forecast.delete_dataset_import_job(DatasetImportJobArn=ts_dataset_import_job_arn))
```
### Do the data import again
Maybe you fixed something you forgot before, and now you're ready to really upload the data for Forecast ingestion and processing.
```
# Re-run the import with the same name, dataset, S3 source, and role;
# this repeats the earlier call after the stop/delete demonstration.
ts_dataset_import_job_response = \
    forecast.create_dataset_import_job(DatasetImportJobName=dataset_group,
                                       DatasetArn=ts_dataset_arn,
                                       DataSource= {
                                           "S3Config" : {
                                               "Path": ts_s3_data_path,
                                               "RoleArn": role_arn
                                           }
                                       },
                                       TimestampFormat=TIMESTAMP_FORMAT)
ts_dataset_import_job_arn=ts_dataset_import_job_response['DatasetImportJobArn']
# Display the job ARN (last expression echoes in the notebook).
ts_dataset_import_job_arn
```
Check the status of the dataset import job. When the status changes from **CREATE_IN_PROGRESS** to **ACTIVE**, we can continue to the next steps. Depending on the data size, this process typically takes 5 to 10 minutes.
```
# Poll until the import job leaves CREATE_IN_PROGRESS.
# Assumes util.wait returns a truthy status on success — see
# notebooks/common/util for the implementation (TODO confirm).
status = util.wait(lambda: forecast.describe_dataset_import_job(DatasetImportJobArn=ts_dataset_import_job_arn))
assert status
# Show the final job description (statistics, status, timings).
forecast.describe_dataset_import_job(DatasetImportJobArn=ts_dataset_import_job_arn)
```
## Next Steps<a class="anchor" id="nextSteps"></a>
At this point you have successfully imported your data into Amazon Forecast and now it is time to get started in the next notebook to build your first model. To Continue, execute the cell below to store important variables where they can be used in the next notebook, then open `2.Building_Your_Predictor.ipynb`.
```
# Now save your choices for the next notebook
# %store persists each variable via IPython's storemagic so the follow-on
# notebook (2.Building_Your_Predictor.ipynb) can recover them with %store -r.
%store item_id
%store PROJECT
%store DATA_VERSION
%store FORECAST_LENGTH
%store DATASET_FREQUENCY
%store TIMESTAMP_FORMAT
%store ts_dataset_import_job_arn
%store ts_dataset_arn
%store dataset_group_arn
%store role_arn
%store bucket_name
%store region
%store key
```
| github_jupyter |
## NOAO data reduction
### WESmith
MIT License
Copyright (c) 2018
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
```
import os
import fnmatch
import numpy as np
import pandas as pd
# Limit notebook display size for wide/long dataframes.
# NOTE(review): newer pandas versions require the full option names
# ('display.max_rows', 'display.max_columns') — confirm the pandas version.
pd.set_option('max_rows', 32, 'max_columns', 40)
# some important fields:
# (archive header keywords kept when culling the metadata dataframes)
important = ['DATE-OBS', 'DTCALDAT', 'DTTELESC', 'DTINSTRU',
             'OBSTYPE','PROCTYPE','PRODTYPE','DTSITE', 'OBSERVAT',
             'REFERENCE','FILESIZE','MD5SUM','DTACQNAM','DTPROPID',
             'PI','RELEASE_DATE','RA','DEC','FOOTPRINT','FILTER',
             'EXPOSURE','OBSMODE','SEEING','DEPTH','SURVEYID',
             'COLLECTIONID','OBJECT','RADIUS / BOX', 'RADIUS/BOX'] # note RADIUS/BOX with and without spaces
# location of test NOAO json data
BASE = '/Users/smithw/python/noao_data/json-scrape/mtn'
# the following will depend upon the organization of NOAO data
# (here: one subdirectory per observing date, e.g. '20170701')
DATE = ['{}'.format(x) for x in range(20170701, 20170726)]
# HDF5 storage of dataframe metadata from
# https://stackoverflow.com/questions/29129095/save-additional-attributes-in-pandas-dataframe/29130146#29130146
# note: needed to 'pip install --upgrade tables' for HDFStore
def h5store(filename, df, **kwargs):
    """Save *df* to *filename* as HDF5, attaching **kwargs as metadata.

    The metadata dict is stored on the storer's attrs under key 'mydata'
    so it can be recovered alongside the DataFrame by h5load().
    """
    # Context manager guarantees the store is closed even if put() or the
    # attrs assignment raises — the original open/close pair leaked the
    # file handle on error.
    with pd.HDFStore(filename) as store:
        store.put('mydata', df)
        store.get_storer('mydata').attrs.metadata = kwargs
def h5load(store):
    """Return (DataFrame, metadata dict) previously written by h5store().

    *store* is an already-open pandas HDFStore containing key 'mydata'.
    """
    frame = store['mydata']
    attrs = store.get_storer('mydata').attrs
    return frame, attrs.metadata
#%%writefile ProcessJSON.txt
# to write this cell out for printing, uncomment the line above:
# otherwise leave it commented, or this cell will not compile
class ProcessJSON(object):
    def __init__(self, datadir, savdir='../pydata-book/processed', file_hdr='local_file'):
        """Set up paths, formats, and state for processing NOAO JSON metadata.

        Parameters
        ----------
        datadir : str
            Root directory containing per-date subdirectories of JSON files.
        savdir : str
            Directory where processed HDF5 dataframes are written
            (created if it does not exist).
        file_hdr : str
            Name of the dataframe column that will hold the source file name.
        """
        self._datadir = datadir
        self._savdir = savdir
        self._file_hdr = file_hdr
        self._txtfmt = 'DATE:{}, {} FILES' # date, number of files that date
        self._savfmt = '{}/{}-processed.hdf5' # savdir, date
        self._errmsg = 'ERROR: Need to run .run() method first!'
        self._error_group_col = \
            'ERROR: grouping column {} in file {} does not have a unique value'
        self._metadata = {}            # metadata saved alongside each HDF5 file
        self._date = None              # date directory currently being processed
        self._important = None # no self-importance here!
        self._processed = None         # dataframe built for the current date
        self._group_col = None         # column used to group HDUs (set in run())
        self._num = None               # number of files processed for current date
        self._num_to_read = None       # user-imposed cap on files per date
        self._full_dataframe = None
        self._multi_group_cols = None
        self._force_overwrite = False  # if True, reprocess even if HDF5 exists
        os.makedirs(self._savdir, exist_ok=True)
def _get_file_list(self):
file_list = []
for dirpath, dirs, files in os.walk(os.path.join(self._datadir, self._date)):
for filename in fnmatch.filter(files, '*.json'):
file_list.append(os.path.join(dirpath, filename))
self._num = min(len(file_list), self._num_to_read)
self._file_list = file_list
def _process(self):
'''process group of json files , save dataframe to disk'''
self._get_file_list()
print('processing ', self._txtfmt.format(self._date, self._num))
# if important keys are provided, make a dummy starting dataframe with those keys
dd = [] if self._important == None else [pd.DataFrame(columns=self._important)]
for k in range(self._num):
jj = pd.read_json(self._file_list[k])
# verify the grouping-column value is unique and not missing
# in this file across the HDUs, otherwise assert an error;
# TODO: make this a try/except: save bad filenames and keep moving
assert jj[self._group_col].nunique() == 1, \
self._error_group_col.format(self._group_col, self._file_list[k])
# if existing and unique, broadcast the grouping-column value
# to the entire grouping column: this is required for proper grouping later;
# usually grouping column is 'DTINSTRU', the instrument name
jj[self._group_col] = jj[self._group_col].dropna().iloc[0]
# add the file-name column to the dataframe:
# this is required for grouping HDUs by filename
jj[self._file_hdr] = self._file_list[k][47:]
dd.append(jj)
# if important keys are provided, cull the dataframe with those keys:
# do this AFTER concat with dummy frame with all the important keys,
# otherwise smaller frames may not have all of the keys
self._processed = pd.concat(dd) if self._important == None else \
pd.concat(dd)[self._important]
self._metadata['num_files'] = self._num
self._metadata['date_record'] = self._date
h5store(self._savfile, self._processed, **self._metadata)
def _get_data(self, date):
self._date = date
self._savfile = self._savfmt.format(self._savdir, self._date)
if (os.path.isfile(self._savfile) and not self._force_overwrite):
print('reading {} from disk'.format(self._savfile))
with pd.HDFStore(self._savfile) as store:
self._processed, self._metadata = h5load(store)
else:
self._process()
def run(self, date_range, group_col='DTINSTRU', important=None,
num_to_read=None, force_overwrite=False):
'''
date_range: list of date directories to read
group_col: column name on which to group: usually 'DTINSTRU'
important: list of columns to keep in processed dataframes
num_to_read: number of files to read
(default: read all files in each date directory)
force_overwrite: if processed dataframe exists on disk,
overwrite if True (default: False)
'''
raw = []
self._num_to_read = np.iinfo(np.int32).max if num_to_read == None else \
num_to_read
self._group_col = group_col
self._multi_group_cols = [self._group_col, self._file_hdr]
# add file-header column for filename:
# important not to use append() method here: it breaks things
self._important = important + [self._file_hdr]
self._force_overwrite = force_overwrite
# ensure date_range is a list if a scalar is input
date_range = date_range if isinstance(date_range, list) else [date_range]
for k in date_range:
self._get_data(k)
raw.append(self._processed)
self._full_dataframe = pd.concat(raw)
@property
def get_full_dataframe(self):
assert self._full_dataframe is not None, self._errmsg
return self._full_dataframe
@property
def get_instr_vs_fields_unique_all_data(self):
# TODO: this needs to be generalized so user can define the top rows
# for display:
# this will be broken when the 'important' list changes
gg = self.get_full_dataframe.groupby(self._group_col).nunique().T
indx = list(gg.index)
# reorder rows to get similar rows at top for direct comparison
indx = [self._file_hdr,'DTACQNAM'] + indx[:12] + indx[13:-1]
return gg.loc[indx,:]
@property
def get_HDU_uniqueness_per_file(self):
gg = self.get_full_dataframe.groupby(self._multi_group_cols).nunique()
# drop corrupted (by nunique()) grouping columns
gg = gg.drop(self._multi_group_cols, axis=1)
# reset index, drop unnecessary local_file column
gg.reset_index().drop(self._multi_group_cols[1], axis=1)
return gg.groupby(self._multi_group_cols[0]).\
agg(['min','max','mean','std']).round(2).stack().T
@property
def get_all_fields(self):
return self._important
@property
def get_HDU_stats(self):
gg = self.get_full_dataframe.groupby(self._multi_group_cols).size()
return gg.groupby(self._group_col).agg(['min','max','mean','std']).\
rename_axis('HDU stats:', axis=1)
def get_num_files_writing_fields(self, instr=True, percent=True):
'''
instr: if True, list percentages (or raw numbers) of files per
instrument that write each field, if False list total
number of files (or percentages) over ALL instruments
(default=True)
percent: if True, list percentages of files that write each field,
if False, list raw numbers of files (default=True)
'''
zz = self.get_full_dataframe.groupby(self._multi_group_cols).nunique() > 0
if not instr:
gg = zz.sum()
return (gg/gg[self._file_hdr]*100).round(2) if percent else gg
else:
gg = zz.drop(['DTINSTRU'], axis=1).\
rename(columns={self._file_hdr:'COUNT'}).\
reset_index().drop(self._file_hdr, axis=1)
gg = gg.groupby('DTINSTRU').sum().T
return (gg/gg.loc['COUNT']*100).round(2) if percent else gg
def get_unique_values_of_field(self, field):
return list(self.get_full_dataframe[field].dropna().unique())
def get_num_unique_values_by_keys(self, field1, field2):
gg = self.get_full_dataframe.groupby([field1, field2]).nunique()
return pd.DataFrame(gg.loc[:, self._file_hdr]).rename(columns=\
{self._file_hdr:'TOTAL OCCURRENCES'})
# Build the processor over the scraped JSON tree and run it for all dates.
proc = ProcessJSON(BASE)
dates = DATE
num = None #100 # 'None' to get all files
force_overwrite = False
# NOTE: `important` (the list of columns to keep) is defined in an earlier cell.
proc.run(dates, important=important, group_col='DTINSTRU', num_to_read=num, force_overwrite=force_overwrite)
```
## TESTING
```
# Exercise every public accessor of ProcessJSON and export the summaries.
proc.get_unique_values_of_field('DTINSTRU')
aa = proc.get_full_dataframe.copy()  # make copy to experiment: without copying, it is a VIEW (ie, a pointer)
bb = proc.get_instr_vs_fields_unique_all_data
cc = proc.get_HDU_uniqueness_per_file
dd = proc.get_all_fields # list
ee = proc.get_HDU_stats
ff = proc.get_unique_values_of_field('OBSTYPE')
# pairwise occurrence counts for several key combinations
gg1 = proc.get_num_unique_values_by_keys('DTINSTRU', 'OBSTYPE')
gg2 = proc.get_num_unique_values_by_keys('DTINSTRU', 'FILTER')
gg3 = proc.get_num_unique_values_by_keys('DTTELESC','DTINSTRU')
gg4 = proc.get_num_unique_values_by_keys('DTINSTRU', 'DTCALDAT')
# file-writing-field counts: all four instr/percent combinations
hh1 = proc.get_num_files_writing_fields(instr=True, percent=True)
hh2 = proc.get_num_files_writing_fields(instr=True, percent=False)
hh3 = proc.get_num_files_writing_fields(instr=False, percent=True)
hh4 = proc.get_num_files_writing_fields(instr=False, percent=False)
aa # too big for html: 389000 rows!
# TODO: make optional csv, html output a method in ProcessJSON
bb.to_html('html/get_instr_vs_fields_unique_all_data.html')
bb.to_csv('csv/get_instr_vs_fields_unique_all_data.csv')
cc.to_html('html/get_HDU_uniqueness_per_file.html')
cc.to_csv('csv/get_HDU_uniqueness_per_file.csv')
dd # list
ee.to_html('html/get_HDU_stats.html')
ee.to_csv('csv/get_HDU_stats.csv')
gg1.to_html('html/get_num_unique_values_by_keys_DTINSTRU_OBSTYPE.html')
gg1.to_csv('csv/get_num_unique_values_by_keys_DTINSTRU_OBSTYPE.csv')
gg2.to_html('html/get_num_unique_values_by_keys_DTINSTRU_FILTER.html')
gg2.to_csv('csv/get_num_unique_values_by_keys_DTINSTRU_FILTER.csv')
gg3.to_html('html/get_num_unique_values_by_keys_DTTELESC_DTINSTRU.html')
gg3.to_csv('csv/get_num_unique_values_by_keys_DTTELESC_DTINSTRU.csv')
gg4.to_html('html/get_num_unique_values_by_keys_DTINSTRU_DTCALDAT.html')
gg4.to_csv('csv/get_num_unique_values_by_keys_DTINSTRU_DTCALDAT.csv')
hh1.to_html('html/get_num_files_writing_fields(instr=True, percent=True).html')
hh2.to_html('html/get_num_files_writing_fields(instr=True, percent=False).html')
# hh3/hh4 are Series, so convert to DataFrame before exporting
hh3.to_frame().to_html('html/get_num_files_writing_fields(instr=False, percent=True).html')
hh4.to_frame().to_html('html/get_num_files_writing_fields(instr=False, percent=False).html')
```
| github_jupyter |
### ***Assignment 1:***
- Competitions: [Two Sigma: Using News to Predict Stock Movements](https://www.kaggle.com/c/two-sigma-financial-news/data)
***1. Why is it important?***
- If we can use news articles to efficiently predict stock price, we can reduce investment risk.
***2. Data Source:***
- Market data (2007 to present) provided by [Intrinio - contains financial market information](https://intrinio.com/) such as opening price, closing price, trading volume, calculated returns, etc.
- News data (2007 to present) Source: [Thomson Reuters](https://www.reuters.com/) - contains information about news articles/alerts published about assets, such as article details, sentiment, and other commentary.
***3. Data Type:***
- **Market data:**
- __time__ _(datetime64[ns, UTC])_ - the current time (in marketdata, all rows are taken at 22:00 UTC)
- __assetCode__ _(object)_ - a unique id of an asset
- __assetName__ _(category)_ - the name that corresponds to a group of assetCodes. These may be "Unknown" if the corresponding assetCode does not have any rows in the news data.
- __universe__ _(float64)_ - a boolean indicating whether or not the instrument on that day will be included in scoring. This value is not provided outside of the training data time period. The trading universe on a given date is the set of instruments that are available for trading (the scoring function will not consider instruments that are not in the trading universe). The trading universe changes daily.
- __volume__ _(float64)_ - trading volume in shares for the day
- __close__ _(float64)_ - the close price for the day (not adjusted for splits or dividends)
- __open__ _(float64)_ - the open price for the day (not adjusted for splits or dividends)
- __returnsClosePrevRaw1__ _(float64)_ - see returns explanation above
- __returnsOpenPrevRaw1__ _(float64)_ - see returns explanation above
- __returnsClosePrevMktres1__ _(float64)_ - see returns explanation above
- __returnsOpenPrevMktres1__ _(float64)_ - see returns explanation above
- __returnsClosePrevRaw10__ _(float64)_ - see returns explanation above
- __returnsOpenPrevRaw10__ _(float64)_ - see returns explanation above
- __returnsClosePrevMktres10__ _(float64)_ - see returns explanation above
- __returnsOpenPrevMktres10__ _(float64)_ - see returns explanation above
- __returnsOpenNextMktres10__ _(float64)_ - 10 day, market-residualized return. This is the target variable used in competition scoring. The market data has been filtered such that returnsOpenNextMktres10 is always not null.
- **News data:**
- __time__ _(datetime64[ns, UTC])_ - UTC timestamp showing when the data was available on the feed (second precision)
- __sourceTimestamp__ _(datetime64[ns, UTC])_ - UTC timestamp of this news item when it was created
- __firstCreated__ _(datetime64[ns, UTC])_ - UTC timestamp for the first version of the item
- __sourceId__ _(object)_ - an Id for each news item
- __headline__ _(object)_ - the item's headline
- __urgency__ _(int8)_ - differentiates story types (1: alert, 3: article)
- __takeSequence__ _(int16)_ - the take sequence number of the news item, starting at 1. For a given story, alerts and articles have separate sequences.
- __provider__ _(category)_ - identifier for the organization which provided the news item (e.g. RTRS for Reuters News, BSW for Business Wire)
- __subjects__ _(category)_ - topic codes and company identifiers that relate to this news item. Topic codes describe the news item's subject matter. These can cover asset classes, geographies, events, industries/sectors, and other types.
- __audiences__ _(category)_ - identifies which desktop news product(s) the news item belongs to. They are typically tailored to specific audiences. (e.g. "M" for Money International News Service and "FB" for French General News Service)
- __bodySize__ _(int32)_ - the size of the current version of the story body in characters
- __companyCount__ _(int8)_ - the number of companies explicitly listed in the news item in the subjects field
- __headlineTag__ _(object)_ - the Thomson Reuters headline tag for the news item
- __marketCommentary__ _(bool)_ - boolean indicator that the item is discussing general market conditions, such as "After the Bell" summaries
- __sentenceCount__ _(int16)_ - the total number of sentences in the news item. Can be used in conjunction with firstMentionSentence to determine the relative position of the first mention in the item.
- __wordCount__ _(int32)_ - the total number of lexical tokens (words and punctuation) in the news item
- __assetCodes__ _(category)_ - list of assets mentioned in the item
- __assetName__ _(category)_ - name of the asset
- __firstMentionSentence__ _(int16)_ - the first sentence, starting with the headline, in which the scored asset is mentioned.
1. headline
2. first sentence of the story body
3. second sentence of the body, etc
4. the asset being scored was not found in the news item's headline or body text. As a result, the entire news item's text (headline + body) will be used to determine the sentiment score.
- __relevance__ _(float32)_ - a decimal number indicating the relevance of the news item to the asset. It ranges from 0 to 1. If the asset is mentioned in the headline, the relevance is set to 1. When the item is an alert (urgency == 1), relevance should be gauged by firstMentionSentence instead.
- __sentimentClass__ _(int8)_ - indicates the predominant sentiment class for this news item with respect to the asset. The indicated class is the one with the highest probability.
- __sentimentNegative__ _(float32)_ - probability that the sentiment of the news item was negative for the asset
- __sentimentNeutral__ _(float32)_ - probability that the sentiment of the news item was neutral for the asset
- __sentimentPositive__ _(float32)_ - probability that the sentiment of the news item was positive for the asset
- __sentimentWordCount__ _(int32)_ - the number of lexical tokens in the sections of the item text that are deemed relevant to the asset. This can be used in conjunction with wordCount to determine the proportion of the news item discussing the asset.
- __noveltyCount12H__ _(int16)_ - The 12 hour novelty of the content within a news item on a particular asset. It is calculated by comparing it with the asset-specific text over a cache of previous news items that contain the asset.
- __noveltyCount24H__ _(int16)_ - same as above, but for 24 hours
- __noveltyCount3D__ _(int16)_ - same as above, but for 3 days
- __noveltyCount5D__ _(int16)_ - same as above, but for 5 days
- __noveltyCount7D__ _(int16)_ - same as above, but for 7 days
- __volumeCounts12H__ _(int16)_ - the 12 hour volume of news for each asset. A cache of previous news items is maintained and the number of news items that mention the asset within each of five historical periods is calculated.
- __volumeCounts24H__ _(int16)_ - same as above, but for 24 hours
- __volumeCounts3D__ _(int16)_ - same as above, but for 3 days
- __volumeCounts5D__ _(int16)_ - same as above, but for 5 days
- __volumeCounts7D__ _(int16)_ - same as above, but for 7 days
***4. How to evalute ? (Loss function)***
<img src="img/Hw1_loss_function.png" align="left" height='450' width='600'/>
### ___Assignment 2___:
***1. Main Question:***
- 如何降低司機載客的空窗時間,根據每個時段與每個地段的乘車需求分配相對應的車輛數,不僅能降低司機間載客的空窗期,更能降低車輛支出的成本。
***2. Data Source:***
- [各縣市公路統計資料](https://www.thb.gov.tw/sites/ch/modules/download/download_list?node=3ba953bc-913d-4a13-b77b-0b7128fbe9a9&c=a2c29922-0c40-4a49-a8bc-65c3959b9cc4)
- 行動支付的紀錄與汽車上的sensor
***3. Data Type:***
- __difftime__ _(datetime64)_: 司機載客空窗時間
- __passenger_sex__ _(int)_: 乘客性別
- __car_type__ _(string)_: 車子型號
- __work_time__ _(int)_: 司機工作時間
- __driver_sex__ _(int)_: 司機性別
- __manufacture_year__ (datetime64): 車子製造日期
- __route__ _(float)_: 行車路徑
- __weather__ _(string)_: 天氣
- __temperature__ _(float)_: 溫度
- __number_of_passenger__ _(int)_: 一次載客的乘客數目
- ........
***4.How to evaluate?***
- Mean Square Error:
- $ MSE = \frac{1}{n}\sum_{i=1}^{n}{(Y_i - \hat{Y}_i)^2} $ , $Y_i$司機實際載客空窗時間,$\hat{Y}_i$ 預測司機載客空窗時間
## 練習時間
#### 請寫一個函式用來計算 Mean Square Error
$ MSE = \frac{1}{n}\sum_{i=1}^{n}{(Y_i - \hat{Y}_i)^2} $
### Hint: [如何取平方](https://googoodesign.gitbooks.io/-ezpython/unit-1.html)
```
import numpy as np
import matplotlib.pyplot as plt
def mean_squared_error(y, yhat):
    """
    Calculate the Mean Square Error (MSE).

    Args:
        y: real values, type: numpy array
        yhat: predicted values, type: numpy array
    Returns:
        MSE = mean((y - yhat)**2)
    """
    # np.mean generalizes sum()/len() to arrays of any shape.
    mse = np.mean((y - yhat) ** 2)
    return mse
def mean_absolute_error(y, yhat):
    '''
    Calculate the Mean Absolute Error (MAE).

    Args:
        y: real values, type: numpy array
        yhat: predicted values, type: numpy array
    Returns:
        MAE = mean(|y - yhat|)
    '''
    # np.mean generalizes sum()/len() to arrays of any shape.
    mae = np.mean(np.abs(y - yhat))
    return mae
# Generate noisy linear data y = w*x + b and compare it with the
# noise-free prediction to exercise the two error metrics above.
w = 3
b = 0.5
x_lin = np.linspace(0, 100, 101)
y = (x_lin + np.random.randn(101) * 5) * w + b
plt.plot(x_lin, y, 'b.', label = 'data points')
plt.title("Assume we have data points")
plt.legend(loc = 2)
plt.show()
# The prediction is the underlying linear model without noise.
y_hat = x_lin * w + b
plt.plot(x_lin, y, 'b.', label = 'data')
plt.plot(x_lin, y_hat, 'r-', label = 'prediction')
plt.title("Assume we have data points (And the prediction)")
plt.legend(loc = 2)
plt.show()
# Run the functions to check that they execute correctly
MSE = mean_squared_error(y, y_hat)
MAE = mean_absolute_error(y, y_hat)
print("The Mean squared error is %.3f" % (MSE))
print("The Mean absolute error is %.3f" % (MAE))
```
| github_jupyter |
<h1>Table of Contents<span class="tocSkip"></span></h1>
<div class="toc"><ul class="toc-item"><li><span><a href="#The-Importance-of-Data-Scaling-in-PCovR-/-KernelPCovR" data-toc-modified-id="The-Importance-of-Data-Scaling-in-PCovR-/-KernelPCovR-1"><span class="toc-item-num">1 </span>The Importance of Data Scaling in PCovR / KernelPCovR</a></span></li></ul></div>
# The Importance of Data Scaling in PCovR / KernelPCovR
```
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_diabetes
from sklearn.preprocessing import StandardScaler
from skcosmo.preprocessing import StandardFlexibleScaler
from skcosmo.decomposition import PCovR
```
In PCovR, and KernelPCovR, we are combining multiple aspects of the dataset, primarily the features and targets.
As such, the results largely depend on the relative contributions of each aspect to the mixed model.
```
# Load the sklearn diabetes dataset as raw feature matrix X and target vector y.
X, y = load_diabetes(return_X_y=True)
```
Take the diabetes dataset from sklearn. In their raw form, the magnitudes of the features and targets are
```
# Compare the raw magnitudes of the feature matrix and the target vector.
print(
    "Norm of the features: %0.2f \nNorm of the targets: %0.2f"
    % (np.linalg.norm(X), np.linalg.norm(y))
)
```
For the diabetes dataset, we can use the `StandardScaler` class from sklearn, as the features and targets are independent.
```
# Standardize features and targets independently (zero mean, unit variance).
x_scaler = StandardScaler()
y_scaler = StandardScaler()
X_scaled = x_scaler.fit_transform(X)
# StandardScaler expects a 2-D array, hence the reshape of the 1-D targets.
y_scaled = y_scaler.fit_transform(y.reshape(-1,1))
```
Looking at the results at `mixing=0.5`, we see an especially large difference in the latent-space projections
```
# Fit PCovR on the raw data and on the scaled data with the same mixing.
pcovr_unscaled = PCovR(mixing=0.5, n_components=4).fit(X, y)
T_unscaled = pcovr_unscaled.transform(X)
Yp_unscaled = pcovr_unscaled.predict(X)
pcovr_scaled = PCovR(mixing=0.5, n_components=4).fit(X_scaled, y_scaled)
T_scaled = pcovr_scaled.transform(X_scaled)
# Map scaled predictions back to the original target units for comparison.
Yp_scaled = y_scaler.inverse_transform(pcovr_scaled.predict(X_scaled))
# 2x2 grid: latent projections on top, regression parity plots below.
fig, ((ax1_T, ax2_T), (ax1_Y, ax2_Y)) = plt.subplots(2, 2, figsize=(8, 10))
ax1_T.scatter(T_unscaled[:, 0], T_unscaled[:, 1], c=y, cmap="plasma", ec="k")
ax1_T.set_xlabel("PCov1")
ax1_T.set_ylabel("PCov2")
ax1_T.set_title("Latent Projection\nWithout Scaling")
ax2_T.scatter(T_scaled[:, 0], T_scaled[:, 1], c=y, cmap="plasma", ec="k")
ax2_T.set_xlabel("PCov1")
ax2_T.set_ylabel("PCov2")
ax2_T.set_title("Latent Projection\nWith Scaling")
# Parity plots: points colored by absolute prediction error.
ax1_Y.scatter(Yp_unscaled, y, c=np.abs(y - Yp_unscaled), cmap="bone_r", ec="k")
ax1_Y.plot(ax1_Y.get_xlim(), ax1_Y.get_xlim(), "r--")
ax1_Y.set_xlabel("True Y, unscaled")
ax1_Y.set_ylabel("Predicted Y, unscaled")
ax1_Y.set_title("Regression\nWithout Scaling")
ax2_Y.scatter(Yp_scaled,
              y,
              c=np.abs(y.flatten() - Yp_scaled.flatten()),
              cmap="bone_r",
              ec="k")
ax2_Y.plot(ax2_Y.get_xlim(), ax2_Y.get_xlim(), "r--")
ax2_Y.set_xlabel("True Y, unscaled")
ax2_Y.set_ylabel("Predicted Y, unscaled")
ax2_Y.set_title("Regression\nWith Scaling")
fig.subplots_adjust(hspace=0.5, wspace=0.3)
```
Also, we see that when the datasets are unscaled, the total loss (loss in recreating the original dataset and regression loss) does not vary with `mixing`, as expected. Typically, the regression loss should _gradually_ increase with `mixing` (and vice-versa for the loss in reconstructing the original features). When the inputs are not scaled, however, only in the case of `mixing` = 0 or 1 will the losses drastically change, depending on which component is dominating the model. Here, because the features dominate the model, this jump occurs as `mixing` goes to 0. With the scaled inputs, there is still a jump when `mixing>0` due to the change in matrix rank.
```
# Sweep the mixing parameter and record reconstruction (row 0) and
# regression (row 1) losses for both unscaled and scaled inputs.
mixings = np.linspace(0, 1, 21)
losses_unscaled = np.zeros((2, len(mixings)))
losses_scaled = np.zeros((2, len(mixings)))
nc = 4  # number of PCovR components
for mi, mixing in enumerate(mixings):
    pcovr_unscaled = PCovR(mixing=mixing, n_components=nc).fit(X, y)
    t_unscaled = pcovr_unscaled.transform(X)
    yp_unscaled = pcovr_unscaled.predict(T=t_unscaled)
    xr_unscaled = pcovr_unscaled.inverse_transform(t_unscaled)
    # relative squared losses: ||reconstruction error||^2 / ||data||^2
    losses_unscaled[:, mi] = (
        np.linalg.norm(xr_unscaled - X)**2.0 / np.linalg.norm(X)**2,
        np.linalg.norm(yp_unscaled - y)**2.0 / np.linalg.norm(y)**2,
    )
    pcovr_scaled = PCovR(mixing=mixing, n_components=nc).fit(X_scaled, y_scaled)
    t_scaled = pcovr_scaled.transform(X_scaled)
    yp_scaled = pcovr_scaled.predict(T=t_scaled)
    xr_scaled = pcovr_scaled.inverse_transform(t_scaled)
    losses_scaled[:, mi] = (
        np.linalg.norm(xr_scaled - X_scaled)**2.0 /
        np.linalg.norm(X_scaled)**2,
        np.linalg.norm(yp_scaled - y_scaled)**2.0 /
        np.linalg.norm(y_scaled)**2,
    )
# Plot X-loss, Y-loss and their sum versus mixing, unscaled vs scaled.
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4), sharey=True, sharex=True)
ax1.plot(mixings, losses_unscaled[0], marker="o", label=r"$\ell_{X}$")
ax1.plot(mixings, losses_unscaled[1], marker="o", label=r"$\ell_{Y}$")
ax1.plot(mixings,
         np.sum(losses_unscaled, axis=0),
         marker="o",
         label=r"$\ell$")
ax1.legend(fontsize=12)
ax1.set_title('With Inputs Unscaled')
ax1.set_xlabel(r'Mixing parameter $\alpha$')
ax1.set_ylabel(r'Loss $\ell$')
ax2.plot(mixings, losses_scaled[0], marker="o", label=r"$\ell_{X}$")
ax2.plot(mixings, losses_scaled[1], marker="o", label=r"$\ell_{Y}$")
ax2.plot(mixings,
         np.sum(losses_scaled, axis=0),
         marker="o",
         label=r"$\ell$")
ax2.legend(fontsize=12)
ax2.set_title('With Inputs Scaled')
ax2.set_xlabel(r'Mixing parameter $\alpha$')
ax2.set_ylabel(r'Loss $\ell$')
```
**Note: when the relative magnitude of the features or targets is important, such as in load_csd_1000r, one should use the `StandardFlexibleScaler` provided by `scikit-cosmo`.**
| github_jupyter |
# 🗂 Weak supervision in multi-label text classification tasks
WORK IN PROGRESS: This tutorial is a work in progress and you can expect some changes within the next few releases.
We will showcase new features here as soon as they are available.
In this tutorial we use Rubrix and weak supervision to tackle two multi-label classification datasets:
- The first dataset is a curated version of [**GoEmotions**](https://huggingface.co/datasets/go_emotions), a dataset intended for **multi-label emotion classification**.
- We inspect the dataset in Rubrix, come up with good heuristics, and combine them with a label model to train a **weakly supervised Hugging Face transformer**.
- In the second dataset, we [**categorize research papers**](https://www.kaggle.com/shivanandmn/multilabel-classification-dataset) by topic based on their titles, which is a **multi-label topic classification** problem.
- We repeat the process of finding good heuristics, combine them with a label model and train a **lightweight downstream model using sklearn** in the end.
<video width="100%" controls><source src="../_static/tutorials/weak-supervision-multi-label/ws_multi-label.mp4" type="video/mp4"></video>
<div class="alert alert-info">
Note
If you are new to weak supervision, check out our [weak supervision guide](../guides/weak-supervision.ipynb) and our first [weak supervision tutorial](weak-supervision-with-rubrix.ipynb).
</div>
## Setup
Rubrix, is a free and open-source tool to explore, annotate, and monitor data for NLP projects.
If you are new to Rubrix, check out the [Github repository](https://github.com/recognai/rubrix) ⭐.
If you have not installed and launched Rubrix yet, check the [Setup and Installation guide](../getting_started/setup&installation.rst).
For this tutorial we also need some third party libraries that can be installed via pip:
```
%pip install datasets "transformers[torch]" scikit-multilearn ipywidgets -qqq
```
## GoEmotions
The original [GoEmotions](https://huggingface.co/datasets/go_emotions) is a challenging dataset intended for multi-label emotion classification.
For this tutorial, we simplify it a bit by selecting only 6 out of the 28 emotions: *admiration, annoyance, approval, curiosity, gratitude, optimism*.
We also try to accentuate the multi-label part of the dataset by down-sampling the examples that are classified with only one label.
See Appendix A for all the details of this preprocessing step.
### Define rules
Let us start by downloading our curated version of the dataset from the Hugging Face Hub, and log it to Rubrix:
```
import rubrix as rb
from datasets import load_dataset
# Download preprocessed dataset from the Hugging Face Hub and convert it
# to Rubrix text-classification records.
ds_rb = rb.read_datasets(
    load_dataset("rubrix/go_emotions_multi-label", split="train"),
    task="TextClassification"
)
# Log dataset to Rubrix to find good heuristics
rb.log(ds_rb, name="go_emotions")
```
After uploading the dataset, we can explore and inspect it to find good heuristic rules.
For this we highly recommend the dedicated [*Define rules* mode](../reference/webapp/define_rules.md) of the Rubrix web app, that allows you to quickly iterate over heuristic rules, compute their metrics and save them.
Here we copy our rules found via the web app to the notebook for you to easily follow along the tutorial.
```
from rubrix.labeling.text_classification import Rule
# Define our heuristic rules, they can surely be improved.
# Each Rule maps a search query to one or more of the six emotion labels.
rules = [
    Rule("thank*", "gratitude"),
    Rule("appreciate", "gratitude"),
    Rule("text:(thanks AND good)", ["admiration", "gratitude"]),
    Rule("advice", "admiration"),
    Rule("amazing", "admiration"),
    Rule("awesome", "admiration"),
    Rule("impressed", "admiration"),
    Rule("text:(good AND (point OR call OR idea OR job))", "admiration"),
    Rule("legend", "admiration"),
    Rule("exactly", "approval"),
    Rule("agree", "approval"),
    Rule("yeah", "approval"),
    Rule("suck", "annoyance"),
    Rule("pissed", "annoyance"),
    Rule("annoying", "annoyance"),
    Rule("ruined", "annoyance"),
    Rule("hoping", "optimism"),
    Rule('text:("good luck")', "optimism"),
    Rule('"nice day"', "optimism"),
    Rule('"what is"', "curiosity"),
    Rule('"can you"', "curiosity"),
    Rule('"would you"', "curiosity"),
]
```
We go on and apply these heuristic rules to our dataset creating our weak label matrix.
Since we are dealing with a multi-label classification task, the weak label matrix will have 3 dimensions.
> Dimensions of the weak multi label matrix: *number of records* x *number of rules* x *number of labels*
It will be filled with 0 and 1, depending on if the rule voted for the respective label or not.
If the rule abstained for a given record, the matrix will be filled with -1.
```
from rubrix.labeling.text_classification import WeakMultiLabels
# Compute the weak labels for our dataset given the rules.
# If your dataset already contains rules you can omit the rules argument.
weak_labels = WeakMultiLabels("go_emotions", rules=rules)
```
We can call the `weak_labels.summary()` method to check the precision of each rule as well as our total coverage of the dataset.
```
# Check coverage/precision of our rules (per-rule stats plus totals)
weak_labels.summary()
```
### Create training set
When we are happy with our heuristics, it is time to combine them and compute weak labels for the training of our downstream model.
Here we will use the simple `MajorityVoter`, that in the multi-label case, sets the probability of a label to 0 or 1 depending on whether at least one non-abstaining rule voted for the respective label or not.
```
from rubrix.labeling.text_classification import MajorityVoter
# Use the majority voter as the label model to combine the rules' votes.
label_model = MajorityVoter(weak_labels)
```
From our label model we get the training records together with its weak labels and probabilities.
We will use the weak labels with a probability greater than 0.5 as labels for our training, and hence copy them to the `annotation` property of our records.
```
# Get records with the predictions from the label model to train a down-stream model
train_rb = rb.DatasetForTextClassification(label_model.predict())
# Copy label model predictions to annotation,
# keeping only labels whose predicted probability exceeds 0.5
for rec in train_rb:
    rec.annotation = [pred[0] for pred in rec.prediction if pred[1] > 0.5]
```
We extract the test set with manual annotations from our `WeakMultiLabels` object:
```
# Get records with manual annotations to use as test set for the down-stream model
test_rb = rb.DatasetForTextClassification(weak_labels.records(has_annotation=True))
```
We will use the convenient `DatasetForTextClassification.prepare_for_training()` method to create datasets optimized for training with the Hugging Face transformers library:
```
from datasets import DatasetDict
# Create dataset dictionary and shuffle training set
# (fixed seed keeps the shuffle reproducible)
ds = DatasetDict(
    train=train_rb.prepare_for_training().shuffle(seed=42),
    test=test_rb.prepare_for_training()
)
```
Let us push the dataset to the Hub to share it with our colleagues.
It is also an easy way to outsource the training of the model to an environment with an accelerator, like Google Colab for example.
```
# Push dataset for training our down-stream model to the HF hub
# (requires being logged in to the Hugging Face Hub)
ds.push_to_hub("rubrix/go_emotions_training")
```
### Train a transformer downstream model
The following steps are basically a copy&paste from the amazing documentation of the [Hugging Face transformers](https://huggingface.co/docs/transformers) library.
First, we will load the tokenizer corresponding to our model, which we choose to be the [distilled version](https://huggingface.co/distilbert-base-uncased) of the infamous BERT.
<div class="alert alert-info">
Note
Since we will use a full-blown transformer as a downstream model (albeit a distilled one), we recommend executing the following code on a machine with a GPU, or in a Google Colab with a GPU backend enabled.
</div>
```
from transformers import AutoTokenizer
# Initialize tokenizer matching the downstream checkpoint
tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
```
Afterward, we tokenize our data:
```
def tokenize_func(examples):
    """Tokenize a batch of texts, padding/truncating to the model's max length."""
    return tokenizer(examples["text"], padding="max_length", truncation=True)
# Tokenize the data
tokenized_ds = ds.map(tokenize_func, batched=True)
```
The transformer model expects our labels to follow a common multi-label format of binaries, so let us use [sklearn](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.MultiLabelBinarizer.html) for this transformation.
```
from sklearn.preprocessing import MultiLabelBinarizer
# Turn labels into multi-label format (binary indicator vectors)
mb = MultiLabelBinarizer()
# Fit on the test split so the binarizer sees the label vocabulary.
mb.fit(ds["test"]["label"])
def binarize_labels(examples):
    """Map each list of labels to its binary indicator vector."""
    return {"label": mb.transform(examples["label"])}
binarized_tokenized_ds = tokenized_ds.map(binarize_labels, batched=True)
```
Before we start the training, it is important to define our metric for the evaluation.
Here we settle on the commonly used micro averaged *F1* metric, but we will also keep track of the *F1 per label*, for a more in-depth error analysis afterward.
```
from datasets import load_metric
import numpy as np
# Define our metrics
metric = load_metric("f1", config_name="multilabel")
def compute_metrics(eval_pred):
    """Compute micro-averaged F1 plus one F1 score per label."""
    logits, labels = eval_pred
    # apply sigmoid to get per-label probabilities, then threshold at 0.5
    predictions = ( 1. / (1 + np.exp(-logits)) ) > 0.5
    # f1 micro averaged
    metrics = metric.compute(predictions=predictions, references=labels, average="micro")
    # f1 per label, keyed as "f1_<label name>"
    per_label_metric = metric.compute(predictions=predictions, references=labels, average=None)
    for label, f1 in zip(ds["train"].features["label"][0].names, per_label_metric["f1"]):
        metrics[f"f1_{label}"] = f1
    return metrics
```
Now we are ready to load our pretrained transformer model and prepare it for our task: multi-label text classification with 6 labels.
```
from transformers import AutoModelForSequenceClassification
# Init our down-stream model
model = AutoModelForSequenceClassification.from_pretrained(
"distilbert-base-uncased",
problem_type="multi_label_classification",
num_labels=6
)
```
The only thing missing for the training is the `Trainer` and its `TrainingArguments`.
To keep it simple, we mostly rely on the default arguments, that often work out of the box, but tweak a bit the batch size to train faster.
We also checked that 2 epochs are enough for our rather small dataset.
```
from transformers import TrainingArguments
# Set our training arguments
training_args = TrainingArguments(
output_dir="test_trainer",
evaluation_strategy="epoch",
num_train_epochs=2,
per_device_train_batch_size=16,
per_device_eval_batch_size=16,
)
from transformers import Trainer
# Init the trainer
trainer = Trainer(
model=model,
args=training_args,
train_dataset=binarized_tokenized_ds["train"],
eval_dataset=binarized_tokenized_ds["test"],
compute_metrics=compute_metrics,
)
# Train the down-stream model
trainer.train()
```
We achieved a micro averaged *F1* of about 0.54, which is not perfect, but a good baseline for this challenging dataset.
When inspecting the *F1s per label*, we clearly see that the worst performing labels are the ones with the poorest heuristics in terms of accuracy and coverage, which comes as no surprise.
## Research topic dataset
After covering a multi-label emotion classification task, we will try to do the same for a multi-label classification task related to topic modeling.
In this dataset, research papers were classified with 6 non-exclusive labels based on their title and abstract.
We will try to classify the papers only based on the title, which is considerably harder, but allows us to quickly scan through the data and come up with heuristics.
See Appendix B for all the details of the minimal data preprocessing.
### Define rules
Let us start by downloading our preprocessed dataset from the Hugging Face Hub, and log it to Rubrix:
```
import rubrix as rb
from datasets import load_dataset
# Download preprocessed dataset
ds_rb = rb.read_datasets(
load_dataset("rubrix/research_titles_multi-label", split="train"),
task="TextClassification"
)
# Log dataset to Rubrix to find good heuristics
rb.log(ds_rb, "research_titles")
```
After uploading the dataset, we can explore and inspect it to find good heuristic rules.
For this we highly recommend the dedicated [*Define rules* mode](../reference/webapp/define_rules.md) of the Rubrix web app, that allows you to quickly iterate over heuristic rules, compute their metrics and save them.
Here we copy our rules found via the web app to the notebook for you to easily follow along the tutorial.
```
from rubrix.labeling.text_classification import Rule
# Define our heuristic rules (can probably be improved)
rules = [
Rule("stock*", "Quantitative Finance"),
Rule("*asset*", "Quantitative Finance"),
Rule("trading", "Quantitative Finance"),
Rule("finance", "Quantitative Finance"),
Rule("pric*", "Quantitative Finance"),
Rule("economy", "Quantitative Finance"),
Rule("deep AND neural AND network*", "Computer Science"),
Rule("convolutional", "Computer Science"),
Rule("memor* AND (design* OR network*)", "Computer Science"),
Rule("system* AND design*", "Computer Science"),
Rule("allocat* AND *net*", "Computer Science"),
Rule("program", "Computer Science"),
Rule("classification* AND (label* OR deep)", "Computer Science"),
Rule("scattering", "Physics"),
Rule("astro*", "Physics"),
Rule("material*", "Physics"),
Rule("spin", "Physics"),
Rule("magnetic", "Physics"),
Rule("optical", "Physics"),
Rule("ray", "Physics"),
Rule("entangle*", "Physics"),
Rule("*algebra*", "Mathematics"),
Rule("manifold* AND (NOT learn*)", "Mathematics"),
Rule("equation", "Mathematics"),
Rule("spaces", "Mathematics"),
Rule("operators", "Mathematics"),
Rule("regression", "Statistics"),
Rule("bayes*", "Statistics"),
Rule("estimation", "Statistics"),
Rule("mixture", "Statistics"),
Rule("gaussian", "Statistics"),
Rule("gene", "Quantitative Biology"),
]
```
We go on and apply these heuristic rules to our dataset creating our weak label matrix.
As mentioned in the [GoEmotions](#goemotions) section, the weak label matrix will have 3 dimensions and values of -1, 0 and 1.
```
from rubrix.labeling.text_classification import WeakMultiLabels
# Compute the weak labels for our dataset given the rules
# If your dataset already contains rules you can omit the rules argument.
weak_labels = WeakMultiLabels("research_titles", rules=rules)
```
Let us get an overview of our heuristics and how they perform:
```
# Check coverage/precision of our rules
weak_labels.summary()
```
### Create training set
When we are happy with our heuristics, it is time to combine them and compute weak labels for the training of our downstream model.
As for the "GoEmotions" dataset, we will use the simple `MajorityVoter`.
```
from rubrix.labeling.text_classification import MajorityVoter
# Use the majority voter as the label model
label_model = MajorityVoter(weak_labels)
```
From our label model we get the training records together with its weak labels and probabilities.
Since we are going to train an sklearn model, we will put the records in a pandas DataFrame that generally has a good integration with the sklearn ecosystem.
```
train_df = rb.DatasetForTextClassification(label_model.predict()).to_pandas()
```
Before training our model, we need to extract the training labels from the label model predictions and transform them into a multi-label compatible format.
```
# Create labels in multi-label format
train_df["label"] = train_df.prediction.map(
lambda x: [
# we will use a threshold of 0.5 for the probability
{p[0]: int(p[1] > 0.5) for p in x}[label]
for label in weak_labels.labels
]
)
```
Now, let us define our downstream model and train it.
We will use the [scikit-multilearn library](http://scikit.ml/) to wrap a multinomial **Naive Bayes classifier** that is suitable for classification with discrete features (e.g., word counts for text classification).
The `BinaryRelevance` class transforms the multi-label problem with L labels into L single-label binary classification problems, so in the end we will automatically fit L naive bayes classifiers to our data.
The features for our classifier will be the counts of different word [n-grams](https://en.wikipedia.org/wiki/N-gram): that is, for each example we count the number of contiguous sequences of *n* words, where n goes from 1 to 5.
We extract these features with the `CountVectorizer`.
Finally, we will put our feature extractor and multi-label classifier in a sklearn pipeline that makes fitting and scoring the model a breeze.
```
from skmultilearn.problem_transform import BinaryRelevance
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import Pipeline
# Define our down-stream model
classifier = Pipeline([
('vect', CountVectorizer()),
('clf', BinaryRelevance(MultinomialNB()))
])
```
Training the model is as easy as calling the `fit` method on our pipeline, providing our training text and training labels.
```
import numpy as np
# Fit the down-stream classifier
classifier.fit(
X=train_df.text,
y=np.array(train_df.label.tolist()),
)
```
To score our trained model, we retrieve its predictions of the test set and use sklearn's `classification_report` to get all important classification metrics in a nicely formatted string.
```
# Get predictions for test set
predictions = classifier.predict(
X=[rec.text for rec in weak_labels.records(has_annotation=True)]
)
from sklearn.metrics import classification_report
# Compute metrics
print(classification_report(
weak_labels.annotation(),
predictions,
target_names=weak_labels.labels
))
```
We obtain a micro averaged F1 score of around 0.59, which again is not perfect but can serve as a decent baseline for future improvements.
Looking at the F1 per label, we see that the main problem is the recall of our heuristics and we should either define more of them, or try to find more general ones.
## Summary
In this tutorial we saw how you can use *Rubrix* to tackle multi-label text classification problems with weak supervision.
We showed you how to train two downstream models on two different multi-label datasets using the discovered heuristics.
For the emotion classification task, we trained a full-blown transformer model with Hugging Face, while for the topic classification task, we relied on a more lightweight Bayes classifier from sklearn.
Although the results are not perfect, they can serve as a good baseline for future improvements.
So the next time you encounter a multi-label classification problem, maybe try out weak supervision with *Rubrix* and save some time for your annotation team 😀.
## Next steps
**⭐ Star Rubrix [Github repo](https://github.com/recognai/rubrix) to stay updated.**
**📚 [Rubrix documentation](https://docs.rubrix.ml) for more guides and tutorials.**
**🙋‍♀️ Join the Rubrix community! A good place to start is the [discussion forum](https://github.com/recognai/rubrix/discussions).**
## Appendix A
This appendix summarizes the preprocessing steps for our curated *GoEmotions* dataset.
The goal was to limit the labels, and down-sample single-label annotations to move the focus to multi-label outputs.
```
# load original dataset and check label frequencies
import pandas as pd
import datasets
go_emotions = datasets.load_dataset("go_emotions")
df = go_emotions["test"].to_pandas()
def int2str(i):
    """Map a GoEmotions integer label id to its string name.

    Accepts anything castable to int (numpy ints included); uses the label
    feature of the `go_emotions` "train" split loaded above.
    """
    return go_emotions["train"].features["labels"].feature.int2str(int(i))
label_freq = []
idx_multi = df.labels.map(lambda x: len(x) > 1)
df["is_single"] = df.labels.map(lambda x: 0 if len(x) > 1 else 1)
df[idx_multi].labels.map(lambda x: [label_freq.append(int(l)) for l in x])
pd.Series(label_freq).value_counts();
# limit labels, down-sample single-label annotations and create Rubrix records
import rubrix as rb
def create(split: str) -> pd.DataFrame:
    """Build a shuffled, label-limited, down-sampled subset of a GoEmotions split.

    Keeps only rows whose labels all fall in a fixed id allow-list, then
    combines every multi-label row with a 3x-sized sample biased toward
    single-label rows.
    """
    df = go_emotions[split].to_pandas()
    # 1 marks single-label rows; used below as sampling weights so single-label
    # examples are preferentially drawn.
    df["is_single"] = df.labels.map(lambda x: 0 if len(x) > 1 else 1)
    # Intended labels: ['admiration', 'approval', 'annoyance', 'gratitude', 'curiosity', 'optimism', 'amusement']
    # NOTE(review): the id list below repeats 15 and so has only 6 distinct ids
    # for the 7 names above — presumably one id (amusement?) is missing; confirm
    # against the GoEmotions label mapping before changing the published dataset.
    idx_most_common = df.labels.map(lambda x: all([int(label) in [0, 4, 3, 15, 7, 15, 20] for label in x]))
    df_multi = df[(df.is_single == 0) & idx_most_common]
    # Draw 3x as many rows as there are multi-label ones, weighted toward
    # single-label rows (weight 0 excludes multi-label rows from this sample).
    df_single = df[idx_most_common].sample(3*len(df_multi), weights="is_single", axis=0, random_state=42)
    # Concatenate and shuffle deterministically.
    return pd.concat([df_multi, df_single]).sample(frac=1, random_state=42)
def make_records(row, is_train: bool) -> rb.TextClassificationRecord:
    """Convert one DataFrame row into a Rubrix text-classification record.

    Training records are left unannotated (their labels come from weak
    supervision later); non-training records keep their gold annotations,
    translated from integer ids to label names via `int2str`.
    """
    annotation = [int2str(i) for i in row.labels] if not is_train else None
    return rb.TextClassificationRecord(
        inputs=row.text,
        annotation=annotation,
        multi_label=True,
        id=row.id,
    )
train_recs = create("train").apply(make_records, axis=1, is_train=True)
test_recs = create("test").apply(make_records, axis=1, is_train=False)
records = train_recs.to_list() + test_recs.tolist()
# publish dataset in the Hub
ds_rb = rb.DatasetForTextClassification(records).to_datasets()
ds_rb.push_to_hub("rubrix/go_emotions_multi-label", private=True)
```
## Appendix B
This appendix summarizes the minimal preprocessing done to [this multi-label classification dataset](https://www.kaggle.com/shivanandmn/multilabel-classification-dataset) from Kaggle.
You can download the original data (`train.csv`) following the Kaggle link.
The preprocessing consists of extracting only the title from each research paper, and splitting the data into a train and a validation set.
```
# Extact the title and split the data
import pandas as pd
import rubrix as rb
from sklearn.model_selection import train_test_split
df = pd.read_csv("train.csv")
_, test_id = train_test_split(df.ID, test_size=0.2, random_state=42)
labels = ["Computer Science", "Physics", "Mathematics", "Statistics", "Quantitative Biology", "Quantitative Finance"]
def make_record(row):
    """Build a Rubrix record from one row of the Kaggle research-titles CSV.

    Only rows whose ID landed in the held-out `test_id` split keep their gold
    annotations; all other rows are left unannotated for weak labeling.
    """
    annotation = [label for label in labels if row[label] == 1]
    return rb.TextClassificationRecord(
        inputs=row.TITLE,
        # inputs={"title": row.TITLE, "abstract": row.ABSTRACT},
        annotation=annotation if row.ID in test_id else None,
        multi_label=True,
        id=row.ID,
    )
records = df.apply(make_record, axis=1)
# publish the dataset in the Hub
dataset_rb = rb.DatasetForTextClassification(records.tolist())
dataset_rb.to_datasets().push_to_hub("rubrix/research_titles_multi-label")
```
| github_jupyter |
# Candlestick Three Line Strike
https://www.investopedia.com/terms/t/three-inside-updown.asp
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import talib
import warnings
warnings.filterwarnings("ignore")
# yahoo finance is used to fetch data
import yfinance as yf
yf.pdr_override()
# input
symbol = 'ETSY'
start = '2020-01-01'
end = '2021-10-08'
# Read data
df = yf.download(symbol,start,end)
# View Columns
df.head()
```
## Candlestick with Three Line Strike
```
from matplotlib import dates as mdates
import datetime as dt
dfc = df.copy()
dfc['VolumePositive'] = dfc['Open'] < dfc['Adj Close']
#dfc = dfc.dropna()
dfc = dfc.reset_index()
dfc['Date'] = pd.to_datetime(dfc['Date'])
dfc['Date'] = dfc['Date'].apply(mdates.date2num)
dfc.head()
from mplfinance.original_flavor import candlestick_ohlc
fig = plt.figure(figsize=(14,10))
ax = plt.subplot(2, 1, 1)
candlestick_ohlc(ax,dfc.values, width=0.5, colorup='g', colordown='r', alpha=1.0)
ax.xaxis_date()
ax.xaxis.set_major_formatter(mdates.DateFormatter('%d-%m-%Y'))
ax.grid(True, which='both')
ax.minorticks_on()
axv = ax.twinx()
colors = dfc.VolumePositive.map({True: 'g', False: 'r'})
axv.bar(dfc.Date, dfc['Volume'], color=colors, alpha=0.4)
axv.axes.yaxis.set_ticklabels([])
axv.set_ylim(0, 3*df.Volume.max())
ax.set_title('Stock '+ symbol +' Closing Price')
ax.set_ylabel('Price')
three_line_strike = talib.CDL3LINESTRIKE(df['Open'], df['High'], df['Low'], df['Close'])
three_line_strike = three_line_strike[three_line_strike != 0]
df['three_line_strike'] = talib.CDL3LINESTRIKE(df['Open'], df['High'], df['Low'], df['Close'])
df.loc[df['three_line_strike'] !=0]
df['Adj Close'].loc[df['three_line_strike'] !=0]
df['Adj Close'].loc[df['three_line_strike'] !=0].index
three_line_strike
three_line_strike.index
df
fig = plt.figure(figsize=(20,16))
ax = plt.subplot(2, 1, 1)
candlestick_ohlc(ax,dfc.values, width=0.5, colorup='g', colordown='r', alpha=1.0)
ax.xaxis_date()
ax.xaxis.set_major_formatter(mdates.DateFormatter('%d-%m-%Y'))
ax.grid(True, which='both')
ax.minorticks_on()
axv = ax.twinx()
ax.plot_date(df['Adj Close'].loc[df['three_line_strike'] !=0].index, df['Adj Close'].loc[df['three_line_strike'] !=0],
'Dc', # marker style 'o', color 'g'
fillstyle='none', # circle is not filled (with color)
ms=10.0)
colors = dfc.VolumePositive.map({True: 'g', False: 'r'})
axv.bar(dfc.Date, dfc['Volume'], color=colors, alpha=0.4)
axv.axes.yaxis.set_ticklabels([])
axv.set_ylim(0, 3*df.Volume.max())
ax.set_title('Stock '+ symbol +' Closing Price')
ax.set_ylabel('Price')
```
## Plot Certain dates
```
df = df['2020-05-01':'2020-06-01']
dfc = df.copy()
dfc['VolumePositive'] = dfc['Open'] < dfc['Adj Close']
#dfc = dfc.dropna()
dfc = dfc.reset_index()
dfc['Date'] = pd.to_datetime(dfc['Date'])
dfc['Date'] = dfc['Date'].apply(mdates.date2num)
dfc.head()
fig = plt.figure(figsize=(20,16))
ax = plt.subplot(2, 1, 1)
ax.set_facecolor('black')
candlestick_ohlc(ax,dfc.values, width=0.5, colorup='g', colordown='r', alpha=1.0)
ax.xaxis_date()
ax.xaxis.set_major_formatter(mdates.DateFormatter('%d-%m-%Y'))
#ax.grid(True, which='both')
#ax.minorticks_on()
axv = ax.twinx()
ax.plot_date(df['Adj Close'].loc[df['three_line_strike'] !=0].index, df['Adj Close'].loc[df['three_line_strike'] !=0],
'vy', # marker style 'o', color 'g'
fillstyle='none', # circle is not filled (with color)
ms=30.0)
colors = dfc.VolumePositive.map({True: 'g', False: 'r'})
axv.bar(dfc.Date, dfc['Volume'], color=colors, alpha=0.4)
axv.axes.yaxis.set_ticklabels([])
axv.set_ylim(0, 3*df.Volume.max())
ax.set_title('Stock '+ symbol +' Closing Price')
ax.set_ylabel('Price')
```
# Highlight Candlestick
```
from matplotlib.dates import date2num
from datetime import datetime
fig = plt.figure(figsize=(20,16))
ax = plt.subplot(2, 1, 1)
candlestick_ohlc(ax,dfc.values, width=0.5, colorup='g', colordown='r', alpha=1.0)
ax.xaxis_date()
ax.xaxis.set_major_formatter(mdates.DateFormatter('%d-%m-%Y'))
#ax.grid(True, which='both')
#ax.minorticks_on()
axv = ax.twinx()
ax.axvspan(date2num(datetime(2020,5,18)), date2num(datetime(2020,5,21)),
label="Three Line Strike",color="green", alpha=0.3)
ax.legend()
colors = dfc.VolumePositive.map({True: 'g', False: 'r'})
axv.bar(dfc.Date, dfc['Volume'], color=colors, alpha=0.4)
axv.axes.yaxis.set_ticklabels([])
axv.set_ylim(0, 3*df.Volume.max())
ax.set_title('Stock '+ symbol +' Closing Price')
ax.set_ylabel('Price')
```
| github_jupyter |
# Estimating School Tour Scheduling
This notebook illustrates how to re-estimate the mandatory tour scheduling component for ActivitySim. This process
includes running ActivitySim in estimation mode to read household travel survey files and write out
the estimation data bundles used in this notebook. To review how to do so, please visit the other
notebooks in this directory.
# Load libraries
```
import os
import larch # !conda install larch -c conda-forge # for estimation
import pandas as pd
```
We'll work in our `test` directory, where ActivitySim has saved the estimation data bundles.
```
os.chdir('test')
```
# Load data and prep model for estimation
```
modelname = "mandatory_tour_scheduling_school"
from activitysim.estimation.larch import component_model
model, data = component_model(modelname, return_data=True)
```
# Review data loaded from the EDB
The next (optional) step is to review the EDB, including the coefficients, utilities specification, and chooser and alternative data.
## Coefficients
```
data.coefficients
```
## Utility specification
```
data.spec
```
## Chooser data
```
data.chooser_data
```
## Alternatives data
```
data.alt_values
```
# Estimate
With the model setup for estimation, the next step is to estimate the model coefficients. Make sure to use a sufficiently large household sample and set of zones to avoid an over-specified model, which does not have a numerically stable likelihood maximizing solution. Larch has built-in estimation methods including BHHH, and also offers access to more advanced general purpose non-linear optimizers in the `scipy` package, including SLSQP, which allows for bounds and constraints on parameters. BHHH is the default and typically runs faster, but does not follow constraints on parameters.
```
model.estimate()
```
### Estimated coefficients
```
model.parameter_summary()
```
# Output Estimation Results
```
from activitysim.estimation.larch import update_coefficients
result_dir = data.edb_directory/"estimated"
update_coefficients(
model, data, result_dir,
output_file=f"{modelname}_coefficients_revised.csv",
);
```
### Write the model estimation report, including coefficient t-statistic and log likelihood
```
model.to_xlsx(
result_dir/f"{modelname}_model_estimation.xlsx",
data_statistics=False,
)
```
# Next Steps
The final step is to either manually or automatically copy the `*_coefficients_revised.csv` file to the configs folder, rename it to `*_coefficients.csv`, and run ActivitySim in simulation mode.
```
pd.read_csv(result_dir/f"{modelname}_coefficients_revised.csv")
```
| github_jupyter |
# Initialization
Welcome to the first assignment of Improving Deep Neural Networks!
Training your neural network requires specifying an initial value of the weights. A well-chosen initialization method helps the learning process.
If you completed the previous course of this specialization, you probably followed the instructions for weight initialization, and seen that it's worked pretty well so far. But how do you choose the initialization for a new neural network? In this notebook, you'll try out a few different initializations, including random, zeros, and He initialization, and see how each leads to different results.
A well-chosen initialization can:
- Speed up the convergence of gradient descent
- Increase the odds of gradient descent converging to a lower training (and generalization) error
Let's get started!
## Table of Contents
- [1 - Packages](#1)
- [2 - Loading the Dataset](#2)
- [3 - Neural Network Model](#3)
- [4 - Zero Initialization](#4)
- [Exercise 1 - initialize_parameters_zeros](#ex-1)
- [5 - Random Initialization](#5)
- [Exercise 2 - initialize_parameters_random](#ex-2)
- [6 - He Initialization](#6)
- [Exercise 3 - initialize_parameters_he](#ex-3)
- [7 - Conclusions](#7)
<a name='1'></a>
## 1 - Packages
```
import numpy as np
import matplotlib.pyplot as plt
import sklearn
import sklearn.datasets
from public_tests import *
from init_utils import sigmoid, relu, compute_loss, forward_propagation, backward_propagation
from init_utils import update_parameters, predict, load_dataset, plot_decision_boundary, predict_dec
%matplotlib inline
plt.rcParams['figure.figsize'] = (7.0, 4.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
%load_ext autoreload
%autoreload 2
# load image dataset: blue/red dots in circles
# train_X, train_Y, test_X, test_Y = load_dataset()
```
<a name='2'></a>
## 2 - Loading the Dataset
```
train_X, train_Y, test_X, test_Y = load_dataset()
```
For this classifier, you want to separate the blue dots from the red dots.
<a name='3'></a>
## 3 - Neural Network Model
You'll use a 3-layer neural network (already implemented for you). These are the initialization methods you'll experiment with:
- *Zeros initialization* -- setting `initialization = "zeros"` in the input argument.
- *Random initialization* -- setting `initialization = "random"` in the input argument. This initializes the weights to large random values.
- *He initialization* -- setting `initialization = "he"` in the input argument. This initializes the weights to random values scaled according to a paper by He et al., 2015.
**Instructions**: Read over the code below, and run it. In the next part, you'll implement the three initialization methods that this `model()` calls.
```
def model(X, Y, learning_rate = 0.01, num_iterations = 15000, print_cost = True, initialization = "he"):
    """
    Implements a three-layer neural network: LINEAR->RELU->LINEAR->RELU->LINEAR->SIGMOID.

    Arguments:
    X -- input data, of shape (2, number of examples)
    Y -- true "label" vector (containing 0 for red dots; 1 for blue dots), of shape (1, number of examples)
    learning_rate -- learning rate for gradient descent
    num_iterations -- number of iterations to run gradient descent
    print_cost -- if True, print the cost every 1000 iterations
    initialization -- flag to choose which initialization to use ("zeros","random" or "he")

    Returns:
    parameters -- parameters learnt by the model
    """
    costs = []  # cost history, sampled every 1000 iterations
    layers_dims = [X.shape[0], 10, 5, 1]

    # Choose the requested weight-initialization scheme.
    if initialization == "zeros":
        parameters = initialize_parameters_zeros(layers_dims)
    elif initialization == "random":
        parameters = initialize_parameters_random(layers_dims)
    elif initialization == "he":
        parameters = initialize_parameters_he(layers_dims)

    # Gradient-descent loop.
    for iteration in range(num_iterations):
        # Forward pass: LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID.
        a3, cache = forward_propagation(X, parameters)
        cost = compute_loss(a3, Y)
        grads = backward_propagation(X, Y, cache)
        parameters = update_parameters(parameters, grads, learning_rate)

        # Report and record the cost every 1000 iterations.
        if print_cost and iteration % 1000 == 0:
            print("Cost after iteration {}: {}".format(iteration, cost))
            costs.append(cost)

    # Visualize the learning curve.
    plt.plot(costs)
    plt.ylabel('cost')
    plt.xlabel('iterations (per hundreds)')
    plt.title("Learning rate =" + str(learning_rate))
    plt.show()

    return parameters
```
<a name='4'></a>
## 4 - Zero Initialization
There are two types of parameters to initialize in a neural network:
- the weight matrices $(W^{[1]}, W^{[2]}, W^{[3]}, ..., W^{[L-1]}, W^{[L]})$
- the bias vectors $(b^{[1]}, b^{[2]}, b^{[3]}, ..., b^{[L-1]}, b^{[L]})$
<a name='ex-1'></a>
### Exercise 1 - initialize_parameters_zeros
Implement the following function to initialize all parameters to zeros. You'll see later that this does not work well since it fails to "break symmetry," but try it anyway and see what happens. Use `np.zeros((..,..))` with the correct shapes.
```
# GRADED FUNCTION: initialize_parameters_zeros
def initialize_parameters_zeros(layers_dims):
    """
    Arguments:
    layer_dims -- python array (list) containing the size of each layer.

    Returns:
    parameters -- python dictionary containing your parameters "W1", "b1", ..., "WL", "bL":
                    W1 -- weight matrix of shape (layers_dims[1], layers_dims[0])
                    b1 -- bias vector of shape (layers_dims[1], 1)
                    ...
                    WL -- weight matrix of shape (layers_dims[L], layers_dims[L-1])
                    bL -- bias vector of shape (layers_dims[L], 1)
    """
    parameters = {}
    depth = len(layers_dims)  # number of layers in the network

    # Every weight matrix and bias vector starts at exactly zero, which is the
    # point of this (deliberately flawed) initialization scheme.
    for layer in range(1, depth):
        fan_out, fan_in = layers_dims[layer], layers_dims[layer - 1]
        parameters["W" + str(layer)] = np.zeros((fan_out, fan_in))
        parameters["b" + str(layer)] = np.zeros((fan_out, 1))

    return parameters
parameters = initialize_parameters_zeros([3, 2, 1])
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
initialize_parameters_zeros_test(initialize_parameters_zeros)
```
Run the following code to train your model on 15,000 iterations using zeros initialization.
```
parameters = model(train_X, train_Y, initialization = "zeros")
print ("On the train set:")
predictions_train = predict(train_X, train_Y, parameters)
print ("On the test set:")
predictions_test = predict(test_X, test_Y, parameters)
```
The performance is terrible, the cost doesn't decrease, and the algorithm performs no better than random guessing. Why? Take a look at the details of the predictions and the decision boundary:
```
print ("predictions_train = " + str(predictions_train))
print ("predictions_test = " + str(predictions_test))
plt.title("Model with Zeros initialization")
axes = plt.gca()
axes.set_xlim([-1.5,1.5])
axes.set_ylim([-1.5,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
```
__Note__: For sake of simplicity calculations below are done using only one example at a time.
Since the weights and biases are zero, multiplying by the weights creates the zero vector which gives 0 when the activation function is ReLU. As `z = 0`
$$a = ReLU(z) = max(0, z) = 0$$
At the classification layer, where the activation function is sigmoid you then get (for either input):
$$\sigma(z) = \frac{1}{ 1 + e^{-(z)}} = \frac{1}{2} = y_{pred}$$
As for every example you are getting a 0.5 chance of it being true our cost function becomes helpless in adjusting the weights.
Your loss function:
$$ \mathcal{L}(a, y) = - y \ln(y_{pred}) - (1-y) \ln(1-y_{pred})$$
For `y=1`, `y_pred=0.5` it becomes:
$$ \mathcal{L}(0.5, 1) = - (1) \ln(\frac{1}{2}) = 0.6931471805599453$$
For `y=0`, `y_pred=0.5` it becomes:
$$ \mathcal{L}(0.5, 0) = - (1) \ln(\frac{1}{2}) = 0.6931471805599453$$
As you can see with the prediction being 0.5 whether the actual (`y`) value is 1 or 0 you get the same loss value for both, so none of the weights get adjusted and you are stuck with the same old value of the weights.
This is why you can see that the model is predicting 0 for every example! No wonder it's doing so badly.
In general, initializing all the weights to zero results in the network failing to break symmetry. This means that every neuron in each layer will learn the same thing, so you might as well be training a neural network with $n^{[l]}=1$ for every layer. This way, the network is no more powerful than a linear classifier like logistic regression.
<font color='blue'>
**What you should remember**:
- The weights $W^{[l]}$ should be initialized randomly to break symmetry.
- However, it's okay to initialize the biases $b^{[l]}$ to zeros. Symmetry is still broken so long as $W^{[l]}$ is initialized randomly.
<a name='5'></a>
## 5 - Random Initialization
To break symmetry, initialize the weights randomly. Following random initialization, each neuron can then proceed to learn a different function of its inputs. In this exercise, you'll see what happens when the weights are initialized randomly, but to very large values.
<a name='ex-2'></a>
### Exercise 2 - initialize_parameters_random
Implement the following function to initialize your weights to large random values (scaled by \*10) and your biases to zeros. Use `np.random.randn(..,..) * 10` for weights and `np.zeros((.., ..))` for biases. You're using a fixed `np.random.seed(..)` to make sure your "random" weights match ours, so don't worry if running your code several times always gives you the same initial values for the parameters.
```
# GRADED FUNCTION: initialize_parameters_random
def initialize_parameters_random(layers_dims):
    """
    Arguments:
    layer_dims -- python array (list) containing the size of each layer.

    Returns:
    parameters -- python dictionary containing your parameters "W1", "b1", ..., "WL", "bL":
                    W1 -- weight matrix of shape (layers_dims[1], layers_dims[0])
                    b1 -- bias vector of shape (layers_dims[1], 1)
                    ...
                    WL -- weight matrix of shape (layers_dims[L], layers_dims[L-1])
                    bL -- bias vector of shape (layers_dims[L], 1)
    """
    np.random.seed(3)  # fixed seed so the "random" numbers are the same as ours
    parameters = {}
    depth = len(layers_dims)  # integer representing the number of layers

    for layer in range(1, depth):
        fan_out, fan_in = layers_dims[layer], layers_dims[layer - 1]
        # Deliberately oversized weights (*10) to demonstrate the downside of
        # initializing with large random values; biases stay at zero.
        parameters["W" + str(layer)] = np.random.randn(fan_out, fan_in) * 10
        parameters["b" + str(layer)] = np.zeros((fan_out, 1))

    return parameters
parameters = initialize_parameters_random([3, 2, 1])
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
initialize_parameters_random_test(initialize_parameters_random)
```
Run the following code to train your model on 15,000 iterations using random initialization.
```
parameters = model(train_X, train_Y, initialization = "random")
print ("On the train set:")
predictions_train = predict(train_X, train_Y, parameters)
print ("On the test set:")
predictions_test = predict(test_X, test_Y, parameters)
```
If you see "inf" as the cost after the iteration 0, this is because of numerical roundoff. A more numerically sophisticated implementation would fix this, but for the purposes of this notebook, it isn't really worth worrying about.
In any case, you've now broken the symmetry, and this gives noticeably better accuracy than before. The model is no longer outputting all 0s. Progress!
```
print (predictions_train)
print (predictions_test)
plt.title("Model with large random initialization")
axes = plt.gca()
axes.set_xlim([-1.5,1.5])
axes.set_ylim([-1.5,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
```
**Observations**:
- The cost starts very high. This is because with large random-valued weights, the last activation (sigmoid) outputs results that are very close to 0 or 1 for some examples, and when it gets that example wrong it incurs a very high loss for that example. Indeed, when $\log(a^{[3]}) = \log(0)$, the loss goes to infinity.
- Poor initialization can lead to vanishing/exploding gradients, which also slows down the optimization algorithm.
- If you train this network longer you will see better results, but initializing with overly large random numbers slows down the optimization.
<font color='blue'>
**In summary**:
- Initializing weights to very large random values doesn't work well.
- Initializing with small random values should do better. The important question is, how small should these random values be? Let's find out next!
<font color='black'>
**Optional Read:**
The main difference between Gaussian variable (`numpy.random.randn()`) and uniform random variable is the distribution of the generated random numbers:
- numpy.random.rand() produces numbers in a [uniform distribution](https://raw.githubusercontent.com/jahnog/deeplearning-notes/master/Course2/images/rand.jpg).
- and numpy.random.randn() produces numbers in a [normal distribution](https://raw.githubusercontent.com/jahnog/deeplearning-notes/master/Course2/images/randn.jpg).
When used for weight initialization, randn() helps most of the weights avoid being close to the extremes, allocating most of them near the center of the range.
An intuitive way to see it is, for example, if you take the [sigmoid() activation function](https://raw.githubusercontent.com/jahnog/deeplearning-notes/master/Course2/images/sigmoid.jpg).
You’ll remember that the slope near 0 or near 1 is extremely small, so the weights near those extremes will converge much more slowly to the solution, and having most of them near the center will speed the convergence.
<a name='6'></a>
## 6 - He Initialization
Finally, try "He Initialization"; this is named for the first author of He et al., 2015. (If you have heard of "Xavier initialization", this is similar except Xavier initialization uses a scaling factor for the weights $W^{[l]}$ of `sqrt(1./layers_dims[l-1])` where He initialization would use `sqrt(2./layers_dims[l-1])`.)
<a name='ex-3'></a>
### Exercise 3 - initialize_parameters_he
Implement the following function to initialize your parameters with He initialization. This function is similar to the previous `initialize_parameters_random(...)`. The only difference is that instead of multiplying `np.random.randn(..,..)` by 10, you will multiply it by $\sqrt{\frac{2}{\text{dimension of the previous layer}}}$, which is what He initialization recommends for layers with a ReLU activation.
```
# GRADED FUNCTION: initialize_parameters_he
def initialize_parameters_he(layers_dims):
    """
    Initialize network parameters with He initialization.

    Arguments:
    layers_dims -- python array (list) containing the size of each layer.

    Returns:
    parameters -- python dictionary containing your parameters "W1", "b1", ..., "WL", "bL":
                    W1 -- weight matrix of shape (layers_dims[1], layers_dims[0])
                    b1 -- bias vector of shape (layers_dims[1], 1)
                    ...
                    WL -- weight matrix of shape (layers_dims[L], layers_dims[L-1])
                    bL -- bias vector of shape (layers_dims[L], 1)
    """
    np.random.seed(3)
    parameters = {}
    num_layers = len(layers_dims) - 1  # number of layers (input layer excluded)

    for layer in range(1, num_layers + 1):
        fan_in = layers_dims[layer - 1]
        # He et al. (2015): scale a standard-normal draw by sqrt(2 / fan_in),
        # the recommended variance for layers with a ReLU activation.
        parameters[f'W{layer}'] = np.random.randn(layers_dims[layer], fan_in) * np.sqrt(2.0 / fan_in)
        # Biases start at zero; symmetry is already broken by the random weights.
        parameters[f'b{layer}'] = np.zeros((layers_dims[layer], 1))

    return parameters
parameters = initialize_parameters_he([2, 4, 1])
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
initialize_parameters_he_test(initialize_parameters_he)
# parameters
```
**Expected output**
```
W1 = [[ 1.78862847 0.43650985]
[ 0.09649747 -1.8634927 ]
[-0.2773882 -0.35475898]
[-0.08274148 -0.62700068]]
b1 = [[0.] [0.] [0.] [0.]]
W2 = [[-0.03098412 -0.33744411 -0.92904268 0.62552248]]
b2 = [[0.]]
```
Run the following code to train your model on 15,000 iterations using He initialization.
```
parameters = model(train_X, train_Y, initialization = "he")
print ("On the train set:")
predictions_train = predict(train_X, train_Y, parameters)
print ("On the test set:")
predictions_test = predict(test_X, test_Y, parameters)
plt.title("Model with He initialization")
axes = plt.gca()
axes.set_xlim([-1.5,1.5])
axes.set_ylim([-1.5,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
```
**Observations**:
- The model with He initialization separates the blue and the red dots very well in a small number of iterations.
<a name='7'></a>
## 7 - Conclusions
You've tried three different types of initializations. For the same number of iterations and same hyperparameters, the comparison is:
<table>
<tr>
<td>
<b>Model</b>
</td>
<td>
<b>Train accuracy</b>
</td>
<td>
<b>Problem/Comment</b>
</td>
</tr>
<tr>
<td>
3-layer NN with zeros initialization
</td>
<td>
50%
</td>
<td>
fails to break symmetry
</td>
</tr>
<tr>
<td>
3-layer NN with large random initialization
</td>
<td>
83%
</td>
<td>
too large weights
</td>
</tr>
<tr>
<td>
3-layer NN with He initialization
</td>
<td>
99%
</td>
<td>
recommended method
</td>
</tr>
</table>
**Congratulations**! You've completed this notebook on Initialization.
Here's a quick recap of the main takeaways:
<font color='blue'>
- Different initializations lead to very different results
- Random initialization is used to break symmetry and make sure different hidden units can learn different things
- Resist initializing to values that are too large!
- He initialization works well for networks with ReLU activations
| github_jupyter |
<a href="https://colab.research.google.com/github/jeffheaton/t81_558_deep_learning/blob/master/t81_558_class_05_4_dropout.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# T81-558: Applications of Deep Neural Networks
**Module 5: Regularization and Dropout**
* Instructor: [Jeff Heaton](https://sites.wustl.edu/jeffheaton/), McKelvey School of Engineering, [Washington University in St. Louis](https://engineering.wustl.edu/Programs/Pages/default.aspx)
* For more information visit the [class website](https://sites.wustl.edu/jeffheaton/t81-558/).
# Module 5 Material
* Part 5.1: Introduction to Regularization: Ridge and Lasso [[Video]](https://www.youtube.com/watch?v=jfgRtCYjoBs&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_05_1_reg_ridge_lasso.ipynb)
* Part 5.2: Using K-Fold Cross Validation with Keras [[Video]](https://www.youtube.com/watch?v=maiQf8ray_s&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_05_2_kfold.ipynb)
* Part 5.3: Using L1 and L2 Regularization with Keras to Decrease Overfitting [[Video]](https://www.youtube.com/watch?v=JEWzWv1fBFQ&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_05_3_keras_l1_l2.ipynb)
* **Part 5.4: Drop Out for Keras to Decrease Overfitting** [[Video]](https://www.youtube.com/watch?v=bRyOi0L6Rs8&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_05_4_dropout.ipynb)
* Part 5.5: Benchmarking Keras Deep Learning Regularization Techniques [[Video]](https://www.youtube.com/watch?v=1NLBwPumUAs&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_05_5_bootstrap.ipynb)
# Google CoLab Instructions
The following code ensures that Google CoLab is running the correct version of TensorFlow.
```
try:
    # %tensorflow_version is a Colab-only IPython magic; outside Colab it
    # raises, which is how we detect the environment.
    %tensorflow_version 2.x
    COLAB = True
    print("Note: using Google CoLab")
except:
    print("Note: not using Google CoLab")
    COLAB = False
# Part 5.4: Drop Out for Keras to Decrease Overfitting
Hinton, Srivastava, Krizhevsky, Sutskever, & Salakhutdinov (2012) introduced the dropout regularization algorithm. [[Cite:srivastava2014dropout]](http://www.jmlr.org/papers/volume15/nandan14a/nandan14a.pdf) Although dropout works in a different way than L1 and L2, it accomplishes the same goal—the prevention of overfitting. However, the algorithm goes about the task by actually removing neurons and connections—at least temporarily. Unlike L1 and L2, no weight penalty is added. Dropout does not directly seek to train small weights.
Dropout works by causing hidden neurons of the neural network to be unavailable during part of the training. Dropping part of the neural network causes the remaining portion to be trained to still achieve a good score even without the dropped neurons. This decreases coadaption between neurons, which results in less overfitting.
Most neural network frameworks implement dropout as a separate layer. Dropout layers function as a regular, densely connected neural network layer. The only difference is that the dropout layers will periodically drop some of their neurons during training. You can use dropout layers on regular feedforward neural networks.
The program implements a dropout layer as a dense layer that can eliminate some of its neurons. Contrary to popular belief about the dropout layer, the program does not permanently remove these discarded neurons. A dropout layer does not lose any of its neurons during the training process, and it will still have exactly the same number of neurons after training. In this way, the program only temporarily masks the neurons rather than dropping them.
The following shows how a dropout layer might be situated with other layers:

The discarded neurons and their connections are shown as dashed lines. The input layer has two input neurons as well as a bias neuron. The second layer is a dense layer with three neurons as well as a bias neuron. The third layer is a dropout layer with six regular neurons even though the program has dropped 50% of them. While the program drops these neurons, it neither calculates nor trains them. However, the final neural network will use all of these neurons for the output. As previously mentioned, the program only temporarily discards the neurons.
During subsequent training iterations, the program chooses different sets of neurons from the dropout layer. Although we chose a probability of 50% for dropout, the computer will not necessarily drop three neurons. It is as if we flipped a coin for each of the dropout candidate neurons to choose if that neuron was dropped out. You must know that the program should never drop the bias neuron. Only the regular neurons on a dropout layer are candidates.
The implementation of the training algorithm influences the process of discarding neurons. The dropout set frequently changes once per training iteration or batch. The program can also provide intervals where all neurons are present. Some neural network frameworks give additional hyper-parameters to allow you to specify exactly the rate of this interval.
Why dropout is capable of decreasing overfitting is a common question. The answer is that dropout can reduce the chance of a codependency developing between two neurons. Two neurons that develop a codependency will not be able to operate effectively when one is dropped out. As a result, the neural network can no longer rely on the presence of every neuron, and it trains accordingly. This characteristic decreases its ability to memorize the information presented to it, thereby forcing generalization.
Dropout also decreases overfitting by forcing a bootstrapping process upon the neural network. Bootstrapping is a very common ensemble technique. We will discuss ensembling in greater detail in Chapter 16, “Modeling with Neural Networks.” Basically, ensembling is a technique of machine learning that combines multiple models to produce a better result than those achieved by individual models. Ensemble is a term that originates from the musical ensembles in which the final music product that the audience hears is the combination of many instruments.
Bootstrapping is one of the most simple ensemble techniques. The programmer using bootstrapping simply trains a number of neural networks to perform exactly the same task. However, each of these neural networks will perform differently because of some training techniques and the random numbers used in the neural network weight initialization. The difference in weights causes the performance variance. The output from this ensemble of neural networks becomes the average output of the members taken together. This process decreases overfitting through the consensus of differently trained neural networks.
Dropout works somewhat like bootstrapping. You might think of each neural network that results from a different set of neurons being dropped out as an individual member in an ensemble. As training progresses, the program creates more neural networks in this way. However, dropout does not require the same amount of processing as does bootstrapping. The new neural networks created are temporary; they exist only for a training iteration. The final result is also a single neural network, rather than an ensemble of neural networks to be averaged together.
Animation that shows how [dropout works](https://yusugomori.com/projects/deep-learning/dropout-relu)
```
import pandas as pd
from scipy.stats import zscore

# Read the data set (synthetic customer dataset used throughout the course)
df = pd.read_csv(
    "https://data.heatonresearch.com/data/t81-558/jh-simple-dataset.csv",
    na_values=['NA','?'])

# Generate dummies for job (one-hot encode the categorical column, then drop the original)
df = pd.concat([df,pd.get_dummies(df['job'],prefix="job")],axis=1)
df.drop('job', axis=1, inplace=True)

# Generate dummies for area
df = pd.concat([df,pd.get_dummies(df['area'],prefix="area")],axis=1)
df.drop('area', axis=1, inplace=True)

# Missing values for income: impute with the median (robust to outliers)
med = df['income'].median()
df['income'] = df['income'].fillna(med)

# Standardize ranges: z-score each numeric feature so scales are comparable
df['income'] = zscore(df['income'])
df['aspect'] = zscore(df['aspect'])
df['save_rate'] = zscore(df['save_rate'])
df['age'] = zscore(df['age'])
df['subscriptions'] = zscore(df['subscriptions'])

# Convert to numpy - Classification
# 'product' is the target; 'id' is an identifier with no predictive signal
x_columns = df.columns.drop('product').drop('id')
x = df[x_columns].values
dummies = pd.get_dummies(df['product']) # Classification: one-hot encode the target
products = dummies.columns
y = dummies.values
########################################
# Keras with dropout for Regression
########################################
import pandas as pd
import os
import numpy as np
from sklearn import metrics
from sklearn.model_selection import KFold
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation, Dropout
from tensorflow.keras import regularizers
# Cross-validate: 5 folds, fixed seed for reproducible splits
kf = KFold(5, shuffle=True, random_state=42)

oos_y = []     # out-of-sample (held-out) targets, accumulated across folds
oos_pred = []  # out-of-sample predictions, accumulated across folds
fold = 0

for train, test in kf.split(x):
    fold+=1
    print(f"Fold #{fold}")

    x_train = x[train]
    y_train = y[train]
    x_test = x[test]
    y_test = y[test]

    #kernel_regularizer=regularizers.l2(0.01),

    # A fresh model is built per fold so folds do not leak into each other
    model = Sequential()
    model.add(Dense(50, input_dim=x.shape[1], activation='relu')) # Hidden 1
    model.add(Dropout(0.5))  # drop 50% of hidden-1 activations each training step
    model.add(Dense(25, activation='relu', activity_regularizer=regularizers.l1(1e-4))) # Hidden 2
    #model.add(Dropout(0.5)) # Usually do not add a dropout after final hidden layer
    model.add(Dense(y.shape[1],activation='softmax')) # Output
    model.compile(loss='categorical_crossentropy', optimizer='adam')
    model.fit(x_train,y_train,validation_data=(x_test,y_test),verbose=0,epochs=500)

    pred = model.predict(x_test)

    oos_y.append(y_test)
    pred = np.argmax(pred,axis=1) # raw probabilities to chosen class (highest probability)
    oos_pred.append(pred)

    # Measure this fold's accuracy
    y_compare = np.argmax(y_test,axis=1) # For accuracy calculation
    score = metrics.accuracy_score(y_compare, pred)
    print(f"Fold score (accuracy): {score}")

# Build the oos prediction list and calculate the error.
oos_y = np.concatenate(oos_y)
oos_pred = np.concatenate(oos_pred)
oos_y_compare = np.argmax(oos_y,axis=1) # For accuracy calculation

score = metrics.accuracy_score(oos_y_compare, oos_pred)
print(f"Final score (accuracy): {score}")

# Write the cross-validated prediction
oos_y = pd.DataFrame(oos_y)
oos_pred = pd.DataFrame(oos_pred)
oosDF = pd.concat( [df, oos_y, oos_pred],axis=1 )
#oosDF.to_csv(filename_write,index=False)
| github_jupyter |
# BERT (from HuggingFace Transformers) for Text Extraction
**Author:** [Apoorv Nandan](https://twitter.com/NandanApoorv)<br>
**Date created:** 2020/05/23<br>
**Last modified:** 2020/05/23<br>
**Description:** Fine tune pretrained BERT from HuggingFace Transformers on SQuAD.
## Introduction
This demonstration uses SQuAD (Stanford Question-Answering Dataset).
In SQuAD, an input consists of a question, and a paragraph for context.
The goal is to find the span of text in the paragraph that answers the question.
We evaluate our performance on this data with the "Exact Match" metric,
which measures the percentage of predictions that exactly match any one of the
ground-truth answers.
We fine-tune a BERT model to perform this task as follows:
1. Feed the context and the question as inputs to BERT.
2. Take two vectors S and T with dimensions equal to that of
hidden states in BERT.
3. Compute the probability of each token being the start and end of
the answer span. The probability of a token being the start of
the answer is given by a dot product between S and the representation
of the token in the last layer of BERT, followed by a softmax over all tokens.
The probability of a token being the end of the answer is computed
similarly with the vector T.
4. Fine-tune BERT and learn S and T along the way.
**References:**
- [BERT](https://arxiv.org/pdf/1810.04805.pdf)
- [SQuAD](https://arxiv.org/abs/1606.05250)
## Setup
```
import os
import re
import json
import string
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tokenizers import BertWordPieceTokenizer
from transformers import BertTokenizer, TFBertModel, BertConfig
max_len = 384
configuration = BertConfig() # default parameters and configuration for BERT
```
## Set-up BERT tokenizer
```
# Save the slow pretrained tokenizer
slow_tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
save_path = "bert_base_uncased/"
if not os.path.exists(save_path):
os.makedirs(save_path)
slow_tokenizer.save_pretrained(save_path)
# Load the fast tokenizer from saved file
tokenizer = BertWordPieceTokenizer("bert_base_uncased/vocab.txt", lowercase=True)
```
## Load the data
```
train_data_url = "https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v1.1.json"
train_path = keras.utils.get_file("train.json", train_data_url)
eval_data_url = "https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v1.1.json"
eval_path = keras.utils.get_file("eval.json", eval_data_url)
```
## Preprocess the data
1. Go through the JSON file and store every record as a `SquadExample` object.
2. Go through each `SquadExample` and create `x_train, y_train, x_eval, y_eval`.
```
class SquadExample:
    """A single SQuAD record: a question, its context paragraph, and the answer span.

    `preprocess()` turns the record into fixed-length BERT inputs. It relies on
    the notebook-level `tokenizer` (BertWordPieceTokenizer) and `max_len`
    globals defined earlier.
    """

    def __init__(self, question, context, start_char_idx, answer_text, all_answers):
        self.question = question
        self.context = context
        self.start_char_idx = start_char_idx  # character index where the answer starts in `context`
        self.answer_text = answer_text
        self.all_answers = all_answers  # every ground-truth answer string (used for Exact Match)
        self.skip = False  # set True when this example cannot be encoded and must be ignored

    def preprocess(self):
        """Encode the example as BERT inputs; set `self.skip = True` if it cannot be used.

        On success, sets: input_ids / token_type_ids / attention_mask (padded to
        `max_len`), start_token_idx / end_token_idx (answer span in token space),
        and context_token_to_char (token -> character offsets, for decoding).
        """
        context = self.context
        question = self.question
        answer_text = self.answer_text
        start_char_idx = self.start_char_idx

        # Clean context, answer and question (collapse whitespace runs to single spaces)
        context = " ".join(str(context).split())
        question = " ".join(str(question).split())
        answer = " ".join(str(answer_text).split())

        # Find end character index of answer in context
        end_char_idx = start_char_idx + len(answer)
        if end_char_idx >= len(context):
            # Answer runs past the (cleaned) context -- offsets are unusable
            self.skip = True
            return

        # Mark the character indexes in context that are in answer
        is_char_in_ans = [0] * len(context)
        for idx in range(start_char_idx, end_char_idx):
            is_char_in_ans[idx] = 1

        # Tokenize context
        tokenized_context = tokenizer.encode(context)

        # Find tokens that were created from answer characters
        ans_token_idx = []
        for idx, (start, end) in enumerate(tokenized_context.offsets):
            if sum(is_char_in_ans[start:end]) > 0:
                ans_token_idx.append(idx)

        if len(ans_token_idx) == 0:
            # No token overlaps the answer characters (e.g. inconsistent offsets)
            self.skip = True
            return

        # Find start and end token index for tokens from answer
        start_token_idx = ans_token_idx[0]
        end_token_idx = ans_token_idx[-1]

        # Tokenize question
        tokenized_question = tokenizer.encode(question)

        # Create inputs: [CLS] context [SEP] question [SEP]. The question's own
        # leading [CLS] is dropped (ids[1:]); segment id 1 marks question tokens.
        input_ids = tokenized_context.ids + tokenized_question.ids[1:]
        token_type_ids = [0] * len(tokenized_context.ids) + [1] * len(
            tokenized_question.ids[1:]
        )
        attention_mask = [1] * len(input_ids)

        # Pad and create attention masks.
        # Skip if truncation is needed
        padding_length = max_len - len(input_ids)
        if padding_length > 0:  # pad
            input_ids = input_ids + ([0] * padding_length)
            attention_mask = attention_mask + ([0] * padding_length)
            token_type_ids = token_type_ids + ([0] * padding_length)
        elif padding_length < 0:  # skip
            self.skip = True
            return

        self.input_ids = input_ids
        self.token_type_ids = token_type_ids
        self.attention_mask = attention_mask
        self.start_token_idx = start_token_idx
        self.end_token_idx = end_token_idx
        self.context_token_to_char = tokenized_context.offsets
with open(train_path) as f:
raw_train_data = json.load(f)
with open(eval_path) as f:
raw_eval_data = json.load(f)
def create_squad_examples(raw_data):
    """Build preprocessed `SquadExample` objects from a raw SQuAD JSON dict.

    Walks the articles -> paragraphs -> question/answer records, constructs a
    `SquadExample` per QA pair (using the first answer as the target span),
    runs its `preprocess()`, and returns all examples in document order.
    """
    examples = []
    for article in raw_data["data"]:
        for paragraph in article["paragraphs"]:
            context = paragraph["context"]
            for qa in paragraph["qas"]:
                answers = qa["answers"]
                example = SquadExample(
                    qa["question"],
                    context,
                    answers[0]["answer_start"],
                    answers[0]["text"],
                    [ans["text"] for ans in answers],
                )
                example.preprocess()
                examples.append(example)
    return examples
def create_inputs_targets(squad_examples):
    """Stack the encoded fields of non-skipped examples into model inputs/targets.

    Arguments:
    squad_examples -- iterable of objects carrying `skip`, `input_ids`,
        `token_type_ids`, `attention_mask`, `start_token_idx`, and
        `end_token_idx` attributes (see `SquadExample.preprocess`).

    Returns:
    x -- [input_ids, token_type_ids, attention_mask] as numpy arrays
    y -- [start_token_idx, end_token_idx] as numpy arrays
    """
    dataset_dict = {
        "input_ids": [],
        "token_type_ids": [],
        "attention_mask": [],
        "start_token_idx": [],
        "end_token_idx": [],
    }
    for item in squad_examples:
        # Examples flagged during preprocessing (bad offsets, too long) are dropped.
        if not item.skip:
            for key in dataset_dict:
                dataset_dict[key].append(getattr(item, key))
    for key in dataset_dict:
        dataset_dict[key] = np.array(dataset_dict[key])
    x = [
        dataset_dict["input_ids"],
        dataset_dict["token_type_ids"],
        dataset_dict["attention_mask"],
    ]
    y = [dataset_dict["start_token_idx"], dataset_dict["end_token_idx"]]
    return x, y
train_squad_examples = create_squad_examples(raw_train_data)
x_train, y_train = create_inputs_targets(train_squad_examples)
print(f"{len(train_squad_examples)} training points created.")
eval_squad_examples = create_squad_examples(raw_eval_data)
x_eval, y_eval = create_inputs_targets(eval_squad_examples)
print(f"{len(eval_squad_examples)} evaluation points created.")
```
Create the Question-Answering Model using BERT and Functional API
```
def create_model():
    """Build the BERT question-answering model.

    Returns a compiled Keras model mapping (input_ids, token_type_ids,
    attention_mask) to (start_probs, end_probs): per-token probabilities that
    each token is the start / end of the answer span. Uses the notebook-level
    `max_len` for the input shapes.
    """
    ## BERT encoder
    encoder = TFBertModel.from_pretrained("bert-base-uncased")

    ## QA Model
    input_ids = layers.Input(shape=(max_len,), dtype=tf.int32)
    token_type_ids = layers.Input(shape=(max_len,), dtype=tf.int32)
    attention_mask = layers.Input(shape=(max_len,), dtype=tf.int32)
    # [0] selects the sequence output: one hidden vector per input token
    embedding = encoder(
        input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask
    )[0]

    # One scalar logit per token ("answer starts/ends here"), then a softmax
    # over the token axis turns each logit sequence into probabilities.
    start_logits = layers.Dense(1, name="start_logit", use_bias=False)(embedding)
    start_logits = layers.Flatten()(start_logits)

    end_logits = layers.Dense(1, name="end_logit", use_bias=False)(embedding)
    end_logits = layers.Flatten()(end_logits)

    start_probs = layers.Activation(keras.activations.softmax)(start_logits)
    end_probs = layers.Activation(keras.activations.softmax)(end_logits)

    model = keras.Model(
        inputs=[input_ids, token_type_ids, attention_mask],
        outputs=[start_probs, end_probs],
    )
    # Outputs are already softmax probabilities, hence from_logits=False.
    loss = keras.losses.SparseCategoricalCrossentropy(from_logits=False)
    # `learning_rate` replaces the deprecated `lr` argument (removed in newer Keras).
    optimizer = keras.optimizers.Adam(learning_rate=5e-5)
    model.compile(optimizer=optimizer, loss=[loss, loss])
    return model
```
This code should preferably be run on Google Colab TPU runtime.
With Colab TPUs, each epoch will take 5-6 minutes.
```
use_tpu = True
if use_tpu:
    # Create distribution strategy: locate the Colab TPU, connect to it, and
    # initialize the TPU system so replicas are available.
    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)
    strategy = tf.distribute.TPUStrategy(tpu)

    # Create model inside the strategy scope so its weights live on the TPU replicas
    with strategy.scope():
        model = create_model()
else:
    model = create_model()

model.summary()
```
## Create evaluation Callback
This callback will compute the exact match score using the validation data
after every epoch.
```
def normalize_text(text):
    """Normalize an answer string for SQuAD Exact Match comparison.

    Lower-cases, strips punctuation, drops the articles a/an/the, and
    collapses runs of whitespace into single spaces.
    """
    lowered = text.lower()
    # Strip punctuation characters one by one.
    without_punct = "".join(ch for ch in lowered if ch not in string.punctuation)
    # Drop standalone articles, leaving a space in their place.
    without_articles = re.sub(r"\b(a|an|the)\b", " ", without_punct, flags=re.UNICODE)
    # Collapse whitespace runs into single spaces (also trims the ends).
    return " ".join(without_articles.split())
class ExactMatch(keras.callbacks.Callback):
    """
    Each `SquadExample` object contains the character level offsets for each token
    in its input paragraph. We use them to get back the span of text corresponding
    to the tokens between our predicted start and end tokens.
    All the ground-truth answers are also present in each `SquadExample` object.
    We calculate the percentage of data points where the span of text obtained
    from model predictions matches one of the ground-truth answers.
    """

    def __init__(self, x_eval, y_eval):
        self.x_eval = x_eval  # model inputs for the evaluation set
        self.y_eval = y_eval  # [start_token_idx, end_token_idx] targets

    def on_epoch_end(self, epoch, logs=None):
        """Compute and print the Exact Match score on the eval set."""
        pred_start, pred_end = self.model.predict(self.x_eval)
        count = 0
        # NOTE(review): assumes predictions line up index-for-index with the
        # non-skipped examples of the notebook-level `eval_squad_examples`
        # (true as long as x_eval came from create_inputs_targets on it).
        eval_examples_no_skip = [_ for _ in eval_squad_examples if _.skip == False]
        for idx, (start, end) in enumerate(zip(pred_start, pred_end)):
            squad_eg = eval_examples_no_skip[idx]
            offsets = squad_eg.context_token_to_char
            # Most probable start/end token positions
            start = np.argmax(start)
            end = np.argmax(end)
            if start >= len(offsets):
                # Predicted start falls outside the context tokens -- no answer
                continue
            pred_char_start = offsets[start][0]
            if end < len(offsets):
                pred_char_end = offsets[end][1]
                pred_ans = squad_eg.context[pred_char_start:pred_char_end]
            else:
                # Predicted end is past the context; take everything to the end
                pred_ans = squad_eg.context[pred_char_start:]
            # Compare the normalized prediction against every normalized ground truth
            normalized_pred_ans = normalize_text(pred_ans)
            normalized_true_ans = [normalize_text(_) for _ in squad_eg.all_answers]
            if normalized_pred_ans in normalized_true_ans:
                count += 1
        acc = count / len(self.y_eval[0])
        print(f"\nepoch={epoch+1}, exact match score={acc:.2f}")
```
## Train and Evaluate
```
exact_match_callback = ExactMatch(x_eval, y_eval)
model.fit(
x_train,
y_train,
epochs=1, # For demonstration, 3 epochs are recommended
verbose=2,
batch_size=64,
callbacks=[exact_match_callback],
)
```
| github_jupyter |
# Roles Classifier
The model implemented here follows the following architecture diagram:

We start with the import declarations:
```
from collections import Counter
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
from torch.utils.data import Dataset, DataLoader
from BiLstmClassifier import BiLstmFixedLength, BiLstmVariableLength, BiLstmGloveVector
import numpy as np
import pandas as pd
import spacy
import torch
import torch.nn.functional as F
```
## 0. Hyper Parameters Definition
Defining the correct hyper-parameters to refine the performance of the model is very important. We must consider some
situations around the values to be assigned.
### 0.1 Epochs
We must define a correct number of epochs to allow the model to learn from the dataset. If this is too few, the model
will not learn anything; if the value is too high, we can fall into over-fitting or waste processing time after the
model can't learn anymore.
### 0.2 Batch Size
It defines the amount of data for each batch to be used on each training epoch. Because we do not have much data, the value
here is small.
### 0.3 Embedded Layer Dimension
Word embeddings are always around 50 and 300 in length, longer embedding vectors don't add enough information and
smaller ones don't represent the semantics well enough. For this model, we are using small sentences for most of the
cases.
### 0.4 Hidden Layer Dimension
This parameter represents how complex is the language used in the conversations. For example, if the sentences belong
to a literature book from Shakespeare, it probably will use a sophisticated language, and we can assign a value of 70.
On the other hand, if the sentences belong to simple chat talking about movies, it is maybe simpler, and we can assign
a value of 30.
```
EPOCHS = 100
BATCH_SIZE = 1 # Small batches because the dataset is not bigger than 500 rows
HIDDEN_LAYER_DIM = 60 # AOLME is not too complex language, it represents the language's features
EMBEDDED_LAYER_DIM = 50
LR = 0.002
```
## 1. Load dataset
We are going to use the dataset generated by the Jupyter Notebook ["AOLME Datasets Generator"](main.ipynb)
```
roles = pd.read_csv('output/balanced_372.csv')
print(f'Dataset Size: {roles.shape}\n')
print(roles.head())
```
## 2. Pre-Processing
### 2.1. Mapping 'Roles' labels to numbers for vectorization
```
# Map role labels to integer class ids (this order defines the class indices)
mapping = {'Student': 0, 'Co-Facilitator': 1, 'Facilitator': 2}
roles['Role'] = roles['Role'].apply(lambda x: mapping[x])
roles.head()

# Load English words model package
# NOTE(review): the 'en' shortcut link was removed in spaCy v3 -- this
# presumably runs under spaCy v2; confirm, or use 'en_core_web_sm' on newer versions.
tok = spacy.load('en')

def tokenize(text: str) -> list:
    """
    This method tokenizes a sentence, considering the text is already lowered,
    ASCII, and punctuation has been removed
    :param text: The sentence to be tokenized
    :return: A list containing each word of the sentence
    """
    return [token.text for token in tok.tokenizer(text)]
```
### 2.2. Dataset cleaning and Sentence Vectorizing
<a id='encode_sentence'></a>
```
# Count number of occurrences of each word across all transcript rows
counts = Counter()
for index, row in roles.iterrows():
    counts.update(tokenize(row['Text']))

# Deletes words appearing only once (hapaxes carry little signal and bloat the vocab)
print(f'Number of Words before cleaning: {len(counts.keys())}')
for word in list(counts):
    if counts[word] < 2:
        del counts[word]
print(f'Number of Words after cleaning: {len(counts.keys())}\n')

# Creates vocabulary: index 0 is reserved for padding (''), index 1 for unknown words
vocab2index = {'': 0, 'UNK': 1}
words = ['', 'UNK']
for word in counts:
    vocab2index[word] = len(words)
    words.append(word)
def encode_sentence(text, vocabulary_map, n=70):
    """
    Encodes the sentence into a numerical vector, based on the vocabulary map.

    :param text: The sentence
    :param vocabulary_map: A map assigning a number to each word in the vocabulary
    :param n: Required vector size
    :return: Vectorized sentence (zero-padded / truncated to n) and its true length
    """
    unknown_id = vocabulary_map["UNK"]
    # Out-of-vocabulary words fall back to the UNK id
    token_ids = np.array([vocabulary_map.get(word, unknown_id) for word in tokenize(text)])
    length = min(n, len(token_ids))
    encoded = np.zeros(n, dtype=int)
    encoded[:length] = token_ids[:length]
    return encoded, length
# Creates a new column into Dataset: each sentence expressed as a numeric vector
roles['Vectorized'] = roles['Text'].apply(lambda x: np.array(encode_sentence(x, vocab2index)))
print(roles.head())
```
Check if the dataset is balanced
```
Counter(roles['Role'])
```
### 2.3 Split into training and validation partitions
```
X = list(roles['Vectorized'])
y = list(roles['Role'])
X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=0.2)
class RolesDataset(Dataset):
    """PyTorch Dataset over vectorized sentences.

    Each X entry is a pair (encoded_sentence, length) as produced by
    `encode_sentence`; each y entry is the integer role label.
    """

    def __init__(self, input_x, input_y):
        self.X = input_x
        self.y = input_y

    def __len__(self):
        # One item per label
        return len(self.y)

    def __getitem__(self, idx):
        encoded, length = self.X[idx]
        # int32 cast keeps the tensor dtype stable regardless of the numpy default
        sentence_tensor = torch.from_numpy(encoded.astype(np.int32))
        return sentence_tensor, self.y[idx], length
```
### 2.4 Training and Validation Functions
```
def train_model(input_model, epochs=10, lr=0.001, verbose=True):
    """
    Trains the input model on the notebook-level `training_dl` DataLoader and
    evaluates it on `validation_dl` after every epoch.
    :param verbose: Prints each batch iteration
    :param input_model: Input Model
    :param epochs: The number of training epochs
    :param lr: Learning Rate
    :return: training loss, validation loss, validation accuracy, and validation RMSE
    """
    # Only optimize parameters that require gradients (frozen layers are excluded)
    parameters = filter(lambda p: p.requires_grad, input_model.parameters())
    optimizer = torch.optim.Adam(parameters, lr=lr)
    for i in range(epochs):
        input_model.train()
        sum_loss = 0.0
        total = 0
        # Iterates on Training DataLoader: x = encoded sentences, y = labels, l = true lengths
        for x, y, l in training_dl:
            x = x.long()
            y = y.long()
            y_pred = input_model(x, l)
            optimizer.zero_grad()
            loss = F.cross_entropy(y_pred, y)
            loss.backward()
            optimizer.step()
            # Weight the batch loss by batch size so the epoch average is per-sample
            sum_loss += loss.item() * y.shape[0]
            total += y.shape[0]
        val_loss, val_acc, val_rmse = get_metrics(input_model, validation_dl)
        # Report on epochs 0, 20, 40, ... (since (i + 1) % 20 == 1)
        if verbose and (i + 1) % 20 == 1:
            print(f"Epoch {i}: training loss %.3f, valid. loss %.3f, valid. accuracy %.3f, and valid. RMSE %.3f" % (
                sum_loss / total, val_loss, val_acc, val_rmse))
    print(f"FINAL: training loss %.3f, valid. loss %.3f, valid. accuracy %.3f, and valid. RMSE %.3f" % (
        sum_loss / total, val_loss, val_acc, val_rmse))
    return sum_loss / total, val_loss, val_acc, val_rmse
def get_metrics(input_model, valid_dl):
    """
    Obtains current validation metrics
    :param input_model: Input Model
    :param valid_dl: Validation PyTorch DataLoader
    :return: per-sample average loss, accuracy, and average RMSE over the set
    """
    input_model.eval()  # disable dropout etc. for evaluation
    # NOTE(review): consider wrapping the loop in torch.no_grad() to avoid
    # tracking gradients during evaluation -- confirm against training code.
    correct = 0
    total = 0
    sum_loss = 0.0
    sum_rmse = 0.0
    # PyTorch uses CrossEntropy function to implement Softmax on the same function
    for x, y, l in valid_dl:
        x = x.long()
        y = y.long()
        y_hat = input_model(x, l)
        loss = F.cross_entropy(y_hat, y)
        pred = torch.max(y_hat, 1)[1]  # index of the highest logit = predicted class
        correct += (pred == y).float().sum()
        total += y.shape[0]
        sum_loss += loss.item() * y.shape[0]
        # RMSE over class ids: a rough "distance" between predicted and true roles
        sum_rmse += np.sqrt(mean_squared_error(pred, y.unsqueeze(-1))) * y.shape[0]
    return sum_loss / total, correct / total, sum_rmse / total
vocab_size = len(words)
training_dl = DataLoader(training_ds, batch_size=BATCH_SIZE, shuffle=True)
validation_dl = DataLoader(validation_ds, batch_size=BATCH_SIZE)
```
## BiLSTM - Fixed Length Input
We can see the implemented model class in [BiLstmClassifier class](BiLstmClassifier.py).
<b>BiLstmFixedLength</b> has the following features:
- Word-Embedding Layer. # Embeddings: Vocabulary Size, Embeddings size: 50
- Bi-directional LSTM Layer. Input size: 50, Hidden size: 60
- Linear Layer. Fully connected layer, Input size: 60, 3 output features (roles)
- Dropout: 0.7
- Fixed Length Input (see [encode_sentence](#encode_sentence) function)
```
model_fixed = BiLstmFixedLength(vocab_size, EMBEDDED_LAYER_DIM, HIDDEN_LAYER_DIM)
print(f'\nBiLSTM - Fixed Length: {EPOCHS} epochs, Learning Rate: 0.1')
print('=============================================================')
train_model(model_fixed, epochs=EPOCHS, lr=0.1)
print(f'\nBiLSTM - Fixed Length: {EPOCHS} epochs, Learning Rate: 0.05')
print('=============================================================')
train_model(model_fixed, epochs=EPOCHS, lr=0.05)
print(f'\nBiLSTM - Fixed Length: {EPOCHS} epochs, Learning Rate: 0.01')
print('=============================================================')
train_model(model_fixed, epochs=EPOCHS, lr=0.01)
```
## BiLSTM - Variable Length Input
We can see the implemented model class in [BiLstmClassifier class](BiLstmClassifier.py).
<b>BiLstmVariableLength</b> has the following features:
- Word-Embedding Layer. # Embeddings: Vocabulary Size, Embeddings size: 50
- Bi-directional LSTM Layer. Input size: 50, Hidden size: 60
- Linear Layer. Fully connected layer, Input size: 60, 3 output features (roles)
- Dropout: 0.7
- Variable Length Input. Uses PyTorch's [pack_padded_sequence](https://pytorch.org/docs/stable/generated/torch.nn.utils.rnn.pack_padded_sequence.html) to create sequences of variable length.
```
model = BiLstmVariableLength(vocab_size, EMBEDDED_LAYER_DIM, HIDDEN_LAYER_DIM)
print(f'\nBiLSTM - Variable Length: {EPOCHS} epochs, Learning Rate: 0.1')
print('=============================================================')
train_model(model, epochs=EPOCHS, lr=0.1)
print(f'\nBiLSTM - Variable Length: {EPOCHS} epochs, Learning Rate: 0.05')
print('=============================================================')
train_model(model, epochs=EPOCHS, lr=0.05)
print(f'\nBiLSTM - Variable Length: {EPOCHS} epochs, Learning Rate: 0.01')
print('=============================================================')
train_model(model, epochs=EPOCHS, lr=0.01)
```
## BiLSTM - with pretrained GloVe Word Embeddings
We can see the implemented model class in [BiLstmClassifier class](BiLstmClassifier.py).
<b>BiLstmGloveVector</b> has the following features:
- Word-Embedding Layer. # Embeddings: Vocabulary Size, Embeddings size: 50
- Bi-directional LSTM Layer. Input size: 50, Hidden size: 60
- Linear Layer. Fully connected layer, Input size: 60, 3 output features (roles)
- Dropout: 0.7
- Uses pretrained GloVe Word Embeddings to initialize weights based on its vocabulary.
```
def load_glove_vectors(path="./glove/glove.6B.50d.txt"):
    """Load GloVe (Global Vectors for Word Representation) embeddings.

    Parameters
    ----------
    path : str
        Location of the GloVe text file.  Defaults to the 50-dimensional
        6B-token vectors used throughout this notebook, so existing calls
        with no arguments behave exactly as before.

    Returns
    -------
    dict
        Maps each word to its embedding as a 1-D numpy float array.
    """
    word_vectors = {}
    with open(path, encoding="utf8") as f:
        for line in f:
            # Each line is: <word> <v_1> <v_2> ... <v_d>
            split = line.split()
            word_vectors[split[0]] = np.array([float(x) for x in split[1:]])
    return word_vectors
def get_embedding_matrix(word_counts, emb_size=50, word_vectors=None):
    """Create an embedding weight matrix for the vocabulary from GloVe vectors.

    Parameters
    ----------
    word_counts : dict or Counter
        Vocabulary words mapped to their corpus frequencies.
    emb_size : int
        Dimensionality of the embedding vectors.
    word_vectors : dict, optional
        Pretrained word -> vector mapping.  Defaults to the notebook-global
        ``word_vecs`` when omitted (the original, implicit behavior).

    Returns
    -------
    (numpy.ndarray, numpy.ndarray, dict)
        Weight matrix W of shape (len(word_counts) + 2, emb_size), the
        vocabulary array, and the word -> row-index mapping.
    """
    if word_vectors is None:
        # Preserve the original behavior of reading the notebook global.
        word_vectors = word_vecs
    vocab_size = len(word_counts) + 2  # +2 for the padding and UNK slots
    vocab_to_idx = {}
    vocab = ["", "UNK"]
    W = np.zeros((vocab_size, emb_size), dtype="float32")
    W[0] = np.zeros(emb_size, dtype='float32')  # row 0: padding vector
    W[1] = np.random.uniform(-0.25, 0.25, emb_size)  # row 1: unknown words
    # BUGFIX: the padding token was missing from the index before, so ""
    # silently mapped to UNK; mirror the {'': 0, 'UNK': 1} convention used
    # elsewhere in this notebook.
    vocab_to_idx[""] = 0
    vocab_to_idx["UNK"] = 1
    i = 2
    for word in word_counts:
        if word in word_vectors:
            W[i] = word_vectors[word]
        else:
            # Word not covered by GloVe: initialize randomly.
            W[i] = np.random.uniform(-0.25, 0.25, emb_size)
        vocab_to_idx[word] = i
        vocab.append(word)
        i += 1
    return W, np.array(vocab), vocab_to_idx
word_vecs = load_glove_vectors()
pretrained_weights, vocab, vocab2index = get_embedding_matrix(counts, EMBEDDED_LAYER_DIM)
model = BiLstmGloveVector(vocab_size, EMBEDDED_LAYER_DIM, HIDDEN_LAYER_DIM, pretrained_weights)
print(f'\nBiLSTM - with pretrained GloVe Word Embeddings: {EPOCHS} epochs, Learning Rate: 0.1')
print('====================================================================================')
train_model(model, epochs=EPOCHS, lr=0.1)
print(f'\nBiLSTM - with pretrained GloVe Word Embeddings: {EPOCHS} epochs, Learning Rate: 0.05')
print('====================================================================================')
train_model(model, epochs=EPOCHS, lr=0.05)
print(f'\nBiLSTM - with pretrained GloVe Word Embeddings: {EPOCHS} epochs, Learning Rate: 0.01')
print('====================================================================================')
train_model(model, epochs=EPOCHS, lr=0.01)
```
## Testing Several Files
```
@torch.no_grad()
def get_all_preds(model, loader):
    """Run the model over every batch in the loader and stack all predictions
    into a single tensor along the batch dimension."""
    batched_outputs = [model(images) for images, labels in loader]
    if not batched_outputs:
        # Empty loader: same empty tensor the accumulator started from.
        return torch.tensor([])
    return torch.cat(batched_outputs, dim=0)
def get_num_correct(preds, labels):
    """Count how many predicted classes (argmax over the logit dimension)
    match the ground-truth labels."""
    predicted_classes = preds.argmax(dim=1)
    return int(predicted_classes.eq(labels).sum().item())
# Evaluate the three BiLSTM variants while growing the training file size.
file_size = [150, 200, 250, 300, 350, 400, 450, 500, 550, 600, 645]
accuracy_fixed = []
accuracy_variable = []
accuracy_glove = []
for n_rows in file_size:
    BATCH_SIZE = int(n_rows * 0.5)
    file_name = f'output/balanced_{n_rows}.csv'
    roles = pd.read_csv(file_name)
    # Encode the roles as integer class labels.
    mapping = {'Student': 0, 'Co-Facilitator': 1, 'Facilitator': 2}
    roles['Role'] = roles['Role'].apply(lambda x: mapping[x])
    # Build the vocabulary counts, then drop words occurring fewer than twice.
    counts = Counter()
    for index, row in roles.iterrows():
        counts.update(tokenize(row['Text']))
    rare_words = [w for w, c in counts.items() if c < 2]
    for w in rare_words:
        del counts[w]
    # Index 0 is reserved for padding, 1 for unknown words.
    words = ['', 'UNK'] + list(counts)
    vocab2index = {w: idx for idx, w in enumerate(words)}
    roles['Vectorized'] = roles['Text'].apply(lambda x: np.array(encode_sentence(x, vocab2index)))
    X = list(roles['Vectorized'])
    y = list(roles['Role'])
    X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=0.2)
    training_ds = RolesDataset(X_train, y_train)
    validation_ds = RolesDataset(X_valid, y_valid)
    vocab_size = len(words)
    training_dl = DataLoader(training_ds, batch_size=BATCH_SIZE, shuffle=True)
    validation_dl = DataLoader(validation_ds, batch_size=BATCH_SIZE)
    print('\n********************************************')
    print(f'* Processing file: {file_name} *')
    print('********************************************')
    # For each variant: two warm-up runs, then keep the third run's
    # validation accuracy.
    print(f'\nBiLSTM - Fixed Length Input')
    print('===========================')
    model_fixed = BiLstmFixedLength(vocab_size, EMBEDDED_LAYER_DIM, HIDDEN_LAYER_DIM)
    for _ in range(2):
        train_model(model_fixed, epochs=EPOCHS, lr=LR, verbose=False)
    _, _, validation_accuracy, _ = train_model(model_fixed, epochs=EPOCHS, lr=LR, verbose=False)
    accuracy_fixed.append(validation_accuracy)
    print(f'\nBiLSTM - Variable Length Input')
    print('==============================')
    model_variable = BiLstmVariableLength(vocab_size, EMBEDDED_LAYER_DIM, HIDDEN_LAYER_DIM)
    for _ in range(2):
        train_model(model_variable, epochs=EPOCHS, lr=LR, verbose=False)
    _, _, validation_accuracy, _ = train_model(model_variable, epochs=EPOCHS, lr=LR, verbose=False)
    accuracy_variable.append(validation_accuracy)
    print(f'\nBiLSTM - with pretrained GloVe Word Embeddings')
    print('==============================================')
    word_vecs = load_glove_vectors()
    pretrained_weights, vocab, vocab2index = get_embedding_matrix(counts, EMBEDDED_LAYER_DIM)
    model = BiLstmGloveVector(vocab_size, EMBEDDED_LAYER_DIM, HIDDEN_LAYER_DIM, pretrained_weights)
    for _ in range(2):
        train_model(model, epochs=EPOCHS, lr=LR, verbose=False)
    _, _, validation_accuracy, _ = train_model(model, epochs=EPOCHS, lr=LR, verbose=False)
    accuracy_glove.append(validation_accuracy)
```
## Graphical Performance Analysis
In the following plots we can see how the model behaves when it is trained with different amounts of data.
```
%matplotlib inline
import matplotlib.pyplot as plt
# Plot the rows-vs-accuracy curve for each of the three model variants.
results = [
    (accuracy_fixed, 'BiLSTM - Fixed Length Input'),
    (accuracy_variable, 'BiLSTM - Variable Length Input'),
    (accuracy_glove, 'BiLSTM - with pretrained GloVe Word Embeddings'),
]
for accuracies, model_name in results:
    plt.plot(file_size, accuracies)
    plt.title('# of Rows vs. Accuracy')
    plt.suptitle(model_name)
    plt.xlabel('# of Rows')
    plt.ylabel('Accuracy')
    plt.show()
```
## Conclusions
- The model with the best performance is <b>BiLSTM - with pretrained GloVe Word Embeddings</b>, as we can see that it
has a most stable performance as the model is trained with more data.
- The model reaches an approximated Accuracy of 60% for the selected model, and it will probably improve when it is trained with a
bigger dataset.
- It is important to see that the first iteration with the smallest dataset with 150 rows reaches a high accuracy. This
is happening because of over-fitting caused by too few rows, but the accuracy starts to show a more realistic behavior with bigger
datasets.
| github_jupyter |
# Prediction of BoardGameGeek Reviews
## NAME: Ruochen Chang
## ID: 1001780924
# Introduction
#### This blog illustrates the implementation of Naive Bayes from scratch. Our goal is to build a classification model that predicts the rating of reviews using Naive Bayes.
#### I referred to the Naive Bayes model described on the Internet and built the classification model from scratch by myself.
#### The basic idea of Naive Bayes is: For a given item to be classified, find the probability of occurrence of each category under the condition that this item appears, whichever is the largest, it is considered that the item to be classified belongs to that category.
# Naive Bayes model:

#### Because all the Y and P(X) are the same, so we can equate the model to such model:
#### So we need to calculate the probability and conditional probability of our data.
# Steps to do the Naive Bayes
## a. Divide the dataset as train data for 70% and test data for 30%.
### Data Description:
#### This review file has 2 columns, comment and rating.
#### comment is the review text we should classify
#### rating is the score of the reviews.
### Our goal is predicting the rating according to the comment text.
#### For this data, the value of data is continuous. So I make them discreet as such rules:
#### First, I rounded them to integer number. Then,
#### rate as 1 for numbers from 0 to 2;
#### rate as 2 for numbers from 3 to 4;
#### rate as 3 for numbers from 5 to 6;
#### rate as 4 for numbers from 7 to 8;
#### rate as 5 for numbers from 9 to 10;
#### After loading all the data to the jupyter, I did some pre-processing including text cleaning, tokenization and remove stopwords.
#### Our data is often confusing and unintuitive. Therefore, we always have to pre-process the data in a series, which makes the data format more standardized and the content more reasonable. Common data preprocessing methods are: fill in the null value, remove the outliers, data cleaning, tokenization, remove stopwords and so on.
```
import pandas as pd
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
import numpy as np
original_data = pd.read_csv('reviews.csv')
all_data = pd.DataFrame(original_data, columns=['comment', 'rating']).dropna()
all_data = shuffle(all_data)
all_data = pd.DataFrame(all_data).reset_index(drop=True)
def round_amount(a):
    """Discretize a 0-10 rating into one of five labels (0-4).

    The value is truncated to an integer, then bucketed in pairs:
    0-2 -> 0, 3-4 -> 1, 5-6 -> 2, 7-8 -> 3, 9-10 -> 4.

    BUGFIX: the original chain of independent ``if`` tests left ``label``
    unbound (raising UnboundLocalError) for any value outside 0-10;
    out-of-range values are now clamped into the nearest bucket.
    """
    res = int(float(a))  # truncate, matching the original int(float(...))
    if res <= 2:
        label = 0
    elif res <= 4:
        label = 1
    elif res <= 6:
        label = 2
    elif res <= 8:
        label = 3
    else:
        label = 4
    return label
all_data['rating'] = all_data['rating'].apply(round_amount)
import re
import string
def clean_text(text):
    """Normalize a review: lowercase it, then strip bracketed text, HTML
    tags, punctuation, newlines, and any word containing a digit."""
    substitutions = [
        (r'\[.*?\]', ''),                                  # text in square brackets
        (r'<.*?>+', ''),                                   # HTML-like tags
        ('[%s]' % re.escape(string.punctuation), ''),      # punctuation
        (r'\n', ''),                                       # newlines
        (r'\w*\d\w*', ''),                                 # words with numbers
    ]
    cleaned = text.lower()
    for pattern, replacement in substitutions:
        cleaned = re.sub(pattern, replacement, cleaned)
    return cleaned
# Applying the cleaning function to both test and training datasets
all_data['comment'] = all_data['comment'].apply(lambda x: clean_text(x))
import nltk
from nltk.corpus import stopwords
def remove_stopwords(text):
    """Drop English stopwords from an iterable of tokens and return the
    survivors as a list.

    PERF: the original called ``stopwords.words('english')`` for every
    single token, rebuilding the list and doing an O(n) membership test
    each time; build the set once per call instead.
    """
    stop_words = set(stopwords.words('english'))
    return [w for w in text if w not in stop_words]
train = all_data[:int(0.7*len(all_data))]
train = pd.DataFrame(train)
test = all_data[int(0.7*len(all_data)):]
test = pd.DataFrame(test)
print("length of train data: ", len(train))
print("length of test data: ", len(test))
# tokenization
tokenizer = nltk.tokenize.RegexpTokenizer(r'\w+')
train['comment'] = train['comment'].apply(lambda x: tokenizer.tokenize(x))
test['comment'] = test['comment'].apply(lambda x: tokenizer.tokenize(x))
train['comment'] = train['comment'].apply(lambda x: remove_stopwords(x))
test['comment'] = test['comment'].apply(lambda x: remove_stopwords(x))
print("train data:")
print(train.head())
print("\n")
print("test data:")
print(test.head())
```
## b. Build a vocabulary as list.
#### Building a vocabulary means build a dictionary for all the words with their occurrence under every label like this: {'happy': [10, 20, 30, 40, 50], ...}. This example means the word happy occurs 10 times under label 1, 20 times under label 2, 30 times under label 3 and so on.
#### To be more reasonable, I removed words whose occurrence are less than 10.
```
# Flatten every training review into a single token list.
# BUGFIX: the original concatenated rows with NO separator, fusing the last
# word of each review with the first word of the next; joining with a space
# fixes that (and avoids quadratic string concatenation in the loop).
all_s = " ".join(" ".join(row['comment']) for _, row in train.iterrows())
all_words = all_s.lower().split(' ')
def count_words(data):
    """Build a vocabulary of per-label word counts from tokenized reviews.

    Parameters
    ----------
    data : pandas.DataFrame
        Must have a 'comment' column (list of tokens) and a 'rating'
        column with integer labels 0-4.

    Returns
    -------
    dict
        word -> [count under label 0, ..., count under label 4], keeping
        only words whose total count across all labels is at least 10.
    """
    vocabulary_list = {}  # {'word': [c0, c1, c2, c3, c4]}
    for index, row in data.iterrows():
        label = row['rating']
        for word in row['comment']:
            if word not in vocabulary_list:
                vocabulary_list[word] = [0, 0, 0, 0, 0]
            # BUGFIX: the original incremented only inside an ``else``, so
            # the FIRST occurrence of every word was never counted.
            vocabulary_list[word][label] += 1
    # Prune rare words (total occurrences below 10).
    for word in list(vocabulary_list):
        if sum(vocabulary_list[word]) < 10:
            del vocabulary_list[word]
    return vocabulary_list
vocabulary_list = count_words(train)
print('examples of the vocabulary list:')
print(list(vocabulary_list.items())[:20])
```
#### write the vocabulary to a txt file.
```
f = open('data.txt','w')
f.write(str(vocabulary_list))
f.close()
```
## c. Calculate the probability and conditional probability for all the words.
#### calculate the total number of every label.
```
total_length = len(train)
def cal_label_count(data=None):
    """Count how many rows fall under each of the 5 rating labels.

    Parameters
    ----------
    data : pandas.DataFrame, optional
        Frame with an integer 'rating' column (labels 0-4).  Defaults to
        the notebook-global ``train`` set, matching the original behavior.

    Returns
    -------
    list of int
        [count of label 0, ..., count of label 4]

    PERF: the original iterated the whole DataFrame five times (once per
    label); a single value_counts() pass gives the same result.
    """
    if data is None:
        data = train
    counts = data['rating'].value_counts()
    return [int(counts.get(i, 0)) for i in range(5)]
label_count = cal_label_count()
print(label_count)
```
##### Probability of the occurrence: P[word] = num of documents containing this word / num of all documents
##### Conditional probability based on the sentiment: P[word | Positive] = number of positive documents containing this word / num of all positive review documents
#### There are 5 labels totally. So I build a probability list and a conditional probability list to save different 5 labels.
### To make our model more reasonable, I used Laplace smoothing to solve the problem of zero probability.
## Laplace Smoothing:
#### The zero probability problem is that if a certain amount x does not appear in the observation sample library (training set), the result of probability of the entire instance will be 0 when calculating the probability of an instance. In the problem of text classification, when a word does not appear in the training sample, the probability of that word is 0, and it is also 0 when the probability of text occurrence is calculated using multiplication. Clearly, this is unreasonable, and you cannot arbitrarily think that the probability of an event is 0 because it is not observed. In order to solve the problem of zero probability, the French mathematician Laplace first proposed the method of adding 1 to estimate the probability of a phenomenon that a data has not occurred, so this smoothing is also called Laplace smoothing. Assuming that the training sample is very large, the estimated probability change caused by adding 1 to the count of each component x can be ignored, but it can easily and effectively avoid the zero probability problem.
```
def cal_prob(i, data=None):
    """Laplace-smoothed prior probability of rating label ``i``.

    Uses add-one smoothing over the 5 possible labels, so the result is
    (count_i + 1) / (N + 5).  ``data`` defaults to the notebook-global
    ``train`` DataFrame (the original behavior); passing it explicitly
    makes the function testable and reusable.
    """
    if data is None:
        data = train
    # Vectorized count instead of a Python-level iterrows() loop.
    count = int((data['rating'] == i).sum())
    return (count + 1) / (len(data) + 5)
# prior probability
prior_list = []
for i in range(5):
prior_list.append(cal_prob(i))
print("prior probability: ", prior_list)
def conditional_prob(word, i, vocab=None, counts_per_label=None):
    """Laplace-smoothed conditional probability P(word | label i).

    Parameters
    ----------
    word : str
        The word whose probability is requested.
    i : int
        Label index (0-4).
    vocab : dict, optional
        word -> per-label count list.  Defaults to the notebook-global
        ``vocabulary_list`` (original behavior).
    counts_per_label : sequence, optional
        Documents per label.  Defaults to the notebook-global ``label_count``.
    """
    if vocab is None:
        vocab = vocabulary_list
    if counts_per_label is None:
        counts_per_label = label_count
    denominator = counts_per_label[i] + 5  # +5: one pseudo-count per label class
    word_counts = vocab.get(word)
    if word_counts is not None:
        return (word_counts[i] + 1) / denominator
    # Unseen word: pseudo-count of 1 only.
    return 1 / denominator
print("\nOcurrence of going word under label 1: ", conditional_prob('going', 1))
```
## d. predict test data
#### The test data was pre-processed in the same way, so it is clean and ready for prediction. I classified all the test data according to our model and printed the accuracy, which is about 40%.
```
def classify(s):
    """Predict the most likely rating label (0-4) for a tokenized review.

    Works in log-space: the original multiplied many small probabilities,
    which underflows to 0.0 for long reviews and makes the argmax
    meaningless; summing log-probabilities is numerically safe and yields
    the same argmax mathematically.  All probabilities are Laplace-smoothed
    upstream, so log() never sees zero.
    """
    import math
    pred_list = []
    for i in range(5):
        log_pred = math.log(prior_list[i])
        for word in s:
            log_pred += math.log(conditional_prob(word, i))
        pred_list.append(log_pred)
    # Label with the largest (log-)probability wins.
    return pred_list.index(max(pred_list))
# Score the classifier on the held-out test split.
pred_right = sum(
    1 for index, row in test.iterrows()
    if classify(row['comment']) == row['rating']
)
accuracy = pred_right / len(test)
print("*********predict accuracy*********")
print(accuracy)
```
# Challenge:
#### This data is continuous, so I made them discreet. At first, I divided the rating value to 10 grades, but the accuracy is about 20%. So I chose to divide the rating value to 5 grades which is more reasonable because there are so many websites setting the review rating as 5 grades.
#### In the future, I want to have a try to build a SVM model and LSTM model to make classification because the time is limited this time.
| github_jupyter |
[@LorenaABarba](https://twitter.com/LorenaABarba)
12 steps to Navier-Stokes
=====
***
Did you experiment in Steps [1](./01_Step_1.ipynb) and [2](./02_Step_2.ipynb) using different parameter choices? If you did, you probably ran into some unexpected behavior. Did your solution ever blow up? (In my experience, CFD students *love* to make things blow up.)
You are probably wondering why changing the discretization parameters affects your solution in such a drastic way. This notebook complements our [interactive CFD lessons](https://github.com/barbagroup/CFDPython) by discussing the CFL condition. And learn more by watching Prof. Barba's YouTube lectures (links below).
Convergence and the CFL Condition
----
***
For the first few steps, we've been using the same general initial and boundary conditions. With the parameters we initially suggested, the grid has 41 points and the timestep is 0.25 seconds. Now, we're going to experiment with increasing the size of our grid. The code below is identical to the code we used in [Step 1](./01_Step_1.ipynb), but here it has been bundled up in a function so that we can easily examine what happens as we adjust just one variable: **the grid size**.
```
import numpy #numpy is a library for array operations akin to MATLAB
from matplotlib import pyplot #matplotlib is 2D plotting library
%matplotlib inline
def linearconv(nx):
    """1-D linear convection on [0, 2] with nx grid points.

    Integrates u_t + c*u_x = 0 with a forward-time, backward-space scheme
    and plots the final profile.  The timestep dt is FIXED at 0.025, so as
    nx grows (and dx shrinks) the CFL number c*dt/dx eventually exceeds 1
    and the scheme goes unstable -- which is the point of this lesson.
    """
    dx = 2.0 / (nx - 1)
    nt = 20    #nt is the number of timesteps we want to calculate
    dt = .025  #dt is the amount of time each timestep covers (delta t)
    c = 1      # wave speed in u_t + c*u_x = 0
    u = numpy.ones(nx)      #defining a numpy array which is nx elements long with every value equal to 1.
    u[int(.5/dx):int(1 / dx + 1)] = 2  #setting u = 2 between 0.5 and 1 as per our I.C.s
    un = numpy.ones(nx)  #initializing our placeholder array, un, to hold the values we calculate for the n+1 timestep
    for n in range(nt):  #iterate through time
        un = u.copy()  ##copy the existing values of u into un
        for i in range(1, nx):
            # backward difference in space, forward in time
            u[i] = un[i] - c * dt / dx * (un[i] - un[i-1])
    pyplot.plot(numpy.linspace(0, 2, nx), u);
```
Now let's examine the results of our linear convection problem with an increasingly fine mesh.
```
linearconv(41) #convection using 41 grid points
```
This is the same result as our Step 1 calculation, reproduced here for reference.
```
linearconv(61)
```
Here, there is still numerical diffusion present, but it is less severe.
```
linearconv(71)
```
Here the same pattern is present -- the wave is more square than in the previous runs.
```
linearconv(85)
```
This doesn't look anything like our original hat function.
### What happened?
To answer that question, we have to think a little bit about what we're actually implementing in code.
In each iteration of our time loop, we use the existing data about our wave to estimate the speed of the wave in the subsequent time step. Initially, the increase in the number of grid points returned more accurate answers. There was less numerical diffusion and the square wave looked much more like a square wave than it did in our first example.
Each iteration of our time loop covers a time-step of length $\Delta t$, which we have been defining as 0.025
During this iteration, we evaluate the speed of the wave at each of the $x$ points we've created. In the last plot, something has clearly gone wrong.
What has happened is that over the time period $\Delta t$, the wave is travelling a distance which is greater than `dx`. The length `dx` of each grid box is related to the number of total points `nx`, so stability can be enforced if the $\Delta t$ step size is calculated with respect to the size of `dx`.
$$\sigma = \frac{u \Delta t}{\Delta x} \leq \sigma_{max}$$
where $u$ is the speed of the wave; $\sigma$ is called the **Courant number** and the value of $\sigma_{max}$ that will ensure stability depends on the discretization used.
In a new version of our code, we'll use the CFL number to calculate the appropriate time-step `dt` depending on the size of `dx`.
```
import numpy
from matplotlib import pyplot
def linearconv(nx):
    """1-D linear convection with dt chosen from the CFL condition.

    Unlike the earlier version, the timestep adapts to the grid:
    dt = sigma * dx with sigma = 0.5 keeps the Courant number c*dt/dx
    at 0.5, so the scheme stays stable for any nx.
    """
    dx = 2.0 / (nx - 1)
    nt = 20    #nt is the number of timesteps we want to calculate
    c = 1
    sigma = .5       # Courant number (kept <= 1 for stability)
    dt = sigma * dx  # timestep now scales with the grid spacing
    u = numpy.ones(nx)
    u[int(.5/dx):int(1 / dx + 1)] = 2
    un = numpy.ones(nx)
    for n in range(nt):  #iterate through time
        un = u.copy()  ##copy the existing values of u into un
        for i in range(1, nx):
            u[i] = un[i] - c * dt / dx * (un[i] - un[i-1])
    pyplot.plot(numpy.linspace(0, 2, nx), u)
linearconv(41)
linearconv(61)
linearconv(81)
linearconv(101)
linearconv(121)
```
Notice that as the number of points `nx` increases, the wave convects a shorter and shorter distance. The number of time iterations we have advanced the solution at is held constant at `nt = 20`, but depending on the value of `nx` and the corresponding values of `dx` and `dt`, a shorter time window is being examined overall.
Learn More
-----
***
It's possible to do rigorous analysis of the stability of numerical schemes, in some cases. Watch Prof. Barba's presentation of this topic in **Video Lecture 9** on YouTube.
```
from IPython.display import YouTubeVideo
YouTubeVideo('Yw1YPBupZxU')
from IPython.core.display import HTML
def css_styling():
    """Read the notebook's custom stylesheet and return it as rich HTML.

    Uses a context manager so the file handle is closed promptly (the
    original ``open(...).read()`` left it open until garbage collection).
    """
    with open("../styles/custom.css", "r") as f:
        styles = f.read()
    return HTML(styles)
```
| github_jupyter |
# Debugging Faster R-CNN Networks
## Imports
```
import sys
sys.path.append('../')
import numpy as np
np.set_printoptions(precision=3)
import tensorflow as tf
```
## Check Cythonized modules working
#### bbox_overlaps
```
from Lib.bbox_overlaps import bbox_overlaps
# Hand-made boxes (x1, y1, x2, y2) to sanity-check the Cython IoU kernel.
# FIX: the np.float alias was deprecated in NumPy 1.20 and removed in 1.24;
# the builtin float (interpreted by NumPy as float64) is the replacement.
boxes = np.array([[1, 3, 2, 4],
                  [2, 5, 1, 8],
                  [3, 4, 9, 9],
                  [1, 9, 7, 11]], dtype=float)
query_boxes = np.array([[1, 3, 2, 4],
                        [5, 3, 7, 4],
                        [1, 8, 10, 12],
                        [0, 2, 3, 5],
                        [0, 3, 2, 9],
                        [2, 1, 3, 4]], dtype=float)
bbox_overlaps(boxes, query_boxes)
```
#### nms
```
from Lib.nms_wrapper import nms
# Four base proposals (x1, y1, x2, y2, score), scaled up by 10.
proposals = 10*np.array([[2,1,12,21,0],
                         [10,5,14,18,.1],
                         [5,3,11,14,.2],
                         [1,4,20,20,.3]],dtype=np.float32)
# Draw 256 proposals by sampling the four bases with the given probabilities.
copies = np.random.choice(4,256,p=[0.1,0.2,0.3,0.4])
proposals = proposals[copies,:]
# Jitter the coordinates (zeros column keeps the scores untouched) so the
# duplicated boxes are not byte-identical.
perturbations = np.random.randint(8,size=(256,4))
perturbations = np.concatenate((perturbations,np.zeros([256,1])),1)
proposals = np.array(proposals + perturbations,dtype=np.float32)
# Suppress any box overlapping a higher-scoring box with IoU > 0.7.
keep = nms(proposals,0.7)
proposals[keep,:]
```
## Test fast_rcnn
```
from Networks.convnet import convnet
from Networks.faster_rcnn_networks import rpn, roi_proposal, fast_rcnn
# Global Dictionary of Flags
flags = {
'data_directory': '../Data/MNIST/',
'save_directory': '../Logs/summaries/',
'model_directory': 'resnet101/',
'restore': False,
'restore_file': 'start.ckpt',
'datasets': 'MNIST',
'image_dim': 28,
'hidden_size': 10,
'num_classes': 10,
'batch_size': 1,
'display_step': 200,
'weight_decay': 1e-7,
'lr_decay': 0.999,
'num_epochs': 10,
'lr_iters': [(5e-3, 5000), (5e-3, 7500), (5e-4, 10000), (5e-5, 10000)],
'anchor_scales': [1,2,4]
}
eval_mode = False
```
#### Set up inputs
```
x = tf.placeholder(tf.float32, [1, 128, 128, 3], name='x')
gt_boxes = tf.placeholder(tf.int64, [1, 5], name='gt')
im_dims = tf.placeholder(tf.int64, [1, 2], name='im_dims')
```
#### Convolutional Feature Extractor
```
cnn = convnet(x, [5, 3, 3, 3, 3], [64, 96, 128, 172, 256], strides=[2, 2, 2, 2, 2])
featureMaps = cnn.get_output()
_feat_stride = cnn.get_feat_stride()
```
#### RPN
```
rpn_net = rpn(featureMaps,gt_boxes,im_dims,_feat_stride,eval_mode)
rpn_cls_score = rpn_net.get_rpn_cls_score()
rpn_cls_score.get_shape()
rpn_labels = rpn_net.get_rpn_labels()
rpn_labels.get_shape()
rpn_bbox_pred = rpn_net.get_rpn_bbox_pred()
rpn_bbox_pred.get_shape()
rpn_bbox_targets = rpn_net.get_rpn_bbox_targets()
rpn_bbox_targets.get_shape()
rpn_bbox_outside_weights = rpn_net.get_rpn_bbox_outside_weights()
rpn_bbox_outside_weights.get_shape()
```
#### ROI Proposal
```
rpn_cls_score = rpn_net.get_rpn_cls_score()
rpn_bbox_pred = rpn_net.get_rpn_bbox_pred()
roi_proposal_net = roi_proposal(rpn_net, gt_boxes, im_dims, eval_mode)
roi_proposal_net.get_rois().get_shape()
roi_proposal_net.get_labels().get_shape()
roi_proposal_net.get_bbox_targets().get_shape()
```
#### Fast RCNN
```
fast_rcnn_net = fast_rcnn(featureMaps, roi_proposal_net, eval_mode)
fast_rcnn_net.get_cls_score().get_shape()
fast_rcnn_net.get_bbox_refinement().get_shape()
```
#### Feed dummy values through net
```
# Prevent TensorFlow from completely hogging up all GPUs
config = tf.ConfigProto(log_device_placement=False)
config.gpu_options.per_process_gpu_memory_fraction=0.2
sess = tf.InteractiveSession(config=config)
x_in = np.random.randint(256,size=[1, 128, 128, 3])
gt_in = np.array([1,4,16,20,3]).reshape([1,5])
im_dims_in = np.array([128]*1*2).reshape([1,2])
sess.run(tf.global_variables_initializer())
#sess.run(rpn_bbox_targets,feed_dict={x: x_in, gt: gt_in, im_dims: im_dims_in})
```
## Test ROI pooling layer
```
import tensorflow as tf
import matplotlib.pyplot as plt
from Lib.roi_pool import roi_pool
# FIX: scipy.misc.imread was deprecated in SciPy 1.0 and removed in 1.2.
# matplotlib (already imported here) can read the PNG instead.  Note that
# plt.imread returns floats in [0, 1] for PNGs rather than uint8 values,
# which is fine for this visualization-only check.
# Set up tensorflow graph
featMap_tf = tf.placeholder(tf.float32, [None, 128, 128, 1])
roi_tf = tf.placeholder(tf.float32, [None, 5])
im_dims_tf = tf.placeholder(tf.int32, [None, 2])
pooledFeatures = roi_pool(featMap_tf, roi_tf, im_dims_tf)
imfile = '../Data/clutteredMNIST/Images/test_img85.png'
img = plt.imread(imfile)
im_dims = np.array(img.shape)
im_dims = im_dims.reshape((-1, 2))
plt.imshow(img)
plt.show()
print(im_dims)
# Ground-truth box (x1, y1, x2, y2, class); prepend a batch index of 0.
gt = np.loadtxt('../Data/clutteredMNIST/Annotations/test_img85.txt', ndmin=2)
gt_box = gt[:, :4]
roi = np.concatenate(([[0]], gt_box), axis=1)
roi
# This would be a really dumb output for a CNN,
# but for testing purposes, it'll do
featureMaps = img[::1, ::1]
plt.imshow(featureMaps)
plt.show()
featureMaps = featureMaps.reshape((1, featureMaps.shape[0], featureMaps.shape[1], 1))
print(featureMaps.shape)
# Run the graph and pray
out = sess.run(pooledFeatures, feed_dict={featMap_tf: featureMaps, roi_tf: roi, im_dims_tf: im_dims})
im_out = np.squeeze(out)
plt.imshow(im_out)
plt.show()
print(im_out.shape)
np.set_printoptions(precision=2)
print(im_out)
```
## Test losses/reshaping
```
# Dummy RPN Classification Scores
N = 1   # Minibatch size
W = 16  # Width of feature maps
H = 16  # Height of feature maps
K = 9*2 # Number of scores
# Encode each index into a distinct value (k + 100*h + 10000*w + 1000000*n),
# built with numpy broadcasting instead of nested list comprehensions.
n_idx = np.arange(N).reshape(N, 1, 1, 1)
w_idx = np.arange(W).reshape(1, W, 1, 1)
h_idx = np.arange(H).reshape(1, 1, H, 1)
k_idx = np.arange(K).reshape(1, 1, 1, K)
rpn_cls_score = k_idx + 100*h_idx + 10000*w_idx + 1000000*n_idx
print(rpn_cls_score[0,4,10,0])
print(rpn_cls_score[0,4,10,1])
print(rpn_cls_score[0,11,3,11])
print(rpn_cls_score[0,11,3,10])
print(rpn_cls_score[0,1,8,16])
print(rpn_cls_score[0,1,8,17])
# Reshape for softmax: split the 2K channel scores into a 2-way axis.
shape = rpn_cls_score.shape
print("Shape: {0}".format(shape))
rpn_cls_score = np.transpose(rpn_cls_score, [0, 3, 1, 2])
rpn_cls_score = np.reshape(rpn_cls_score, [shape[0], 2, shape[3]//2*shape[1], shape[2]])
rpn_cls_score = np.transpose(rpn_cls_score, [0, 2, 3, 1])
print("Reshaped: {0}".format(rpn_cls_score.shape))
print(rpn_cls_score[0,100,10,0])
print(rpn_cls_score[0,100,10,1])
print(rpn_cls_score[0,41,7,0])
print(rpn_cls_score[0,41,7,1])
print(rpn_cls_score[0,141,3,0])
print(rpn_cls_score[0,141,3,1])
# Reshape for loss: flatten everything into (num_anchors, 2) score pairs.
rpn_cls_score = np.reshape(rpn_cls_score, [-1, 2])
print("Shape: {0}".format(rpn_cls_score.shape))
print(rpn_cls_score[5,:])
print(rpn_cls_score[259,:])
print(rpn_cls_score[1978,:])
```
| github_jupyter |
# Visualizing a Gensim model
To illustrate how to use [`pyLDAvis`](https://github.com/bmabey/pyLDAvis)'s gensim [helper functions](https://pyldavis.readthedocs.org/en/latest/modules/API.html#module-pyLDAvis.gensim) we will create a model from the [20 Newsgroup corpus](http://qwone.com/~jason/20Newsgroups/). Minimal preprocessing is done, so the model is not the best; the goal of this notebook is to demonstrate the helper functions.
## Downloading the data
```
%%bash
mkdir -p data
pushd data
if [ -d "20news-bydate-train" ]
then
echo "The data has already been downloaded..."
else
wget http://qwone.com/%7Ejason/20Newsgroups/20news-bydate.tar.gz
tar xfv 20news-bydate.tar.gz
rm 20news-bydate.tar.gz
fi
echo "Lets take a look at the groups..."
ls 20news-bydate-train/
popd
```
## Exploring the dataset
Each group dir has a set of files:
```
ls -lah data/20news-bydate-train/sci.space | tail -n 5
```
Lets take a peak at one email:
```
!head data/20news-bydate-train/sci.space/61422 -n 20
```
## Loading the tokenizing the corpus
```
from glob import glob
import re
import string
import funcy as fp
from gensim import models
from gensim.corpora import Dictionary, MmCorpus
import nltk
import pandas as pd
# quick and dirty....
EMAIL_REGEX = re.compile(r"[a-z0-9\.\+_-]+@[a-z0-9\._-]+\.[a-z]*")
FILTER_REGEX = re.compile(r"[^a-z '#]")
TOKEN_MAPPINGS = [(EMAIL_REGEX, "#email"), (FILTER_REGEX, ' ')]
def tokenize_line(line):
    """Lowercase a line, collapse e-mail addresses to the '#email' token,
    blank out disallowed characters, and split on whitespace."""
    cleaned = line.lower()
    for pattern, token in TOKEN_MAPPINGS:
        cleaned = pattern.sub(token, cleaned)
    return cleaned.split()
def tokenize(lines, token_size_filter=2):
    """Tokenize every line and keep tokens longer than ``token_size_filter``.

    Replaces the funcy.mapcat call with a plain nested comprehension so this
    function no longer depends on the third-party ``funcy`` package.
    """
    tokens = (t for line in lines for t in tokenize_line(line))
    return [t for t in tokens if len(t) > token_size_filter]
def load_doc(filename):
    """Read one newsgroup message file and return its metadata and tokens.

    The group name and document id are recovered from the last two path
    components, so ``filename`` must use '/' separators
    (e.g. 'data/20news-bydate-train/sci.space/61422').
    """
    group, doc_id = filename.split('/')[-2:]
    with open(filename) as f:
        doc = f.readlines()
    return {'group': group,           # newsgroup (parent directory name)
            'doc': doc,               # raw lines of the message
            'tokens': tokenize(doc),  # cleaned, length-filtered tokens
            'id': doc_id}             # file name within the group
docs = pd.DataFrame(map(load_doc, glob('data/20news-bydate-train/*/*'))).set_index(['group','id'])
docs.head()
```
## Creating the dictionary, and bag of words corpus
```
def nltk_stopwords():
    """Return NLTK's English stopword list as a set.

    Requires the NLTK 'stopwords' corpus to have been downloaded.
    """
    return set(nltk.corpus.stopwords.words('english'))
def prep_corpus(docs, additional_stopwords=set(), no_below=5, no_above=0.5):
    """Build a gensim Dictionary and bag-of-words corpus from tokenized docs.

    Parameters
    ----------
    docs : iterable of list of str
        Tokenized documents.
    additional_stopwords : set
        Extra stopwords removed on top of NLTK's English list.
        NOTE(review): a mutable default is shared across calls; harmless
        here only because it is never mutated.
    no_below, no_above : int, float
        Frequency bounds forwarded to Dictionary.filter_extremes.

    Returns
    -------
    (Dictionary, list of list of (int, int))
        The fitted dictionary and the bag-of-words corpus.
    """
    print('Building dictionary...')
    dictionary = Dictionary(docs)
    stopwords = nltk_stopwords().union(additional_stopwords)
    # token2id.get yields None for stopwords absent from the corpus --
    # presumably filter_tokens tolerates unknown ids; verify against gensim.
    stopword_ids = map(dictionary.token2id.get, stopwords)
    dictionary.filter_tokens(stopword_ids)
    dictionary.compactify()
    dictionary.filter_extremes(no_below=no_below, no_above=no_above, keep_n=None)
    dictionary.compactify()
    print('Building corpus...')
    corpus = [dictionary.doc2bow(doc) for doc in docs]
    return dictionary, corpus
dictionary, corpus = prep_corpus(docs['tokens'])
MmCorpus.serialize('newsgroups.mm', corpus)
dictionary.save('newsgroups.dict')
```
## Fitting the LDA model
```
%%time
lda = models.ldamodel.LdaModel(corpus=corpus, id2word=dictionary, num_topics=50, passes=10)
lda.save('newsgroups_50.model')
```
## Visualzing the model with pyLDAvis
Okay, the moment we have all been waiting for is finally here! You'll notice in the visualization that we have a few junk topics that would probably disappear after better preprocessing of the corpus. This is left as an exercise for the reader. :)
```
import pyLDAvis.gensim as gensimvis
import pyLDAvis
vis_data = gensimvis.prepare(lda, corpus, dictionary)
pyLDAvis.display(vis_data)
```
| github_jupyter |
```
import numpy as np
import random
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.optim import SGD, Adam
from task import OmniglotTask, MNISTTask
from dataset import Omniglot, MNIST
from data_loading import get_data_loader
import sys, os
sys.path.append(os.path.join(os.path.dirname("__file__"), '..', '..', ".."))
from mela.variational.variational_meta_learning import Statistics_Net_Conv, Generative_Net_Conv, Master_Model, load_model_dict
from mela.pytorch.util_pytorch import get_num_params, to_Variable, to_np_array
from mela.util import plot_matrices, make_dir
is_cuda = torch.cuda.is_available()
def get_task(root, n_cl, n_inst, split='train'):
    """Construct an n_cl-way, n_inst-shot task for the dataset at ``root``.

    The dataset type is inferred from the path, which must contain either
    'mnist' or 'omniglot'.

    Raises
    ------
    ValueError
        If the path names neither supported dataset.  (The original printed
        'Unknown dataset' and raised a bare, message-less Exception;
        ValueError is a subclass of Exception, so existing handlers still
        catch it, and the path is now included in the message.)
    """
    if 'mnist' in root:
        return MNISTTask(root, n_cl, n_inst, split)
    elif 'omniglot' in root:
        return OmniglotTask(root, n_cl, n_inst, split)
    else:
        raise ValueError('Unknown dataset: {}'.format(root))
def get_metrics(master_model, X_test, y_test, loss_fn):
    """Evaluate *master_model* on one batch and return (loss, accuracy).

    The model is applied to X_test to obtain class logits; predictions are
    the argmax over the class dimension.  Both metrics are converted to
    plain scalars via to_np_array before being returned.
    """
    logits = master_model(X_test)
    predictions = logits.max(1)[1]
    num_correct = (y_test == predictions).float().sum()
    accuracy = (to_np_array(num_correct) / len(y_test))[0]
    batch_loss = to_np_array(loss_fn(logits, y_test))[0]
    return batch_loss, accuracy
```
## Prepare MeLA:
```
# ---- MeLA architecture configuration ----
pre_pooling_neurons = 200          # width of the layer pooled over a task's examples
statistics_output_neurons = 20     # size of the task-statistics (latent) vector
num_classes = 5                    # N-way classification
input_channels = 3                 # channels of the input images
activation_default = "leakyRelu"
activation_conv = "leakyReluFlat"
# Convolutional front-end of the statistics network; the final Simple_Layer
# flattens to 40 features (layer_input_size 324 — presumably determined by
# the input image size; TODO confirm).
struct_param_pre_conv = [
    [8, "Conv2d", {"kernel_size": 4, "stride": 2, "activation": activation_conv}],
#     [None, "MaxPool2d", {"kernel_size": 2, "return_indices": False}],
    [4, "Conv2d", {"kernel_size": 3, "stride": 1, "activation": activation_conv}],
    [4, "Conv2d", {"kernel_size": 3, "stride": 1, "activation": activation_conv}],
    [40, "Simple_Layer", {"activation": "linear", "layer_input_size": 324}]
]
# Fully-connected layers applied per example before pooling.
struct_param_pre = [[60, "Simple_Layer", {"activation": activation_default}],
                    [pre_pooling_neurons, "Simple_Layer", {"activation": "linear"}],
                   ]
# Layers applied after pooling, emitting the statistics vector.
struct_param_post = [[60, "Simple_Layer", {"activation": activation_default}],
                     [60, "Simple_Layer", {"activation": activation_default}],
                     [statistics_output_neurons, "Simple_Layer", {"activation": "linear"}],
                    ]
# Shared trunk for every weight/bias generator sub-network.
struct_param_gen_base = [[40, "Simple_Layer", {"activation": activation_default}],
                         [10, "Simple_Layer", {"activation": "linear"}],
                        ]
# Main (generated) network: four Conv2d+BatchNorm2d pairs followed by a
# linear classification layer.
struct_param_model = [[64, "Conv2d", {"kernel_size": 3, "stride": 1, "dilation": 2}],
                      [64, "BatchNorm2d", {"activation": activation_conv}],
                     ] * 4 + \
                     [[num_classes, "Simple_Layer", {"activation": "linear"}]]
# Per-layer parameter counts of the main network: weights then biases.
# 9216 * 5 is the final linear layer (flattened features x num_classes) —
# presumably 9216 is the flattened conv output size; TODO confirm.
main_weight_neurons = [3*64*3*3, 64, 64*64*3*3, 64, 64*64*3*3, 64, 64*64*3*3, 64, 9216 * 5]
main_bias_neurons = [64, 64, 64, 64, 64, 64, 64, 64, 5]
# For each main-network layer, build the generator sub-network that emits
# that layer's weights/biases: the shared trunk plus a linear output layer
# of the required size.
W_struct_param_list = []
b_struct_param_list = []
for i, num_weight_neurons in enumerate(main_weight_neurons):
    struct_param_weight = struct_param_gen_base + [[num_weight_neurons, "Simple_Layer", {"activation": "linear"}]]
    struct_param_bias = struct_param_gen_base + [[main_bias_neurons[i], "Simple_Layer", {"activation": "linear"}]]
    W_struct_param_list.append(struct_param_weight)
    b_struct_param_list.append(struct_param_bias)
# Statistics network: encodes a task's (X, y) examples into the latent
# statistics vector.
statistics_Net_Conv = Statistics_Net_Conv(input_channels = input_channels,
                                          num_classes = num_classes,
                                          pre_pooling_neurons = pre_pooling_neurons,
                                          struct_param_pre_conv = struct_param_pre_conv,
                                          struct_param_pre = struct_param_pre,
                                          struct_param_post = struct_param_post,
                                          is_cuda = is_cuda,
                                         )
# Generative network: maps the latent statistics to the weights and biases
# of the main classification network.
generative_Net_Conv = Generative_Net_Conv(input_channels = input_channels,
                                          latent_size = statistics_output_neurons,
                                          W_struct_param_list = W_struct_param_list,
                                          b_struct_param_list = b_struct_param_list,
                                          struct_param_model = struct_param_model,
                                          is_cuda = is_cuda,
                                         )
# Master model ties the statistics and generative networks together.
master_model = Master_Model(statistics_Net = statistics_Net_Conv, generative_Net = generative_Net_Conv, is_cuda = is_cuda)
print("Num_params: {0}".format(get_num_params(master_model)))
```
## Train:
```
# ---- Training configuration ----
optim_mode = "indi"   # "indi": optimizer step per task; "sum": one step on the summed batch loss
dataset='omniglot'
num_inst=6            # instances per class per split (K-shot)
meta_batch_size=20    # tasks per meta-update
num_updates=15000
lr=1e-1               # NOTE(review): unused below — only meta_lr is passed to an optimizer; confirm before removing
meta_lr=1e-3
loss_fn = nn.CrossEntropyLoss()
reg_amp = 1e-9        # amplitude of the model's regularization term
exp='maml-omniglot-{0}way-{1}shot-TEST'.format(num_classes, num_inst)
make_dir("output/{0}/".format(exp))
random.seed(1337)
np.random.seed(1337)
# Metric histories, one entry per meta-iteration (batch averages).
tr_loss, tr_acc, val_loss, val_acc = [], [], [], []
mtr_loss, mtr_acc, mval_loss, mval_acc = [], [], [], []
reg_list = []
optimizer = torch.optim.Adam(master_model.parameters(), lr = meta_lr)
for i in range(num_updates):
    # Evaluate on test tasks
    # mt_loss, mt_acc, mv_loss, mv_acc = test()
    # mtr_loss.append(mt_loss)
    # mtr_acc.append(mt_acc)
    # mval_loss.append(mv_loss)
    # mval_acc.append(mv_acc)
    # Collect a meta batch update
    grads = []
    tloss, tacc, vloss, vacc, reg_batch = 0.0, 0.0, 0.0, 0.0, 0.0
    if optim_mode == "indi":
        # One optimizer step per task: each task's gradient is applied
        # immediately instead of being accumulated over the meta batch.
        for k in range(meta_batch_size):
            # Get data:
            task = get_task('../data/{}'.format(dataset), num_classes, num_inst)
            train_loader = get_data_loader(task, batch_size = num_inst, split='train')
            val_loader = get_data_loader(task, batch_size = num_inst, split='val')
            # next(iter(...)) replaces the Python-2-only .__iter__().next().
            X_train, y_train = next(iter(train_loader))
            X_test, y_test = next(iter(val_loader))
            X_train, y_train, X_test, y_test = to_Variable(X_train, y_train, X_test, y_test, is_cuda = is_cuda)
            # Get gradient:
            optimizer.zero_grad()
            results = master_model.get_predictions(X_test, X_train, y_train, is_time_series = False)
            loss = loss_fn(results["y_pred"], y_test)
            reg = master_model.get_regularization(source = ["weight", "bias", "W_gen", "b_gen"], target = ["statistics_Net", "generative_Net"]) * reg_amp
            loss = loss + reg
            loss.backward()
            # Get metrics:
            master_model.get_statistics(X_train, y_train)
            trl, tra = get_metrics(master_model, X_train, y_train, loss_fn)
            vall, vala = get_metrics(master_model, X_test, y_test, loss_fn)
            tloss += trl
            tacc += tra
            vloss += vall
            vacc += vala
            reg_batch += to_np_array(reg)[0]
            # Gradient descent:
            optimizer.step()
    elif optim_mode == "sum":
        # Accumulate the loss over all tasks, then take a single step.
        optimizer.zero_grad()
        loss_total = Variable(torch.FloatTensor([0]), requires_grad = False)
        if is_cuda:
            loss_total = loss_total.cuda()
        # BUGFIX: the inner loop previously reused `i`, shadowing the outer
        # meta-iteration counter and corrupting the iteration number printed
        # below; use a separate index.
        for k in range(meta_batch_size):
            # Get data:
            task = get_task('../data/{}'.format(dataset), num_classes, num_inst)
            train_loader = get_data_loader(task, batch_size = num_inst, split='train')
            val_loader = get_data_loader(task, batch_size = num_inst, split='val')
            X_train, y_train = next(iter(train_loader))
            X_test, y_test = next(iter(val_loader))
            X_train, y_train, X_test, y_test = to_Variable(X_train, y_train, X_test, y_test, is_cuda = is_cuda)
            # Get single-task loss:
            optimizer.zero_grad()
            results = master_model.get_predictions(X_test, X_train, y_train, is_time_series = False)
            loss = loss_fn(results["y_pred"], y_test)
            reg = master_model.get_regularization(source = ["weight", "bias", "W_gen", "b_gen"], target = ["statistics_Net", "generative_Net"]) * reg_amp
            loss_total = loss_total + loss + reg
            # Get metrics:
            master_model.generative_Net.set_latent_param(results["statistics"])
            trl, tra = get_metrics(master_model, X_train, y_train, loss_fn)
            vall, vala = get_metrics(master_model, X_test, y_test, loss_fn)
            tloss += trl
            tacc += tra
            vloss += vall
            vacc += vala
            reg_batch += to_np_array(reg)[0]
        # Gradient descent on the sum of the losses:
        loss_total.backward()
        optimizer.step()
    else:
        raise Exception("optim_mode {0} not recognized!".format(optim_mode))
    # Record batch-averaged metrics and persist them after every iteration.
    tr_loss.append(tloss / meta_batch_size)
    tr_acc.append(tacc / meta_batch_size)
    val_loss.append(vloss / meta_batch_size)
    val_acc.append(vacc / meta_batch_size)
    reg_list.append(reg_batch / meta_batch_size)
    print("iter {0}\ttrain_loss: {1:.4f}\ttest_loss: {2:.4f}\ttrain_acc: {3:.4f}\ttest_acc: {4:.4f}\treg: {5:.6f}".format(i, tr_loss[-1], val_loss[-1], tr_acc[-1], val_acc[-1], reg_list[-1]))
    np.save('output/{}/tr_loss.npy'.format(exp), np.array(tr_loss))
    np.save('output/{}/tr_acc.npy'.format(exp), np.array(tr_acc))
    np.save('output/{}/val_loss.npy'.format(exp), np.array(val_loss))
    np.save('output/{}/val_acc.npy'.format(exp), np.array(val_acc))
    # np.save('output/{}/meta_tr_loss.npy'.format(exp), np.array(mtr_loss))
    # np.save('output/{}/meta_tr_acc.npy'.format(exp), np.array(mtr_acc))
    # np.save('output/{}/meta_val_loss.npy'.format(exp), np.array(mval_loss))
    # np.save('output/{}/meta_val_acc.npy'.format(exp), np.array(mval_acc))
```
| github_jupyter |
<div class="alert alert-block alert-info" style="margin-top: 20px">
<a href="https://cocl.us/corsera_da0101en_notebook_top">
<img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DA0101EN/Images/TopAd.png" width="750" align="center">
</a>
</div>
<a href="https://www.bigdatauniversity.com"><img src = "https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DA0101EN/Images/CCLog.png" width = 300, align = "center"></a>
<h1 align=center><font size = 5>Data Analysis with Python</font></h1>
Exploratory Data Analysis
<h3>Welcome!</h3>
In this section, we will explore several methods to see if certain characteristics or features can be used to predict car price.
<h2>Table of content</h2>
<div class="alert alert-block alert-info" style="margin-top: 20px">
<ol>
<li><a href="#import_data">Import Data from Module</a></li>
<li><a href="#pattern_visualization">Analyzing Individual Feature Patterns using Visualization</a></li>
<li><a href="#discriptive_statistics">Descriptive Statistical Analysis</a></li>
<li><a href="#basic_grouping">Basics of Grouping</a></li>
<li><a href="#correlation_causation">Correlation and Causation</a></li>
<li><a href="#anova">ANOVA</a></li>
</ol>
Estimated Time Needed: <strong>30 min</strong>
</div>
<hr>
<h3>What are the main characteristics which have the most impact on the car price?</h3>
<h2 id="import_data">1. Import Data from Module 2</h2>
<h4>Setup</h4>
Import libraries
```
import pandas as pd
import numpy as np
```
load data and store in dataframe df:
This dataset was hosted on IBM Cloud object click <a href="https://cocl.us/DA101EN_object_storage">HERE</a> for free storage
```
# Load the cleaned automobile dataset (the output of Module 2) straight
# from IBM Cloud object storage into a DataFrame.
path='https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DA0101EN/automobileEDA.csv'
df = pd.read_csv(path)
df.head()  # preview the first five rows
```
<h2 id="pattern_visualization">2. Analyzing Individual Feature Patterns using Visualization</h2>
To install seaborn we use the pip which is the python package manager.
```
%%capture
! pip install seaborn
```
Import visualization packages "Matplotlib" and "Seaborn", don't forget about "%matplotlib inline" to plot in a Jupyter notebook.
```
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
```
<h4>How to choose the right visualization method?</h4>
<p>When visualizing individual variables, it is important to first understand what type of variable you are dealing with. This will help us find the right visualization method for that variable.</p>
```
# list the data types for each column
print(df.dtypes)
```
<div class="alert alert-danger alertdanger" style="margin-top: 20px">
<h3>Question #1:</h3>
<b>What is the data type of the column "peak-rpm"? </b>
</div>
Double-click <b>here</b> for the solution.
<!-- The answer is below:
float64
-->
for example, we can calculate the correlation between variables of type "int64" or "float64" using the method "corr":
```
df.corr()
```
The diagonal elements are always one; we will study correlation more precisely (the Pearson correlation) in depth at the end of the notebook.
<div class="alert alert-danger alertdanger" style="margin-top: 20px">
<h1> Question #2: </h1>
<p>Find the correlation between the following columns: bore, stroke,compression-ratio , and horsepower.</p>
<p>Hint: if you would like to select those columns use the following syntax: df[['bore','stroke' ,'compression-ratio','horsepower']]</p>
</div>
```
# Write your code below and press Shift+Enter to execute
df[['bore','stroke' ,'compression-ratio','horsepower']].corr()
```
Double-click <b>here</b> for the solution.
<!-- The answer is below:
df[['bore', 'stroke', 'compression-ratio', 'horsepower']].corr()
-->
<h2>Continuous numerical variables:</h2>
<p>Continuous numerical variables are variables that may contain any value within some range. Continuous numerical variables can have the type "int64" or "float64". A great way to visualize these variables is by using scatterplots with fitted lines.</p>
<p>In order to start understanding the (linear) relationship between an individual variable and the price, we can use "regplot", which plots the scatterplot plus the fitted regression line for the data.</p>
Let's see several examples of different linear relationships:
<h4>Positive linear relationship</h4>
Let's find the scatterplot of "engine-size" and "price"
```
# Engine size as potential predictor variable of price
sns.regplot(x="engine-size", y="price", data=df)
plt.ylim(0,)
```
<p>As the engine-size goes up, the price goes up: this indicates a positive direct correlation between these two variables. Engine size seems like a pretty good predictor of price since the regression line is almost a perfect diagonal line.</p>
We can examine the correlation between 'engine-size' and 'price' and see it's approximately 0.87
```
df[["engine-size", "price"]].corr()
```
Highway mpg is a potential predictor variable of price
```
sns.regplot(x="highway-mpg", y="price", data=df)
```
<p>As the highway-mpg goes up, the price goes down: this indicates an inverse/negative relationship between these two variables. Highway mpg could potentially be a predictor of price.</p>
We can examine the correlation between 'highway-mpg' and 'price' and see it's approximately -0.704
```
df[['highway-mpg', 'price']].corr()
```
<h3>Weak Linear Relationship</h3>
Let's see if "Peak-rpm" as a predictor variable of "price".
```
sns.regplot(x="peak-rpm", y="price", data=df)
```
<p>Peak rpm does not seem like a good predictor of the price at all since the regression line is close to horizontal. Also, the data points are very scattered and far from the fitted line, showing lots of variability. Therefore it is not a reliable variable.</p>
We can examine the correlation between 'peak-rpm' and 'price' and see it's approximately -0.101616
```
df[['peak-rpm','price']].corr()
```
<div class="alert alert-danger alertdanger" style="margin-top: 20px">
<h1> Question 3 a): </h1>
<p>Find the correlation between x="stroke", y="price".</p>
<p>Hint: if you would like to select those columns use the following syntax: df[["stroke","price"]] </p>
</div>
```
# Write your code below and press Shift+Enter to execute
df[['stroke','price']].corr()
```
Double-click <b>here</b> for the solution.
<!-- The answer is below:
#The correlation is 0.0823, the non-diagonal elements of the table.
#code:
df[["stroke","price"]].corr()
-->
<div class="alert alert-danger alertdanger" style="margin-top: 20px">
<h1>Question 3 b):</h1>
<p>Given the correlation results between "price" and "stroke" do you expect a linear relationship?</p>
<p>Verify your results using the function "regplot()".</p>
</div>
```
# Write your code below and press Shift+Enter to execute
sns.regplot(x='stroke', y="price", data=df)
```
Double-click <b>here</b> for the solution.
<!-- The answer is below:
#There is a weak correlation between the variable 'stroke' and 'price.' as such regression will not work well. We #can see this use "regplot" to demonstrate this.
#Code:
sns.regplot(x="stroke", y="price", data=df)
-->
<h3>Categorical variables</h3>
<p>These are variables that describe a 'characteristic' of a data unit, and are selected from a small group of categories. The categorical variables can have the type "object" or "int64". A good way to visualize categorical variables is by using boxplots.</p>
Let's look at the relationship between "body-style" and "price".
```
sns.boxplot(x="body-style", y="price", data=df)
```
<p>We see that the distributions of price between the different body-style categories have a significant overlap, and so body-style would not be a good predictor of price. Let's examine engine "engine-location" and "price":</p>
```
sns.boxplot(x="engine-location", y="price", data=df)
```
<p>Here we see that the distribution of price between these two engine-location categories, front and rear, are distinct enough to take engine-location as a potential good predictor of price.</p>
Let's examine "drive-wheels" and "price".
```
# drive-wheels
sns.boxplot(x="drive-wheels", y="price", data=df)
```
<p>Here we see that the distribution of price between the different drive-wheels categories differs; as such drive-wheels could potentially be a predictor of price.</p>
<h2 id="discriptive_statistics">3. Descriptive Statistical Analysis</h2>
<p>Let's first take a look at the variables by utilizing a description method.</p>
<p>The <b>describe</b> function automatically computes basic statistics for all continuous variables. Any NaN values are automatically skipped in these statistics.</p>
This will show:
<ul>
<li>the count of that variable</li>
<li>the mean</li>
<li>the standard deviation (std)</li>
<li>the minimum value</li>
<li>the IQR (Interquartile Range: 25%, 50% and 75%)</li>
<li>the maximum value</li>
</ul>
We can apply the method "describe" as follows:
```
df.describe()
```
The default setting of "describe" skips variables of type object. We can apply the method "describe" on the variables of type 'object' as follows:
```
df.describe(include=['object'])
```
<h3>Value Counts</h3>
<p>Value-counts is a good way of understanding how many units of each characteristic/variable we have. We can apply the "value_counts" method on the column 'drive-wheels'. Don’t forget the method "value_counts" only works on Pandas series, not Pandas Dataframes. As a result, we only include one bracket "df['drive-wheels']" not two brackets "df[['drive-wheels']]".</p>
```
df['drive-wheels'].value_counts()
```
We can convert the series to a Dataframe as follows :
```
df['drive-wheels'].value_counts().to_frame()
```
Let's repeat the above steps but save the results to the dataframe "drive_wheels_counts" and rename the column 'drive-wheels' to 'value_counts'.
```
drive_wheels_counts = df['drive-wheels'].value_counts().to_frame()
drive_wheels_counts.rename(columns={'drive-wheels': 'value_counts'}, inplace=True)
drive_wheels_counts
```
Now let's rename the index to 'drive-wheels':
```
drive_wheels_counts.index.name = 'drive-wheels'
drive_wheels_counts
```
We can repeat the above process for the variable 'engine-location'.
```
# engine-location as variable
engine_loc_counts = df['engine-location'].value_counts().to_frame()
engine_loc_counts.rename(columns={'engine-location': 'value_counts'}, inplace=True)
engine_loc_counts.index.name = 'engine-location'
engine_loc_counts.head(10)
```
<p>Examining the value counts of the engine location would not be a good predictor variable for the price. This is because we only have three cars with a rear engine and 198 with an engine in the front, this result is skewed. Thus, we are not able to draw any conclusions about the engine location.</p>
<h2 id="basic_grouping">4. Basics of Grouping</h2>
<p>The "groupby" method groups data by different categories. The data is grouped based on one or several variables and analysis is performed on the individual groups.</p>
<p>For example, let's group by the variable "drive-wheels". We see that there are 3 different categories of drive wheels.</p>
```
df['drive-wheels'].unique()
```
<p>If we want to know, on average, which type of drive wheel is most valuable, we can group "drive-wheels" and then average them.</p>
<p>We can select the columns 'drive-wheels', 'body-style' and 'price', then assign it to the variable "df_group_one".</p>
```
df_group_one = df[['drive-wheels','body-style','price']]
```
We can then calculate the average price for each of the different categories of data.
```
# grouping results
df_group_one = df_group_one.groupby(['drive-wheels'],as_index=False).mean()
df_group_one
```
<p>From our data, it seems rear-wheel drive vehicles are, on average, the most expensive, while 4-wheel and front-wheel are approximately the same in price.</p>
<p>You can also group with multiple variables. For example, let's group by both 'drive-wheels' and 'body-style'. This groups the dataframe by the unique combinations 'drive-wheels' and 'body-style'. We can store the results in the variable 'grouped_test1'.</p>
```
# grouping results
df_gptest = df[['drive-wheels','body-style','price']]
grouped_test1 = df_gptest.groupby(['drive-wheels','body-style'],as_index=False).mean()
grouped_test1
```
<p>This grouped data is much easier to visualize when it is made into a pivot table. A pivot table is like an Excel spreadsheet, with one variable along the column and another along the row. We can convert the dataframe to a pivot table using the method "pivot " to create a pivot table from the groups.</p>
<p>In this case, we will leave the drive-wheel variable as the rows of the table, and pivot body-style to become the columns of the table:</p>
```
grouped_pivot = grouped_test1.pivot(index='drive-wheels',columns='body-style')
grouped_pivot
```
<p>Often, we won't have data for some of the pivot cells. We can fill these missing cells with the value 0, but any other value could potentially be used as well. It should be mentioned that missing data is quite a complex subject and is an entire course on its own.</p>
```
grouped_pivot = grouped_pivot.fillna(0) #fill missing values with 0
grouped_pivot
```
<div class="alert alert-danger alertdanger" style="margin-top: 20px">
<h1>Question 4:</h1>
<p>Use the "groupby" function to find the average "price" of each car based on "body-style" ? </p>
</div>
```
# Write your code below and press Shift+Enter to execute
grouped_test2 = df[['body-style','price']].groupby(['body-style'],as_index=False).mean()
grouped_test2
```
Double-click <b>here</b> for the solution.
<!-- The answer is below:
# grouping results
df_gptest2 = df[['body-style','price']]
grouped_test_bodystyle = df_gptest2.groupby(['body-style'],as_index= False).mean()
grouped_test_bodystyle
-->
If you did not import "pyplot" let's do it again.
```
import matplotlib.pyplot as plt
%matplotlib inline
```
<h4>Variables: Drive Wheels and Body Style vs Price</h4>
Let's use a heat map to visualize the relationship between Body Style vs Price.
```
#use the grouped results
plt.pcolor(grouped_pivot, cmap='RdBu')
plt.colorbar()
plt.show()
```
<p>The heatmap plots the target variable (price) proportional to colour with respect to the variables 'drive-wheel' and 'body-style' in the vertical and horizontal axis respectively. This allows us to visualize how the price is related to 'drive-wheel' and 'body-style'.</p>
<p>The default labels convey no useful information to us. Let's change that:</p>
```
# Redraw the heatmap with meaningful category labels on both axes.
fig, ax = plt.subplots()
im = ax.pcolor(grouped_pivot, cmap='RdBu')
#label names
# Column level 1 of the pivot holds the body styles; the index holds the
# drive-wheel categories.
row_labels = grouped_pivot.columns.levels[1]
col_labels = grouped_pivot.index
#move ticks and labels to the center of each cell
ax.set_xticks(np.arange(grouped_pivot.shape[1]) + 0.5, minor=False)
ax.set_yticks(np.arange(grouped_pivot.shape[0]) + 0.5, minor=False)
#insert labels
ax.set_xticklabels(row_labels, minor=False)
ax.set_yticklabels(col_labels, minor=False)
#rotate label if too long
plt.xticks(rotation=90)
fig.colorbar(im)
plt.show()
```
<p>Visualization is very important in data science, and Python visualization packages provide great freedom. We will go more in-depth in a separate Python Visualizations course.</p>
<p>The main question we want to answer in this module, is "What are the main characteristics which have the most impact on the car price?".</p>
<p>To get a better measure of the important characteristics, we look at the correlation of these variables with the car price, in other words: how is the car price dependent on this variable?</p>
<h2 id="correlation_causation">5. Correlation and Causation</h2>
<p><b>Correlation</b>: a measure of the extent of interdependence between variables.</p>
<p><b>Causation</b>: the relationship between cause and effect between two variables.</p>
<p>It is important to know the difference between these two and that correlation does not imply causation. Determining correlation is much simpler than determining causation, as causation may require independent experimentation.</p>
<h3>Pearson Correlation</h3>
<p>The Pearson Correlation measures the linear dependence between two variables X and Y.</p>
<p>The resulting coefficient is a value between -1 and 1 inclusive, where:</p>
<ul>
<li><b>1</b>: Total positive linear correlation.</li>
<li><b>0</b>: No linear correlation, the two variables most likely do not affect each other.</li>
<li><b>-1</b>: Total negative linear correlation.</li>
</ul>
<p>Pearson Correlation is the default method of the function "corr". Like before, we can calculate the Pearson Correlation of the 'int64' or 'float64' variables.</p>
```
df.corr()
```
Sometimes we would like to know the significance of the correlation estimate.
<b>P-value</b>:
<p>What is this P-value? The P-value is the probability value that the correlation between these two variables is statistically significant. Normally, we choose a significance level of 0.05, which means that we are 95% confident that the correlation between the variables is significant.</p>
By convention, when the
<ul>
<li>p-value is $<$ 0.001: we say there is strong evidence that the correlation is significant.</li>
<li>the p-value is $<$ 0.05: there is moderate evidence that the correlation is significant.</li>
<li>the p-value is $<$ 0.1: there is weak evidence that the correlation is significant.</li>
<li>the p-value is $>$ 0.1: there is no evidence that the correlation is significant.</li>
</ul>
We can obtain this information using "stats" module in the "scipy" library.
```
from scipy import stats
```
<h3>Wheel-base vs Price</h3>
Let's calculate the Pearson Correlation Coefficient and P-value of 'wheel-base' and 'price'.
```
pearson_coef, p_value = stats.pearsonr(df['wheel-base'], df['price'])
print("The Pearson Correlation Coefficient is", pearson_coef, " with a P-value of P =", p_value)
```
<h5>Conclusion:</h5>
<p>Since the p-value is $<$ 0.001, the correlation between wheel-base and price is statistically significant, although the linear relationship isn't extremely strong (~0.585)</p>
<h3>Horsepower vs Price</h3>
Let's calculate the Pearson Correlation Coefficient and P-value of 'horsepower' and 'price'.
```
pearson_coef, p_value = stats.pearsonr(df['horsepower'], df['price'])
print("The Pearson Correlation Coefficient is", pearson_coef, " with a P-value of P = ", p_value)
```
<h5>Conclusion:</h5>
<p>Since the p-value is $<$ 0.001, the correlation between horsepower and price is statistically significant, and the linear relationship is quite strong (~0.809, close to 1)</p>
<h3>Length vs Price</h3>
Let's calculate the Pearson Correlation Coefficient and P-value of 'length' and 'price'.
```
pearson_coef, p_value = stats.pearsonr(df['length'], df['price'])
print("The Pearson Correlation Coefficient is", pearson_coef, " with a P-value of P = ", p_value)
```
<h5>Conclusion:</h5>
<p>Since the p-value is $<$ 0.001, the correlation between length and price is statistically significant, and the linear relationship is moderately strong (~0.691).</p>
<h3>Width vs Price</h3>
Let's calculate the Pearson Correlation Coefficient and P-value of 'width' and 'price':
```
pearson_coef, p_value = stats.pearsonr(df['width'], df['price'])
print("The Pearson Correlation Coefficient is", pearson_coef, " with a P-value of P =", p_value )
```
##### Conclusion:
Since the p-value is < 0.001, the correlation between width and price is statistically significant, and the linear relationship is quite strong (~0.751).
### Curb-weight vs Price
Let's calculate the Pearson Correlation Coefficient and P-value of 'curb-weight' and 'price':
```
pearson_coef, p_value = stats.pearsonr(df['curb-weight'], df['price'])
print( "The Pearson Correlation Coefficient is", pearson_coef, " with a P-value of P = ", p_value)
```
<h5>Conclusion:</h5>
<p>Since the p-value is $<$ 0.001, the correlation between curb-weight and price is statistically significant, and the linear relationship is quite strong (~0.834).</p>
<h3>Engine-size vs Price</h3>
Let's calculate the Pearson Correlation Coefficient and P-value of 'engine-size' and 'price':
```
pearson_coef, p_value = stats.pearsonr(df['engine-size'], df['price'])
print("The Pearson Correlation Coefficient is", pearson_coef, " with a P-value of P =", p_value)
```
<h5>Conclusion:</h5>
<p>Since the p-value is $<$ 0.001, the correlation between engine-size and price is statistically significant, and the linear relationship is very strong (~0.872).</p>
<h3>Bore vs Price</h3>
Let's calculate the Pearson Correlation Coefficient and P-value of 'bore' and 'price':
```
pearson_coef, p_value = stats.pearsonr(df['bore'], df['price'])
print("The Pearson Correlation Coefficient is", pearson_coef, " with a P-value of P = ", p_value )
```
<h5>Conclusion:</h5>
<p>Since the p-value is $<$ 0.001, the correlation between bore and price is statistically significant, but the linear relationship is only moderate (~0.521).</p>
We can repeat the process for 'City-mpg' and 'Highway-mpg':
<h3>City-mpg vs Price</h3>
```
pearson_coef, p_value = stats.pearsonr(df['city-mpg'], df['price'])
print("The Pearson Correlation Coefficient is", pearson_coef, " with a P-value of P = ", p_value)
```
<h5>Conclusion:</h5>
<p>Since the p-value is $<$ 0.001, the correlation between city-mpg and price is statistically significant, and the coefficient of ~ -0.687 shows that the relationship is negative and moderately strong.</p>
<h3>Highway-mpg vs Price</h3>
```
pearson_coef, p_value = stats.pearsonr(df['highway-mpg'], df['price'])
print( "The Pearson Correlation Coefficient is", pearson_coef, " with a P-value of P = ", p_value )
```
##### Conclusion:
Since the p-value is < 0.001, the correlation between highway-mpg and price is statistically significant, and the coefficient of ~ -0.705 shows that the relationship is negative and moderately strong.
<h2 id="anova">6. ANOVA</h2>
<h3>ANOVA: Analysis of Variance</h3>
<p>The Analysis of Variance (ANOVA) is a statistical method used to test whether there are significant differences between the means of two or more groups. ANOVA returns two parameters:</p>
<p><b>F-test score</b>: ANOVA assumes the means of all groups are the same, calculates how much the actual means deviate from the assumption, and reports it as the F-test score. A larger score means there is a larger difference between the means.</p>
<p><b>P-value</b>: P-value tells how statistically significant is our calculated score value.</p>
<p>If our price variable is strongly correlated with the variable we are analyzing, expect ANOVA to return a sizeable F-test score and a small p-value.</p>
<h3>Drive Wheels</h3>
<p>Since ANOVA analyzes the difference between different groups of the same variable, the groupby function will come in handy. Because the ANOVA algorithm averages the data automatically, we do not need to take the average before hand.</p>
<p>To see whether different types of 'drive-wheels' impact 'price', we group the data accordingly.</p>
```
# Group price by drive-wheels; the groupby object keeps each group's rows
# so individual groups can be pulled out with get_group below.
grouped_test2=df_gptest[['drive-wheels', 'price']].groupby(['drive-wheels'])
grouped_test2.head(2)
df_gptest  # NOTE(review): displays the ungrouped frame; looks like leftover debugging output
```
We can obtain the values of the method group using the method "get_group".
```
grouped_test2.get_group('4wd')['price']
```
we can use the function 'f_oneway' in the module 'stats' to obtain the <b>F-test score</b> and <b>P-value</b>.
```
# ANOVA
# One-way ANOVA across all three drive-wheel groups: a large F score with
# a tiny p-value means at least one group's mean price differs significantly.
f_val, p_val = stats.f_oneway(grouped_test2.get_group('fwd')['price'], grouped_test2.get_group('rwd')['price'], grouped_test2.get_group('4wd')['price'])
print( "ANOVA results: F=", f_val, ", P =", p_val)
```
This is a great result, with a large F test score showing a strong correlation and a P value of almost 0 implying almost certain statistical significance. But does this mean all three tested groups are all this highly correlated?
#### Separately: fwd and rwd
```
f_val, p_val = stats.f_oneway(grouped_test2.get_group('fwd')['price'], grouped_test2.get_group('rwd')['price'])
print( "ANOVA results: F=", f_val, ", P =", p_val )
```
Let's examine the other groups
#### 4wd and rwd
```
f_val, p_val = stats.f_oneway(grouped_test2.get_group('4wd')['price'], grouped_test2.get_group('rwd')['price'])
print( "ANOVA results: F=", f_val, ", P =", p_val)
```
<h4>4wd and fwd</h4>
```
f_val, p_val = stats.f_oneway(grouped_test2.get_group('4wd')['price'], grouped_test2.get_group('fwd')['price'])
print("ANOVA results: F=", f_val, ", P =", p_val)
```
<h3>Conclusion: Important Variables</h3>
<p>We now have a better idea of what our data looks like and which variables are important to take into account when predicting the car price. We have narrowed it down to the following variables:</p>
Continuous numerical variables:
<ul>
<li>Length</li>
<li>Width</li>
<li>Curb-weight</li>
<li>Engine-size</li>
<li>Horsepower</li>
<li>City-mpg</li>
<li>Highway-mpg</li>
<li>Wheel-base</li>
<li>Bore</li>
</ul>
Categorical variables:
<ul>
<li>Drive-wheels</li>
</ul>
<p>As we now move into building machine learning models to automate our analysis, feeding the model with variables that meaningfully affect our target variable will improve our model's prediction performance.</p>
<h1>Thank you for completing this notebook</h1>
<div class="alert alert-block alert-info" style="margin-top: 20px">
<p><a href="https://cocl.us/corsera_da0101en_notebook_bottom"><img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DA0101EN/Images/BottomAd.png" width="750" align="center"></a></p>
</div>
<h3>About the Authors:</h3>
This notebook was written by <a href="https://www.linkedin.com/in/mahdi-noorian-58219234/" target="_blank">Mahdi Noorian PhD</a>, <a href="https://www.linkedin.com/in/joseph-s-50398b136/" target="_blank">Joseph Santarcangelo</a>, Bahare Talayian, Eric Xiao, Steven Dong, Parizad, Hima Vsudevan and <a href="https://www.linkedin.com/in/fiorellawever/" target="_blank">Fiorella Wenver</a> and <a href=" https://www.linkedin.com/in/yi-leng-yao-84451275/ " target="_blank" >Yi Yao</a>.
<p><a href="https://www.linkedin.com/in/joseph-s-50398b136/" target="_blank">Joseph Santarcangelo</a> is a Data Scientist at IBM, and holds a PhD in Electrical Engineering. His research focused on using Machine Learning, Signal Processing, and Computer Vision to determine how videos impact human cognition. Joseph has been working for IBM since he completed his PhD.</p>
<hr>
<p>Copyright © 2018 IBM Developer Skills Network. This notebook and its source code are released under the terms of the <a href="https://cognitiveclass.ai/mit-license/">MIT License</a>.</p>
| github_jupyter |
```
from sklearn.datasets import make_classification, make_moons, load_iris, make_circles
from sklearn.decomposition import PCA, KernelPCA
from sklearn.metrics import f1_score
from sklearn.linear_model import LogisticRegression
from sklearn.cross_validation import train_test_split
from sklearn.feature_selection import SelectKBest, chi2, mutual_info_classif
import utils
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('ggplot')
%matplotlib inline
```
# Example: Arbitrary classification
```
X, y = make_classification(n_informative=2, random_state=11)
print(X.shape)
```
## Training a simple logistic regression model
```
xtrain, xtest, ytrain, ytest = train_test_split(X, y, random_state=2)
lr = LogisticRegression()
lr.fit(xtrain, ytrain)
prediction = lr.predict(xtest)
print("F1 score: %f" % f1_score(ytest, prediction))
```
## Reducing dimensionality with PCA
```
pca = PCA(n_components=2)
x_red = pca.fit_transform(X)
print(x_red.shape)
```
## Training on reduced dimensions
```
xtrain, xtest, ytrain, ytest = train_test_split(x_red, y, random_state=2)
lr = LogisticRegression()
lr.fit(xtrain, ytrain)
prediction = lr.predict(xtest)
print("F1 score: %f" % f1_score(ytest, prediction))
plt.figure(figsize=(8, 6))
plt.scatter(x_red[:, 0], x_red[:, 1], c=y)
```
## Exercise:
## 1. Plot top 2 principal components of the iris dataset (already provided below)
## 2. Use LinearSVC to train on full iris dataset and on PCA of iris dataset, check the difference in F1 score.
```
from sklearn.neighbors import KNeighborsClassifier
from sklearn.datasets import load_iris
iris = load_iris()
X = iris.data
y = iris.target
# enter code here
```
## How to select the number of principal components?
### The ratio of explained variance
```
X, y = make_classification(n_redundant=10, random_state=11)
pca = PCA(n_components=X.shape[1])
pca.fit(X)
varRatio = np.cumsum(pca.explained_variance_ratio_)
plt.figure(figsize=(8, 6))
plt.plot(np.arange(1, X.shape[1] + 1), varRatio)
_ = plt.xticks(np.arange(1, X.shape[1] + 1))
```
### ≈ "Rank" of the matrix
```
np.linalg.matrix_rank(X)
```
## Exercise: Find the rank of the following matrix, and find the number of informative principal components
```
X = utils.make_varratio_exercise()
# enter code here
```
# Univariate Feature Selection
## Removing features with Low Variance
```
X = np.random.normal(size=(1000, 50))
plt.figure(figsize=(8, 6))
_ = plt.hist(X.var(0))
from sklearn.feature_selection import VarianceThreshold
vt = VarianceThreshold(threshold=1.0)
xThresh = vt.fit_transform(X)
plt.figure(figsize=(8, 6))
_ = plt.hist(xThresh.var(0))
```
## Which features were kept?
```
np.arange(X.shape[1])[vt.get_support()]
```
## Selecting the `k` "best" features from a dataset
### Perform a test that measures the statistical significance of every feature wrt target, and select the `k` highest such features
### Types of significance tests:
* <h4>$\chi^2$ test</h4>
* <h4>Mutual information</h4>
* <h4>ANOVA F-value</h4>
* <h4>$p$-value of correlation coefficients</h4>
### See [this](http://scikit-learn.org/stable/modules/generated/sklearn.feature_selection.SelectKBest.html#sklearn.feature_selection.SelectKBest) for more
```
X = iris.data
y = iris.target
lr = LogisticRegression()
lr.fit(X, y)
print("f1 score:", f1_score(y, lr.predict(X), average='weighted'))
selector = SelectKBest(chi2, k=2)
xNew = selector.fit_transform(X, y)
print(xNew.shape)
lr.fit(xNew, y)
print("f1 score:", f1_score(y, lr.predict(xNew), average='weighted'))
```
## Exercise
* ### Fit a default Logistic regression on the following dataset and report the f1 score
* ### For each possible value of $k$, fit a logistic regression and find the f1 score.
* ### Plot the scores vs. number of dimensions selected
* ### Hint: use the `f_classif` selector instead of `chi2`
```
X, y = make_classification(n_informative=3, n_redundant=10, random_state=5)
print(X.shape)
# enter code here
```
| github_jupyter |
# Imports
```
import warnings
warnings.filterwarnings(action='ignore')
import tensorflow as tf
from tensorflow import keras
import sklearn
from sklearn.metrics import roc_curve, auc, log_loss, precision_score, f1_score, recall_score, confusion_matrix
from sklearn.model_selection import KFold, StratifiedKFold
import matplotlib as mplb
import matplotlib.pyplot as plt
#plt.style.use('ggplot')
import numpy as np
import pandas as pd
import seaborn as sns
import os
import zipfile
import shutil
import getpass
import requests
from IPython.display import clear_output
from tqdm.notebook import tqdm
import datetime
%load_ext tensorboard
print(f'[INFO] Using tensorflow-gpu {tf.__version__}')
```
# Config
```
os.environ['TF_CPP_MIN_LOG_LEVEL'] = "2"
physical_devices = tf.config.list_physical_devices('GPU')
tf.config.experimental.set_memory_growth(physical_devices[0], True)
seed_val = 2020
# set seed
np.random.seed(seed=seed_val)
tf.random.set_seed(seed=seed_val)
```
# Params
```
# model / training hyper-parameters
IMG_SIZE = 224
IMG_SHAPE = (IMG_SIZE, IMG_SIZE, 3)  # RGB input shape fed to the network
BATCH_SIZE = 64
class_names = ['NEG', 'POS']  # index 0 = 'NEG', index 1 = 'POS'
# dataset layout rooted one directory above the notebook
base_dir = '../'
train_images_dir = os.path.join(base_dir, 'Datasets/Images', 'train')
val_images_dir = os.path.join(base_dir, 'Datasets/Images', 'val')
test_images_dir = os.path.join(base_dir, 'Datasets/Images', 'test')
train_csv_path = os.path.join(base_dir, 'Datasets/Csv', 'Train.csv')
test_csv_path = os.path.join(base_dir, 'Datasets/Csv', 'Test.csv')
# NOTE(review): this points at Train.csv, the same file as train_csv_path --
# looks like a copy-paste slip; a sample-submission CSV was probably intended.
# TODO confirm against the dataset's Csv folder.
sample_csv_path = os.path.join(base_dir, 'Datasets/Csv', 'Train.csv')
train_df = pd.read_csv(train_csv_path)
test_df = pd.read_csv(test_csv_path)
sample_sub_df = pd.read_csv(sample_csv_path)
train_df.head()
test_df.head()
sample_sub_df.tail()
```
# Datasets & Dataloaders
```
image_generator = keras.preprocessing.image.ImageDataGenerator(featurewise_center=False,
preprocessing_function=keras.applications.efficientnet.preprocess_input,
rotation_range=33,
brightness_range=[0.3, 1.0],
zoom_range=0.3,
fill_mode='nearest',
horizontal_flip=True,
vertical_flip=True,
#rescale=1./255.0,
validation_split=0.25)
train_generator = image_generator.flow_from_directory(directory=train_images_dir+'/train',
target_size=(IMG_SIZE, IMG_SIZE),
batch_size=BATCH_SIZE,
seed=seed_val,
subset='training')
validation_generator = image_generator.flow_from_directory(directory=train_images_dir+'/train',
target_size=(IMG_SIZE, IMG_SIZE),
batch_size=BATCH_SIZE,
seed=seed_val,
subset='validation')
for imgs, labels in train_generator:
print(f"First image shape : {imgs[0].shape}, label : {labels[0]}")
break
```
# Visualization
```
def show_training_sample(batch_size=BATCH_SIZE):
    """Display up to 25 images from the next training batch in a 5x5 grid.

    Titles each tile with the class name decoded from the one-hot label.
    """
    batch_images, batch_labels = next(iter(train_generator))
    plt.figure(figsize=(22, 18))
    n_rows = n_cols = 5
    n_shown = min(25, batch_size)
    for idx in range(n_shown):
        plt.subplot(n_rows, n_cols, idx + 1)
        plt.imshow(batch_images[idx])
        plt.title(class_names[tf.argmax(batch_labels[idx])])
        plt.axis("off")
```
show_training_sample()
```
arch_name = "EfficientNetB4"
base_arch = getattr(tf.keras.applications, arch_name)
base_model = base_arch(include_top=False, input_shape=IMG_SHAPE)
# freeze trained layers
for layer in base_model.layers:
layer.trainable = False
def build_model(fc_size=2, n_dense_units=512):
    """Build a classification head on top of the frozen `base_model`.

    Args:
        fc_size: number of output classes; if > 1 a softmax head with
            `fc_size` units is used, otherwise a single sigmoid unit.
        n_dense_units: width of the hidden fully-connected layer.

    Returns:
        An uncompiled `keras.Model` mapping IMG_SHAPE inputs to class scores.
    """
    # fixed: original had a duplicated assignment (`inputs = inputs = ...`)
    inputs = keras.Input(shape=IMG_SHAPE)
    # run the frozen backbone in inference mode so its BatchNorm statistics
    # are not updated during fine-tuning
    x = base_model(inputs, training=False)
    x = keras.layers.GlobalAveragePooling2D()(x)
    x = keras.layers.Dense(units=n_dense_units, activation='relu')(x)
    x = keras.layers.Dropout(0.3)(x)
    if fc_size > 1:
        predictions = keras.layers.Dense(units=fc_size, activation="softmax")(x)
    else:
        predictions = keras.layers.Dense(units=1, activation="sigmoid")(x)
    model = keras.Model(inputs=inputs, outputs=predictions)
    return model
model = build_model(fc_size=2, n_dense_units=1024)
model.summary()
```
# Training phase
```
# training params
# optimizer
lr = 2e-5
optimizer = keras.optimizers.Adam(learning_rate=lr)
# loss
loss_fn = keras.losses.CategoricalCrossentropy()
model.compile(optimizer=optimizer, loss=loss_fn, metrics=['AUC'])
num_epochs = 50
optim_name = optimizer.get_config()['name']
model_name = f'tf_model_x_rays_based_on_{arch_name}_and_{optim_name}.h5'
model_path = os.path.join(base_dir, 'Models', model_name)
# CALLBACKS
auc_ckpt = keras.callbacks.ModelCheckpoint(filepath=model_path,
verbose=1,
monitor='val_auc',
mode='max',
save_best_only=True)
acc_ckpt = keras.callbacks.ModelCheckpoint(filepath=model_path,
verbose=1,
mode='max',
monitor='val_accuracy',
save_best_only=True)
loss_ckpt = keras.callbacks.ModelCheckpoint(filepath=model_path,
verbose=1,
mode='min',
monitor='val_loss',
save_best_only=True)
es = keras.callbacks.EarlyStopping(monitor='val_loss',
patience=20,
verbose=1,
restore_best_weights=True)
reduce_lr = tf.keras.callbacks.ReduceLROnPlateau(monitor='val_auc',
factor=0.1,
patience=10,
verbose=1,
mode='max',
min_lr=lr)
LOGDIR = os.path.join(base_dir, "logs", datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))
tensorboard_callback = keras.callbacks.TensorBoard(LOGDIR, histogram_freq=1)
# bot config
#bot_callback = botCallback(access_token=access_token)
#plotter = Plotter(access_token)
CALLBACKS = [auc_ckpt, loss_ckpt, es, reduce_lr, tensorboard_callback] #bot_callback, plotter]
print(LOGDIR)
%tensorboard --logdir {LOGDIR}
h = model.fit(train_generator,
validation_data=validation_generator,
epochs=num_epochs,
steps_per_epoch=train_generator.n // BATCH_SIZE,
validation_steps=validation_generator.n // BATCH_SIZE,
callbacks=CALLBACKS)
```
# Results
```
y_hat = model.predict(validation_generator)
y_hat = tf.argmax(y_hat, axis=1).numpy()
y_true = validation_generator.classes
y_true.shape, y_hat.shape
# preds = model(validation_generator)
fpr, tpr, thresholds = roc_curve(y_true, y_hat, pos_label=1)
print(f'[INFO] False positive rate : {fpr}')
print(f'[INFO] True positive rate : {tpr}')
print(f'[INFO] Thresholds : {thresholds}')
metric = auc(x=fpr, y=tpr)
plt.figure(figsize=(20, 8))
plt.plot(fpr, tpr, label=f"AUC score = {metric}")
plt.legend(fontsize = 14)
plt.xlabel('False positive rate', fontsize = 18)
plt.ylabel('True positive rate', fontsize = 18)
plt.xlim(0,1)
plt.ylim(0,1)
plt.title('ROC Curve')
plt.show()
```
# Scores CV/LB
# Predictions
```
def load_models(cv_models_path=os.path.join(base_dir, 'Models', 'CV_models'), optim_name="Adam"):
    """Load the five per-fold CV models from disk, frozen for inference.

    Args:
        cv_models_path: folder containing the per-fold .h5 files.
        optim_name: optimizer name embedded in the saved filenames.

    Returns:
        A list of frozen keras models. If any fold fails to load, the
        in-memory `model` (file-level global) is appended as a fallback.
    """
    models = []
    n_folds = 5
    try:
        for fold_num in range(1, n_folds + 1):
            fold_path = os.path.join(
                cv_models_path,
                f"tf_xrays_model_based_on_{arch_name}_and_{optim_name}_fold_{fold_num}.h5")
            m = keras.models.load_model(fold_path)
            m.trainable = False  # inference only
            models.append(m)
    # fixed: was a bare `except:` which also swallows KeyboardInterrupt/SystemExit
    except Exception:
        # NOTE(review): if loading fails midway, `models` still contains the
        # folds loaded so far plus the fallback -- confirm this mix is intended.
        model.trainable = False
        models.append(model)
    return models
models = load_models(optim_name=optim_name)
len(models)
def test_step(models):
    """Predict on every image in the test folder, averaging over `models`.

    Returns:
        (images_test, predictions): image IDs (filenames without extension)
        and the mean positive-class probability per image.

    NOTE(review): assumes each model outputs at least two columns and that
    column 1 is the positive class -- confirm for single-output models.
    """
    images_test = []
    predictions = []
    # iterate over the raw image files under <test_images_dir>/test
    for im in tqdm(os.listdir(os.path.join(test_images_dir, 'test')), desc=f"Predicting on test images "):
        # image ID = filename without its extension
        images_test.append(im.split('.')[0])
        x = keras.preprocessing.image.load_img(os.path.join(test_images_dir, 'test', im), target_size=(IMG_SIZE, IMG_SIZE))
        x = keras.preprocessing.image.img_to_array(x)
        # same preprocessing as the training generator's preprocessing_function
        x = keras.applications.efficientnet.preprocess_input(x)
        tmp_preds = []
        for model in models:
            pred = model.predict(x.reshape(-1, IMG_SIZE, IMG_SIZE, 3))[0][1]# get column 1 of prediction
            tmp_preds.append(pred)
        # simple (unweighted) mean over the ensemble
        predictions.append(np.array(tmp_preds).mean())
    return images_test, predictions
images_test, predictions = test_step(models = [model])
assert len(predictions) == len(images_test)
my_file = pd.DataFrame({
'ID': images_test,
'LABEL':predictions
})
my_file
file_name = f"tf_xrays_based_on_{arch_name}_bs_{BATCH_SIZE}_opt_{optim_name}_lr_{lr}_ep_{num_epochs}.csv"
my_file.to_csv(os.path.join(base_dir, 'Submissions', file_name), index=False)
print(f"[INFO] Saved file as {file_name}")
```
| github_jupyter |
# 1_Running_EnergyPlus_using_the_Command_Prompt
## The command prompt in Windows via a Jupyter Notebook
Just use an '!'
So to print the current working directory...
```
!cd
```
... this is equivalent to opening the 'Command Prompt' window and typing 'cd'.
It's also possible to use variables here, by putting a '$' before them. So the code below is equivalent to `!cd` ...
```
my_variable='cd'
!$my_variable
```
## Using the command prompt with EnergyPlus
### Checking we can access the EnergyPlus .exe
Print the EnergyPlus version...
```
!C:\EnergyPlusV8-9-0\EnergyPlus -v
```
### Printing the EnergyPlus.exe help
```
!C:\EnergyPlusV8-9-0\EnergyPlus -h
```
### Running an EnergyPlus simulation
```
import os
output_directory=os.path.abspath('sim')
idf_arg=os.path.join(output_directory,
'1ZoneUncontrolled.idf'
)
weather_arg=r'-w C:\EnergyPlusV8-9-0\WeatherData\USA_CA_San.Francisco.Intl.AP.724940_TMY3.epw'
output_directory_arg='-d ' + output_directory
st=' '.join(['-x',
'-r',
'-c',
output_directory_arg,
weather_arg,
idf_arg])
print(st)
!C:\EnergyPlusV8-9-0\EnergyPlus $st
```
### Or as a function...
```
import os
def run_energyplus(epexe_fp,
                   out_fp,
                   idf_fp,
                   epw_fp,
                   output_prefix='eplus'
                   ):
    """Runs the EnergyPlus software

    Arguments:
        - epexe_fp (str): the absolute filepath of the 'energyplus.exe' file - excluding the extension
        - out_fp (str): the absolute filepath of the output folder
        - idf_fp (str): the absolute filepath of the idf file - including the extension
        - epw_fp (str): the absolute filepath of the epw file - including the extension
        - output_prefix (str) : the prefix to the output file names

    Note: ReadVarsESO will only work if the idf file is placed in the output folder
    """
    #CREATES THE 'OUT' FOLDER IF IT DOESN'T EXIST
    if not os.path.isdir(out_fp):
        os.mkdir(out_fp)
    #DELETES THE 'eplusout.expidf' FILE IN 'out_fp' IF IT'S PRESENT
    # this is needed to force the recreation of this file...
    expidf_fp=os.path.join(out_fp,output_prefix+'out.expidf')
    if os.path.isfile(expidf_fp):
        os.remove(expidf_fp)
    #RUN ENERGYPLUS VIA COMMAND PROMPT
    # '!' is the IPython shell escape; $name expands the Python variables into
    # the command line. This only works inside Jupyter/IPython, not plain Python.
    # See `EnergyPlus -h` for the meaning of the -x/-r/-c/-d/-p/-w flags.
    !$epexe_fp -x -r -c -d $out_fp -p $output_prefix -w $epw_fp $idf_fp
    return
run_energyplus
```
### Running an EnergyPlus simulation using the 'run_energyplus' function
```
epexe_fp=r'C:\EnergyPlusV8-9-0\EnergyPlus'
out_fp=os.path.abspath('sim')
idf_fp=os.path.join(out_fp,
'1ZoneUncontrolled.idf'
)
epw_fp=r'C:\EnergyPlusV8-9-0\WeatherData\USA_CA_San.Francisco.Intl.AP.724940_TMY3.epw'
run_energyplus(epexe_fp,
out_fp,
idf_fp,
epw_fp
)
```
| github_jupyter |
Copyright 2020 Montvieux Ltd
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
```
import PIL.Image
from IPython.display import display,clear_output,HTML
from IPython.display import Image as DisplayImage
import base64
import json
from io import StringIO
import ipywidgets as widgets
import sys
import time
import datetime
import imageio
import numpy as np
import io
import os
from stable_baselines.common.env_checker import check_env
from stable_baselines.common.evaluation import evaluate_policy
from plark_game import classes
from gym_plark.envs import plark_env,plark_env_guided_reward,plark_env_top_left
from stable_baselines import DQN, PPO2, A2C, ACKTR
from stable_baselines.bench import Monitor
from stable_baselines.common.vec_env import DummyVecEnv
import helper
import self_play
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.ERROR)
%matplotlib inline
%load_ext autoreload
%autoreload 2
```
### Save model location
```
basicdate = str(datetime.datetime.now().strftime("%Y%m%d_%H%M%S"))
basepath = '/data/agents/models'
exp_name = 'test_' + basicdate
exp_path = os.path.join(basepath, exp_name)
print(exp_path)
```
# Use the self play training loop - short run time example
```
basicdate = str(datetime.datetime.now().strftime("%Y%m%d_%H%M%S"))
basepath = '/data/agents/models'
exp_name = 'test_' + basicdate
exp_path = os.path.join(basepath, exp_name)
print(exp_path)
video_path,basewidth,hsize = self_play.run_self_play(exp_name,exp_path,basicdate,
pelican_testing_interval=100,pelican_max_initial_learning_steps=1000,
panther_testing_interval=100,panther_max_initial_learning_steps=1000,
self_play_testing_interval=100,self_play_max_learning_steps_per_agent=1000,self_play_iterations=10,
model_type='dqn',log_to_tb=False,image_based=False
)
# video_path = '/data/agents/models/test_20200325_184254/test_self_play.mp4'
# basewidth = 310
# hsize = 250
video = io.open(video_path, 'r+b').read()
encoded = base64.b64encode(video)
HTML(data='''<video alt="test" width="'''+str(basewidth)+'''" height="'''+str(hsize)+'''" controls>
<source src="data:video/mp4;base64,{0}" type="video/mp4" />
</video>'''.format(encoded.decode('ascii')))
```
# Use the self play training loop - Longer running example
```
basicdate = str(datetime.datetime.now().strftime("%Y%m%d_%H%M%S"))
basepath = '/data/agents/models'
exp_name = 'test_' + basicdate
exp_path = os.path.join(basepath, exp_name)
print(exp_path)
video_path,basewidth,hsize = self_play.run_self_play(exp_name,exp_path,basicdate,
pelican_testing_interval=10000,pelican_max_initial_learning_steps=100000,
panther_testing_interval=10000,panther_max_initial_learning_steps=100000,
self_play_testing_interval=10000,self_play_max_learning_steps_per_agent=100000,self_play_iterations=1000,
model_type='dqn',log_to_tb=False,image_based=False
)
# video_path = '/data/agents/models/test_20200325_184254/test_self_play.mp4'
# basewidth = 310
# hsize = 250
video = io.open(video_path, 'r+b').read()
encoded = base64.b64encode(video)
HTML(data='''<video alt="test" width="'''+str(basewidth)+'''" height="'''+str(hsize)+'''" controls>
<source src="data:video/mp4;base64,{0}" type="video/mp4" />
</video>'''.format(encoded.decode('ascii')))
```
# Make video of previously trained agents
```
# Please update the paths below to match a trained agent.
panther_agent_filepath = '/data/agents/models/test_20210209_213638/ppo2_20210209_213638_panther/'
image_based = False
pelican_env = plark_env.PlarkEnv(driving_agent='pelican',panther_agent_filepath=panther_agent_filepath,config_file_path='/Components/plark-game/plark_game/game_config/10x10/balanced.json',image_based=image_based)
pelican_load_path = '/data/agents/models/test_20210209_213638/ppo2_20210209_213638_pelican/ppo2_20210209_213638_pelican.zip'
pelican_model = PPO2.load(pelican_load_path)
#INFO:helper:model_path: /data/agents/models/test_20210209_213638/ppo2_20210209_213638_panther/ppo2_20210209_213638_panther.zip
#NFO:helper:model_path: /data/agents/models/test_20210209_213638/ppo2_20210209_213638_pelican/ppo2_20210209_213638_pelican.zip
video_path = '/data/test_video/'
os.makedirs(video_path, exist_ok=True)
video_file_path = os.path.join(video_path, 'test_self_play.mp4')
basewidth,hsize = helper.make_video(pelican_model,pelican_env,video_file_path,verbose=True,n_steps = 100000)
video = io.open(video_file_path, 'r+b').read()
encoded = base64.b64encode(video)
HTML(data='''<video alt="test" width="'''+str(basewidth)+'''" height="'''+str(hsize)+'''" controls>
<source src="data:video/mp4;base64,{0}" type="video/mp4" />
</video>'''.format(encoded.decode('ascii')))
```
| github_jupyter |
## Set up imports
```
# install PRAW and newspaper3k
!pip install praw
!pip3 install newspaper3k
import pandas as pd
import praw
import os
import requests
from bs4 import BeautifulSoup
import re
import pickle
from newspaper import Article
import spacy
from collections import Counter
from datetime import datetime
```
## Set up various objects
```
def lowerify(text):
    """Return *text* in lower case (normalizes city/state names for matching)."""
    lowered = text.lower()
    return lowered
# set up cities/states locations datafrane
locs_path = 'https://raw.githubusercontent.com/Lambda-School-Labs/Labs25-Human_Rights_First-TeamB-DS/main/project/cities_states.csv'
locs_df = pd.read_csv(locs_path)
locs_df = locs_df.drop(columns=['Unnamed: 0', 'country'])
locs_df['city_ascii'] = locs_df['city_ascii'].apply(lowerify)
locs_df['admin_name'] = locs_df['admin_name'].apply(lowerify)
# state to city lookup table
states_map = {}
for state in list(locs_df.admin_name.unique()):
states_map[state] = locs_df[locs_df['admin_name'] == state]['city_ascii'].to_list()
# police brutality indentifying nlp
# make sure to import model.pkl
model_file = open('model.pkl', 'rb')
pipeline = pickle.load(model_file)
model_file.close()
# spacy nlp model
nlp = spacy.load('en_core_web_sm')
# Set up PRAW
# PRAW credentials go here
```
## Run the update and see what's returned
```
# Grab data from reddit
data = []
print('Pulling data from Reddit...')
for submission in reddit.subreddit("news").top('week', limit=500):
data.append([
submission.id, submission.title, submission.url
])
# construct a dataframe with the data
col_names = ['id', 'title', 'url']
df = pd.DataFrame(data, columns=col_names)
print(f'Number of entries initially pulled: {df.shape[0]}\n')
# pull the text from each article itself using newspaper3k
content_list = []
date_list = []
# go through each URL and use newspaper3k to extract data
print('Extracting data via newspaper3k...')
for id_url in df['url']:
# use newspaper3k to extract text
article = Article(id_url)
article.download()
# if the article doesn't download, the error is thrown in parse()
try:
article.parse()
except:
# add null values to show no connection
content_list.append(None)
date_list.append(None)
continue
content_list.append(article.text)
# this will be null if newspaper3k can't find it
date_list.append(article.publish_date)
df['text'] = content_list
df['date'] = date_list
print('Number of entries with missing data:')
print(df.isnull().sum(),'\n')
# drop any articles with missing data columns
df = df.dropna()
df = df.reset_index()
df = df.drop(columns='index')
print(f'Resulting entry count: {df.shape[0]}\n')
# convert date column to pandas Timestamps
def timestampify(date):
    """Convert *date* to an ISO-8601 string via a pandas Timestamp (unit='s')."""
    ts = pd.Timestamp(date, unit='s')
    return ts.isoformat()
df['date'] = df['date'].apply(timestampify)
print('Filtering through police brutality filter...')
# use NLP model to filter posts
df['is_police_brutality'] = pipeline.predict(df['title'])
df = df[df['is_police_brutality'] == 1]
df = df.drop(columns='is_police_brutality')
print(f'Number of entries determined to be about police brutality: {df.shape[0]}')
# use spaCy to extract location tokens
tokens_list = []
print('Tokenizing through spaCy...')
for text in df['text']:
doc = nlp(text)
ents = [e.text.lower() for e in doc.ents if e.label_ == 'GPE']
tokens_list.append(ents)
df['tokens'] = tokens_list
# figure out which city and state the article takes place in
city_list = []
state_list = []
geo_list = []
print('Compiling geolocation data...')
for tokens in df['tokens']:
# set up Counter
c = Counter(tokens)
# set up geolocation dict for geo list
geo_entry = {'lat': None, 'long': None}
# count which states come back the most, if any
state_counts = {}
for state in states_map:
if c[state] > 0:
state_counts[state] = c[state]
# get state(s) that came back the most as dict with lists
max_count = 0
max_state = None
for state in state_counts:
if state_counts[state] > max_count:
max_count = state_counts[state]
max_state = {state: {}}
elif state_counts[state] == max_count:
max_state[state] = {}
# if no state is found
if max_state is None:
city_list.append(None)
state_list.append(None)
geo_list.append(geo_entry)
continue
max_city = None
# get any cities in tokens based on states
for state in max_state: # ideally this should only run once
city_counts = {}
for city in states_map[state]:
if c[city] > 0:
city_counts[city] = c[city]
max_state[state] = city_counts
# get the city/state combo that came back the most
max_count = 0
for city in city_counts:
if city_counts[city] > max_count:
max_count = city_counts[city]
max_city = (city, state)
# if no city is found
if max_city is None:
city_list.append(None)
state_list.append(None)
geo_list.append(geo_entry)
continue
# the city and state should be known now
city_list.append(max_city[0].title())
state_list.append(max_city[1].title())
# now get the geolocation data
row = locs_df[(
(locs_df['city_ascii'] == max_city[0]) &
(locs_df['admin_name'] == max_city[1])
)]
row = row.reset_index()
if row.empty:
pass
else:
geo_entry['lat'] = row['lat'][0]
geo_entry['long'] = row['lng'][0]
geo_list.append(geo_entry)
# loop ends, add cities and states onto dataframe
df['city'] = city_list
df['state'] = state_list
df['geocoding'] = geo_list
print('Number of entries where geolocation data could not be found:')
print(df.isnull().sum(),'\n')
# drop any columns with null entries for location
df = df.dropna()
df = df.reset_index()
df = df.drop(columns='index')
# cleanup to match 846 api
def listify(text):
    """Return *text* wrapped in a one-element list (the API expects a list of links)."""
    wrapped = [text]
    return wrapped
df['links'] = df['url'].apply(listify)
df['description'] = df['text']
df = df.drop(columns=['tokens', 'text'])
df = df[[
'id', 'state', 'city',
'date', 'title', 'description',
'links', 'geocoding'
]]
print(f'Final number of entries: {df.shape[0]}')
df.head()
```
| github_jupyter |
```
%matplotlib inline
```
********************************************************************************
3D Attitude Estimation - Benchmark
********************************************************************************
Goals of this script:
* implement two different UKFs on the 3D attitude estimation example.
* design the Extended Kalman Filter (EKF).
* compare the different algorithms with Monte-Carlo simulations.
*We assume the reader is already familiar with the considered problem described
in the related example.*
For the given problem, two different UKFs emerge, defined respectively as:
1- The state is embedded in $SO(3)$ with left multiplication, i.e.
* the retraction $\varphi(.,.)$ is the $SO(3)$ exponential where
uncertainty is multiplied on the left by the state.
* the inverse retraction $\varphi^{-1}(.,.)$ is the $SO(3)$
logarithm.
2- The state is embedded in $SO(3)$ with right multiplication, i.e.
* the retraction $\varphi(.,.)$ is the $SO(3)$ exponential where
uncertainty is multiplied on the right by the state.
* the inverse retraction $\varphi^{-1}(.,.)$ is the $SO(3)$
logarithm.
We tests the different algorithms with the same noise parameter setting and on
simulation with moderate initial heading error.
Import
==============================================================================
```
from scipy.linalg import block_diag
from ukfm import SO3, UKF, EKF
from ukfm import ATTITUDE as MODEL
import ukfm
import numpy as np
import matplotlib
ukfm.set_matplotlib_config()
```
Simulation Setting
==============================================================================
We compare the filters on a large number of Monte-Carlo runs.
```
# Monte-Carlo runs
N_mc = 100
```
This script uses the :meth:`~ukfm.ATTITUDE` model. The initial values of the
heading error has 10° standard deviation.
```
# sequence time (s)
T = 100
# IMU frequency (Hz)
imu_freq = 100
# IMU noise standard deviation (noise is isotropic)
imu_std = np.array([5/180*np.pi, # gyro (rad/s)
0.4, # accelerometer (m/s**2)
0.3]) # magnetometer
# create the model
model = MODEL(T, imu_freq)
# propagation noise covariance matrix
Q = imu_std[0]**2*np.eye(3)
# measurement noise covariance matrix
R = block_diag(imu_std[1]**2*np.eye(3), imu_std[2]**2*np.eye(3))
# initial uncertainty matrix
P0 = (10/180*np.pi)**2*np.eye(3) # The state is perfectly initialized
# sigma point parameters
alpha = np.array([1e-3, 1e-3, 1e-3])
```
Filter Design
==============================================================================
Additionally to the UKFs, we compare them to an EKF. The EKF has the same
uncertainty representation as the UKF with right uncertainty representation.
We set variables for recording metrics before launching Monte-Carlo
simulations.
```
left_ukf_err = np.zeros((N_mc, model.N, 3))
right_ukf_err = np.zeros_like(left_ukf_err)
ekf_err = np.zeros_like(left_ukf_err)
left_ukf_nees = np.zeros((N_mc, model.N))
right_ukf_nees = np.zeros_like(left_ukf_nees)
ekf_nees = np.zeros_like(left_ukf_nees)
```
Monte-Carlo Runs
==============================================================================
We run the Monte-Carlo through a for loop.
```
for n_mc in range(N_mc):
print("Monte-Carlo iteration(s): " + str(n_mc+1) + "/" + str(N_mc))
# simulate true states and noisy inputs
states, omegas = model.simu_f(imu_std)
# simulate accelerometer and magnetometer measurements
ys = model.simu_h(states, imu_std)
# initial state with error
state0 = model.STATE(Rot=states[0].Rot.dot(
SO3.exp(10/180*np.pi*np.random.randn(3))))
# covariance need to be "turned"
left_ukf_P = state0.Rot.dot(P0).dot(state0.Rot.T)
right_ukf_P = P0
ekf_P = P0
# variables for recording estimates of the Monte-Carlo run
left_ukf_states = [state0]
right_ukf_states = [state0]
ekf_states = [state0]
left_ukf_Ps = np.zeros((model.N, 3, 3))
right_ukf_Ps = np.zeros_like(left_ukf_Ps)
ekf_Ps = np.zeros_like(left_ukf_Ps)
left_ukf_Ps[0] = left_ukf_P
right_ukf_Ps[0] = right_ukf_P
ekf_Ps[0] = ekf_P
left_ukf = UKF(state0=states[0], P0=P0, f=model.f, h=model.h, Q=Q, R=R,
phi=model.phi,
phi_inv=model.phi_inv,
alpha=alpha)
right_ukf = UKF(state0=states[0], P0=P0, f=model.f, h=model.h, Q=Q, R=R,
phi=model.right_phi,
phi_inv=model.right_phi_inv,
alpha=alpha)
ekf = EKF(model=model, state0=states[0], P0=P0, Q=Q, R=R,
FG_ana=model.ekf_FG_ana,
H_ana=model.ekf_H_ana,
phi=model.right_phi)
# filtering loop
for n in range(1, model.N):
# propagation
left_ukf.propagation(omegas[n-1], model.dt)
right_ukf.propagation(omegas[n-1], model.dt)
ekf.propagation(omegas[n-1], model.dt)
# update
left_ukf.update(ys[n])
right_ukf.update(ys[n])
ekf.update(ys[n])
# save estimates
left_ukf_states.append(left_ukf.state)
right_ukf_states.append(right_ukf.state)
ekf_states.append(ekf.state)
left_ukf_Ps[n] = left_ukf.P
right_ukf_Ps[n] = right_ukf.P
ekf_Ps[n] = ekf.P
# get state
Rots, _ = model.get_states(states, model.N)
left_ukf_Rots, _ = model.get_states(left_ukf_states, model.N)
right_ukf_Rots, _ = model.get_states(right_ukf_states, model.N)
ekf_Rots, _ = model.get_states(ekf_states, model.N)
# record errors
left_ukf_err[n_mc] = model.errors(Rots, left_ukf_Rots)
right_ukf_err[n_mc] = model.errors(Rots, right_ukf_Rots)
ekf_err[n_mc] = model.errors(Rots, ekf_Rots)
# record NEES
left_ukf_nees[n_mc] = model.nees(left_ukf_err[n_mc], left_ukf_Ps,
left_ukf_Rots, 'LEFT')
right_ukf_nees[n_mc] = model.nees(right_ukf_err[n_mc], right_ukf_Ps,
right_ukf_Rots, 'RIGHT')
ekf_nees[n_mc] = model.nees(ekf_err[n_mc], ekf_Ps, ekf_Rots, 'RIGHT')
```
Results
==============================================================================
We visualize the results averaged over Monte-Carlo sequences, and compute the
Root Mean Squared Error (RMSE) averaged over all Monte-Carlo.
```
model.benchmark_print(left_ukf_err, right_ukf_err, ekf_err)
```
All the curves have the same shape. Filters obtain the same performances.
We finally compare the filters in term of consistency (Normalized Estimation
Error Squared, NEES), as in the localization benchmark.
```
model.nees_print(left_ukf_nees, right_ukf_nees, ekf_nees)
```
All the filters obtain the same NEES and are consistent.
**Which filter is the best ?** For the considered problem, **left UKF**,
**right UKF**, and **EKF** obtain the same performances. This is expected as
when the state consists of an orientation only, left and right UKFs are
implicitly the same. The EKF obtains similar results as it is also based on a
retraction built on $SO(3)$ (not with Euler angles).
Conclusion
==============================================================================
This script compares two UKFs and one EKF for the problem of attitude
estimation. All the filters obtain similar performances as the state involves
only the orientation of the platform.
You can now:
- compare the filters in different noise setting to see if the filters still
get the same performances.
- address the problem of 3D inertial navigation, where the state is defined as
the oriention of the vehicle along with its velocity and its position.
| github_jupyter |
# Notebook for class 4
This is the notebook I was typing into in class 4, with some notes.
First we explored different data types.
```
my_float = 1.0
my_float
```
We can see what type of thing a variable refers to with the `type` function. We will see more of functions later.
```
type(my_float)
```
Notice the lack of a decimal point in the following statement.
```
my_other_thing = 1
```
This gives us an `int`:
```
my_other_thing
type(my_other_thing)
```
We are about to make a string. Here I am trying to make the string "I love Python", but I get an error - why?
```
my_string = I love python
```
It looked like there was a problem with the spaces, so I replace the spaces with underscores:
```
my_string = I_love_python
```
Now it becomes clear that Python thought that I was trying to refer to a variable name that does not exist.
Here's a string - it needs quotes:
```
my_string = "I love python"
my_string
type(my_string)
```
Notice that Python displays strings with quotes, by default (at least in the Notebook):
```
'I love python'
```
If you use the `print` function, it does not add the quotes.
```
print(my_string)
```
Here I go through the class exercise:
```
a = 1.0
b = 1
c = 'Hello'
```
Here I add a float to itself:
```
a + a
```
The addition produces a `float`:
```
type(a + a)
```
Adding two `int`s gives an `int`:
```
b + b
```
Adding two strings concatenates the strings:
```
c + c
```
Adding a `float` to an `int` gives a `float`:
```
a + b
```
Python won't let you add `float`s (or `int`s) to a string:
```
a + c
```
Remember *assignment* - a name, followed by `=`, followed by an expression:
```
a = 1
```
It sets the value of `a` (in this case):
```
a
```
Here is a *comparison* expression. `<` is a *comparison operator*, and returns the result of the comparison _less than_.
```
a < 10
```
Here we look at the value returned from this expression:
```
my_logical = a < 10
my_logical
```
It's a `bool` type (Boolean).
```
type(my_logical)
```
Booleans can be either `True` or `False`:
```
a > 10
```
Remember assignment (again). It has the single `=` between the name (on the left) and the expression (on the right).
```
a = 1
```
Here is something very different - an equality comparison operator. This is two equals signs: `==`. It's an expression. It tests whether the thing on the left is equal to the thing on the right, returning `True` or `False`.
```
a == 1
```
Now we consider *call expressions*.
```
a = 2 / 3
a
```
Here we *call* the function `round`, with the *argument* `a` - the number we want it to round.
```
round(a)
```
We can pass `round` two arguments. The second argument is the number of decimal points we want it to round to.
```
round(a, 2)
```
We can also give this argument a *name*. In that case, it is called a *keyword argument*.
```
round(a, ndigits=2)
```
To find what the names of the arguments are, look at the help:
```
round?
```
We have to get the name of the argument right, otherwise Python
complains.
```
round(a, number_of_digits=2)
```
Check the names, from the help:
```
round?
```
Now we get on to arrays.
```
import numpy as np
```
Here I am calling a function with no arguments. It's a call expression, but in this case, the function needs no arguments. It returns a single random number:
```
np.random.uniform()
```
Once we have checked the help, we find there is a `size` argument, that we can specify. This allows us to ask for 100 random numbers.
```
np.random.uniform(size=100)
```
Here I put these into a variable:
```
randoms = np.random.uniform(size=100)
```
It's a new type - `array`:
```
type(randoms)
```
One of the things that `array`s have, is a *length*. We can get that with the `len` function:
```
len(randoms)
randoms
```
Here we do a comparison on a single random number:
```
a_random = np.random.uniform()
a_random
```
For our simulation, we want to check whether the random number is less than 0.26. If so, we label this as a black juror.
```
a_random < 0.26
```
We want to do this 100 times. We can do this with arrays, in one shot:
```
black_yn = randoms < 0.26
black_yn
```
Now we can use the `np.count_nonzero` function to count how many `True` values there are. This is our first simulation of a jury pool, of 100 jurors.
```
np.count_nonzero(black_yn)
```
| github_jupyter |
# Chapter 7 Reading files in Python
```
file_handle = open('Ahmed_file.txt', 'r')
file_handle
file_handle = open('myfile.txt', 'r')
text = 'hi\nthere'
text
print(text)
len('x\ny')
len('x\ty')
print('x\ty')
```
# two ways to read files
1. read the file line by line
2. read the whole file
# read file line by line
```
file_handle = open('Ahmed_file.txt', 'r')
for line in file_handle:
print(line)
print('hello')
print('hello\n')
file_handle = open('Ahmed_file.txt', 'r')
for line in file_handle:
print(line.strip())
```
# اكتب برنامج لعد عدد الاسطر في ملف معين
```
fhandle = open('Ahmed_file.txt') # default mode is read
# fhandle = open('Ahmed_file.txt', 'r')
count = 0
for line in fhandle:
count += 1
count
```
# open file in different path
## relative path مسار نسبي
```
fhandle = open('files/file101.txt')
for line in fhandle:
print(line.strip())
```
## full path مسار كامل
```
fhandle = open('/home/motaz/Desktop/testfile.txt')
for line in fhandle:
print(line.strip())
```
# file path in Windows
```
print('dir1\file1.txt')
print('dir1\\file1.txt')
print('c:\users\msaad\Dektop\file.txt')
print('c:\\users\\msaad\\Dektop\\file.txt')
print('c:/users/msaad/Dektop/file.txt')
```
# Encoding
```
fhandle = open('files/file101.txt')
print(fhandle)
for line in fhandle:
print(line.strip())
fhandle = open('files/file101.txt', encoding='cp1256')
print(fhandle)
for line in fhandle:
print(line.strip())
fhandle = open('files/file101.txt', encoding='utf-8')
print(fhandle)
for line in fhandle:
print(line.strip())
```
# اكتب برنامج لعد عدد الاسطر في ملف معين
```
fh = open('files/mbox-short.txt')
count = 0
for line in fh:
count += 1
count
fh = open('files/mbox.txt')
count = 0
for line in fh:
count += 1
count
```
# Method 2: reading the whole file
```
file_handle = open('../files/mbox-short.txt')
text = file_handle.read()
text[:100]
print(text[:100])
len(text[:100])
text[3:7]
# number of chars: 7-3 = 4 letters
# index: 3, 4, 5, 6
# letters: 4, 5, 6, 7
```
# كم عدد الاحرف في الملف
```
len(text)
```
# كم عدد الكلمات في الملف
```
len(text.split())
```
# عدد الاسطر في الملف
```
text.count('\n')
len(text.split('\n'))
lines = text.split('\n')
lines[:5]
file_handle = open('../files/mbox.txt')
text = file_handle.read()
# number of chars
len(text)
# number of words
len(text.split())
# number of lines
len(text.split('\n'))
```
# اكتب برنامج لعد عدد الرسائل البريد الالكتروني من الملف
```
# method 1: read file line by line
file_handle = open('../files/mbox.txt')
count = 0
for line in file_handle:
if line.startswith('From:'):
count += 1
count
# method 2: read the whole file
file_handle = open('../files/mbox.txt')
text = file_handle.read()
lines = text.split('\n')
count = 0
for line in lines:
if line.startswith('From:'):
count += 1
count
```
# compare reading methods: line by line vs whole file
* reading a file line by line keeps only one line in memory at a time, so it can be slower, but it is useful for large files
* reading the whole file at once is faster than "line by line", but you cannot use it for files too large to fit in memory.
# اكتب برنامج للبحث عن عناوين البريد الالكتروني وطباعتها من الملف
```
file_handle = open('../files/mbox-short.txt')
count = 0
for line in file_handle:
if line.startswith('From:'):
print(line.strip())
count += 1
count
```
# اطبع فقط البريد الالكتروني في المسألة السابقة
```
file_handle = open('../files/mbox-short.txt')
count = 0
for line in file_handle:
if line.startswith('From:'):
print(line.strip().replace('From: ', ''))
count += 1
count
file_handle = open('../files/mbox-short.txt')
count = 0
for line in file_handle:
if line.startswith('From:'):
words = line.split()
print(words[1])
count += 1
count
```
# من السؤال السابق ، اطبع عناوين البريد الالكتروني من هذا النطاق فقط
domain: @uct.ac.za
```
file_handle = open('../files/mbox-short.txt')
count = 0
for line in file_handle:
if line.startswith('From:') and line.endswith('@uct.ac.za'):
words = line.split()
print(words[1])
count += 1
count
'@uct.ac.za' == '@uct.ac.za\n'
'@uct.ac.za\n'.strip()
file_handle = open('../files/mbox-short.txt')
count = 0
for line in file_handle:
if line.startswith('From:') and line.strip().endswith('@uct.ac.za'):
words = line.split()
print(words[1])
count += 1
count
# using continue
file_handle = open('../files/mbox-short.txt')
count = 0
for line in file_handle:
if line.startswith('From:'):
if not '@uct.ac.za' in line:
continue
words = line.split()
print(words[1])
count += 1
count
```
# اطبع كل الرسائل من بريطانيا
```
file_handle = open('../files/mbox-short.txt')
count = 0
for line in file_handle:
if line.startswith('From:') and line.strip().endswith('.uk'):
words = line.split()
print(words[1])
count += 1
count
```
# اكتب كود يطلب من المستخدم ادخال اسم ملف وطباعة محتواه
```
file_name = input('enter file name:')
file_handle = open(file_name)
text = file_handle.read()
print(text)
file_name = input('enter file name:')
file_handle = open(file_name)
text = file_handle.read()
print(text)
file_name = input('enter file name:')
try:
file_handle = open(file_name)
text = file_handle.read()
print(text)
except FileNotFoundError:
print('file not found')
```
# Writing to files in python
```
file_handle = open('myfile02.txt', 'w')
file_handle.write('hello, python is cool')
file_handle.close()
file_handle = open('myfile02.txt', 'w') # over write
file_handle.write('hello again')
file_handle.close()
```
# من السؤال السابق ، خزن عناوين البريد الالكتروني من هذا النطاق فقط على ملف
domain: @uct.ac.za
```
infile = open('../files/mbox-short.txt')
outfile = open('emails_uct.txt', 'w')
count = 0
for line in infile:
if line.startswith('From:') and line.strip().endswith('@uct.ac.za'):
words = line.split()
outfile.write(words[1] + '\n')
count += 1
infile.close()
outfile.close()
count
```
# اكتب برنامج لاستخراج كل عناوين البريد المرسلة في عام 2008
```
infile = open('../files/mbox.txt')
outfile = open('emails_2008.txt', mode='w')
lines = infile.read().split('\n')
for line in lines:
if line.startswith('From ') and line.strip().endswith('2008'):
outfile.write(line.split()[1] + '\n')
infile.close()
outfile.close()
```
# split vs strip
```
' hello my name is Ahmed\n'.strip()
' hello my name is Ahmed\n'.split()
```
# اكتب برنامج لاستخراج عناوين البريد المرسل والمستقبل والسنة
```
# Parse each email in the mbox dump and report sender, receiver and year.
# Emails are separated by a dashed divider line in this file.
infile = open('../files/mbox-short.txt')
emails = infile.read().split('----------------------')
infile.close()  # release the file handle once the whole text is in memory
for email in emails:
    sender = ''
    receiver = ''
    date = ''
    lines = email.split('\n')
    for line in lines:
        if line.startswith('From: '):
            sender = line.split()[1]
        if line.startswith('To: '):
            receiver = line.split()[1]
        if line.startswith('From '):
            date = line.split()[-1]
    # only report emails where all three headers were found
    if sender and receiver and date:
        # BUG FIX: labels were swapped -- `sender` (the From: address) was
        # printed under the "To:" label and `receiver` under "From:".
        print('From:{}\tTo:{}\tDate:{}'.format(sender, receiver, date))
x = 'ww'
if x:
    print('+')
```
| github_jupyter |
This file is part of MADIP: Molecular Atlas Data Integration Pipeline
This module provides helper functions for performing alignment of protein and gene IDs across different data sources.
Copyright 2021 Blue Brain Project / EPFL
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
## Align IDs
```
import pandas as pd
import numpy as np
import re
import pickle as pkl
import networkx as nx
from collections import Counter
from itertools import chain
pd.options.display.max_columns = None
pd.options.display.max_rows = None
import importlib
importlib.reload(protein_ids_alignment_helpers)
import protein_ids_alignment_helpers
from protein_ids_alignment_helpers import (
process_uniprot_mapping_data,
get_gene_unified,
get_uniprot_unified,
check_GN_consistency_within_studies,
any_in,
get_uniprot_raw_data,
get_gene_id_final
)
# load data produced by step_1_collect_protein_data.ipynb jupyter notebook
with open('../data/1_df_all_9may2021.pkl','rb') as f:
df_all = pkl.load(f)
# check for contaminant protein ids
print(len(df_all.loc[(~df_all['Uniprot'].isna())&(df_all['Uniprot'].str.contains("CON_"))]))
# CON_ is contaminant protein according to MaxQuant annotation
#drop potential experimental contaminants "CON__"
print(len(df_all))
df_all = df_all.loc[~((~df_all['Uniprot'].isna())&(df_all['Uniprot'].str.contains("CON_")))]
print(len(df_all))
# manually get Uniprot data from https://www.uniprot.org (latest done on 21july2020).
#The queries are:
#(taxonomy:"Mus musculus (Mouse) [10090]" OR taxonomy:"Rattus norvegicus (Rat) [10116]" OR taxonomy:"Homo sapiens (Human) [9606]") AND reviewed:yes
#(taxonomy:"Mus musculus (Mouse) [10090]" OR taxonomy:"Rattus norvegicus (Rat) [10116]" OR taxonomy:"Homo sapiens (Human) [9606]") AND reviewed:no
uniprot_rev, uniprot_unrev, uniprot_rev_dict, uniprot_unrev_dict = process_uniprot_mapping_data()
# It will take a while.
uniprot_rev.head()
# check for data formatting
print(df_all.loc[(~df_all['gene_names'].isna()) & (df_all['gene_names'].str.contains('; ')),'Study'].unique())
df_all.loc[(~df_all['Uniprot'].isna()) & (df_all['Uniprot'].str.contains(' '))].head()
# clean formatting: replace "; " with ";"
df_all.loc[(df_all['Study']=='Beltran 2016') & (df_all['gene_names'].str.contains('; ')),'gene_names'] = df_all.loc[(df_all['Study']=='Beltran 2016') & (df_all['gene_names'].str.contains('; ')),'gene_names'].str.replace('; ',';')
# check for data formatting
df_all.loc[(~df_all['gene_names'].isna()) & (df_all['gene_names'].str.contains('; ')),'Study'].unique()
# check for GN consistency within studies
multiids = check_GN_consistency_within_studies(df_all)
for i,v in multiids.items():
print(i,'\t',len(v))
multiids['Hamezah 2019']
multiids['Hamezah 2018']
multiids['Hosp 2017, soluble']
multiids['Hosp 2017, CSF']
multiids['Sharma 2015, cultured']
multiids['Sharma 2015, isolated']
multiids['Wisniewski 2015']
multiids['Geiger 2013']
multiids['Davis 2019']
multiids['Guergues 2019']
multiids['Kjell 2020']
df_all.loc[(df_all['gene_names']=='UCHL3;UCHL4') & (df_all['Study']=='Kjell 2020')].head()
df_all.loc[(df_all['gene_names']=='UCHL3') & (df_all['Study']=='Kjell 2020')].head()
df_all.loc[(df_all['gene_names']=='UCHL4') & (df_all['Study']=='Kjell 2020')].head()
df_all.loc[(df_all['gene_names']=='NFIA;NFIX') & (df_all['Study']=='Guergues 2019')].head()
df_all.loc[(df_all['gene_names']=='NFIA') & (df_all['Study']=='Guergues 2019')].head()
df_all.loc[(df_all['gene_names']=='NFIX') & (df_all['Study']=='Guergues 2019')].head()
# Remove partial duplicates of type "GN1;GN2" .. "GN1" within studies,
# because we need unique, unambiguous ids.
df_all = df_all.reset_index(drop=True)
print(len(df_all))
for study in df_all['Study'].unique():
    # .get(study, []) keeps this robust for studies that reported no
    # ambiguous gene names (the old multiids[study] would raise KeyError).
    index_names = df_all.loc[(df_all['Study'] == study)
                             & (df_all['gene_names'].isin(multiids.get(study, [])))].index
    df_all.drop(index_names, inplace=True)
print(len(df_all))
df_all = df_all.reset_index(drop=True)
# re-check GN consistency within studies after the removal
multiids2 = check_GN_consistency_within_studies(df_all)
for study, ambiguous in multiids2.items():
    print(study, '\t', len(ambiguous))
#for check
df_all.loc[(~df_all['gene_names'].isna()) & (df_all['gene_names'].str.contains(','))].head()
#for check
df_all.loc[(~df_all['gene_names'].isna()) & (df_all['gene_names'].str.contains('; '))].head()
#for check
df_all.loc[(~df_all['Uniprot'].isna()) & (df_all['Uniprot'].str.contains(','))].head()
#for check
df_all.loc[(~df_all['Uniprot'].isna()) & (df_all['Uniprot'].str.contains(' '))].head()
# del NA from df_all['Uniprot']
df_all_4ids = df_all[['gene_names','Uniprot','Study']].copy()
df_all_4ids = df_all_4ids.drop_duplicates(keep='first')
print(len(df_all_4ids))
df_all_4ids[~df_all_4ids['Uniprot'].isna()]['Uniprot'].str.split(";") [0:3]
a_uniprot = df_all_4ids[~df_all_4ids['Uniprot'].isna()]['Uniprot'].str.split(";")
print(len(a_uniprot))
# to remove isoform index, i.e. transform from PPPPPP-1 to PPPPPP
# and remove CON_ from weird uniprot ids
# In-place cleanup of the split Uniprot id lists: strip the MaxQuant "CON__"
# contaminant prefix and the "-N" isoform suffix from every id.
for entry in a_uniprot:
    if not isinstance(entry, list):
        continue
    for pos, uid in enumerate(entry):
        if "CON__" in uid:
            uid = uid.replace("CON__", "")
            # a Uniprot accession is at least 6 chars; print suspicious ids
            if len(uid) < 6:
                print(uid)
        entry[pos] = uid.split('-')[0]  # mutates a_uniprot in place
# 1536,1550d1535
# remove the malformed Uniprot id "1536,1550d1535" wherever it appears
for id_list in a_uniprot:
    try:
        id_list.remove("1536,1550d1535")
    except ValueError:
        pass
a_gn = df_all_4ids[~df_all_4ids['gene_names'].isna()]['gene_names'].str.split(";")
print(len(a_uniprot))
print(len(a_gn))
print(len(df_all_4ids[~df_all_4ids['Uniprot'].isna()]['Uniprot'].str.split(";") ))
# to check
for idx,elem in enumerate(a_uniprot):
if isinstance(elem,list):
for i, s in enumerate(elem):
if "-" in s:
print(s)
print(len(df_all[~df_all['gene_names'].isna()]['gene_names'].unique()))
print(len(df_all[~df_all['gene_names'].isna()]['gene_names'].str.replace(",", ";").unique()))
# Build a co-occurrence graph of gene names: names listed together in one
# entry end up in one connected component.
cG = nx.Graph()
cc = Counter()
for pp in a_gn:
    # Counter.update is O(len(pp)); the previous `cc = cc + Counter(pp)`
    # rebuilt the whole Counter on every iteration (quadratic overall,
    # which is why this cell used to "take a while").
    cc.update(pp)
    cG.add_nodes_from(pp)
    # chain consecutive names so the whole list forms one component
    cG.add_edges_from(zip(pp, pp[1:]))
# Map every gene name to the most frequent name of its connected component.
prot_align_id_gn = {}
while cc:
    mc = cc.most_common(1)[0][0]
    for n in nx.node_connected_component(cG, mc):
        prot_align_id_gn[n] = mc
        del cc[n]
df_g_s = df_all.loc[~df_all['gene_names'].isna(),['gene_names','Study']].drop_duplicates(keep='first')
print(len(df_g_s))
df_g_s = df_g_s.reset_index(drop=True)
df_g_s.head()
# Count, for every individual gene name, in how many distinct
# (gene_names, Study) rows it appears.
gn_study_dict = dict()
for _, row in df_g_s.iterrows():
    gn_list = row["gene_names"].replace(" ", "").split(";")
    for elem in gn_list:
        # dict.get with a numeric default replaces the fragile
        # sentinel-string ("NewIDfoundInGNStudyList") check used before;
        # counts produced are identical.
        gn_study_dict[elem] = gn_study_dict.get(elem, 0) + 1
print(len(gn_study_dict)) # 16743
df_all['gene_name_unified'].unique()
df_all = df_all.drop(columns='gene_name_unified')
# to get gene_names to gene_name_unified mapping using gn_study_dict (occurences dict)
df_all_gn = pd.DataFrame(df_all.loc[~(df_all['gene_names'].isna()),'gene_names'].copy())
df_all_gn = df_all_gn.drop_duplicates(keep='first')
df_all_gn = df_all_gn.reset_index(drop=True)
df_all_gn = df_all_gn.reset_index(drop=True)
df_all_gn['gene_name_unified'] = None
for index,row in df_all_gn.iterrows():
df_all_gn.loc[index,'gene_name_unified'] = get_gene_unified(index,row,gn_study_dict)
print(len(df_all))
df = pd.merge(df_all,df_all_gn,how='inner',on=['gene_names'])
print(len(df))
(len(df_all.loc[df_all['gene_names'].isna()]) + len(df))/len(df_all)
df_all['gene_name_unified'] = None
# make same columns order
df.columns
df_all.columns
# make same columns order
df = df[df_all.columns]
df.columns
df2 = pd.concat([df, df_all.loc[df_all['gene_names'].isna()]],sort=False )
df2 = df2.reset_index(drop=True)
#df_all['gene_name_unified'] = df_all['gene_names'].str.split(";", 1).str[0].replace(prot_align_id_gn)
# replace(" ","")
df_all = df2.copy()
df_all.columns
len(df_all['gene_name_unified'].unique()) #14537
print(len(df_all.loc[df_all['gene_names'].isna(),'Study'].unique()))
print(len(df_all.loc[~df_all['gene_names'].isna(),'Study'].unique()))
len(df_all['Study'].unique())
# Build the Uniprot-id co-occurrence graph (same scheme as for gene names).
cGuniprot = nx.Graph()
ccuniprot = Counter()
for ppu in a_uniprot:
    # Counter.update is linear; `ccuniprot = ccuniprot + Counter(ppu)`
    # rebuilt the whole Counter each iteration (quadratic in total).
    ccuniprot.update(ppu)
    cGuniprot.add_nodes_from(ppu)
    # link consecutive ids so each id list forms one connected component
    cGuniprot.add_edges_from(zip(ppu, ppu[1:]))
print(len(a_uniprot))
print(len(cGuniprot.nodes()))
print(len(ccuniprot))
# Map every Uniprot id to the most frequent id of its connected component.
prot_align_id_uniprot = {}
while ccuniprot:
    mcuniprot = ccuniprot.most_common(1)[0][0]
    for nuniprot in nx.node_connected_component(cGuniprot, mcuniprot):
        prot_align_id_uniprot[nuniprot] = mcuniprot
        del ccuniprot[nuniprot]
print(len(prot_align_id_uniprot))
df_u_s = df_all.loc[~df_all['Uniprot'].isna(),['Uniprot','Study']].drop_duplicates(keep='first')
print(len(df_u_s)) #120899
df_u_s.head()
print(len(df_u_s.loc[df_u_s['Uniprot'].str.contains("CON_")]))
# CON_ is contaminant protein according to MaxQuant annotation
#
df_u_s.loc[df_u_s['Uniprot'].str.contains("1536,1550d1535")].head()
# Count, for every individual Uniprot id (isoform suffix stripped), in how
# many distinct (Uniprot, Study) rows it appears.
uniprot_study_dict = dict()
for _, row in df_u_s.iterrows():
    uniprots_list0 = row["Uniprot"].replace(" ", "").split(";")
    # strip the "-N" isoform suffix before counting
    uniprots_list = [x.split("-")[0] for x in uniprots_list0 if x is not None]
    for elem in uniprots_list:
        # dict.get with a numeric default replaces the fragile
        # sentinel-string ("NewIDfoundInUniprotStudyList") check;
        # counts produced are identical.
        uniprot_study_dict[elem] = uniprot_study_dict.get(elem, 0) + 1
#df_all['Uniprot_unified'] = df_all['Uniprot'].str.split(";", 1).str[0].replace(prot_align_id_uniprot)
#df_all['Uniprot_unified'] = #df_all['Uniprot'].str.split(";", 1).str[0] # get most common
# to get gene_names to gene_name_unified mapping using gn_study_dict (occurences dict)
df_all_uni = pd.DataFrame(df_all.loc[~(df_all['Uniprot'].isna()),'Uniprot'].copy())
df_all_uni = df_all_uni.drop_duplicates(keep='first')
df_all_uni = df_all_uni.reset_index(drop=True)
df_all_uni['Uniprot_unified'] = None
for index,row in df_all_uni.iterrows():
df_all_uni.loc[index,'Uniprot_unified'] = get_uniprot_unified(index,row,uniprot_study_dict)
df_all_uni.columns
df_all.columns
df_all['Uniprot_unified'].unique()
df_all = df_all.drop(columns = 'Uniprot_unified')
print(len(df_all))
df = pd.merge(df_all,df_all_uni,how='inner',on=['Uniprot'])
print(len(df))
df.columns
df_all['Uniprot_unified'] = None
# make same columns order
df = df[df_all.columns]
df.columns
df2 = pd.concat([df, df_all.loc[df_all['Uniprot'].isna()]],sort=False )
df2 = df2.reset_index(drop=True)
df_all = df2.copy()
#df_all['Uniprot_unified'] = df_all['Uniprot_unified'].str.split("-").str[0]
df_all['Uniprot_unified'] = df_all['Uniprot_unified'].replace(prot_align_id_uniprot)
len(df_all.loc[df_all['Uniprot_unified'].isna()])
df_all.loc[(~df_all['Uniprot'].isna())&(df_all['Uniprot'].str.contains("-"))&(df_all['Study']!='Hosp 2017, soluble')].head()
print(len(df_all['Uniprot_unified'].unique())) # 18718
print(len(df_all.loc[df_all['Uniprot'].isna(),'Study'].unique()))
print(len(df_all.loc[~df_all['Uniprot'].isna(),'Study'].unique()))
print(len(df_all.loc[df_all['Uniprot_unified'].isna(),'Study'].unique()))
print(len(df_all.loc[~df_all['Uniprot_unified'].isna(),'Study'].unique()))
print(len(df_all.loc[(df_all['Uniprot_unified'].isna()) & (df_all['Uniprot'].isna()),'Study'].unique()))
print(len(df_all.loc[(~df_all['Uniprot_unified'].isna()) & (df_all['Uniprot'].isna()),'Study'].unique()))
print(len(df_all.loc[df_all['Uniprot'].isna() & df_all['gene_names'].isna(),'Study'].unique()))
print(len(df_all.loc[df_all['Uniprot_unified'].isna() & df_all['gene_name_unified'].isna(),'Study'].unique()))
```
### Get Uniprot ids by gene_name_unified for missing Uniprot ids; get Gene Names by Uniprot_unified for missing Gene Names; use Uniprot-GN mapping to check GN-unified
```
#Data downloaded on 05june2020 is from
#ftp://ftp.uniprot.org/pub/databases/uniprot/current_release/knowledgebase/idmapping/
# is in /Users/polina/git/bbpMolAtlas/2020/data/ids_mapping_05june2020
mouse_uniprot_ids = pd.read_csv('../data/MOUSE_10090_idmapping.dat.gz',header=None,sep='\t')
rat_uniprot_ids = pd.read_csv('../data/RAT_10116_idmapping.dat.gz',header=None,sep='\t')
human_uniprot_ids = pd.read_csv('../data/HUMAN_9606_idmapping.dat.gz',header=None,sep='\t')
mouse_uniprot_ids.columns = ['Uniprot','ID_type','ID']
rat_uniprot_ids.columns = ['Uniprot','ID_type','ID']
human_uniprot_ids.columns = ['Uniprot','ID_type','ID']
mouse_uniprot_ids['id_of_organism'] = 'mouse'
rat_uniprot_ids['id_of_organism'] = 'rat'
human_uniprot_ids['id_of_organism'] = 'human'
#combine data for multiple organisms
uniprot_ids_mrh = pd.concat([mouse_uniprot_ids,rat_uniprot_ids,human_uniprot_ids],ignore_index=True,sort=True)
print((len(mouse_uniprot_ids['Uniprot'].unique())+len(rat_uniprot_ids['Uniprot'].unique())+len(human_uniprot_ids['Uniprot'].unique()))/len(uniprot_ids_mrh['Uniprot'].unique()),len(uniprot_ids_mrh))
#keep only needed id-types
print(len(uniprot_ids_mrh))
uniprot_ids_mrh = uniprot_ids_mrh.loc[uniprot_ids_mrh['ID_type'].isin(['UniProtKB-ID', 'Gene_Name','GeneID','Gene_Synonym','GeneCards','HGNC'])].copy()
print(len(uniprot_ids_mrh))
uniprot_ids_mrh = uniprot_ids_mrh.reset_index(drop=True)
uniprot_ids_mrh['ID'] = uniprot_ids_mrh['ID'].str.upper()
len(uniprot_ids_mrh.loc[uniprot_ids_mrh['ID_type']=='Gene_Name','Uniprot'].unique())
uniprot_ids_mrh.head()
len(df_all.loc[df_all['Uniprot_unified'].isna() & (~df_all['gene_name_unified'].isna()),'gene_name_unified'].unique())
df_all.loc[df_all['gene_name_unified']=='KRT8','Uniprot_unified'].unique()
len(df_all.loc[(~df_all['Uniprot_unified'].isna()) & (df_all['Uniprot_unified'].str.contains('CON_'))])
len(df_all.loc[(df_all['Study']=='Davis 2019') & (df_all['gene_name_unified'].isna()) ])
len(df_all.loc[(df_all['Study']=='Davis 2019') & (df_all['gene_name_unified'].isna()) ])/len(df_all.loc[(df_all['Study']=='Davis 2019')])
df_all = df_all.reset_index(drop=True)
len(df_all.loc[df_all['gene_name_unified']!=df_all['gene_names']])
df_all_withGN = df_all.loc[~df_all['gene_names'].isna()].copy()
len(df_all_withGN.loc[~df_all_withGN['gene_names'].str.contains(';'),'gene_names'].unique())
len(df_all_withGN.loc[~df_all_withGN['gene_names'].str.contains(';'),'gene_name_unified'].unique())
len(df_all_withGN.loc[df_all_withGN['gene_names'].str.contains(';'),'gene_names'].unique())
len(df_all_withGN.loc[df_all_withGN['gene_names'].str.contains(';'),'gene_name_unified'].unique())
len(df_all_withGN.loc[(df_all_withGN['gene_name_unified']!=df_all_withGN['gene_names']) & (~df_all_withGN['gene_names'].str.contains(';'))])
len(df_all_withGN.loc[(df_all_withGN['gene_name_unified']==df_all_withGN['gene_names']) & (~df_all_withGN['gene_names'].str.contains(';'))])
df_all_withGN.loc[df_all_withGN['gene_names'].str.contains('SRSF5;')].head(100)
df_all_withGN.loc[df_all_withGN['gene_names'].str.contains(';HRS')].head(100)
len(uniprot_ids_mrh)
uniprot_ids_mrh['ID_type'].unique()
len(uniprot_ids_mrh[uniprot_ids_mrh['ID_type']=='Gene_Name'])
len(uniprot_ids_mrh.loc[uniprot_ids_mrh['ID_type']=='Gene_Name',['Uniprot','ID']].drop_duplicates(keep='first'))
uniprot_ids_mrh_gnDupl = uniprot_ids_mrh.loc[uniprot_ids_mrh['ID_type']=='Gene_Name',['Uniprot','ID']]
print(len(uniprot_ids_mrh_gnDupl))
uniprot_ids_mrh_gnDupl = uniprot_ids_mrh_gnDupl.drop_duplicates(keep=False)
print(len(uniprot_ids_mrh_gnDupl))
uniprot_ids_mrh_gnDupl.head(10)
uniprot_ids_mrh_gnDupl_d = uniprot_ids_mrh_gnDupl.groupby('Uniprot').count()
print(len(uniprot_ids_mrh_gnDupl_d[uniprot_ids_mrh_gnDupl_d['ID']>1]))
uniprot_ids_mrh[(uniprot_ids_mrh['Uniprot'].isin(uniprot_ids_mrh_gnDupl_d[uniprot_ids_mrh_gnDupl_d['ID']>1].index.to_list())) & (uniprot_ids_mrh['ID_type']=='Gene_Name')].head(10)
uniprot_gn = uniprot_ids_mrh.loc[uniprot_ids_mrh['ID_type']=='Gene_Name',['Uniprot','ID']].groupby('Uniprot').aggregate(lambda tdf: tdf.unique().tolist())
uniprot_gn.head()
uniprot_ids_mrh_dict = pd.Series(uniprot_gn['ID'].values,index=uniprot_gn.index).to_dict()
df_fgn = df_all[['gene_names', 'gene_name_unified', 'Uniprot', 'Uniprot_unified']].copy()
df_fgn = df_fgn.drop_duplicates(keep='first')
print(len(df_all))
print(len(df_fgn))
df_fgn['gn_from_uniprot'] = df_fgn['Uniprot_unified'].copy()
df_fgn['gn_from_uniprot'] = df_fgn['gn_from_uniprot'].map(uniprot_ids_mrh_dict).fillna('NoMapping') #df_all.replace({"gn_from_uniprot": uniprot_ids_mrh_dict})
df_fgn.head()
gn_uniprot = uniprot_ids_mrh.loc[uniprot_ids_mrh['ID_type']=='Gene_Name',['Uniprot','ID']].groupby('ID').aggregate(lambda tdf: tdf.unique().tolist())
gn_uniprot.head()
uniprot_gn_mrh_dict = pd.Series(gn_uniprot['Uniprot'].values,index=gn_uniprot.index).to_dict()
df_fgn['uniprot_from_gn'] = df_fgn['gene_name_unified'].copy()
df_fgn['uniprot_from_gn'] = df_fgn['uniprot_from_gn'].map(uniprot_gn_mrh_dict).fillna('NoMapping') #df_all.replace({"gn_from_uniprot": uniprot_ids_mrh_dict})
df_fgn.head()
gnu_count = df_fgn.loc[(~df_fgn['gene_name_unified'].isna()) & (~df_fgn['Uniprot_unified'].isna()),['gene_name_unified','Uniprot_unified']].drop_duplicates(keep='first').groupby('gene_name_unified').count()
gnu_count_dict = pd.Series(gnu_count['Uniprot_unified'].values,index=gnu_count.index).to_dict()
gnu_count.sort_values('Uniprot_unified',ascending=False).head(10)
uniprot_u_count = df_fgn.loc[(~df_fgn['gene_name_unified'].isna()) & (~df_fgn['Uniprot_unified'].isna()),['gene_name_unified','Uniprot_unified']].drop_duplicates(keep='first').groupby('Uniprot_unified').count()
uniprot_u_count.sort_values('gene_name_unified',ascending=False).head(10)
uniprot_u_count_dict = pd.Series(uniprot_u_count['gene_name_unified'].values,index=uniprot_u_count.index).to_dict()
df_fgn.loc[df_fgn['gene_name_unified']=='HIST1H2BC','Uniprot_unified'].unique()
gn_uniprot_unif_4dict = df_fgn.loc[(~df_fgn['gene_name_unified'].isna()) & (~df_fgn['Uniprot_unified'].isna()),['gene_name_unified','Uniprot_unified']].drop_duplicates(keep='first')
len(gn_uniprot_unif_4dict)
#gn_uniprot_unif_dict = pd.Series(gn_uniprot_unif_4dict['gene_name_unified'].values,index=gn_uniprot_unif_4dict['Uniprot_unified']).to_dict()
gn_uniprot_unif_dict = gn_uniprot_unif_4dict.groupby('Uniprot_unified').aggregate(lambda tdf: tdf.unique().tolist())
gn_uniprot_unif_dict.head()
len(gn_uniprot_unif_dict)
gn_uniprot_unif_dict_l = pd.Series(gn_uniprot_unif_dict['gene_name_unified'].values,index=gn_uniprot_unif_dict.index).to_dict()
uniprot_gn_unif_dict = gn_uniprot_unif_4dict.groupby('gene_name_unified').aggregate(lambda tdf: tdf.unique().tolist())
uniprot_gn_unif_dict_l = pd.Series(uniprot_gn_unif_dict['Uniprot_unified'].values,index=uniprot_gn_unif_dict.index).to_dict()
uniprot_gn_unif_dict.head()
len(df_fgn)
df_fgn = df_fgn.reset_index(drop=True)
df_fgn[(df_fgn['gene_names'].isna()) & (~df_fgn['gene_name_unified'].isna())].head()
df_fgn.head()
type([df_fgn.iloc[0,2]])
type(df_fgn.iloc[0,4])
```
# Given
```
# main data
print(len(df_all))
df_all.head()
# extracted from df_all, only columns related to IDs, dropped duplicates
print(len(df_fgn))
df_fgn.head()
# gene_names - original gene names from the raw data
# gene_name_unified - obtained by mapping from graph connected components (gene name with the highest number of occurences across all data sets compared to other genes of the the same connected component)
# Uniprot - original Uniprot IDs from the raw data
# Uniprot_unified - obtained by mapping from graph connected components
# gn_from_uniprot - bad, because some studies report unrelated genes as one entry
# uniprot_from_gn - bad, because some studies report unrelated genes as one entry
# gene_id_final - needed
# Uniprot-to-other IDs mapping.
#Data downloaded on 05june2020 is from
#ftp://ftp.uniprot.org/pub/databases/uniprot/current_release/knowledgebase/idmapping/
# is in /Users/polina/git/bbpMolAtlas/2020/data/ids_mapping_05june2020
uniprot_ids_mrh.head()
# from uniprot_ids_mrh to make dict of lists:
#uniprot_gn = uniprot_ids_mrh.loc[uniprot_ids_mrh['ID_type']=='Gene_Name',['Uniprot','ID']].groupby('Uniprot').aggregate(lambda tdf: tdf.unique().tolist())
uniprot_gn.head()
# uniprot_ids_mrh_dict = pd.Series(uniprot_gn['ID'].values,index=uniprot_gn.index).to_dict()
# from uniprot_ids_mrh make dict of lists:
#gn_uniprot = uniprot_ids_mrh.loc[uniprot_ids_mrh['ID_type']=='Gene_Name',['Uniprot','ID']].groupby('ID').aggregate(lambda tdf: tdf.unique().tolist())
gn_uniprot.head()
# uniprot_gn_mrh_dict = pd.Series(gn_uniprot['Uniprot'].values,index=gn_uniprot.index).to_dict()
# UniprotMainData
# Uniprot main data (reviewed), 21july2020
#(taxonomy:"Mus musculus (Mouse) [10090]" OR taxonomy:"Rattus norvegicus (Rat) [10116]" OR taxonomy:"Homo sapiens (Human) [9606]") AND reviewed:yes
uniprot_rev.head() # reviewed, best
# Entry - Uniprot
# Entry name - "AlmostGeneID_Organism" (almost, because in some cases it's modified gene name)
# Gene names - list of TRUE synonymous gene names
# gene_id_entry_name - AlmostGeneID obtained by split("-") from Entry name
# UniprotMainData_2
# Uniprot main data 2 (unreviewed), 21july2020
#(taxonomy:"Mus musculus (Mouse) [10090]" OR taxonomy:"Rattus norvegicus (Rat) [10116]" OR taxonomy:"Homo sapiens (Human) [9606]") AND reviewed:no
uniprot_unrev.head() # unreviewed
# Entry - Uniprot
# Entry name - "AlmostGeneID_Organism" (almost, because in some cases it's modified gene name)
# Gene names - list of TRUE synonymous gene names
# gene_id_entry_name - AlmostGeneID obtained by split("-") from Entry name
# from Uniprot main data
#uniprot_rev_dict = pd.Series(uniprot_rev['Gene names'].values,index=uniprot_rev['Entry']).to_dict()
#uniprot_unrev_dict = pd.Series(uniprot_unrev['Gene names'].values,index=uniprot_unrev['Entry']).to_dict()
#uniprot_rev_genes = list(set([item for sublist in uniprot_rev['Gene names'].tolist() for item in sublist]))
#uniprot_unrev_genes = list(set([item for sublist in uniprot_unrev['Gene names'].tolist() for item in sublist]))
uniprot_rev.head()
#uniprot_rev.loc[uniprot_rev['Organism'].str.contains('Mus')].head()
df_fgn.head()
# from df_fgn
# gene_names.split(";") count
#df_fgn['gene_names'] = df_fgn['gene_names'].replace(None,np.nan)
# Explode the multi-name "gene_names" strings into one row per individual
# gene name, then count in how many (gene_names, Study) rows each name occurs.
df_fgn_gnstudy = df_all.loc[~(df_all['gene_names'].isna()),['gene_names','Study']].copy()
df_fgn_gnstudy = df_fgn_gnstudy.drop_duplicates(keep='first')
df_fgn_gnstudy = df_fgn_gnstudy.reset_index(drop=True)
# split "A;B;C" into columns, then stack into a long Series (one name per row)
s = df_fgn_gnstudy['gene_names'].str.split(';').apply(pd.Series, 1).stack() #.str.replace(" ", "")
s.index = s.index.droplevel(-1) # to line up with df's index
s.name = 'gene_names' # needs a name to join
del df_fgn_gnstudy['gene_names']
df_fgn_gnstudy = df_fgn_gnstudy.join(s)
# one row per (Study, individual gene name); count rows per gene name
gn_study_count = df_fgn_gnstudy.groupby('gene_names').count()
gn_study_count.head()
print(len(gn_study_count))
# drop the empty-string name (presumably produced by stray ';' separators
# in some entries -- TODO confirm against the raw data)
gn_study_count = gn_study_count.loc[gn_study_count.index!='',:]
print(len(gn_study_count))
gn_study_count.index
# gene name -> number of occurrences across all (gene_names, Study) rows
gn_study_count_dict = pd.Series(gn_study_count['Study'].values,index=gn_study_count.index).to_dict()
len(gn_study_count_dict) #16742
```
# Needed
```
#df_fgn['gene_id_final']
# and subsequently mapped:
#df_all['gene_id_final']
```
In **df_fgn**:
1) **gene_name_unified** == **gene_names** --> **gene_id_final** = **gene_names** == **gene_name_unified**
2) multiple gene names per entry (i.e. **";"** in **gene_names**) and **gene_name_unified** in **gene_names.split(";")** --> **gene_id_final** = **gene_name_unified**
3) **gene_name_unified** not in **gene_names**.split(";")
- I look at **Uniprot** of this entry.
**Uniprot**.split(";")
- IF **gene_name_unified** or any of **gene_names**.split(";") for given entry are in the values of Uniprot-GN dictionaries (**uniprot_rev_dict, uniprot_unrev_dict, ?uniprot_gn**) for given list of **Uniprot**.split(";") IDs of this entry,
I either set **gene_id_final** to the found gene name if there is only one match,
OR inspect this entry visually.
IF one **Uniprot**.split(";") ID is found in the Reviewed dictionary and another one is found in the Unreviewed dictionary, I set **gene_id_final** according to the Reviewed dictionary.
4) For entries in the raw data (**df_fgn**) which have only **Uniprot** IDs and have neither **gene_names** nor **gene_name_unified**:
I get corresponding gene names using Uniprot-GN dictionaries (**uniprot_rev_dict, uniprot_unrev_dict, uniprot_gn**),
and IF there are multiple gene names, I check which one/ones are in list of all GN from **df_fgn[gene_names].split(";")**
and IF it's only one, then I set final gene name as this gene name,
and IF there are many, I inspect visually this entry.
Potentially in some cases **df_fgn[gn_from_uniprot]** and **df_fgn[uniprot_from_gn]** can help in the future
```
#df_fgn = df_fgn.drop(columns='gene_id_final')
df_fgn = df_fgn.reset_index(drop=True)
# Start from a clean column; get_gene_id_final fills it row by row.
df_fgn['gene_id_final'] = None
for index,row in df_fgn.iterrows():
    #print(index)
    # Resolve the final gene id for this row from the Uniprot (reviewed /
    # unreviewed) dictionaries and the per-gene study counts.
    df_fgn.loc[index,'gene_id_final'] = get_gene_id_final(index,row,uniprot_rev_dict,uniprot_unrev_dict,uniprot_ids_mrh_dict,gn_study_count_dict)
len(df_fgn)
len(df_fgn['gene_id_final'].unique()) # 14841
len(df_fgn.loc[df_fgn['gene_id_final'].isna()])
df_fgn.loc[df_fgn['gene_id_final'].isna()].head()
len(df_fgn.loc[(df_fgn['gene_id_final'].isna()) & (df_fgn['gene_names'].isna())]) # -> drop these entries no GN found by Uniprot
print(len(df_fgn))
df_fgn = df_fgn.loc[~( (df_fgn['gene_id_final'].isna()) & (df_fgn['gene_names'].isna()) )]
print(len(df_fgn))
df_fgn.loc[df_fgn['gene_id_final'].isna()].head()
# depending on the goal, this can be useful in some cases
#print(len(df_fgn.loc[(df_fgn['gene_id_final'].isna()) & (~df_fgn['gene_names'].isna())]))
#df_fgn.loc[(df_fgn['gene_id_final'].isna()) & (~df_fgn['gene_names'].isna())].head() # -> set gn_final to original gene name
#df_fgn.loc[(df_fgn['gene_id_final'].isna()) & (~df_fgn['gene_names'].isna()),'gene_id_final'] = df_fgn.loc[(df_fgn['gene_id_final'].isna()) & (~df_fgn['gene_names'].isna()),'gene_names'].copy()
with open('../data/2_df_fgn_9may2021.pkl','wb') as f:
pkl.dump(df_fgn,f)
print(len(df_fgn.loc[df_fgn['gene_id_final'].str.contains('#')]))
print(len(df_fgn.loc[df_fgn['gene_id_final'].str.contains(';')]))
print(len(df_fgn.loc[df_fgn['gene_id_final'].str.contains('&')]))
print(len(df_fgn.loc[df_fgn['gene_id_final'].str.contains('@')]))
print(len(df_fgn))
df_fgn = df_fgn.reset_index(drop=True)
```
### MANUAL CURATION for the entries containing "; & @ ". See the get_gene_id_final from protein_ids_alignment_helpers.py for the details on how these composite ids were created
```
len(df_fgn.loc[(df_fgn['gene_id_final'].str.contains('&')) & (df_fgn['Uniprot']!=df_fgn['Uniprot_unified'])])
#df.loc[df['gene_id_final']== 'C2CD4CC2CD4 FAMILY',"gene_id_final"] = 'C2CD4C'
df_fgn.loc[df_fgn['gene_id_final']== 'C2CD4CC2CD4 FAMILY']
df_fgn.loc[(df_fgn['gene_id_final'].str.contains('&')) & (df_fgn['Uniprot']==df_fgn['Uniprot_unified'])]
df_fgn_gene_id_final_counts = df_fgn.loc[(~df_fgn['gene_id_final'].str.contains(';')) & (~df_fgn['gene_id_final'].str.contains('@'))].groupby('gene_id_final').count()
df_fgn_gene_id_final_counts['sum'] = df_fgn_gene_id_final_counts.sum(axis=1)
df_fgn_gene_id_final_counts_dict = pd.Series(df_fgn_gene_id_final_counts['sum'].values,index=df_fgn_gene_id_final_counts.index).to_dict()
df_fgn = df_fgn.reset_index(drop=True)
len(df_fgn.loc[df_fgn['gene_id_final'].str.contains(';')])
# Resolve composite gene ids of the form "A;B;..." by keeping the candidate
# that occurs most frequently among the already-resolved (single) gene ids.
for idx, row in df_fgn.loc[df_fgn['gene_id_final'].str.contains(';')].iterrows():
    gns_mostFreq41 = np.nan
    gids = row['gene_id_final'].split(';')
    # occurrence count of each candidate among unambiguous gene_id_final values
    counts_occ = dict()
    for gid in gids:
        counts_occ[gid] = df_fgn_gene_id_final_counts_dict.get(gid, 0.0)
    # get keys with max value:
    max_value = max(counts_occ.values())
    if max_value > 0:
        gns_mostFreq41 = [k for k, v in counts_occ.items() if v == max_value]
    else:
        # BUGFIX: was "ns_mostFreq41 = np.nan" (typo creating a stray variable).
        # Keep gns_mostFreq41 as NaN so the entry is left for manual curation.
        gns_mostFreq41 = np.nan
        print("check 1 ", idx)
    if isinstance(gns_mostFreq41, list):
        # The list from the max filter is never empty; on a tie the first
        # most-frequent candidate wins (same arbitrary tie-break as before,
        # where the len==1 and len>1 branches did the identical assignment).
        df_fgn.loc[idx, 'gene_id_final'] = gns_mostFreq41[0]
    else:
        print("check 3 ", idx)
len(df_fgn.loc[df_fgn['gene_id_final'].str.contains(';')])
df_fgn.loc[df_fgn['gene_id_final'].str.contains(';')]
df_fgn.loc[(df_fgn['gene_id_final'].str.contains(';')) & (df_fgn['gene_id_final']== 'HIST2H4;HIST1H4C') & (df_fgn['Uniprot']=='B2RTM0'),"gene_id_final"] = 'HIST2H4'
#by Uniprot
df_fgn.loc[df_fgn['gene_id_final'].str.contains(';')]
df_fgn.loc[df_fgn['gene_id_final'].str.contains('@')]
len(df_fgn.loc[df_fgn['gene_id_final'].str.contains('@')])
df_fgn = df_fgn.reset_index(drop=True)
df_fgn_gene_id_final_counts = df_fgn.loc[(~df_fgn['gene_id_final'].str.contains(';')) & (~df_fgn['gene_id_final'].str.contains('@'))].groupby('gene_id_final').count()
df_fgn_gene_id_final_counts['sum'] = df_fgn_gene_id_final_counts.sum(axis=1)
df_fgn_gene_id_final_counts_dict = pd.Series(df_fgn_gene_id_final_counts['sum'].values,index=df_fgn_gene_id_final_counts.index).to_dict()
# Resolve composite gene ids of the form "A@B@..." the same way as the ';'
# case: keep the candidate seen most often among unambiguous gene ids.
for idx, row in df_fgn.loc[df_fgn['gene_id_final'].str.contains('@')].iterrows():
    gns_mostFreq41 = np.nan
    gids = row['gene_id_final'].split('@')
    counts_occ = dict()
    for gid in gids:
        counts_occ[gid] = df_fgn_gene_id_final_counts_dict.get(gid, 0.0)
    # get keys with max value:
    max_value = max(counts_occ.values())
    if max_value > 0:
        gns_mostFreq41 = [k for k, v in counts_occ.items() if v == max_value]
    else:
        # No candidate was ever seen as an unambiguous id: fall back to the
        # first component. (BUGFIX: removed the dead "ns_mostFreq41 = np.nan"
        # typo line; gns_mostFreq41 is already NaN here.)
        df_fgn.loc[idx, 'gene_id_final'] = gids[0]
        print("check 1 ", idx)
    if isinstance(gns_mostFreq41, list):
        # Nonempty by construction; on a tie the first candidate wins, exactly
        # as the original identical len==1 / len>1 branches did.
        df_fgn.loc[idx, 'gene_id_final'] = gns_mostFreq41[0]
    else:
        print("check 3 ", idx)
len(df_fgn.loc[df_fgn['gene_id_final'].str.contains('@')])
df_fgn.loc[df_fgn['gene_id_final'].str.contains('@')]
with open('../data/2_df_fgn_9May2021.pkl','wb') as f:
pkl.dump(df_fgn,f)
# Possible improvement:
# for rat and human entries check if their gene_id_final exists in mouse entries
# if gene_id_final doesn't exist in mouse entries, try to find corresponding mouse gene name using Entry-name, Gene name, Uniprot from uniprot_rev, uniprot_unrev
len(df_fgn['gene_id_final'].unique())
len(df_fgn.loc[~df_fgn['gene_id_final'].isna(),'gene_id_final'].unique()) #14823
df_fgn = df_fgn.reset_index(drop=True)
df_all.columns
df_fgn.columns
print(len(df_all))
print(len(df_fgn))
df = pd.merge(df_all,df_fgn,how='inner',on=['gene_names', 'gene_name_unified', 'Uniprot', 'Uniprot_unified'])
len(df)
df.loc[df['gene_id_final'].isna()].head()
### check gene_id_final
print(len(df))
print(len(df.loc[~df['gene_id_final'].isna()].copy()))
extra = pd.merge(df_all,df_fgn, how='left', indicator=True)
extra['_merge'].unique()
len(extra.loc[extra['_merge']=='left_only'])
len(extra.loc[(extra['_merge']=='left_only') & (~extra['gene_names'].isna()) ])
with open('../data/2_df_best_alignedIDs_9May2021.pkl','wb') as f:
pkl.dump(df,f)
```
| github_jupyter |
# Sequence Generation
In this exercise, you will design an RNN to generate baby names! You will design an RNN to learn to predict the next letter of a name given the preceding letters. This is a character-level RNN rather than a word-level RNN.
This idea comes from this excellent blog post: http://karpathy.github.io/2015/05/21/rnn-effectiveness/
```
%matplotlib inline
import numpy as np
from tensorflow.keras.layers import Dense, Dropout, Activation, Embedding
from tensorflow.keras.layers import LSTM, SimpleRNN, GRU
from tensorflow.keras.models import Sequential
from tensorflow.keras.preprocessing import sequence
from tensorflow.keras.utils import np_utils
from tensorflow.keras.utils import to_categorical
```
## Training Data
The training data we will use comes from this corpus:
http://www.cs.cmu.edu/afs/cs/project/ai-repository/ai/areas/nlp/corpora/names/
Take a look at the training data in `data/names.txt`, which includes both boy and girl names. Below we load the file and convert it to all lower-case for simplicity.
Note that we also add a special "end" character (in this case a period) to allow the model to learn to predict the end of a name.
```
with open('../data/names.txt') as f:
names = f.readlines()
names = [name.lower().strip() + '.' for name in names]
print('Loaded %d names' % len(names))
names[:10]
```
We need to count all of the characters in our "vocabulary" and build a dictionary that translates between the character and its assigned index (and vice versa).
```
chars = set()
for name in names:
chars.update(name)
vocab_size = len(chars)
print('Vocabulary size:', vocab_size)
char_inds = dict((c, i) for i, c in enumerate(chars))
inds_char = dict((i, c) for i, c in enumerate(chars))
char_inds
```
#### Exercise 1 - translate chars to indexes
Most of the work of preparing the data is taken care of, but it is important to know the steps because they will be needed anytime you want to train an RNN. Use the dictionary created above to translate each example in `names` to its number format in `int_names`.
```
# Translate names to their number format in int_names
```
The `create_matrix_from_sequences` will take the examples and create training data by cutting up names into input sequence of length `maxlen` and training labels, which are the following character. Make sure you understand this procedure because it is what will actually go into the network!
```
def create_matrix_from_sequences(int_names, maxlen, step=1):
    """Cut each encoded name into (input window, next character) pairs.

    Slides a window of size maxlen over every sequence in int_names,
    advancing by step, and records the element that follows each window
    as its training label.

    Returns two parallel lists: (name_parts, next_chars).
    """
    pairs = [
        (name[start:start + maxlen], name[start + maxlen])
        for name in int_names
        for start in range(0, len(name) - maxlen, step)
    ]
    name_parts = [window for window, _ in pairs]
    next_chars = [label for _, label in pairs]
    return name_parts, next_chars
maxlen = 3
name_parts, next_chars = create_matrix_from_sequences(int_names, maxlen)
print('Created %d name segments' % len(name_parts))
X_train = sequence.pad_sequences(name_parts, maxlen=maxlen)
y_train = to_categorical(next_chars, vocab_size)
X_train.shape
X_train[:5]
```
#### Exercise 2 - design a model
Design your model below. Like before, you will need to set up the embedding layer, the recurrent layer, a dense connection and a softmax to predict the next character.
Fit the model by running at least 10 epochs. Later you will generate names with the model. Getting around 30% accuracy will usually result in decent generations. What is the accuracy you would expect for random guessing?
```
model.fit(X_train, y_train, batch_size=32, epochs=10, verbose=1)
```
## Sampling from the model
We can sample the model by feeding in a few letters and using the model's prediction for the next letter. Then we feed the model's prediction back in to get the next letter, etc.
The `sample` function is a helper to allow you to adjust the diversity of the samples. You can read more [here](https://en.wikipedia.org/wiki/Softmax_function#Reinforcement_learning).
Read the `gen_name` function to understand how the model is sampled.
```
def sample(p, diversity=1.0):
p1 = np.asarray(p).astype('float64')
p1 = np.log(p1) / diversity
e_p1 = np.exp(p1)
s = np.sum(e_p1)
p1 = e_p1 / s
return np.argmax(np.random.multinomial(1, p1, 1))
def gen_name(seed, length=1, diversity=1.0, maxlen=3):
    """
    Sample a name from the trained character model, one letter at a time.

    seed - the start of the name to sample
    length - the number of letters to sample; if None then samples
        are generated until the model generates a '.' character
    diversity - a knob to increase or decrease the randomness of the
        samples; higher = more random, lower = closer to the model's
        prediction
    maxlen - the size of the model's input
    """
    # Prepare input array (reused across iterations; once len(out) >= maxlen
    # every position is overwritten each step)
    x = np.zeros((1, maxlen), dtype=int)
    # Generate samples
    out = seed
    while length is None or len(out) < len(seed) + length:
        # Add the last chars so far for the next input
        # NOTE(review): when len(out) < maxlen only the leading positions are
        # filled and the rest stay 0 — presumably consistent with how X_train
        # was padded; confirm against the training-data preparation.
        for i, c in enumerate(out[-maxlen:]):
            x[0, i] = char_inds[c]
        # Get softmax for next character
        preds = model.predict(x, verbose=0)[0]
        # Sample the network output with diversity
        c = sample(preds, diversity)
        # Choose to end if the model generated an end token
        if c == char_inds['.']:
            if length is None:
                return out
            else:
                # fixed-length mode: ignore the end token and keep sampling
                continue
        # Build up output
        out += inds_char[c]
    return out
```
#### Exercise 3 - sample the model
Use the `gen_name` function above to sample some names from your model.
1. Try generating a few characters by setting the `length` argument.
2. Try different diversities. Start with 1.0 and vary it up and down.
3. Try using `length=None`, allowing the model to choose when to end a name.
4. What happens when `length=None` and the diversity is high? How do samples change in this case, starting from beginning to end? Why do you think this is?
5. With `length=None` and a "good" diversity, can you tell if the model has learned a repertoire of "endings"? What are some of them?
6. Find some good names. What are your favorites? :D
#### Exercise 4 - retrain
Now that you have seen some samples, go back up and redefine your model to "erase" it. Don't train it again yet. You can sample again to compare the quality of the samples before the model is trained.
Experiment with the hidden layer size, the maxlen, the number of epochs, etc. Do you observe any differences in the sample behavior?
Not all changes will make an observable impact, but do experiments to see what you can discover.
Finally, take a look at [this Colab notebook](https://colab.research.google.com/github/tensorflow/tpu/blob/master/tools/colab/shakespeare_with_tpu_and_keras.ipynb) for a similar example with Shakespeare text
| github_jupyter |
FGL-3SR for analysis of connectomes
==================================================================
This example uses FGL-3SR in order to learn functional connectivity between
regions of interest.
Load ADHD dataset and MSDL atlas
--------------------------------
```
from nilearn import datasets
adhd_data = datasets.fetch_adhd(n_subjects=1)
```
We use probabilistic regions of interest (ROIs) from the MSDL atlas.
```
msdl_data = datasets.fetch_atlas_msdl()
msdl_coords = msdl_data.region_coords
n_regions = len(msdl_coords)
print('MSDL has {0} ROIs, part of the following networks :\n{1}.'.format(
n_regions, msdl_data.networks))
from nilearn import input_data
masker = input_data.NiftiMapsMasker(
msdl_data.maps, resampling_target="data", t_r=2.5, detrend=True,
low_pass=.1, high_pass=.01, memory='nilearn_cache', memory_level=1)
```
Region signals extraction
-------------------------
We extract regions time series.
```
# Extract one ROI time-series matrix per subject, regressing out that
# subject's confounds, and record the corresponding ADHD label.
subjects_timeseries = []
adhd_labels = []
for func_file, confound_file, phenotypic in zip(
        adhd_data.func, adhd_data.confounds, adhd_data.phenotypic):
    # assumes fit_transform returns (timepoints, regions); it is transposed
    # later before being fed to FGL-3SR — confirm against nilearn docs
    time_series = masker.fit_transform(func_file, confounds=confound_file)
    # the phenotypic record carries the diagnosis flag for this subject
    is_adhd = phenotypic['adhd']
    subjects_timeseries.append(time_series)
    adhd_labels.append(is_adhd)
```
FGL-3SR
-------------------------
We learn functional connectivity between regions of interest with FGL-3SR.
```
import GL_3SR
beta = 1500.
alpha = .0001
Y = subjects_timeseries[0].T
N = Y.shape[0]
gl3sr = GL_3SR.FGL_3SR(trace=N, beta=beta, alpha=alpha, maxit=100, verbose=True, cv_crit=10e-12)
gl3sr.fit(Y)
import numpy as np
# Rebuild the estimated Laplacian from the learned eigendecomposition:
# L = X diag(lbd) X^T.
X, H, lbd, err = gl3sr.get_coeffs()
Lpred = X.dot(np.diag(lbd)).dot(X.T)
# Recover the weight matrix: diag(L) - L negates the off-diagonal entries
# and leaves a zero diagonal.
Wpred = np.diag(np.diag(Lpred)) - Lpred
# Enforce exact symmetry against numerical noise.
Wpred = (Wpred + Wpred.T)/2
# Sparsify: zero out every edge at or below the mean weight.
Wpred *= (Wpred>np.mean(Wpred))
```
Estimate weight matrix
```
%matplotlib inline
import matplotlib.pyplot as plt
plt.imshow(Wpred)
from nilearn import plotting
plotting.plot_connectome(Wpred, msdl_coords, colorbar=True, edge_cmap='Greys',
edge_vmin=.02, edge_vmax=.08, title='')
%matplotlib inline
import matplotlib as mlp
import matplotlib.pyplot as plt
mlp.rcParams["figure.figsize"] = (15, 5)
plt.subplot(131)
plt.plot(err[1:])
plt.grid(alpha=.3)
plt.title("Error convergence")
plt.subplot(132)
plt.title('$k = $' + str(np.sum(np.sum(abs(H), 1)>0)))
plt.imshow(abs(H[1:, :40]))
plt.subplot(133)
plt.plot(Y[0], label="true")
plt.plot(X.dot(H)[0], label='by FGL-3SR')
plt.grid(alpha=.3)
plt.legend()
```
| github_jupyter |
# Introduction
**[Connect Four](https://en.wikipedia.org/wiki/Connect_Four)** is a game where two players alternate turns dropping colored discs into a vertical grid. Each player uses a different color (usually red or yellow), and the objective of the game is to be the first player to get four discs in a row.
<center>
<img src="https://i.imgur.com/40B1MGc.png"><br/>
</center>
In this course, you will build your own intelligent agents to play the game.
- In the first lesson, you'll learn how to set up the game environment and create your first agent.
- The next two lessons focus on traditional methods for building game AI. These agents will be smart enough to defeat many novice players!
- In the final lesson, you'll experiment with cutting-edge algorithms from the field of reinforcement learning. The agents that you build will come up with gameplay strategies much like humans do: gradually, and with experience.
# Join the competition
Throughout the course, you'll test your agents' performance by competing against agents that other users have created.
To join the competition, open a new window with **[the competition page](https://www.kaggle.com/c/connectx/overview)**, and click on the **"Join Competition"** button. (_If you see a "Submit Agent" button instead of a "Join Competition" button, you have already joined the competition, and don't need to do so again._)
<center>
<img src="https://i.imgur.com/dDX1YVW.png" width=80%><br/>
</center>
This takes you to the rules acceptance page. You must accept the competition rules in order to participate. These rules govern how many submissions you can make per day, the maximum team size, and other competition-specific details. Then, click on **"I Understand and Accept"** to indicate that you will abide by the competition rules.
# Getting started
The game environment comes equipped with agents that have already been implemented for you. To see a list of these default agents, run:
```
from kaggle_environments import make, evaluate
# Create the game environment
# Set debug=True to see the errors if your agent refuses to run
env = make("connectx", debug=True)
# List of available default agents
print(list(env.agents))
```
The `"random"` agent selects (uniformly) at random from the set of **valid moves**. In Connect Four, a move is considered valid if there's still space in the column to place a disc (i.e., if the board has seven rows, the column has fewer than seven discs).
In the code cell below, this agent plays one game round against a copy of itself.
```
# Two random agents play one game round
env.run(["random", "random"])
# Show the game
env.render(mode="ipython")
```
You can use the player above to view the game in detail: every move is captured and can be replayed. _Try this now!_
As you'll soon see, this information will prove incredibly useful for brainstorming ways to improve our agents.
# Defining agents
To participate in the competition, you'll create your own agents.
Your agent should be implemented as a Python function that accepts two arguments: `obs` and `config`. It returns an integer with the selected column, where indexing starts at zero. So, the returned value is one of 0-6, inclusive.
We'll start with a few examples, to provide some context. In the code cell below:
- The first agent behaves identically to the `"random"` agent above.
- The second agent always selects the middle column, whether it's valid or not! Note that if any agent selects an invalid move, it loses the game.
- The third agent selects the leftmost valid column.
```
import random
import numpy as np
# Selects random valid column
def agent_random(obs, config):
    """Drop a disc uniformly at random into one of the non-full columns."""
    playable = []
    for col in range(config.columns):
        # a column is open while its top cell (first row of the flat board) is 0
        if obs.board[col] == 0:
            playable.append(col)
    return random.choice(playable)
# Selects middle column
def agent_middle(obs, config):
    """Always play the center column, whether or not it is still open."""
    center = config.columns // 2
    return center
# Selects leftmost valid column
def agent_leftmost(obs, config):
    """Play the leftmost column whose top cell is still empty."""
    for col in range(config.columns):
        if obs.board[col] == 0:
            return col
    # no open column: mirror the original's IndexError from indexing an
    # empty list of valid moves
    raise IndexError('list index out of range')
```
So, what are `obs` and `config`, exactly?
### `obs`
`obs` contains two pieces of information:
- `obs.board` - the game board (a Python list with one item for each grid location)
- `obs.mark` - the piece assigned to the agent (either `1` or `2`)
`obs.board` is a Python list that shows the locations of the discs, where the first row appears first, followed by the second row, and so on. We use `1` to track player 1's discs, and `2` to track player 2's discs. For instance, for this game board:
<center>
<img src="https://i.imgur.com/kSYx4Nx.png" width=25%><br/>
</center>
`obs.board` would be `[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 2, 2, 0, 0, 0, 0, 2, 1, 2, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 2, 1, 2, 0, 2, 0]`.
### `config`
`config` contains three pieces of information:
- `config.columns` - number of columns in the game board (`7` for Connect Four)
- `config.rows` - number of rows in the game board (`6` for Connect Four)
- `config.inarow` - number of pieces a player needs to get in a row in order to win (`4` for Connect Four)
Take the time now to investigate the three agents we've defined above. Make sure that the code makes sense to you!
# Evaluating agents
To have the custom agents play one game round, we use the same `env.run()` method as before.
```
# Agents play one game round
env.run([agent_leftmost, agent_random])
# Show the game
env.render(mode="ipython")
```
The outcome of a single game is usually not enough information to figure out how well our agents are likely to perform. To get a better idea, we'll calculate the win percentages for each agent, averaged over multiple games. For fairness, each agent goes first half of the time.
To do this, we'll use the `get_win_percentages()` function (defined in a hidden code cell). _To view the details of this function, click on the "Code" button below._
```
def get_win_percentages(agent1, agent2, n_rounds=100):
    """Print win rates and invalid-move counts for two ConnectX agents.

    Each agent opens (roughly) half of the n_rounds games; results of the
    games opened by agent2 are flipped so agent1's score always comes first.
    """
    # Use default Connect Four setup
    config = {'rows': 6, 'columns': 7, 'inarow': 4}
    half = n_rounds//2
    # games where agent 1 moves first
    outcomes = evaluate("connectx", [agent1, agent2], config, [], half)
    # games where agent 2 moves first, flipped back into [agent1, agent2] order
    flipped = evaluate("connectx", [agent2, agent1], config, [], n_rounds-half)
    outcomes += [[second, first] for first, second in flipped]
    total = len(outcomes)
    print("Agent 1 Win Percentage:", np.round(outcomes.count([1,-1])/total, 2))
    print("Agent 2 Win Percentage:", np.round(outcomes.count([-1,1])/total, 2))
    # a None score marks an agent disqualified for an invalid move
    print("Number of Invalid Plays by Agent 1:", outcomes.count([None, 0]))
    print("Number of Invalid Plays by Agent 2:", outcomes.count([0, None]))
```
Which agent do you think performs better against the random agent: the agent that always plays in the middle (`agent_middle`), or the agent that chooses the leftmost valid column (`agent_leftmost`)? Let's find out!
```
get_win_percentages(agent1=agent_middle, agent2=agent_random)
get_win_percentages(agent1=agent_leftmost, agent2=agent_random)
```
It looks like the agent that chooses the leftmost valid column performs best!
# Your turn
These agents are quite simple. As the course progresses, you'll create increasingly complex agents! Continue to **[make your first competition submission](https://www.kaggle.com/kernels/fork/7677818)**.
---
*Have questions or comments? Visit the [course discussion forum](https://www.kaggle.com/learn/intro-to-game-ai-and-reinforcement-learning/discussion) to chat with other learners.*
| github_jupyter |
## Résumé - outils et résultats
Ici on cherche des récommendations sur le log des chansons écoutées, mais suite à un remaniement qui attribue une
note (entre 0 et 5) à chaque chanson pour chaque utilisateur. (Remaniement effectué par https://github.com/Patouche). Ce remaniement semble avoir rassemblé les 'doublons' (doublon: un utilisateur écoute chanson x un jour, le réécoute le lendemain) pour que chaque paire utilisateur--chanson paraisse uniquement maintenant une fois, avec son score.
OUTILS: library Surprise
RESULTATS
1. KNNwithMeans - plus des utilisateurs se rassemblent dans leur consomation, plus ils devraient s'échanger de chansons.
On arrive à prédire un score pour une chanson, dans le context d'un utilisateur, et sur (je crois) une échelle de 0 à 5, genre (pour une chanson très appréciée, et une autre peu appréciée):
user 0 & chanson 55915: 4.52 est
user 0 & chanson 78257: 0.37 est
- mais comme ces données viennent comme un élément à l'intérieur d'un object Surprise, je ne vois pas encore
comment exploiter cet outil. Il faudrait d'abord isoler le score 'est' d'une longue chaîne de résultats, et ensuite...mettre un boucle à tourner pour prédire chaque score pour un utilisateur? Très peu efficace.
2. Une fois les ensembles 'train' et 'test' établis à partir de notre log, on peut comparer des différents algos
pris de Surprise. On a simplement pris KNNwithMeans mais apparemment c'est BaselineOnly qui a le plus petit RMSE
## importer et remanier
```
import csv
import pandas as pd
from surprise import KNNBasic
from surprise import NormalPredictor
from surprise import BaselineOnly
from surprise import KNNWithMeans
from surprise import KNNWithZScore
from surprise import KNNBaseline
from surprise import Dataset
from surprise import Reader
from surprise import accuracy
from surprise.model_selection import cross_validate
from surprise.model_selection import train_test_split
# import the log with its 'scores' derived by patouche
log = pd.read_csv('.CSV FILE LOCATION')
# rename the 'count' column because 'count' is a method in python libraries
log.rename(columns = {'song_id':'song','count':'count_score'}, inplace = True)
print(log.shape)
log.head(5)
# changer le type de la colonne 'song' à 'catégorie'
log['song'] = log['song'].astype("category")
# créer une colonne 'song ID' qui sera plus facile à manipuler que les strings youtube qui font 'nom'
log['song_id'] = log['song'].cat.codes
# changer le type de song_id aussi pour que la colonne soit 'catégorie'
log.song_id = log.song_id.astype("category")
log.info()
# remanier l'ordre des colonnes pour mieux suivre la logique qui sera appliquée plus bas
log = log[['user_id', 'song_id', 'count_score']]
log.head(3)
```
## 1. Kerpanic KNNwithMeans (from surprise) - explicit User User collaborative filtering
https://kerpanic.wordpress.com/2018/03/26/a-gentle-guide-to-recommender-systems-with-surprise/
USER:USER
The output is the prediction of user u’s rating on item i:
si on donnait chanson X à utilisateur, que dirait elle?
We utilize the similarity measure between user u and user v in this case.
ITEM:ITEM - DO NOT RUN, TAKES FOREVER / HANGS. I DONT KNOW WHY
Instead of using user similarity, we use item similarity measure to calculate the prediction.
Similarity is now between item i and item j, instead of between user u and v as before.
```
# build a Reader that declares the rating scale of the scores
reader = Reader(rating_scale = (0, 5))
# load the (user, song, score) columns of the log into a Surprise Dataset
data = Dataset.load_from_df(log[['user_id', 'song_id', 'count_score']], reader=reader)
# hold out 15% of the data for testing
trainset, testset = train_test_split(data, test_size=.15)
# Use user_based true/false to switch between user-based or item-based collaborative filtering
algo = KNNWithMeans(k=50, sim_options={'name': 'pearson_baseline', 'user_based': True})
# train the algorithm on the training split of our log
algo.fit(trainset)
# once trained, we can request predictions for specific (user, song) pairs
# with algo.predict
uid = 1273 # user 1273
iid = 73445 # song 73445
# get a prediction for specific users and items.
pred = algo.predict(uid, iid, r_ui=4, verbose = True)
pred
# result: estimated score 0.57
```
notons les scores rendu par l'algo quand on demande des chansons aimé / mal aimé par les utilisateurs
user 0 / chanson 55915 (score 5) gives 4.52 est
user 0 / chanson 78257 (score 1) gives 0.37 est
user 1270 / chanson 18125 (score 5) gives 2 est
user 1273 / chanson 73445 (score 5) gives 5 est
user 1274 / chanson 61382 (score 0) gives 0.43 est
user 1274 / chanson 60518 (score 5) gives 5 est
## 2. choisir son algo - d'abord notre KNNwithMeans
```
# on crée / définit un lecteur en précisant l'échelle de notes
reader = Reader(rating_scale = (0, 5))
# on définit 'data' qui prendra comme paramètres les colonnes utilisateur, chanson et score (et le lecteur)
data = Dataset.load_from_df(log[['user_id', 'song_id', 'count_score']], reader=reader)
# on effectue une 'cross validation' avec ce 'data' et l'algo KNN basic
cross_validate(KNNWithMeans(), data, verbose=False)
```
## ensuite les autre...
```
# KNN BASIC
cross_validate(KNNBasic(), data, verbose=False)
# KNN BASELINE
cross_validate(KNNBaseline(), data, verbose=False)
# KNN WITH Z SCORE
cross_validate(KNNWithZScore(), data, verbose=False)
# NORMAL PREDICTOR
cross_validate(NormalPredictor(), data, verbose=False)
# BASELINE ONLY
cross_validate(BaselineOnly(), data, verbose=False)
```
| github_jupyter |
```
#import libraries
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import warnings
%matplotlib inline
#ignore all warnings
# NOTE(review): blanket warning suppression can hide real issues;
# consider filtering only the specific categories that are noisy.
warnings.filterwarnings("ignore")
#read the data
data = pd.read_csv("Data/train.csv")
data.head()
```
## Checking for missing values
```
# bar chart of null counts per column
plt.figure(figsize=(13, 6))
sns.barplot(x=data.columns, y=data.isnull().sum())
'''
we only have missing data in three columns:
age (~200), cabin (~700), embarked(~0)
'''
data.isnull().sum()
```
## EDA
### Sex x Survival
```
# survival rate and raw counts broken down by sex
sns.barplot(x="Sex", y="Survived", data=data)
sns.countplot(x="Survived", hue="Sex", data=data)
sns.countplot(x="Sex", data=data)
data['Sex'].value_counts()
data['Survived'].value_counts()
```
#### We can see that almost 600 passengers onboard were men (65%) and approximately 300 were women (35%).
#### More than half of the women survived (70%). More than 200 of the total 314. And less than 20% of the men survived. Approximately 100 of the total 577. We can conclude that women are most likely to survive.
#### More people died (61%) than survived (39%).
### Age x Survival
```
# age distribution of survivors vs non-survivors, by sex
sns.barplot(y="Age", x="Survived", hue="Sex", data=data)
plt.legend(loc=0)
sns.swarmplot(y="Age", x="Survived", hue="Sex", data=data)
plt.legend(loc=0)
# NOTE(review): pd.cut overwrites the numeric Age column with 4
# equal-width interval categories; the raw ages are lost from here on.
data['Age'] = pd.cut(data['Age'], 4)
sns.barplot(data=data, x="Age", y="Survived")
sns.barplot(data=data, x="Age", hue="Sex", y="Survived")
```
#### The survive ratio (in general) decreases with the Age.
#### Passenger with 0.34 - 20.315 have the highest survive ratio with almost 48%. Between 20.315 - 40.21 and 40.21 - 60.105 are almost 40% each.
#### Men highest survive ratio = 0.34 - 20.315 years old (young men).
#### Women highest survive ratio = 20.315 - 40.21 years old (young and adult women).
### Pclass x Survived
```
# survival rate per passenger class, and class counts
sns.barplot(x="Pclass", y="Survived", data=data)
# NOTE(review): hue="Pclass" with x="Pclass" is redundant; a plain
# countplot(x="Pclass") would produce the same information more clearly.
sns.countplot(x="Pclass", hue="Pclass", data=data)
plt.legend(loc=0)
```
#### Most of the passengers were in the class 3 (almost 500). But the class 1 has the highest survived ratio (almost 65%). And the class 3 has the lowest survived ratio (almost 25%). The survived ratio decreases with Pclass
```
# survival rate per class, split by sex
sns.barplot(x="Pclass", y="Survived", hue="Sex", data=data)
```
#### Almost all women from the class 1 and 2 survived. And almost all men from the class 2 and 3 died (less than 20% survived from each class)
### Embarked x Survived
```
# passenger counts per port of embarkation (C, Q, S)
sns.countplot(x="Embarked", data=data)
```
#### More than 600 passengers embarked from Southampton (S)
```
# survivor vs non-survivor counts per port of embarkation
sns.countplot(x="Embarked", hue="Survived", data=data)
```
#### Embarked C has the highest survive rate (more than 50%). Embarked S has the lowest survive rate (less than 35%).
```
# survival rate per embarkation port, split by sex
sns.barplot(x="Embarked", y="Survived", hue="Sex", data=data)
```
#### Woman has the highest survive rate in all embarked (+60%, +80% and almost 80%, respectively)
#### Embarked C has the highest man survive ratio (near 30%)
### Fare x Survived
```
# bin Fare into 4 equal-width intervals (overwrites the numeric column)
data['Fare'] = pd.cut(data['Fare'], 4)
sns.barplot(data=data, x="Fare", y="Survived")
sns.barplot(data=data, x="Fare", hue="Sex", y="Survived")
```
#### Survival rate increases with Fare
### Cabin x Survived
```
# keep only the deck letter (first character) of each cabin code
data['Cabin'] = data['Cabin'].str[0]
data['Cabin'].head()
# first plot excludes NaN cabins; after filling them with 'U' (unknown)
# the second plot shows the full distribution
sns.countplot(data['Cabin'])
data['Cabin'].fillna('U', inplace=True)
sns.countplot(data['Cabin'])
```
#### We have more unknown cabins than known ones
```
# survival rate per deck letter ('U' = unknown cabin)
sns.barplot(x="Cabin", y="Survived", data=data)
```
#### Cabins 'E', 'D' and 'B' has the highest survival rate
### SibSp x Survived
```
# survival rate vs number of siblings/spouses aboard
sns.barplot(x='SibSp', y='Survived', data=data)
```
#### Survival rate decreases with SibSp
#### Highest survival rate = 1 SibSp
### Parch x Survived
```
# survival rate vs number of parents/children aboard
sns.barplot(x='Parch', y='Survived', data=data)
```
#### Highest survival rate = 3 Parch
### TotalOnboard x Survived and Alone x Survived
```
# engineered features: total family members aboard, and a 0/1 alone flag
data['TotalOnboard'] = data['SibSp'] + data['Parch']
data['IsAlone'] = np.where(data['TotalOnboard'] == 0, 1, 0)
data['IsAlone']
sns.barplot(x='TotalOnboard', y='Survived', data=data)
```
#### Survival rate is higher when total family members onboard is between 1 and 3
#### Survival rate for alone people is higher than big family size (4 or plus family members onboard)
```
# survival rate for passengers travelling alone, overall and by sex
sns.barplot(x='IsAlone', y='Survived', data=data)
sns.barplot(x='IsAlone', y='Survived', hue='Sex', data=data)
```
#### Alone has a 30% survival rate
#### Single woman has a higher survival rate (70%) than single man (less than 20%)
### Name (Title) x Survived
```
import re

# Extract the honorific title (e.g. " Mr.", " Mrs.") from each passenger
# name.  Titanic names follow "<Last>, <Title>. <First>", so the pattern
# should always match, but guard against None to avoid AttributeError.
def _extract_title(name):
    match = re.search(r' \w*\.', str(name))
    return match.group(0) if match else ''

data['Title'] = [_extract_title(x) for x in data['Name']]
# BUG FIX: DataFrame.drop() returns a copy -- the original call discarded
# the result, leaving the Name column in place.  Drop it in place instead.
data.drop(columns=['Name'], inplace=True)
plt.figure(figsize=(10, 8))
sns.countplot(data['Title'])
# normalise the four common titles and bucket every rare one as 'Others'
data['Title'] = data['Title'].replace(' Mr.', 'Mr')
data['Title'] = data['Title'].replace(' Mrs.', 'Mrs')
data['Title'] = data['Title'].replace(' Miss.', 'Miss')
data['Title'] = data['Title'].replace(' Master.', 'Master')
data['Title'] = data['Title'].replace([' Don.', ' Rev.', ' Dr.', ' Mme.', ' Ms.', ' Major.', ' Lady.', ' Sir.', ' Mlle.', ' Col.', ' Capt.',' Countess.', ' Jonkheer.'], 'Others')
sns.countplot(data['Title'])
sns.barplot(x="Title", y="Survived", data=data)
```
#### Mrs (80%) and Miss (70%) are the name's title with the most survival rate. That confirms even more that females have a bigger chance to survive
| github_jupyter |
```
!pip3 install yfinance
# importar as bibliotecas necessárias
import pandas as pd
from datetime import datetime, date, timedelta
import matplotlib.pyplot as plt
import yfinance as yf
def stockData(stock_name, start_date, end_date=None, country='US'):
    """Return a DataFrame with daily Yahoo Finance price data for
    *stock_name* from start_date up to end_date (defaults to today).

    For Brazilian tickers (country='BR') the '.SA' suffix required by
    the Yahoo Finance API is appended automatically.
    """
    # BUG FIX: the default used to be `end_date=date.today()`, which is
    # evaluated once at function-definition time and therefore goes stale
    # in a long-running session.  Resolve "today" at call time instead.
    if end_date is None:
        end_date = date.today()
    ticker = stock_name + '.SA' if country == 'BR' else stock_name
    # download daily OHLCV data (the dead `df = pd.DataFrame()` was removed)
    return yf.download(ticker, start=start_date, end=end_date)
```
### Importando uma ação Brasileira
Criei uma função chamada stockData que retorna um dataFrame com os dados da stock desejada. Note que, por se tratar da API do Yahoo Finance, temos que adicionar ".SA" ao final de cada ticker brasileiro. Assim, para as stocks americanas, você pode colocar diretamente o ticker desejado.
```
# download ITUB4 daily prices ('.SA' suffix added by stockData for BR)
df = stockData("ITUB4", start_date='2020-01-03', end_date='2021-03-31', country='BR')
print(df.head())
# plot the closing-price series
figure = plt.plot(df["Close"])
plt.xlabel('Mes/Ano')
plt.ylabel('Preco da Acao (R$) - Fechamento')
plt.show()
```
### Importando ação da bolsa americana
```
# US ticker: no suffix needed (country defaults to 'US')
df2 = stockData("TER", start_date='2020-01-03', end_date='2021-04-01')
figure = plt.plot(df2["Close"])
plt.xlabel('Mes/Ano')
plt.ylabel('Preco da Stock (US$) - Fechamento')
plt.show()
```
### Calculando a correlação entre ativos
```
def stockCorrelation(stock_list, start_date, end_date, country='US'):
    """Plot a correlation heatmap of the closing prices of the tickers in
    *stock_list* over [start_date, end_date] and return the correlation
    matrix (DataFrame).
    """
    # BUG FIX: `np` and `sn` were used below but never imported anywhere
    # in this notebook, raising NameError on first call.
    import numpy as np
    import seaborn as sn

    close_prices = {}
    for stock in stock_list:
        data = stockData(stock, start_date, end_date, country=country)
        close_prices[stock] = data['Close']
    df = pd.DataFrame(close_prices, columns=stock_list)
    print(df)
    corrMatrix = df.corr()
    # mask the upper triangle so the symmetric matrix is shown only once.
    # BUG FIX: np.bool was removed in NumPy 1.24 -- use the builtin bool.
    mask = np.triu(np.ones_like(corrMatrix, dtype=bool))
    sn.heatmap(corrMatrix, mask=mask, annot=True)
    plt.title('Correlacao entre os ativos')
    plt.show()
    # RETURN CORRELATION MATRIX, PLOT DIAGONAL MATRIX
    return corrMatrix

bancoes = ['BBAS3', 'ITUB4', 'BBDC3', 'SANB11', 'BPAC11']
```
#### Correlação entre "bancões" brasileiros
```
# correlation heatmap for the large Brazilian banks
stockCorrelation(bancoes,'2020-01-03', '2021-04-01', country='BR')
```
#### Correlação entre big techs americanas
```
# correlation heatmap for the US big-tech stocks
bigtechs = ['FB', 'AAPL', 'MSFT', 'GOOGL']
stockCorrelation(bigtechs,'2020-01-03', '2021-04-01', country='US')
```
| github_jupyter |
# Análisis de los datos obtenidos
Uso de ipython para el análisis y muestra de los datos obtenidos durante la producción. Se implementa un regulador experto. Los datos analizados son del día 18 de Agosto del 2015
Los datos del experimento:
* Hora de inicio: 16:08
* Hora final : 16:35
* Filamento extruido:
* $T: 150ºC$
* $V_{min} tractora: 1.5 mm/s$
* $V_{max} tractora: 3.4 mm/s$
* Los incrementos de velocidades en las reglas del sistema experto son distintas:
* En los caso 3 y 5 se mantiene un incremento de +2.
* En los casos 4 y 6 se reduce el incremento a -1.
```
%pylab inline
# import the libraries we use
import numpy as np
import pandas as pd
import seaborn as sns
# print each library version for reproducibility
print ("Numpy v{}".format(np.__version__))
print ("Pandas v{}".format(pd.__version__))
print ("Seaborn v{}".format(sns.__version__))
# open the CSV file with the sampled production data
datos = pd.read_csv('M1.CSV')
# columns of the file we will work with
columns = ['Diametro X','Diametro Y', 'VELOCIDAD']
# show summary statistics of the selected columns
datos[columns].describe()
#datos.describe().loc['mean',['Diametro X [mm]', 'Diametro Y [mm]']]
```
Representamos ambos diámetro y la velocidad de la tractora en la misma gráfica
```
# Plot both diameters with the 1.65/1.85 mm quality limits drawn in red.
# BUG FIX: DataFrame.ix was deprecated in pandas 0.20 and removed in 1.0;
# use the label-based .loc accessor instead.
datos.loc[:, "Diametro X":"Diametro Y"].plot(figsize=(16,10),ylim=(0.5,3)).hlines([1.85,1.65],0,3500,colors='r')
#datos['RPM TRAC'].plot(secondary_y='RPM TRAC')
datos.loc[:, "Diametro X":"Diametro Y"].boxplot(return_type='axes')
```
Con esta segunda aproximación se ha conseguido estabilizar los datos. Se va a tratar de bajar ese porcentaje. Como cuarta aproximación, vamos a modificar las velocidades de tracción. El rango de velocidades propuesto es de 1.5 a 5.3, manteniendo los incrementos del sistema experto como en el actual ensayo.
Comparativa de Diametro X frente a Diametro Y para ver el ratio del filamento
```
# X vs Y diameter scatter: deviation from the diagonal = non-round filament
plt.scatter(x=datos['Diametro X'], y=datos['Diametro Y'], marker='.')
```
#Filtrado de datos
Las muestras tomadas $d_x >= 0.9$ or $d_y >= 0.9$ las asumimos como error del sensor, por ello las filtramos de las muestras tomadas.
```
# keep only samples where both diameters are >= 0.9 mm; smaller readings
# are treated as sensor errors per the note above
datos_filtrados = datos[(datos['Diametro X'] >= 0.9) & (datos['Diametro Y'] >= 0.9)]
#datos_filtrados.ix[:, "Diametro X":"Diametro Y"].boxplot(return_type='axes')
```
##Representación de X/Y
```
# same X/Y scatter after removing the sensor-error samples
plt.scatter(x=datos_filtrados['Diametro X'], y=datos_filtrados['Diametro Y'], marker='.')
```
#Analizamos datos del ratio
```
# X/Y ratio of the filament diameters (1.0 == perfectly round)
ratio = datos_filtrados['Diametro X']/datos_filtrados['Diametro Y']
ratio.describe()
# BUG FIX: pd.rolling_mean / pd.rolling_std were deprecated in pandas 0.18
# and removed in 0.23 -- use the Series.rolling() window API instead.
rolling_mean = ratio.rolling(50).mean()
rolling_std = ratio.rolling(50).std()
rolling_mean.plot(figsize=(12,6))
# plt.fill_between(ratio, y1=rolling_mean+rolling_std, y2=rolling_mean-rolling_std, alpha=0.5)
ratio.plot(figsize=(12,6), alpha=0.6, ylim=(0.5,1.5))
```
#Límites de calidad
Calculamos el número de veces que traspasamos unos límites de calidad.
$Th^+ = 1.85$ and $Th^- = 1.65$
```
# upper and lower diameter quality thresholds (mm)
Th_u = 1.85
Th_d = 1.65
# samples where either diameter falls outside the quality band
data_violations = datos[(datos['Diametro X'] > Th_u) | (datos['Diametro X'] < Th_d) |
                        (datos['Diametro Y'] > Th_u) | (datos['Diametro Y'] < Th_d)]
data_violations.describe()
data_violations.plot(subplots=True, figsize=(12,12))
```
| github_jupyter |
```
import matplotlib.pyplot as plt
%matplotlib inline
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras.engine.base_layer import Layer
from keras.layers import Activation, Dense
from keras import backend as K
from sklearn.model_selection import train_test_split
from keras.datasets import mnist
from keras.optimizers import SGD
from keras.utils import np_utils
from __future__ import print_function
import keras
from keras.models import Sequential
from keras.layers.core import Flatten
from keras.layers import Dropout
from keras.layers import Conv2D, MaxPooling2D
from keras.layers.normalization import BatchNormalization
import numpy as np
class Mish(Layer):
    '''
    Mish Activation Function.
    .. math::
        mish(x) = x * tanh(softplus(x)) = x * tanh(ln(1 + e^{x}))
    Shape:
        - Input: Arbitrary. Use the keyword argument `input_shape`
        (tuple of integers, does not include the samples axis)
        when using this layer as the first layer in a model.
        - Output: Same shape as the input.
    Examples:
        >>> X_input = Input(input_shape)
        >>> X = Mish()(X_input)
    '''

    def __init__(self, **kwargs):
        super(Mish, self).__init__(**kwargs)
        # pass masks through unchanged (element-wise activation)
        self.supports_masking = True

    def call(self, inputs):
        # mish(x) = x * tanh(softplus(x))
        return inputs * K.tanh(K.softplus(inputs))

    def get_config(self):
        # BUG FIX: the original returned
        # dict(list(base_config.items()) + list(config.items())) where
        # `config` was never defined, raising NameError whenever the layer
        # was serialised.  Mish adds no constructor arguments, so the base
        # config is sufficient.
        base_config = super(Mish, self).get_config()
        return dict(base_config)

    def compute_output_shape(self, input_shape):
        # element-wise activation: output shape equals input shape
        return input_shape
def mish(x):
    """Functional Mish activation: x * tanh(softplus(x)), wrapped in a
    Lambda layer so it can be passed as `activation=` to Keras layers."""
    return keras.layers.Lambda(lambda x: x*K.tanh(K.softplus(x)))(x)
# training hyper-parameters
batch_size = 128
num_classes = 10
epochs = 10
# input image dimensions
img_rows, img_cols = 28, 28
# the data, split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# add the channel axis in the position the backend expects (NCHW vs NHWC)
if K.image_data_format() == 'channels_first':
    x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
    x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
    input_shape = (1, img_rows, img_cols)
else:
    x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
    x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
    input_shape = (img_rows, img_cols, 1)
# scale pixel values from [0, 255] to [0, 1]
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
def model_relu(num_layers, x_train, y_train, batch_size, epochs, x_test, y_test):
    """Train a LeNet-style CNN with `num_layers` Dense(500) blocks and
    return its test accuracy.

    NOTE(review): despite the name, this model uses the Mish activation
    throughout (via `mish` / `Mish`), not ReLU -- consider renaming.
    """
    model = Sequential()
    # convolutional front-end: two conv layers + max pooling
    model.add(Conv2D(20, kernel_size=(5, 5),
                     activation=mish,
                     input_shape=input_shape))
    model.add(Conv2D(50, (5, 5), activation=mish))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    # stack of Dense -> BatchNorm -> Mish -> Dropout blocks
    for layers in range(num_layers):
        model.add(Dense(500))
        model.add(BatchNormalization())
        model.add(Mish())
        model.add(Dropout(0.25))
    model.add(Dense(num_classes, activation='softmax'))
    model.compile(loss=keras.losses.categorical_crossentropy,
                  optimizer=keras.optimizers.SGD(),
                  metrics=['accuracy'])
    model.fit(x_train, y_train,
              batch_size=batch_size,
              epochs=epochs,
              verbose=1,
              validation_data=(x_test, y_test))
    # evaluate() returns [loss, accuracy]; report accuracy only
    score = model.evaluate(x_test, y_test, verbose=0)
    return score[1]
# sweep network depth from 12 to 22 dense blocks and record test accuracy
# NOTE(review): l1 stores accuracies on a 0-1 scale while l2 below is
# scaled to percent -- scale l1 the same way before plotting them together.
l1 = []
for x in range(12,23):
    test_acc = model_relu(x, x_train, y_train, batch_size, epochs, x_test, y_test)
    l1.append(test_acc)
# Hard-coded Swish reference accuracies for the 12..22-layer sweep,
# converted from fractions to percentages for plotting.
_swish_accuracies = [
    0.982, 0.9791, 0.9795, 0.9752, 0.972,
    0.9622, 0.9557, 0.9174, 0.7358, 0.6341, 0.3968,
]
l2 = [100 * acc for acc in _swish_accuracies]
# x-tick positions and their layer-count labels
positions = (0, 2, 4, 6, 8, 10)
labels = (15, 17, 19, 21, 23, 25)
fig = plt.figure(figsize=(12,12))
ax = fig.add_subplot(111)
# NOTE(review): `l` is never defined anywhere in this notebook -- it is
# presumably the list of ReLU test accuracies; define it before running
# this cell or it raises NameError.
plt.plot(l,'b-o', label= "ReLU")
plt.plot(l1, 'r-o', label= "Mish")
plt.plot(l2, 'k-o', label= "Swish")
plt.legend(loc='best', fontsize=15)
plt.xticks(positions, labels)
plt.grid()
plt.xlabel('Number of Layers', fontsize = 20)
plt.ylabel('Testing Accuracy', fontsize = 20)
ax.tick_params(axis='both', which='major', labelsize=15)
ax.title.set_text('Testing Accuracy vs Number of Layers on MNIST')
ax.title.set_fontsize(20)
plt.savefig("layersacc.png", bbox_inches = 'tight')
plt.show()
```
| github_jupyter |
<small><small><i>
All the IPython Notebooks in **Data Science Interview Questions** series by Dr. Milaan Parmar are available @ **[GitHub](https://github.com/milaan9/DataScience_Interview_Questions)**
</i></small></small>
# Data Analysis ➞ <span class='label label-default'>27 Questions</span>
### 1. (Given a Dataset) Analyze this dataset and tell me what you can learn from it.
<span class='label label-default'>Solution</span>
- Typical data cleaning and visualization.
### 2. What is `R2`? What are some other metrics that could be better than `R2` and why?
<span class='label label-default'>Solution</span>
- goodness of fit measure. variance explained by the regression / total variance.
- the more predictors you add, the higher $R^2$ becomes.
- hence use adjusted $R^2$ which adjusts for the degrees of freedom.
- or train error metrics.
### 3. What is the curse of dimensionality?
<span class='label label-default'>Solution</span>
- High dimensionality makes clustering hard, because having lots of dimensions means that everything is **"far away"** from each other.
- For example, to cover a fraction of the volume of the data we need to capture a very wide range for each variable as the number of variables increases.
- All samples are close to the edge of the sample. And this is a bad news because prediction is much more difficult near the edges of the training sample.
- The sampling density decreases exponentially as p increases and hence the data becomes much more sparse without significantly more data.
- We should conduct PCA to reduce dimensionality.
### 4. Is more data always better?
<span class='label label-default'>Solution</span>
- **Statistically**
- It depends on the quality of your data, for example, if your data is biased, just getting more data won’t help.
- It depends on your model. If your model suffers from high bias, getting more data won’t improve your test results beyond a point. You’d need to add more features, etc.
- **Practically**
- More data usually benefit the models.
- Also there’s a tradeoff between having more data and the additional storage, computational power, memory it requires. Hence, always think about the cost of having more data.
### 5. What are advantages of plotting your data before performing analysis?
<span class='label label-default'>Solution</span>
- Data sets have errors. You won't find them all but you might find some. That 212 year old man. That 9 foot tall woman.
- Variables can have skewness, outliers, etc. Then the arithmetic mean might not be useful, which means the standard deviation isn't useful.
- Variables can be multimodal! If a variable is multimodal then anything based on its mean or median is going to be suspect.
### 6. How can you make sure that you don’t analyze something that ends up meaningless?
<span class='label label-default'>Solution</span>
- Proper exploratory data analysis.
- In every data analysis task, there's the exploratory phase where you're just graphing things, testing things on small sets of the data, summarizing simple statistics, and getting rough ideas of what hypotheses you might want to pursue further.
- Then there's the exploratory phase, where you look deeply into a set of hypotheses.
- The exploratory phase will generate lots of possible hypotheses, and the exploratory phase will let you really understand a few of them. Balance the two and you'll prevent yourself from wasting time on many things that end up meaningless, although not all.
### 7. What is the role of trial and error in data analysis? What is the the role of making a hypothesis before diving in?
<span class='label label-default'>Solution</span>
- data analysis is a repetition of setting up a new hypothesis and trying to refute the null hypothesis.
- The scientific method is eminently inductive: we elaborate a hypothesis, test it and refute it or not. As a result, we come up with new hypotheses which are in turn tested and so on. This is an iterative process, as science always is.
### 8. How can you determine which features are the most important in your model?
<span class='label label-default'>Solution</span>
- Linear regression can use p-value
- run the features though a Gradient Boosting Machine or Random Forest to generate plots of relative importance and information gain for each feature in the ensembles.
- Look at the variables added in forward variable selection.
### 9. How do you deal with some of your predictors being missing?
<span class='label label-default'>Solution</span>
- Remove rows with missing values - This works well if
- the values are missing randomly (see [Vinay Prabhu's answer](https://www.quora.com/How-can-I-deal-with-missing-values-in-a-predictive-model/answer/Vinay-Prabhu-7) for more details on this)
- if you don't lose too much of the dataset after doing so.
- Build another predictive model to predict the missing values.
- This could be a whole project in itself, so simple techniques are usually used here.
- Use a model that can incorporate missing data.
- Like a random forest, or any tree-based method.
### 10. You have several variables that are positively correlated with your response, and you think combining all of the variables could give you a good prediction of your response. However, you see that in the multiple linear regression, one of the weights on the predictors is negative. What could be the issue?
<span class='label label-default'>Solution</span>
- Multicollinearity refers to a situation in which two or more explanatory variables in a [multiple regression](https://en.wikipedia.org/wiki/Multiple_regression "Multiple regression") model are highly linearly related.
- Leave the model as is, despite multicollinearity. The presence of multicollinearity doesn't affect the efficiency of extrapolating the fitted model to new data provided that the predictor variables follow the same pattern of multicollinearity in the new data as in the data on which the regression model is based.
- principal component regression
### 11. Let’s say you’re given an unfeasible amount of predictors in a predictive modeling task. What are some ways to make the prediction more feasible?
<span class='label label-default'>Solution</span>
- PCA
### 12. Now you have a feasible amount of predictors, but you’re fairly sure that you don’t need all of them. How would you perform feature selection on the dataset?
<span class='label label-default'>Solution</span>
- ridge / lasso / elastic net regression.
- Univariate Feature Selection where a statistical test is applied to each feature individually. You retain only the best features according to the test outcome scores.
- Recursive Feature Elimination:
- First, train a model with all the feature and evaluate its performance on held out data.
- Then drop let say the 10% weakest features (e.g. the feature with least absolute coefficients in a linear model) and retrain on the remaining features.
- Iterate until you observe a sharp drop in the predictive accuracy of the model.
### 13. Your linear regression didn’t run and communicates that there are an infinite number of best estimates for the regression coefficients. What could be wrong?
<span class='label label-default'>Solution</span>
- p > n.
- If some of the explanatory variables are perfectly correlated (positively or negatively) then the coefficients would not be unique.
### 14. You run your regression on different subsets of your data, and find that in each subset, the beta value for a certain variable varies wildly. What could be the issue here?
<span class='label label-default'>Solution</span>
- The dataset might be heterogeneous. In which case, it is recommended to cluster datasets into different subsets wisely, and then draw different models for different subsets. Or, use models like non parametric models (trees) which can deal with heterogeneity quite nicely.
### 15. What is the main idea behind ensemble learning? If I had many different models that predicted the same response variable, what might I want to do to incorporate all of the models? Would you expect this to perform better than an individual model or worse?
<span class='label label-default'>Solution</span>
- The assumption is that a group of weak learners can be combined to form a strong learner.
- Hence the combined model is expected to perform better than an individual model.
- Assumptions:
- average out biases
- reduce variance
- Bagging works because some underlying learning algorithms are unstable: slightly different inputs leads to very different outputs. If you can take advantage of this instability by running multiple instances, it can be shown that the reduced instability leads to lower error. If you want to understand why, the original bagging paper( [http://www.springerlink.com/](http://www.springerlink.com/content/l4780124w2874025/)) has a section called "why bagging works"
- Boosting works because of the focus on better defining the "decision edge". By re-weighting examples near the margin (the positive and negative examples) you get a reduced error (see http://citeseerx.ist.psu.edu/vie...)
- Use the outputs of your models as inputs to a meta-model.
**For example:** if you're doing binary classification, you can use all the probability outputs of your individual models as inputs to a final logistic regression (or any model, really) that can combine the probability estimates.
One very important point is to make sure that the output of your models are out-of-sample predictions. This means that the predicted value for any row in your data-frame should NOT depend on the actual value for that row.
### 16. Given that you have wi-fi data in your office, how would you determine which rooms and areas are underutilized and over-utilized?
<span class='label label-default'>Solution</span>
- If the data is more used in one room, then that one is over utilized!
- Maybe account for the room capacity and normalize the data.
### 17. How could you use GPS data from a car to determine the quality of a driver?
<span class='label label-default'>Solution</span>
- Speed
- Driving paths
### 18. Given accelerometer, altitude, and fuel usage data from a car, how would you determine the optimum acceleration pattern to drive over hills?
<span class='label label-default'>Solution</span>
- Historical data?
### 19. Given position data of NBA players in a season’s games, how would you evaluate a basketball player’s defensive ability?
<span class='label label-default'>Solution</span>
- Evaluate his positions in the court.
### 20. How would you quantify the influence of a Twitter user?
<span class='label label-default'>Solution</span>
- like page rank with each user corresponding to the webpages and linking to the page equivalent to following.
### 21. Given location data of golf balls in games, how would construct a model that can advise golfers where to aim?
<span class='label label-default'>Solution</span>
- winning probability for different positions.
### 22. You have 100 mathletes and 100 math problems. Each mathlete gets to choose 10 problems to solve. Given data on who got what problem correct, how would you rank the problems in terms of difficulty?
<span class='label label-default'>Solution</span>
- One way you could do this is by storing a "skill level" for each user and a "difficulty level" for each problem. We assume that the probability that a user solves a problem only depends on the skill of the user and the difficulty of the problem.* Then we maximize the likelihood of the data to find the hidden skill and difficulty levels.
- The Rasch model for dichotomous data takes the form:
$ {\displaystyle \Pr {X_{ni}=1\\} = {\frac {\exp({\beta_{n}}-{\delta_{i}})}{1+\exp({\beta_{n}}-{\delta_{i}})}},} $
where is the ability of person and is the difficulty of item.
### 23. You have 5000 people that rank 10 sushis in terms of saltiness. How would you aggregate this data to estimate the true saltiness rank in each sushi?
<span class='label label-default'>Solution</span>
- Some people would take the mean rank of each sushi. If I wanted something simple, I would use the median, since ranks are (strictly speaking) ordinal and not interval, so adding them is a bit risque (but people do it all the time and you probably won't be far wrong).
### 24. Given data on congressional bills and which congressional representatives co-sponsored the bills, how would you determine which other representatives are most similar to yours in voting behavior? How would you evaluate who is the most liberal? Most republican? Most bipartisan?
<span class='label label-default'>Solution</span>
- collaborative filtering. you have your votes and we can calculate the similarity for each representatives and select the most similar representative.
- for liberal and republican parties, find the mean vector and find the representative closest to the center point.
### 25. How would you come up with an algorithm to detect plagiarism in online content?
<span class='label label-default'>Solution</span>
- reduce the text to a more compact form (e.g. fingerprinting, bag of words) then compare those with other texts by calculating the similarity.
### 26. You have data on all purchases of customers at a grocery store. Describe to me how you would program an algorithm that would cluster the customers into groups. How would you determine the appropriate number of clusters to include?
<span class='label label-default'>Solution</span>
- K-means
- choose a small value of k that still has a low SSE (elbow method)
- [Elbow method](https://bl.ocks.org/rpgove/0060ff3b656618e9136b)
### 27. Let’s say you’re building the recommended music engine at Spotify to recommend people music based on past listening history. How would you approach this problem?
<span class='label label-default'>Solution</span>
- content-based filtering
- collaborative filtering
<span class='label label-default'>Solution</span>
| github_jupyter |
# Edge detection with openCV
This notebook makes use of `openCV` and the [Canny edge detection](https://en.wikipedia.org/wiki/Canny_edge_detector) to extract edges as a proxy for shape. Later we only use those edges that are in the bounding box database for imagenette created earlier.
```
import cv2
import glob
import pickle
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from tqdm import tqdm
from pathlib import Path
```
Read in a sample image to check edge detection
```
save_path = Path("data/")
image_path = save_path / "imagenette2/train/n03028079/n03028079_28243.JPEG"
# read imagenette sample image.
# BUG FIX: the second argument of cv2.imread() is an IMREAD_* flag, not a
# colour-conversion code; passing cv2.COLOR_BGR2RGB (== 4) silently
# selected cv2.IMREAD_ANYCOLOR.  Load with the default flag instead; note
# the array is in OpenCV's BGR channel order.
image = cv2.imread(str(image_path))
```
Blurring also helps
```
# denoise first, then convert to grayscale as required by Canny
blurred = cv2.fastNlMeansDenoisingColored(image, None, 30, 10, 7, 21)
blurred = cv2.cvtColor(blurred, cv2.COLOR_BGR2GRAY)
```
Canny needs a grayscale image to work properly
```
# visualise the denoised grayscale input
plt.imshow(blurred, cmap = plt.cm.gray)
```
Perform canny edge detection
```
# perform edge detection
# fixed thresholds (30, 100) -- replaced by the automatic choice in
# auto_canny further below
edges = cv2.Canny(blurred, 30, 100)
plt.imshow(edges, cmap = plt.cm.gray)
```
As we can see the result is not very good and can be improved.
Finetuning parameters takes time, however. Because we want to automatize it anyway, we need a automatic choice for the parameters.
_____
## Automatize edge detection
To only perform edge detection on the images we actually will use, we need to load in the imagenette bounding boxes dataset we created earlier
```
# load the imagenette bounding-box table prepared in an earlier notebook
imgn_data_flt = pd.read_pickle(save_path / "imagenette_boundingboxes.pkl")
imgn_data_flt[:5]
```
Since we do not use the original sized image in the network, but a `256` resize with `224` center crop, we need to adjust the bounding boxes accordingly!
The columns we need have the following indexes:
```
# Column positions of the bounding-box coordinates and original image size
# (used for positional access via itertuples below).
xmax_index = imgn_data_flt.columns.get_loc("object.bndbox.xmax")
xmin_index = imgn_data_flt.columns.get_loc("object.bndbox.xmin")
ymax_index = imgn_data_flt.columns.get_loc("object.bndbox.ymax")
ymin_index = imgn_data_flt.columns.get_loc("object.bndbox.ymin")
szh_index = imgn_data_flt.columns.get_loc("size.height")
szw_index = imgn_data_flt.columns.get_loc("size.width")
# Rescale every box from the original image geometry to the 256-resize /
# 224-center-crop used by the network: scale by 256/original_size, shift
# by the 16-pixel crop margin ((256-224)/2) and clamp to [0, 224].
for row in imgn_data_flt.itertuples():
    # get the value for the index
    # add +1 because index is first "column" in the tuple
    xmin = row[xmin_index + 1]
    xmax = row[xmax_index + 1]
    ymin = row[ymin_index + 1]
    ymax = row[ymax_index + 1]
    height = row[szh_index + 1]
    width = row[szw_index + 1]
    # new values for x dimension
    x_change = 256 / int(width)
    xmax_n = int(x_change * int(xmax) - 16)
    xmax_n = min(xmax_n, 224)
    xmin_n = int(x_change * int(xmin) - 16)
    xmin_n = max(xmin_n, 0)
    # new values for y dimension
    y_change = 256 / int(height)
    ymax_n = int(y_change * int(ymax) - 16)
    ymax_n = min(ymax_n, 224)
    ymin_n = int(y_change * int(ymin) - 16)
    ymin_n = max(ymin_n, 0)
    # save to dataframe (row[0] is the DataFrame index label)
    imgn_data_flt.at[row[0], "xmax_n"] = xmax_n
    imgn_data_flt.at[row[0], "xmin_n"] = xmin_n
    imgn_data_flt.at[row[0], "ymax_n"] = ymax_n
    imgn_data_flt.at[row[0], "ymin_n"] = ymin_n
imgn_data_flt[:5]
```
Save modifed data with adpted bounding boxes to pickle
```
# persist the table with the adjusted bounding boxes for later notebooks
with open(save_path / "imgn_data_flt.pkl", 'wb') as f:
    pickle.dump(imgn_data_flt, f)
```
A function to do the preprocessing needed for canny
```
def preprocess_image(imagePath):
    """Load an image and prepare it for Canny edge detection.

    The image is denoised first (Canny is sensitive to noise) and then
    reduced to a single grayscale channel.

    Args:
        imagePath: path to the image file (anything cv2.imread accepts).

    Returns:
        Denoised grayscale image as a 2D array.
    """
    raw = cv2.imread(str(imagePath))
    denoised = cv2.fastNlMeansDenoisingColored(raw, None, 30, 10, 7, 21)
    return cv2.cvtColor(denoised, cv2.COLOR_BGR2GRAY)
```
The automatic choice of the lower and upper values allows for quicker processing
```
# as seen in
# https://www.pyimagesearch.com/2015/04/06/zero-parameter-automatic-canny-edge-detection-with-python-and-opencv/
def auto_canny(image, sigma=0.9):
    """Apply Canny edge detection with automatically chosen thresholds.

    The lower and upper thresholds are derived from the median pixel
    intensity, so no per-image manual tuning is required.

    Args:
        image: single-channel (grayscale) image.
        sigma: relative width of the threshold band around the median.

    Returns:
        Binary edge image.
    """
    median_intensity = np.median(image)
    lower_threshold = int(max(0, (1.0 - sigma) * median_intensity))
    upper_threshold = int(min(255, (1.0 + sigma) * median_intensity))
    return cv2.Canny(image, lower_threshold, upper_threshold)
```
`auto_canny` gives us a much better quality than the naive approach
```
plt.imshow(auto_canny(blurred), cmap = plt.cm.gray)
```
Check if everything worked as expected
```
"n03445777_5901" in list(imgn_data_flt['filename'])
```
We need a list of the files we want to analyze. We do not want to have those of the `validation` set and only want to use those that are present in our data
```
# Filenames for which we have bounding-box data.
imgn_fns = list(imgn_data_flt['filename'])
# for now we filter out those that would be in the original ImageNet validation set
known_names = set(imgn_fns)
imgn_pict = []
for candidate in Path(save_path / 'imagenette2/').rglob('*.JPEG'):
    if '_val_' in candidate.name:
        continue
    if candidate.name.split(".")[0] in known_names:
        imgn_pict.append(candidate)
print(imgn_pict[:10])
print(f'\nTotal images: {len(imgn_pict)}')
```
Save list of paths to pickle to be able to use it in other notebooks and scripts
```
# Persist the image path list for reuse in other notebooks and scripts.
with open(save_path / "imgn_pict.pkl", 'wb') as f:
    pickle.dump(imgn_pict, f)
```
Because we will produce a lot of data, we write it out line by line to not have to keep it in memory
Look [here](https://stackoverflow.com/questions/50274063/find-coordinates-of-a-canny-edge-image-opencv-python) for a description on how the coordinates are extracted.
In short: `np.where()` gets the indexes and with `zip()` the list of tuples with that index is created.
[This](https://stackoverflow.com/questions/34667282/numpy-where-detailed-step-by-step-explanation-examples) is a explanation on how `np.where` works in detail.
> When `a` is a 2d array, `np.where()` returns an array of row idx's, and an array of col idx's:
```
def write_edges(filename, bbox_data=None):
    """Run Canny edge detection on every image in ``imgn_pict`` and stream
    one line per image (filename TAB list of edge coordinates) to *filename*.

    Args:
        filename: output file path.
        bbox_data: optional DataFrame indexed by filename (without suffix)
            holding the crop-adjusted bounding-box columns ``xmin_n``,
            ``xmax_n``, ``ymin_n``, ``ymax_n``. When given, only edge
            pixels inside the bounding box are written.
    """
    with open(filename,'w') as file:
        # loop over the images
        for imagePath in tqdm(imgn_pict):
            # load the image, convert it to grayscale, and blur it slightly
            pre_image = preprocess_image(imagePath)
            # apply Canny edge detection
            edges = auto_canny(pre_image)
            # get edge coordinates; np.where on a 2D array returns
            # (row indices, column indices), i.e. (y, x) pairs after zipping
            indices = np.where(edges != [0])
            coordinates = list(zip(indices[0], indices[1]))
            fname_wos = imagePath.name.split('.')[0]
            if bbox_data is not None:
                xmin = bbox_data.at[fname_wos, "xmin_n"]
                xmax = bbox_data.at[fname_wos,"xmax_n"]
                ymin = bbox_data.at[fname_wos,"ymin_n"]
                ymax = bbox_data.at[fname_wos,"ymax_n"]
                # only save canny edge coordinates within bounding box
                # BUGFIX: the first zip element is the row (y coordinate)
                # and the second the column (x coordinate); the original
                # filter compared rows against the x-limits and columns
                # against the y-limits, applying the box transposed.
                coordinates = [(y, x) for y, x in coordinates
                               if ymin <= y <= ymax and xmin <= x <= xmax]
            # save to disk
            file.write(f'{fname_wos}\t{coordinates}\n')
```
Create the file
```
write_edges(save_path / "edges_data.csv")
```
## Subset with edges within the bounding boxes
For now we have created a file with all edges. We also need one only with those within the bounding box
```
# Index by filename so write_edges can look up bounding boxes via .at[].
imgn_data_flt.set_index(['filename'], drop=False, inplace=True)
imgn_data_flt[:5]
# The bbox values are still strings at this point — checked here, fixed below.
type(imgn_data_flt.at["n03445777_5901","object.bndbox.xmin"])
```
Some columns are saved as strings even though they should be numeric. Because we use some math operators in our function we are going to call, we have to fix that.
```
imgn_data_flt.iloc[:,3:7] = imgn_data_flt.iloc[:,xmin_index:ymax_index].apply(pd.to_numeric)
```
Now we can filter out those regions that are not on the bounding box area
```
write_edges(save_path / 'edges_data_flt.csv', bbox_data=imgn_data_flt)
```
___
## Visualizations
```
import seaborn as sns
from ast import literal_eval
# Consistent styling for the paper figures.
sns.set_context("paper")
sns.set_style("whitegrid")
sns.set_palette("colorblind")
from matplotlib import rc
# Render all figure text with LaTeX in a Palatino serif font.
rc('font',**{'family':'serif','serif':['Palatino'], 'size' : 11})
rc('text', usetex=True)
# The edges file is tab-separated: filename, list of coordinate tuples.
edges_flt = pd.read_csv(save_path / 'edges_data_flt.csv', sep='\t', header=None)
```
Let's set new headers for our dataframe
```
# Give the unnamed CSV columns meaningful headers.
edges_flt.columns = ['filename', 'edges']
edges_flt[:5]
```
Unfortunately, the `edges` was read in as a string and not as a data type list with integers. We have to fix that.
**Note**: This may take a while!
```
edges_flt['edges'] = edges_flt.edges.apply(literal_eval)
```
Would be nice to know how many edges each picture has...
```
# Number of detected edge pixels per image (.str.len also works on lists).
edges_flt['edgeslength'] = edges_flt['edges'].str.len()
edges_flt[:5]
```
Some basic descriptive insights
```
edges_flt['edgeslength'].describe(include='all')
```
We may have some outliers here!
```
# NOTE(review): distplot is deprecated in recent seaborn releases —
# switch to histplot/displot when upgrading.
sns.distplot(edges_flt['edgeslength'])
sns.despine(bottom = True, left = True)
```
Yes!
```
# Box plot makes the outliers explicit.
sns.boxplot(x=edges_flt['edgeslength'])
sns.despine(bottom = True, left = True)
```
Filter out outliers based on the `90%` quantile
```
q = edges_flt['edgeslength'].quantile(0.90)
```
Plot without outliers
```
sns.boxplot(x=edges_flt[edges_flt['edgeslength'] < q]['edgeslength'])
sns.despine(bottom = True, left = True)
edges_flt['edges'][:5]
# Flatten all coordinate lists; elements 0/1 of each tuple are the two
# image axes as produced by np.where (row, column).
x_val = [elem[0] for lst in edges_flt['edges'] for elem in lst]
y_val = [elem[1] for lst in edges_flt['edges'] for elem in lst]
```
Visualize the parts of the image where the edges were detected (within the bounding boxes).
Image sizes differ, however. To get a representative picture, we should scale the data beforehand.
**Note:** takes long to compute!
```
#sns.jointplot(x_val, y_val, kind="kde")
```
Let's use `pd.sample()` to visualize `500` random entries, since the complete analysis takes quite some time.
```
# Random subsample keeps the KDE computation tractable.
edges_smpl = edges_flt.sample(n=500)
edges_smpl[:5]
x_val_s = [elem[0] for lst in edges_smpl['edges'] for elem in lst]
y_val_s = [elem[1] for lst in edges_smpl['edges'] for elem in lst]
sns.jointplot(x_val_s, y_val_s, kind="kde")
sns.despine(bottom = True, left = True)
```
| github_jupyter |
[](https://colab.research.google.com/github/krasserm/bayesian-machine-learning/blob/master/latent_variable_models_part_2.ipynb)
```
try:
    # Use Tensorflow 2.x
    %tensorflow_version 2.x
    # Check if notebook is running in Google Colab
    import google.colab
except:
    # Not on Colab (or the magic is unavailable) — continue locally.
    pass
```
# Latent variable models - part 2: Stochastic variational inference and variational autoencoders
[Part 1](latent_variable_models_part_1.ipynb) of this article series introduced a latent variable model with discrete latent variables, the Gaussian mixture model (GMM), and an algorithm to fit this model to data, the EM algorithm. Part 2 covers a latent variable model with continuous latent variables for modeling more complex data, like natural images for example, and a Bayesian inference technique that can be used in conjunction with stochastic optimization algorithms.
Consider a natural image of size $100 \times 100$ with a single channel. This image is a point in $10.000$-dimensional space. Natural images are usually not uniformly distributed in this space but reside on a much lower-dimensional manifold within this high-dimensional space. The lower dimensionality of the manifold is related to the limited degrees of freedom in these images e.g. only a limited number of pixel value combinations are actually perceived as natural images.
Modeling natural images with latent variable models whose continuous latent variables represent locations on the manifold can be a useful approach that is also discussed here. As in part 1, a model with one latent variable $\mathbf{t}_i$ per observation $\mathbf{x}_i$ is used but now the latent variables are continuous rather than discrete variables. Therefore, summations over latent variable states are now replaced by integrals and these are often intractable for more complex models.
Observations i.e. images $\mathbf{X} = \left\{ \mathbf{x}_1, \ldots, \mathbf{x}_N \right\}$ are again described with a probabilistic model $p(\mathbf{x} \lvert \boldsymbol{\theta})$. Goal is to maximize the data likelihood $p(\mathbf{X} \lvert \boldsymbol{\theta})$ w.r.t. $\boldsymbol{\theta}$ and to obtain approximate posterior distributions over continuous latent variables. The joint distribution over an observed variable $\mathbf{x}$ and a latent variable $\mathbf{t}$ is defined as the product of the conditional distribution over $\mathbf{x}$ given $\mathbf{t}$ and the prior distribution over $\mathbf{t}$.
$$
p(\mathbf{x}, \mathbf{t} \lvert \boldsymbol{\theta}) = p(\mathbf{x} \lvert \mathbf{t}, \boldsymbol{\theta}) p(\mathbf{t} \lvert \boldsymbol{\theta})
\tag{1}
$$
We obtain the marginal distribution over x by integrating over t.
$$
p(\mathbf{x} \lvert \boldsymbol{\theta}) = \int p(\mathbf{x} \lvert \mathbf{t}, \boldsymbol{\theta}) p(\mathbf{t} \lvert \boldsymbol{\theta}) d\mathbf{t}
\tag{2}
$$
This integral is usually intractable for even moderately complex conditional probabilities $p(\mathbf{x} \lvert \mathbf{t}, \boldsymbol{\theta})$ and consequently also the true posterior.
$$
p(\mathbf{t} \lvert \mathbf{x}, \boldsymbol{\theta}) = {{p(\mathbf{x} \lvert \mathbf{t}, \boldsymbol{\theta}) p(\mathbf{t} \lvert \boldsymbol{\theta})} \over {p(\mathbf{x} \lvert \boldsymbol{\theta})}}
\tag{3}
$$
This means that the E-step of the EM algorithm becomes intractable. Recall from part 1 that the lower bound of the log marginal likelihood is given by
$$
\mathcal{L}(\boldsymbol{\theta}, q) = \log p(\mathbf{X} \lvert \boldsymbol{\theta}) - \mathrm{KL}(q(\mathbf{T} \lvert \mathbf{X}) \mid\mid p(\mathbf{T} \lvert \mathbf{X}, \boldsymbol{\theta}))
\tag{4}
$$
In the E-step, the lower bound is maximized w.r.t. $q$ and $\boldsymbol{\theta}$ is held fixed. If the true posterior is tractable, we can set $q$ to the true posterior so that the KL divergence becomes $0$ which maximizes the lower bound for the current value of $\boldsymbol{\theta}$. If the true posterior is intractable approximations must be used.
Here, we will use *stochastic variational inference*, a Bayesian inference method that also scales to large datasets<sup>[1]</sup>. Numerous other approximate inference approaches exist but these are not discussed here to keep the article focused.
## Stochastic variational inference
The field of mathematics that covers the optimization of a functional w.r.t. a function, like ${\mathrm{argmax}}_q \mathcal{L}(\boldsymbol{\theta}, q)$ in our example, is the [calculus of variations](https://en.wikipedia.org/wiki/Calculus_of_variations), hence the name *variational inference*. In this context, $q$ is called a *variational distribution* and $\mathcal{L}(\boldsymbol{\theta}, q)$ a *variational lower bound*.
We will approximate the true posterior with a parametric variational distribution $q(\mathbf{t} \lvert \mathbf{x}, \boldsymbol{\phi})$ and try to find a value of $\boldsymbol{\phi}$ that minimizes the KL divergence between this distribution and the true posterior. Using $q(\mathbf{t} \lvert \mathbf{x}, \boldsymbol{\phi})$ we can formulate the variational lower bound for a single observation $\mathbf{x}_i$ as
$$
\begin{align*}
\mathcal{L}(\boldsymbol{\theta}, q; \mathbf{x}_i) &=
\log p(\mathbf{x}_i \lvert \boldsymbol{\theta}) - \mathrm{KL}(q(\mathbf{t}_i \lvert \mathbf{x}_i, \boldsymbol{\phi}) \mid\mid p(\mathbf{t}_i \lvert \mathbf{x}_i, \boldsymbol{\theta})) \\ &=
\log p(\mathbf{x}_i \lvert \boldsymbol{\theta}) - \int q(\mathbf{t}_i \lvert \mathbf{x}_i, \boldsymbol{\phi}) \log {{q(\mathbf{t}_i \lvert \mathbf{x}_i, \boldsymbol{\phi})} \over {p(\mathbf{t}_i \lvert \mathbf{x}_i, \boldsymbol{\theta})}} d\mathbf{t}_i \\ &=
\int q(\mathbf{t}_i \lvert \mathbf{x}_i, \boldsymbol{\phi}) \log {{p(\mathbf{x}_i \lvert \boldsymbol{\theta}) p(\mathbf{t}_i \lvert \mathbf{x}_i, \boldsymbol{\theta})} \over {q(\mathbf{t}_i \lvert \mathbf{x}_i, \boldsymbol{\phi})}} d\mathbf{t}_i \\ &=
\int q(\mathbf{t}_i \lvert \mathbf{x}_i, \boldsymbol{\phi}) \log {{p(\mathbf{x}_i \lvert \mathbf{t}_i, \boldsymbol{\theta}) p(\mathbf{t}_i \lvert \boldsymbol{\theta})} \over {q(\mathbf{t}_i \lvert \mathbf{x}_i, \boldsymbol{\phi})}} d\mathbf{t}_i \\ &=
\int q(\mathbf{t}_i \lvert \mathbf{x}_i, \boldsymbol{\phi}) \log p(\mathbf{x}_i \lvert \mathbf{t}_i, \boldsymbol{\theta}) d\mathbf{t}_i - \mathrm{KL}(q(\mathbf{t}_i \lvert \mathbf{x}_i, \boldsymbol{\phi}) \mid\mid p(\mathbf{t}_i \lvert \boldsymbol{\theta})) \\ &=
\mathbb{E}_{q(\mathbf{t}_i \lvert \mathbf{x}_i, \boldsymbol{\phi})} \log p(\mathbf{x}_i \lvert \mathbf{t}_i, \boldsymbol{\theta}) - \mathrm{KL}(q(\mathbf{t}_i \lvert \mathbf{x}_i, \boldsymbol{\phi}) \mid\mid p(\mathbf{t}_i \lvert \boldsymbol{\theta}))
\end{align*}
\tag{5}
$$
We assume that the integral $\int q(\mathbf{t}_i \lvert \mathbf{x}_i, \boldsymbol{\phi}) \log p(\mathbf{x}_i \lvert \mathbf{t}_i, \boldsymbol{\theta}) d\mathbf{t}_i$ is intractable but we can choose a functional form of $q(\mathbf{t}_i \lvert \mathbf{x}_i, \boldsymbol{\phi})$ from which we can easily sample so that the expectation of $\log p(\mathbf{x}_i \lvert \mathbf{t}_i, \boldsymbol{\theta})$ w.r.t. to $q(\mathbf{t}_i \lvert \mathbf{x}_i, \boldsymbol{\phi})$ can be approximated with $L$ samples from $q$.
$$
\mathcal{L}(\boldsymbol{\theta}, q; \mathbf{x}_i) \approx {1 \over L} \sum_{l=1}^L \log p(\mathbf{x}_i \lvert \mathbf{t}_{i,l}, \boldsymbol{\theta}) - \mathrm{KL}(q(\mathbf{t}_i \lvert \mathbf{x}_i, \boldsymbol{\phi}) \mid\mid p(\mathbf{t}_i \lvert \boldsymbol{\theta}))
\tag{6}
$$
where $\mathbf{t}_{i,l} \sim q(\mathbf{t}_i \lvert \mathbf{x}_i, \boldsymbol{\phi})$. We will also choose the functional form of $q(\mathbf{t}_i \lvert \mathbf{x}_i, \boldsymbol{\phi})$ and $p(\mathbf{t}_i \lvert \boldsymbol{\theta})$ such that integration of the KL divergence can be done analytically, hence, no samples are needed to evaluate the KL divergence. With these choices, an approximate evaluation of the variational lower bound is possible. But in order to optimize the lower bound w.r.t. $\boldsymbol{\theta}$ and $\boldsymbol{\phi}$ we need to approximate the gradients w.r.t. these parameters.
### Stochastic gradients
We first assume that the analytical expression of the KL divergence is differentiable w.r.t. $\boldsymbol{\phi}$ and $\boldsymbol{\theta}$ so that deterministic gradients can be computed. The gradient of the first term on the RHS of Eq. $(5)$ w.r.t. $\boldsymbol{\theta}$ is
$$
\nabla_{\boldsymbol{\theta}} \mathbb{E}_{q(\mathbf{t}_i \lvert \mathbf{x}_i, \boldsymbol{\phi})} \log p(\mathbf{x}_i \lvert \mathbf{t}_i, \boldsymbol{\theta}) =
\mathbb{E}_{q(\mathbf{t}_i \lvert \mathbf{x}_i, \boldsymbol{\phi})} \nabla_{\boldsymbol{\theta}} \log p(\mathbf{x}_i \lvert \mathbf{t}_i, \boldsymbol{\theta})
\tag{7}
$$
Here, $\nabla_{\boldsymbol{\theta}}$ could be moved inside the expectation as $q(\mathbf{t}_i \lvert \mathbf{x}_i, \boldsymbol{\phi})$ doesn't depend on $\boldsymbol{\theta}$. Assuming that $p(\mathbf{x}_i \lvert \mathbf{t}_i, \boldsymbol{\theta})$ is differentiable w.r.t. $\boldsymbol{\theta}$, unbiased estimates of the gradient can be obtained by sampling from $q(\mathbf{t}_i \lvert \mathbf{x}_i, \boldsymbol{\phi})$.
$$
\nabla_{\boldsymbol{\theta}} \mathbb{E}_{q(\mathbf{t}_i \lvert \mathbf{x}_i, \boldsymbol{\phi})} \log p(\mathbf{x}_i \lvert \mathbf{t}_i, \boldsymbol{\theta}) \approx
{1 \over L} \sum_{l=1}^L \nabla_{\boldsymbol{\theta}} \log p(\mathbf{x}_i \lvert \mathbf{t}_{i,l}, \boldsymbol{\theta})
\tag{8}
$$
We will later implement $p(\mathbf{x}_i \lvert \mathbf{t}_i, \boldsymbol{\theta})$ as neural network and use Tensorflow to compute $\nabla_{\boldsymbol{\theta}} \log p(\mathbf{x}_i \lvert \mathbf{t}_{i,l}, \boldsymbol{\theta})$. The gradient w.r.t. $\boldsymbol{\phi}$ is a bit more tricky as $\nabla_{\boldsymbol{\phi}}$ cannot be moved inside the expectation because $q(\mathbf{t}_i \lvert \mathbf{x}_i, \boldsymbol{\phi})$ depends on $\boldsymbol{\phi}$. But if we can decompose $q(\mathbf{t}_i \lvert \mathbf{x}_i, \boldsymbol{\phi})$ into an auxiliary distribution $p(\boldsymbol\epsilon)$ that doesn't depend on $\boldsymbol{\phi}$ and a deterministic, differentiable function $g(\boldsymbol\epsilon, \mathbf{x}, \boldsymbol{\phi})$ where $\mathbf{t}_i = g(\boldsymbol\epsilon, \mathbf{x}_i, \boldsymbol{\phi})$ and $\boldsymbol\epsilon \sim p(\boldsymbol\epsilon)$ then we can re-formulate the gradient w.r.t. $\boldsymbol{\phi}$ as
$$
\begin{align*}
\nabla_{\boldsymbol{\phi}} \mathbb{E}_{q(\mathbf{t}_i \lvert \mathbf{x}_i, \boldsymbol{\phi})} \log p(\mathbf{x}_i \lvert \mathbf{t}_i, \boldsymbol{\theta}) &=
\nabla_{\boldsymbol{\phi}} \mathbb{E}_{p(\boldsymbol\epsilon)} \log p(\mathbf{x}_i \lvert g(\boldsymbol\epsilon, \mathbf{x}_i, \boldsymbol{\phi}), \boldsymbol{\theta}) \\ &=
\mathbb{E}_{p(\boldsymbol\epsilon)} \nabla_{\boldsymbol{\phi}} \log p(\mathbf{x}_i \lvert g(\boldsymbol\epsilon, \mathbf{x}_i, \boldsymbol{\phi}), \boldsymbol{\theta})
\tag{9}
\end{align*}
$$
Unbiased estimates of the gradient w.r.t. $\boldsymbol{\phi}$ can then be obtained by sampling from $p(\boldsymbol\epsilon)$.
$$
\nabla_{\boldsymbol{\phi}} \mathbb{E}_{q(\mathbf{t}_i \lvert \mathbf{x}_i, \boldsymbol{\phi})} \log p(\mathbf{x}_i \lvert \mathbf{t}_i, \boldsymbol{\theta}) \approx
{1 \over L} \sum_{l=1}^L \nabla_{\boldsymbol{\phi}} \log p(\mathbf{x}_i \lvert \mathbf{t}_{i,l}, \boldsymbol{\theta})
\tag{10}
$$
where $\mathbf{t}_{i,l} = g(\boldsymbol\epsilon_l, \mathbf{x}_i, \boldsymbol{\phi})$ and $\boldsymbol\epsilon_l \sim p(\boldsymbol\epsilon)$. This so-called *reparameterization trick* can be applied to a wide range of probability distributions, including Gaussian distributions. Furthermore, stochastic gradients w.r.t. $\boldsymbol{\phi}$ obtained with this trick have much smaller variance than those obtained with alternative approaches (not shown here).
### Mini-batches
The above approximations for the variational lower bound and its gradients have been formulated for a single training example $\mathbf{x}_i$ but this can be easily extended to mini-batches $\mathbf{X}^M = \left\{ \mathbf{x}_1, \ldots, \mathbf{x}_M \right\}$ with $M$ random samples from a dataset $\mathbf{X}$ of $N$ i.i.d. observations. The lower bound of the full dataset $\mathcal{L}(\boldsymbol{\theta}, q; \mathbf{X})$ can then be approximated as
$$
\begin{align*}
\mathcal{L}(\boldsymbol{\theta}, q; \mathbf{x}_M) &\approx
{N \over M} \sum_{i=1}^M \mathcal{L}(\boldsymbol{\theta}, q; \mathbf{x}_i) \\ &=
\mathcal{L}^M(\boldsymbol{\theta}, q; \mathbf{X}^M)
\tag{11}
\end{align*}
$$
Gradients of $\mathcal{L}^M(\boldsymbol{\theta}, q; \mathbf{X}^M)$ can be obtained as described above together with averaging over the mini-batch and used in combination with optimizers like Adam, for example, to update the parameters of the latent variable model. Sampling from the variational distribution $q$ and usage of mini-batches leads to noisy gradients, hence the term *stochastic variational inference*.
If $M$ is sufficiently large, for example $M = 100$, then $L$ can be even set to $1$ i.e. a single sample from the variational distribution per training example is sufficient to get a good gradient estimate on average.
## Variational autoencoder
From the perspective of a generative model, $q(\mathbf{t}_i \lvert \mathbf{x}_i, \boldsymbol{\phi})$ is a probabilistic *encoder* because it generates a *latent code* $\mathbf{t}_i$ for input image $\mathbf{x}_i$ and $p(\mathbf{x}_i \lvert \mathbf{t}_i, \boldsymbol{\theta})$ is a probabilistic *decoder* because it generates or reconstructs an image $\mathbf{x}_i$ from latent code $\mathbf{t}_i$. Optimizing the variational lower bound w.r.t. parameters $\boldsymbol{\theta}$ and $\boldsymbol{\phi}$ can therefore be regarded as training a probabilistic autoencoder or *variational autoencoder* (VAE)<sup>[1]</sup>.
In this context, the first term on the RHS of Eq. $(5)$ can be interpreted as expected negative *reconstruction error*. The second term is a *regularization term* that encourages the variational distribution to be close to the prior over latent variables. If the regularization term is omitted, the variational distribution would collapse to a delta function and the variational auto-encoder would degenerate to a "usual" deterministic autoencoder.
### Implementation
For implementing a variational autoencoder, we make the following choices:
- The variational distribution $q(\mathbf{t}_i \lvert \mathbf{x}_i, \boldsymbol{\phi})$ is a multivariate Gaussian $\mathcal{N}(\mathbf{t}_i \lvert \boldsymbol\mu(\mathbf{x}_i, \boldsymbol{\phi}), \boldsymbol\sigma^2(\mathbf{x}_i, \boldsymbol{\phi}))$ with a diagonal covariance matrix where mean vector $\boldsymbol\mu$ and the covariance diagonal $\boldsymbol\sigma^2$ are functions of $\mathbf{x}_i$ and $\boldsymbol{\phi}$. These functions are implemented as neural network and learned during optimization of the lower bound w.r.t. $\boldsymbol{\phi}$. After reparameterization, samples from $q(\mathbf{t}_i \lvert \mathbf{x}_i, \boldsymbol{\phi})$ are obtained via the deterministic function $g(\boldsymbol\epsilon, \mathbf{x}_i, \boldsymbol{\phi}) = \boldsymbol\mu(\mathbf{x}_i, \boldsymbol{\phi}) + \boldsymbol\sigma^2(\mathbf{x}_i, \boldsymbol{\phi}) \odot \boldsymbol\epsilon$ and an auxiliary distribution $p(\boldsymbol\epsilon) = \mathcal{N}(\boldsymbol\epsilon \lvert \mathbf{0}, \mathbf{I})$.
- The conditional distribution $p(\mathbf{x}_i \lvert \mathbf{t}_i, \boldsymbol{\theta})$ is a multivariate Bernoulli distribution $\text{Ber}(\mathbf{x}_i \lvert \mathbf{k}(\mathbf{t}_i, \boldsymbol{\theta}))$ where parameter $\mathbf{k}$ is a function of $\mathbf{t}_i$ and $\boldsymbol{\theta}$. This distribution models the binary training data i.e. monochrome (= binarized) MNIST images in our example. Function $\mathbf{k}$ computes for each pixel its expected value. It is also implemented as neural network and learned during optimization of the lower bound w.r.t. $\boldsymbol{\theta}$. Taking the (negative) logarithm of $\text{Ber}(\mathbf{x}_i \lvert \mathbf{k}(\mathbf{t}_i, \boldsymbol{\theta}))$ gives a sum over pixel-wise binary cross entropies as shown in Eq. $(12)$
- Prior $p(\mathbf{t}_i \lvert \boldsymbol{\theta})$ is a multivariate Gaussian distribution $\mathcal{N}(\mathbf{t}_i \lvert \mathbf{0}, \mathbf{I})$ with zero mean and unit covariance matrix. With the chosen functional forms of the prior and the variational distribution $q$, $\mathrm{KL}(q(\mathbf{t}_i \lvert \mathbf{x}_i, \boldsymbol{\phi}) \mid\mid p(\mathbf{t}_i \lvert \boldsymbol{\theta}))$ can be integrated analytically to $-{1 \over 2} \sum_{d=1}^D (1 + \log \sigma_{i,d}^2 - \mu_{i,d}^2 - \sigma_{i,d}^2)$ where $D$ is the dimensionality of the latent space and $\mu_{i,d}$ and $\sigma_{i,d}$ is the $d$-th element of $\boldsymbol\mu(\mathbf{x}_i, \boldsymbol{\phi})$ and $\boldsymbol\sigma(\mathbf{x}_i, \boldsymbol{\phi})$, respectively.
Using these choices and setting $L = 1$, the variational lower bound for a single image $\mathbf{x}_i$ can be approximated as
$$
\mathcal{L}(\boldsymbol{\theta}, q; \mathbf{x}_i) \approx
- \sum_c \left(x_{i,c} \log k_{i,c} + (1 - x_{i,c}) \log (1 - k_{i,c})\right) + {1 \over 2} \sum_d (1 + \log \sigma_{i,d}^2 - \mu_{i,d}^2 - \sigma_{i,d}^2)
\tag{12}
$$
where $x_{i,c}$ is the value of pixel $c$ in image $\mathbf{x}_i$ and $k_{i,c}$ its expected value. The negative value of the lower bound is used as loss during training. The following figure outlines the architecture of the variational autoencoder.

The definitions of the encoder and decoder neural networks were taken from \[2\]. Here, the encoder computes the logarithm of the variance, instead of the variance directly, for reasons of numerical stability.
```
from tensorflow.keras import layers
from tensorflow.keras.models import Model
def create_encoder(latent_dim):
    """
    Builds the convolutional encoder network for 28x28x1 MNIST images.

    Args:
        latent_dim: dimensionality of latent space.

    Returns:
        Keras model mapping an image batch to the mean and log variance
        of the variational distribution q.
    """
    encoder_input = layers.Input(shape=(28, 28, 1))
    h = layers.Conv2D(32, 3, padding='same', activation='relu')(encoder_input)
    h = layers.Conv2D(64, 3, padding='same', activation='relu', strides=(2, 2))(h)
    h = layers.Conv2D(64, 3, padding='same', activation='relu')(h)
    h = layers.Conv2D(64, 3, padding='same', activation='relu')(h)
    h = layers.Flatten()(h)
    h = layers.Dense(32, activation='relu')(h)
    q_mean = layers.Dense(latent_dim)(h)
    q_log_var = layers.Dense(latent_dim)(h)
    return Model(encoder_input, [q_mean, q_log_var], name='encoder')
def create_decoder(latent_dim):
    """
    Builds the convolutional decoder network that maps a latent code to
    28x28x1 Bernoulli probabilities (expected pixel values).

    Args:
        latent_dim: dimensionality of latent space.
    """
    latent_input = layers.Input(shape=(latent_dim,))
    h = layers.Dense(12544, activation='relu')(latent_input)
    h = layers.Reshape((14, 14, 64))(h)
    h = layers.Conv2DTranspose(32, 3, padding='same', activation='relu', strides=(2, 2))(h)
    probabilities = layers.Conv2D(1, 3, padding='same', activation='sigmoid')(h)
    return Model(latent_input, probabilities, name='decoder')
```
These definitions are used to implement a `VariationalAutoencoder` model class.
```
import tensorflow as tf
class VariationalAutoencoder(Model):
    # Probabilistic autoencoder: the encoder parameterizes the variational
    # distribution q(t|x), the decoder parameterizes p(x|t).
    def __init__(self, latent_dim=2):
        """
        Creates a variational autoencoder Keras model.
        Args:
            latent_dim: dimensionality of latent space.
        """
        super().__init__()
        self.latent_dim = latent_dim
        self.encoder = create_encoder(latent_dim)
        self.decoder = create_decoder(latent_dim)
    def encode(self, x):
        """
        Computes variational distribution q statistics from
        input image x.
        Args:
            x: input image, shape (M, 28, 28, 1).
        Returns:
            Mean, shape (M, latent_dim), and log variance,
            shape (M, latent_dim), of multivariate Gaussian
            distribution q.
        """
        q_mean, q_log_var = self.encoder(x)
        return q_mean, q_log_var
    def sample(self, q_mean, q_log_var):
        """
        Samples latent code from variational distribution q.
        Args:
            q_mean: mean of q, shape (M, latent_dim).
            q_log_var: log variance of q, shape (M, latent_dim).
        Returns:
            Latent code sample, shape (M, latent_dim).
        """
        # Reparameterization trick: t = mu + sigma * eps with
        # eps ~ N(0, I), so gradients can flow through mu and sigma.
        eps = tf.random.normal(shape=q_mean.shape)
        # exp(log_var * 0.5) is the standard deviation sigma
        return q_mean + tf.exp(q_log_var * .5) * eps
    def decode(self, t):
        """
        Computes expected pixel values (= probabilities k) from
        latent code t.
        Args:
            t: latent code, shape (M, latent_dim).
        Returns:
            Probabilities k of multivariate Bernoulli
            distribution p, shape (M, 28, 28, 1).
        """
        k = self.decoder(t)
        return k
    def call(self, x):
        """
        Computes expected pixel values (= probabilities k) of a
        reconstruction of input image x.
        Args:
            x: input image, shape (M, 28, 28, 1).
        Returns:
            Probabilities k of multivariate Bernoulli
            distribution p, shape (M, 28, 28, 1).
        """
        # encode -> sample one latent code -> decode
        q_mean, q_log_var = self.encode(x)
        t = self.sample(q_mean, q_log_var)
        return self.decode(t)
```
The `variational_lower_bound` function is implemented using Eq. $(12)$ and Eq. $(11)$ but instead of estimating the lower bound for the full dataset it is normalized by the dataset size $N$.
```
from tensorflow.keras.losses import binary_crossentropy
def variational_lower_bound(model, x):
    """
    Computes normalized variational lower bound.
    Args:
        model: VariationalAutoencoder instance.
        x: input images, shape (M, 28, 28, 1)
    Returns:
        Variational lower bound averaged over M
        samples in batch and normalized by dataset
        size N.
    """
    q_mean, q_log_var = model.encode(x)
    t = model.sample(q_mean, q_log_var)
    x_rc = model.decode(t)
    # Expected negative reconstruction error
    # (per-pixel binary cross entropies summed, single MC sample L=1)
    rc_error = -tf.reduce_sum(binary_crossentropy(x, x_rc), axis=[1, 2])
    # Regularization term (KL divergence), integrated analytically for
    # Gaussian q and standard-normal prior
    kl_div = 0.5 * tf.reduce_sum(1 + q_log_var \
                                 - tf.square(q_mean) \
                                 - tf.exp(q_log_var), axis=-1)
    # Average over mini-batch (of size M)
    return tf.reduce_mean(rc_error + kl_div)
```
The training procedure uses the negative value of the variational lower bound as loss to compute stochastic gradient estimates. These are used by the `optimizer` to update model parameters $\boldsymbol\theta$ and $\boldsymbol\phi$. The normalized variational lower bound of the test set is computed at the end of each epoch and printed.
```
@tf.function
def train_step(model, optimizer, x):
    """Trains VAE on mini-batch x using optimizer.
    Returns:
        Scalar loss (negative variational lower bound) for the batch.
    """
    with tf.GradientTape() as tape:
        # Compute neg. variational lower bound as loss
        loss = -variational_lower_bound(model, x)
    # Compute gradients from neg. variational lower bound
    gradients = tape.gradient(loss, model.trainable_variables)
    # Apply gradients to model parameters theta and phi
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
    return loss
def train(model, optimizer, ds_train, ds_test, epochs):
    """Trains VAE on training dataset ds_train using optimizer for the
    given number of epochs; after each epoch the normalized variational
    lower bound on ds_test is printed.
    """
    for epoch in range(1, epochs + 1):
        # One optimization step per training mini-batch.
        for batch in ds_train:
            train_step(model, optimizer, batch)
        # Evaluate the normalized VLB on the held-out set.
        test_vlb = tf.keras.metrics.Mean()
        for batch in ds_test:
            test_vlb(variational_lower_bound(model, batch))
        print(f'Epoch: {epoch:02d}, Test set VLB: {test_vlb.result():.2f}')
```
Since the data are modelled with a multivariate Bernoulli distribution, the MNIST images are first binarized to monochrome images so that their pixel values are either 0 or 1. The training batch size is set to 100 to get reliable stochastic gradient estimates.
```
from tensorflow.keras.datasets import mnist
(x_train, _), (x_test, y_test) = mnist.load_data()
# Binarize pixels to {0, 1} so the data match the Bernoulli decoder model.
x_train = (x_train > 127.5).astype('float32') # binarize
x_train = x_train.reshape(-1, 28, 28, 1)
x_test = (x_test > 127.5).astype('float32') # binarize
x_test = x_test.reshape(-1, 28, 28, 1)
# M = 100 gives reliable stochastic gradient estimates with L = 1.
batch_size = 100
ds_train = tf.data.Dataset.from_tensor_slices(x_train).shuffle(x_train.shape[0]).batch(batch_size)
ds_test = tf.data.Dataset.from_tensor_slices(x_test).shuffle(x_test.shape[0]).batch(batch_size)
```
We choose a two-dimensional latent space so that it can be easily visualized. Training the variational autoencoder with `RMSProp` as optimizer at a learning rate of `1e-3` for 20 epochs gives already reasonable results. This takes a few minutes on a single GPU.
```
# Two latent dimensions so the latent space can be visualized directly.
vae = VariationalAutoencoder(latent_dim=2)
# 'learning_rate' replaces the deprecated 'lr' argument alias
# (removed in recent Keras releases).
opt = tf.keras.optimizers.RMSprop(learning_rate=1e-3)
train(model=vae,
      optimizer=opt,
      ds_train=ds_train,
      ds_test=ds_test,
      epochs=20)
```
The following figure shows the locations of test set images in latent space. Here, the mean vectors of the variational distributions are plotted. The latent space is organized by structural similarity of digits i.e. structurally similar digits have a smaller distance in latent space than structurally dissimilar digits. For example, digits 4 and 9 usually differ only by a horizontal bar or curve at the top of the image and are therefore in proximity.
```
import matplotlib.pyplot as plt
%matplotlib inline
# Compute mean vectors of variational distributions (= latent code locations)
q_test_mean, _ = vae.encode(x_test)
# Use a discrete colormap
cmap = plt.get_cmap('viridis', 10)
# Plot latent code locations colored by the digit value on input images.
# The half-unit vmin/vmax center each of the 10 colors on its digit value.
im = plt.scatter(q_test_mean[:, 0], q_test_mean[:, 1], c=y_test, cmap=cmap,
                 vmin=-0.5, vmax=9.5, marker='x', s=0.2)
plt.colorbar(im, ticks=range(10));
```
When we sample locations in latent space (with density proportional to the prior density over latent variables) and decode these locations we can get a nice overview how MNIST digits are organized by structural similarity in latent space. Each digit is plotted with its expected pixel values k instead of using a sample from the corresponding multivariate Bernoulli distribution.
```
import numpy as np
from scipy.stats import norm
# Number of samples per latent space dimension
samples_per_dim = 20
# Size of plotted digits
digit_size = 28
# Sampling grid coordinates. Grid point density is proportional to the
# density of the latent variable prior: norm.ppf maps a uniform grid
# through the inverse Gaussian CDF.
grid_x = norm.ppf(np.linspace(0.05, 0.95, samples_per_dim))
grid_y = norm.ppf(np.linspace(0.05, 0.95, samples_per_dim))
# Canvas holding samples_per_dim x samples_per_dim decoded digits.
figure = np.zeros((digit_size * samples_per_dim,
                   digit_size * samples_per_dim))
for i, x in enumerate(grid_x):
    for j, y in enumerate(grid_y):
        # Decode one latent-space location into expected pixel values.
        t_ij = np.array([[x, y]])
        x_ij = vae.decode(t_ij)
        digit = x_ij.numpy().reshape(digit_size, digit_size)
        figure[j * digit_size: (j + 1) * digit_size,
               i * digit_size: (i + 1) * digit_size] = digit
plt.figure(figsize=(10, 10))
plt.imshow(figure, cmap='Greys_r');
```
## References
\[1\] Diederik P. Kingma, Max Welling [Auto-Encoding Variational Bayes](https://arxiv.org/abs/1312.6114).
\[2\] François Chollet. [Deep Learning with Python](https://www.manning.com/books/deep-learning-with-python).
| github_jupyter |
<div align='center'>
<p>You can open this file in <b>Google Colab</b></p>
<a href="https://colab.research.google.com/github/vstark21/Neural_Style_Transfer/blob/master/main.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg"></a>
</div>
First lets import our modules.
```
from main_utils import *
import time
import IPython.display as display
```
Now lets get our data.
```
# Load the content and style images (preprocessing done by main_utils.get_data).
content_image, style_image = get_data("photo.jpg", "style.jpg")
# Keep an unmodified copy: it seeds the optimizer later, after the original
# array is wrapped into a graph constant.
content_image_copy = content_image.copy()
```
Create a class and function to get loss and gradients to optimize.
```
# Evaluator class for optimizing
class Evaluator:
    """Cache loss and gradients from a single joint evaluation.

    scipy's fmin_l_bfgs_b asks for the loss and the gradient through two
    separate callbacks; loss_and_grads computes both at once, so loss()
    caches the gradient for the grads() call that follows.
    """

    def __init__(self):
        # Sentinels so a grads() call without a preceding loss() call
        # fails loudly instead of raising a confusing AttributeError
        # (the original class had no __init__ at all).
        self.loss_value = None
        self.grad_values = None

    def loss(self, x):
        """Evaluate and cache loss and gradients for flat image vector x."""
        loss_value, grad_values = loss_and_grads(x)
        self.loss_value = loss_value
        self.grad_values = grad_values
        return self.loss_value

    def grads(self, x):
        """Return the gradients cached by the preceding loss() call."""
        if self.grad_values is None:
            raise RuntimeError("Evaluator.grads() called before Evaluator.loss()")
        grad_values = np.copy(self.grad_values)
        # Invalidate the cache so loss()/grads() stay paired in lockstep.
        self.loss_value = None
        self.grad_values = None
        return grad_values
# This returns loss and gradients
def loss_and_grads(generated):
    """Evaluate total loss and its gradient w.r.t. the generated image.

    `generated` arrives as a flat vector (that is what fmin_l_bfgs_b
    works with) and is reshaped to the (1, 512, 512, 3) batch the Keras
    graph expects. Relies on the module-level `f_outputs` K.function
    defined below, which returns [loss, gradients].
    """
    generated = generated.reshape((1, 512, 512, 3))
    outs = f_outputs([generated])
    loss_value = outs[0]
    # L-BFGS-B requires a flat float64 gradient vector.
    grad_values = np.array(outs[1]).flatten().astype('float64')
    return loss_value, grad_values
```
Now lets create variables and constant in the computational graph.
```
# Disabling eager execution to use the K.gradients function
tf.compat.v1.disable_eager_execution()

# Creating variables and constant in the computational flow graph
content_image = K.constant(content_image)
style_image = K.constant(style_image)
# Placeholder for the image being optimized: batch of one 512x512 RGB image.
generated_image = K.placeholder((1, 512, 512, 3))
# NOTE(review): this initial value is never used -- `loss` is immediately
# rebound to the tensor returned by total_loss() a few lines below.
loss = K.variable(0.)

# Get the layers needed to prepare the loss metric
content_layer, style_layers = get_layers(content_image, style_image, generated_image)

# Define loss and gradient
loss = total_loss(content_layer, style_layers, generated_image)

# Define a function to get loss and gradients
f_outputs = K.function([generated_image], [loss, K.gradients(loss, generated_image)])
```
And now we start optimization process.
```
# Creating evaluator object
evaluator = Evaluator()
# Assigning content image to generated image: optimization starts from the
# content photo rather than from random noise.
generated_img = content_image_copy

for i in range(20):
    print('In iteration no:', i)
    start_time = time.time()
    # One L-BFGS-B round; loss and gradient callbacks share the single
    # cached evaluation inside Evaluator (maxfun caps function evaluations).
    generated_img, min_val, info = fmin_l_bfgs_b(evaluator.loss, generated_img.flatten(),
                                                 fprime=evaluator.grads, maxfun=300)
    # Save an intermediate snapshot every 5 iterations.
    if i % 5 == 0:
        generated_image = generated_img.copy()
        generated_image = generated_image.reshape(512, 512, 3)
        # Pixel values drift outside [0, 255] during optimization; clip
        # before the uint8 cast so the saved image is valid.
        generated_image = np.clip(generated_image, 0, 255).astype('uint8')
        cv2.imwrite("result-16 " + str(i) + ".jpg", generated_image)
    end_time = time.time()
    display.clear_output(wait=True)
    print(f'Loss: {round(min_val / 10**6, 3)} x 10^6')
    print(f'Iteration {i} took {end_time - start_time} seconds')
```
| github_jupyter |
# Using `ChemicalEnvironments`
Chemical Environments were created as a way to parse SMIRKS strings and make changes in chemical perception space.
In this workbook, we will show how chemical environments are initiated and used to make changes to a SMIRKS pattern.
**Authors**
* Caitlin C. Bannan from Mobley Group at University of California, Irvine
**Basic Structure of `ChemicalEnvironments`**
`ChemicalEnvironments` are initiated with the following input variables:
* smirks = any SMIRKS string (if None an empty environment is created)
* label = this could be anything, a number/str/int, stored at ChemicalEnvironment.label
* replacements = This is a list of two tuples in the form (short, smirks) to substitute short hand in your SMIRKS strings. This is used to check if your input SMIRKS string or created Chemical Environment are Valid.
**SMIRKS Strings**
Here we use the word SMIRKS to mean SMARTS patterns with indexed atoms, we are not using Chemical Environments to parse SMIRKS strings that describe reactions.
That means these SMIRKS patterns should not contain multiple molecules (`'.'`) or reaction arrows (`'>>'`).
Here we will try to explain the SMIRKS patterns used here, but SMARTS and SMIRKS are a complex language.
SMARTS/SMIRKS strings are similar to SMILES strings with increased complexity.
For more details about this language see the Daylight tutorials:
* [SMILES](http://www.daylight.com/dayhtml_tutorials/languages/smiles/index.html)
* [SMARTS](http://www.daylight.com/dayhtml/doc/theory/theory.smarts.html)
* [SMIRKS](http://www.daylight.com/dayhtml_tutorials/languages/smirks/index.html)
```
# import necessary functions
from openff.toolkit.typing.chemistry import environment as env
from openeye import oechem
```
## Default Chemical Environments
All Chemical Environments can be initiated using SMIRKS strings.
If a `ChemicalEnvironment` is initiated with no SMIRKS pattern, it is an empty structure.
However, there are 5 subtypes of `ChemicalEnvironments` that match the types of parameters found in the SMIRNOFF format.
If they are initiated with no SMIRKS pattern, their structure matches a generic for that parameter type, for example `[*:1]~[*:2]` for a bond (that is any atom connected to any other atom by any bond).
The 5 subtypes are listed below with their expected number of indexed atoms and the corresponding SMIRKS structure:
* `AtomChemicalEnvironment`
- expects 1 indexed atom
- default/generic SMIRKS `"[*:1]"`
* `BondChemicalEnvironment`
- expects 2 indexed atoms
- default/generic SMIRKS: `"[*:1]~[*:2]"`
* `AngleChemicalEnvironment`
- expects 3 indexed atoms
- default/generic SMIRKS: `"[*:1]~[*:2]~[*:3]"`
* `TorsionChemicalEnvironment`
- expects 4 indexed atoms in a proper dihedral angle
- default/generic SMIRKS: `"[*:1]~[*:2]~[*:3]~[*:4]"`
* `ImproperChemicalEnvironment`
- expects 4 indexed atoms in an improper dihedral angle
- default/generic SMIRKS: `"[*:1]~[*:2](~[*:3])~[*:4]"`
Here we show how these are initiated. Note that the generic environment is blank, it has the potential to become a SMIRKS pattern, but currently nothing is stored in it. While the subtypes have the shape described above, but wildcards (`'*'` for atoms and `'~'` for bonds).
```
# NBVAL_SKIP
# Build one environment of each kind -- the generic (empty) environment
# plus the five SMIRNOFF parameter subtypes -- and print their default SMIRKS.
Env = env.ChemicalEnvironment()
atomEnv = env.AtomChemicalEnvironment()
bondEnv = env.BondChemicalEnvironment()
angleEnv = env.AngleChemicalEnvironment()
torsionEnv = env.TorsionChemicalEnvironment()
impropEnv = env.ImproperChemicalEnvironment()
EnvList = [Env, atomEnv, bondEnv, angleEnv, torsionEnv, impropEnv]
# Fixed label typo: '(Improper' -> 'Improper' (stray parenthesis in output).
names = ['generic', 'Atom', 'Bond', 'Angle', 'Torsion', 'Improper']
for idx, Env in enumerate(EnvList):
    print("%10s: %s" % (names[idx], Env.asSMIRKS()))
```
## Initiating `ChemicalEnvironments` from SMIRKS Strings
`ChemicalEnvironments` can be initialized by SMIRKS strings. Here we attempt to show the robustness of this parsing. These patterns are intentionally complicated and therefore can be hard to read by humans. Here are some of the key features we would like to test:
* SMILES strings are SMIRKS strings (i.e. 'CCC' should be stored as 3 atoms bonded in a row).
* Replacement strings, such as `"$ewg1"` to mean `"[#7!-1,#8!-1,#16!-1,#9,#17,#35,#53]"`
* Complex recursive SMIRKS such as `"[#6$(*([#6]=[#8])-,=$ewg2))]"`
* Ring indexing: as in SMILES, SMARTS and SMIRKS use a number after an atom to describe the atoms in a ring, such as "[#6:1]1(-;!@[#1,#6])=;@[#6]-;@[#6]1" to show a cyclopropene ring where atom 1 is in the double bond and is bound to a hydrogen or carbon outside the ring.
* Hybrid SMIRKS with atomic symbols for the atoms. These do not have to use the square brackets, for example "C(O-[#7,#8])C[C+0]=[*]"
In this set-up we will show that these SMIRKS patterns are parseable with the OpenEye Toolkits, then create a `ChemicalEnvironment` from the SMIRKS string and then print the `ChemicalEnvironment` as a SMIRKS string. Note that due to the nature of SMIRKS patterns the `ChemicalEnvironment` smirks may not identically match the input SMIRKS. A key difference is that every atom in a `ChemicalEnvironment` SMIRKS will be inside square brackets. Also, "blank" bonds, for example in "CCC" will be converted to their literal meaning, single or aromatic.
```
# NBVAL_SKIP
# define the two replacements strings
replacements = [('ewg1', '[#7!-1,#8!-1,#16!-1,#9,#17,#35,#53]'),
                ('ewg2', '[#7!-1,#8,#16]')]

# define complicated SMIRKS patterns
SMIRKS = ['[#6$(*~[#6]=[#8])$(*-,=$ewg2)]',  # complex recursive SMIRKS
          'CCC',  # SMILES
          "[#1:1]-CCC",  # simple hybrid
          '[#6:1]1(-;!@[#1,#6])=;@[#6]-;@[#6]1',  # Complicated ring
          'C(O-[#7,#8])CC=[*]',  # Hybrid SMIRKS
          "[#6$([#6X4](~[$ewg1])(~[#8]~[#1])):1]-[#6X2H2;+0:2]-,=,:;!@;!#[$ewg2:3]-[#4:4]",  # its just long
          "[#6$([#6X4](~[$ewg1])(~[#8]~[#1])):1]1=CCCC1",  # another ring
          ]

for smirk in SMIRKS:
    # First expand the $ewg shorthand and confirm OpenEye can parse the raw
    # pattern, then round-trip the same pattern through ChemicalEnvironment.
    qmol = oechem.OEQMol()
    tmp_smirks = oechem.OESmartsLexReplace(smirk, replacements)
    parseable = env.OEParseSmarts(qmol, tmp_smirks)
    print("Input SMIRKS: %s" % smirk)
    print("\t parseable by OpenEye Tools: %s" % parseable)
    # Note: the round-tripped SMIRKS need not match the input byte-for-byte
    # (all atoms get square brackets, blank bonds become explicit).
    Env = env.ChemicalEnvironment(smirks=smirk, replacements=replacements)
    print("\t Chemical Environment SMIRKS: %s\n" % Env.asSMIRKS())
```
# Structure of `ChemicalEnvironments`
Up until now, we have discussed only how to initiate `ChemicalEnvironment`s. Now we will explain how they are structured and how to use them to make changes to your SMIRKS pattern (and therefor the fragment you are describing).
To begin with, the overall structure of `ChemicalEnvironment`s is similar to how a chemist might think about a fragment.
We use NetworkX graphs to store information about the pieces.
Nodes store information about Atoms and edges (connecting nodes) store information about Bonds.
Both of these sub-structures, Atoms and Bonds, store information about the input SMIRKS pattern in a broken-down way so it can be easily edited. The words Atoms and Bonds are capitalized as they are classes in and of themselves.
Both Atoms and Bonds have two types of information
* ORtypes
- things that are OR'd together in the SMIRKS string using a comma (',')
- These have two subtypes:
- ORbases - typically an atomic number
- ORdecorators - typically information that might be true for 1 possible atomic number, but not others
* ANDtypes
- things that are AND'd together in the SMIRKS string using a semi-colon (';')
This starts to sound complicated, so to try to illustrate how this works, we will use an actual Angle found in the [SMIRNOFF99Frosst](https://github.com/openforcefield/smirnoff99Frosst) force field.
Here is the SMIRKS String:
# `"[#6X3,#7:1]~;@[#8;r:2]~;@[#6X3,#7:3]"`
* atom 1 and atom 3
- ORtypes
- '#6X3' - a trivalent carbon
- ORbase = '#6'
- ORdecorators = ['X3']
- '#7' is a nitrogen
- ORbase = '#7'
- ORdecorators []
- ANDtypes
- [] (None)
* atom 2
- ORtypes
- '#8'
- ORbase = '#8'
- ORdecorators = []
- ANDtypes
- ['r'] it is in a ring
* bond 1 and 2 are identical
- ORtypes = None (generic bond ~)
- ANDtypes = ['@']
- it is in a ring
### Selecting Atoms and Bonds
Here we will use the selectAtom and selectBond functions to get a specific atom or bond and then print its information. The 'select' methods (selectAtom() or selectBond()) take a descriptor argument which can be used to select a certain atom or bond, or a certain type of atom or bond.
Descriptor input option:
* None - returns a random atom
* int - returns that atom or bond by index
* 'Indexed' - returns a random indexed atom
* 'Unindexed' - returns a random non-indexed atom
* 'Alpha' - returns a random atom alpha to an indexed atom
* 'Beta' - returns a random atom beta to an indexed atom
```
# Example angle from smirnoff99Frosst: two trivalent-C-or-N atoms bridged
# by a ring oxygen, with both bonds constrained to be in a ring.
smirks = "[#6X3,#7:1]~;@[#8;r:2]~;@[#6X3,#7:3]"
angle = env.ChemicalEnvironment(smirks=smirks)

# get atom1 and print information
atom1 = angle.selectAtom(1)
print("Atom 1: '%s'" % atom1.asSMIRKS())
print("ORTypes")
# Each ORtype is a (base, decorators) tuple, e.g. ('#6', ['X3']).
for (base, decs) in atom1.getORtypes():
    print("\tBase: %s" % base)
    str_decs = ["'%s'" % d for d in decs]
    str_decs = ','.join(str_decs)
    print("\tDecorators: [%s]" % str_decs)
print("ANDTypes:", atom1.getANDtypes())
print()

# get bond1 and print information (bond 1 connects indexed atoms 1 and 2)
bond1 = angle.selectBond(1)
print("Bond 1: '%s'" % bond1.asSMIRKS())
print("ORTypes: ", bond1.getORtypes())
print("ANDTypes: ", bond1.getANDtypes())
```
## Changing ORtypes and ANDtypes
For both ORtypes and ANDtypes for Atoms and Bonds there are "get" and "set" methods.
The set methods completely rewrite that type.
There are also methods to add ORtypes and ANDtypes, which append a single entry to the existing list.
Here we will use the set ORtypes to change atom1 to be a trivalent carbon or a divalent nitrogen.
Then we will also add an ORType and ANDType to atom2 so that it could refer to an oxygen ('#8') or trivalent and neutral nitrogen ('#7X3+0') and in one ring ('R1').
### Final SMIRKS string: `"[#6X3,#7X2:1]~;@[#8,#7X3+0;r;R1:2]~;@[#6X3,#7:3]"`
```
# Change atom1's ORtypes with the setORtype method
# (set* replaces the whole list: carbon must now be trivalent, nitrogen divalent).
new_ORtypes = [('#6', ['X3']), ('#7', ['X2'])]
atom1.setORtypes(new_ORtypes)
print("New Atom 1: %s " % atom1.asSMIRKS())

# Change atom2's AND and OR types with the add*type methods
# (add* appends a single entry instead of replacing the list).
atom2 = angle.selectAtom(2)
atom2.addANDtype('R1')
atom2.addORtype('#7', ['X3', '+0'])
print("New Atom 2: %s" % atom2.asSMIRKS())
print("\nNew SMIRKS: %s" % angle.asSMIRKS())
```
## Adding new Atoms
The addAtom method is used to introduce atoms bound to existing atoms.
You can add an empty atom or specify information about the new bond and new atom.
Here are the parameters for the addAtom method:
```
Parameters
-----------
bondToAtom: atom object, required
atom the new atom will be bound to
bondORtypes: list of tuples, optional
strings that will be used for the ORtypes for the new bond
bondANDtypes: list of strings, optional
strings that will be used for the ANDtypes for the new bond
newORtypes: list of strings, optional
strings that will be used for the ORtypes for the new atom
newANDtypes: list of strings, optional
strings that will be used for the ANDtypes for the new atom
newAtomIndex: int, optional
integer label that could be used to index the atom in a SMIRKS string
beyondBeta: boolean, optional
if True, allows bonding beyond beta position
```
The `addAtom` method returns the created atom.
Here we will add an alpha atom (oxygen) to atom 3 that is not in a ring and then a beta atom (hydrogen) bound to the alpha atom.
### New SMIRKS pattern: `"[#6X3,#7X2:1]~;@[#8,#7+0X3;R1:2]~;@[#6X3,#7:3]~;!@[#8X2H1;R0]~[#1]"`
```
atom3 = angle.selectAtom(3)

# Alpha atom: an oxygen (two connections, one hydrogen) not in a ring,
# attached through a non-ring ('!@') bond; beta atom: a hydrogen on it.
alpha_ORtypes = [('#8', ['X2', 'H1'])]
alpha_ANDtypes = ['R0']
alpha_bondANDtypes = ['!@']
beta_ORtypes = [('#1', [])]

# addAtom returns the newly created atom object, so it can be chained.
alpha = angle.addAtom(atom3, bondANDtypes=alpha_bondANDtypes,
                      newORtypes=alpha_ORtypes, newANDtypes=alpha_ANDtypes)
beta = angle.addAtom(alpha, newORtypes=beta_ORtypes)

print("Alpha Atom SMIRKS: %s" % alpha.asSMIRKS())
print("Beta Atom SMIRKS: %s" % beta.asSMIRKS())
print()
print("New overall SMIRKS: %s" % angle.asSMIRKS())
```
## Removing Atoms
The removeAtom method works how you would expect. It removes the specified atom and the bond connecting it to the fragment.
You cannot remove indexed atoms (if you want to remove their OR and AND decorators you can set them to empty lists).
The other option with the `removeAtom` method is to say only remove it if the atom is undecorated. This is done by setting the input variable `isEmpty` to True (default is False). When `isEmpty` is True, the atom is only removed if it has 1 ORtype and no ANDtypes.
The `removeAtom` method returns True if the atom was removed and False if it was not.
As an example, we will remove the hydrogen in the beta position to atom3 that was added above.
### New SMIRKS pattern: `"New overall SMIRKS: [#6X3,#7X2:1]~;@[#8,#7+0X3;R1:2]~;@[#6X3,#7:3]~;!@[#8X2H1;R0]"`
```
# Remove the beta hydrogen added above; removeAtom returns True on success
# (indexed atoms cannot be removed).
removed = angle.removeAtom(beta)
print("The hydrogen beta to atom3 was remove: ", removed)
print("Updated SMIRKS string: %s" % angle.asSMIRKS())
```
## Other `ChemicalEnvironment` Methods
There are a variety of other methods that let you get information about the stored fragment. This includes:
1. Getting information about an atom or bond in an environment (i.e. `isAlpha` returns a boolean)
* Get atoms or bonds in each type of position:
- `getAtoms` or `getBonds`
- returns all atoms or bonds
- `getIndexedAtoms` or `getIndexedBonds`
- `getAlphaAtoms` or `getAlphaBonds`
- `getBetaAtoms` or `getBetaBonds`
- `getUnindexedAtoms` or `getUnindexedBonds`
* Report the minimum order of a bond with `Bond.getOrder`
- Note this is the minimum so a bond that is single or double (`'-,='`) will report the order as 1
* Report the valence and bond order around an atom can be reported with `getValence` and `getBondORder`
* Get a bond between two atoms (or determine if the atoms are bonded) with `getBond(atom1, atom2)`
* Get atoms bound to a specified atom with `getNeighbors`
Here we will show how each of these method types is used:
```
# 1. Getting information about an atom or bond in an environment (i.e. isAlpha returns a boolean)
# Check if the alpha atom above is any of the following
print("Above a carbon atom ('%s') was added in the alpha position to atom 3. This atom is ..." % alpha.asSMIRKS())
print("\t Indexed: ", angle.isIndexed(alpha))
print("\t Unindexed: ", angle.isUnindexed(alpha))
print("\t Alpha: ", angle.isAlpha(alpha))
print("\t Beta: ", angle.isBeta(alpha))
# NOTE - These methods can take an atom or a bond as an argument

# 2. Get atoms or bonds in each type of position, for example getIndexedAtoms or getAlphaBonds
# We will print the SMIRKS for each indexed atom:
indexed = angle.getIndexedAtoms()
print("Here are the SMIRKS strings for the Indexed atoms in the example angle:")
for a in indexed:
    print("\tAtom %i: '%s'" % (a.index, a.asSMIRKS()))
print()

bonds = angle.getBonds()
print("Here are the SMIRKS strings for ALL bonds in the example angle:")
for b in bonds:
    print("\t'%s'" % b.asSMIRKS())

# 3. Report the minimum order of a bond with Bond.getOrder
# (minimum: a '-,=' bond reports order 1)
bond1 = angle.selectBond(1)
print("Bond 1 (between atoms 1 and 2) has a minimum order of %i" % bond1.getOrder())

# 4. Report the valence and bond order around an atom can be reported with getValence and getBondORder
atom3 = angle.selectAtom(3)
print("Atom 3 has a valency of %i" % angle.getValence(atom3))
print("Atom 3 has a minimum bond order of %i" % angle.getBondOrder(atom3))

# 5. Get a bond between two atoms (or determine if the atoms are bonded) with getBond(atom1, atom2)
# Check for bonds between each pair of indexed atoms
atom_pairs = [(1, 2), (2, 3), (1, 3)]
for (A, B) in atom_pairs:
    atomA = angle.selectAtom(A)
    atomB = angle.selectAtom(B)
    # check if there is a bond between the two atoms (getBond returns None if not)
    bond = angle.getBond(atomA, atomB)
    if bond is None:
        print("There is no bond between Atom %i and Atom %i" % (A, B))
    else:
        print("The bond between Atom %i and Atom %i has the pattern '%s'" % (A, B, bond.asSMIRKS()))

# 6. Get atoms bound to a specified atom with getNeighbors
# get the neighbors for each indexed atom
for A in [1, 2, 3]:
    atomA = angle.selectAtom(A)
    print("Atom %i has the following neighbors" % A)
    for a in angle.getNeighbors(atomA):
        print("\t '%s' " % a.asSMIRKS())
    print()
```
| github_jupyter |
# Playing with the HEP system of units
**Or, an intro to the `hepunits` package while performing some time-of-flight studies**.
```{warning}
The URL for this page may change, the general design for this tutorial series is in flux.
```
Scikit-HEP packages used:
- [`hepunits`](https://github.com/scikit-hep/hepunits)
- [`Particle`](https://github.com/scikit-hep/particle)
**Authors**
* [Eduardo Rodrigues](https://github.com/eduardo-rodrigues/)
```
import numpy as np
import matplotlib.pyplot as plt
```
## hepunits - The HEP system of units
The package ``hepunits`` collects the most commonly used units and constants in the
HEP System of Units, which differs from the international system of units (aka SI units) by scaling factors
for what are the basic units.
The HEP system of units is based on the following:
| Quantity | Name | Unit|
| -------------------:| ------------------:| ---:|
| Length | millimeter | mm |
| Time | nanosecond | ns |
| Energy | Mega electron Volt| MeV |
| Positron charge | eplus | |
| Temperature | kelvin | K |
| Amount of substance| mole | mol |
| Luminous intensity | candela | cd |
| Plane angle | radian | rad |
| Solid angle | steradian | sr |
Note: no need to make use of sophisticated packages (e.g. as in AstroPy) since we basically never need to change systems of units (we never use ergs as energy, for example ;-)).
### **Basic usage**
**Basic usage is straightforward**, though it may be confusing at first.
Remember, all variables are written with respect to the units:
```
from hepunits import GeV, MeV, cd, eplus, kelvin, mm, mol, ns, rad, sr

# The HEP base units are all defined as 1; derived units scale from them.
mm == ns == MeV == eplus == kelvin == mol == cd == rad == sr == 1
# GeV is expressed in the base energy unit (MeV).
GeV == 1000 * MeV
```
```{note}
1. No error checking is implemented, since units are not objects, rather simple numbers.
Expressions such as `1*ns + 1*mm` produce no error.
2. Units help improving the readability of code and making formula explicit (although correctness must be still manually checked).
```
Add two quantities with different length units:
```
from hepunits import units as u

# Both terms scale to the base length unit (mm), so this yields 1050 (mm).
1 * u.meter + 5 * u.cm
```
The result is in HEP units, so mm. Indeed, for example `u.meter == 1000 == 1000 * u.mm`.
Rather obtain the result in meters:
```
(1 * u.meter + 5 * u.cm) / u.meter
```
Do you need to play a bit more to get a proper feeling? This next (non-academic) exercise should help you ...
### **Quick time-of-flight study**
Let's try to play with units in a meaningful way, in a kind of exercise that physicists encounter. Imagine you are investigating time-of-flight (ToF) detectors for particle identification. The time it takes a particle of velocity $\beta = v/c= pc/E$ to travel a distance $L$ is given by
$$\mathrm{ToF} = \frac{L}{c \beta}$$
It results that the mass $m$ of the particle can be determined from
$$m = \frac{p}{c}\sqrt{\frac{c^2 \mathrm{ToF}^2}{L^2}-1}$$
provided the path length $L$ and the momentum $p$ can be measured, say, by a tracking system.
What are typical ToF differences say for (charged) kaons and pions?
It is practical to perform the calculation as
$$\Delta \mathrm{ToF} = \frac{L}{c}(\frac{1}{\beta_1} - \frac{1}{\beta_2})\,,$$
with $\frac{1}{\beta} = \sqrt{1+m^2c^2/p^2}$.
```
from hepunits import GeV, c_light, meter, ns, ps

def ToF(m: float, p: float, L: float) -> float:
    """Time-of-flight of a particle of mass m and momentum p over path L.

    Uses ToF = L / (c * beta) with 1/beta = sqrt(1 + m^2 c^2 / p^2);
    m and p are both supplied without their factors of c, so those
    factors cancel in the ratio.
    """
    inv_beta = np.sqrt(1 + m * m / (p * p))
    tof = L * inv_beta / c_light
    return tof
```
For convenience, get hold of data for the proton, $K^+$ and $\pi^+$ taken from the PDG using the [`Particle` package](https://github.com/scikit-hep/particle):
```
from particle.literals import K_plus, pi_plus, proton # particle name literals
```
Calculate the difference in ToF between 10 GeV kaons and pions travelling over 10 meters:
```
# ToF difference between 10 GeV kaons and pions over a 10 m path, converted
# to picoseconds by dividing out the ps unit.
delta = (
    ToF(K_plus.mass, 10 * GeV, 10 * meter) - ToF(pi_plus.mass, 10 * GeV, 10 * meter)
) / ps
print(f"At 10 GeV, Delta-TOF(K-pi) over 10 meters = {delta:.4} ps")
```
Let's get a bit fancier:
- Compare protons, kaons and pions.
- Look at the ToF difference versus momentum.
```
# Momentum scan 0.5-5 GeV; pairwise ToF differences for a 1 m flight path.
p = np.arange(0.5, 5.1, 0.05) * GeV
delta1 = (ToF(K_plus.mass, p, 1.0 * meter) - ToF(pi_plus.mass, p, 1.0 * meter)) / ps
delta2 = (ToF(proton.mass, p, 1.0 * meter) - ToF(K_plus.mass, p, 1.0 * meter)) / ps
delta3 = (ToF(proton.mass, p, 1.0 * meter) - ToF(pi_plus.mass, p, 1.0 * meter)) / ps

fig, ax = plt.subplots()
# NOTE(review): "$\pi$"-style labels use non-raw backslashes; raw strings
# (r"...") would avoid invalid-escape-sequence warnings.
ax.plot(p / GeV, delta1, label="K-$\pi$")
ax.plot(p / GeV, delta2, label="p-K")
ax.plot(p / GeV, delta3, label="p-$\pi$")
ax.set(
    xlabel="p [GeV]",
    ylabel="$\Delta$ ToF [ps]",
    title="Time-of-flight difference for a 1 meter path",
)
ax.grid()
plt.legend()
plt.ylim(bottom=0, top=500)
plt.show()
```
Taking now an example that could be relevant to LHCb conditions - detector timing resolution requirement is getting tough!:
```
# Higher-momentum scan (5-10 GeV) over a 10 m path: LHCb-like conditions,
# where the K-pi separation shrinks to tens of ps.
p = np.arange(5.0, 10.1, 0.1) * GeV
delta = (ToF(K_plus.mass, p, 10 * meter) - ToF(pi_plus.mass, p, 10 * meter)) / ps

fig, ax = plt.subplots()
ax.plot(p / GeV, delta)
ax.set(
    xlabel="p [GeV]",
    ylabel="$\Delta$ ToF [ps]",
    title="Time-of-flight difference for a 10 meter path",
)
ax.grid()
plt.show()
```
For short flight distances protons, kaons and pions become indistinguishable, as expected:
```
# Relative ToF (normalized to the pion) over a short 1.38 m path: the
# ratios approach 1 at high momentum, i.e. the species become indistinguishable.
p = np.arange(0.5, 5.1, 0.05) * GeV
s1 = ToF(K_plus.mass, p, 1.38 * meter) / ToF(pi_plus.mass, p, 1.38 * meter)
s3 = ToF(proton.mass, p, 1.38 * meter) / ToF(pi_plus.mass, p, 1.38 * meter)

fig, ax = plt.subplots()
ax.plot(p / GeV, s1, label="K-$\pi$")
ax.plot(p / GeV, s3, label="p-$\pi$")
ax.set(
    xlabel="p [GeV]",
    ylabel="ToF/ToF($\pi$)",
    title="Relative Time-of-flight for a 1 meter flight",
)
ax.grid()
plt.ylim(bottom=1, top=2.2)
plt.legend()
plt.show()
```
| github_jupyter |
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib.markers import MarkerStyle
from warnings import filterwarnings
# Join the feature-combination table with its model scores; the row index
# is used as the (implicit) join key via a temporary 'index' column.
AllCombos = pd.read_csv("40Missing.csv")
AllCombos['index'] = AllCombos.index
mini = pd.read_csv("scoring2.csv")
mini = pd.DataFrame(mini)
mini['index'] = mini.index
q = mini.merge(AllCombos, on='index')
q['score'] = q["score_x"]
q = q.drop(["score_y", "score_x", "index"], axis=1)
# q["score"]=q.score.apply(lambda x: x*100)
q = q.dropna()
q = q.sort_values(by="score", ascending=False)
q.head(10)

# Keep only well-scoring feature combinations.
top = q[q.score > 0.70]

freq = {}

def addToFreq(entry, Set=q):
    """Accumulate, into the global `freq` dict, each feature's mean score
    over all rows of `Set` whose feature string mentions it.

    NOTE(review): the default Set=q binds the DataFrame at definition time
    (late-binding default), and str.contains does regex/substring matching,
    so a feature name that is a prefix of another also matches those rows
    -- confirm this over-matching is intended.
    """
    global freq
    # Drop error/limit columns and two known non-informative features.
    items = [x.strip() for x in entry.split(",") if ("err" not in x and "lim" not in x and x != 'pl_st_nref' and x != 'pl_locale')]
    for item in items:
        if item not in freq:
            freq[item] = Set.score[Set.features.str.contains(item)].sum() / len(Set.score[Set.features.str.contains(item)])

top.features.apply(lambda x: addToFreq(x))
freq
numTop = 20
numTrend = 10

# Rank features by their mean score, best first.
Frequency = sorted(freq, key=freq.get)
# print(Frequency)
Frequency.reverse()
bestOverall = Frequency[:numTop]

trendFreq = {}
trends = {}
# print(Frequency)
# sortedFreq={}

def ScoreAndFreq(entry, Set, currItem):
    """Like addToFreq, but accumulates into the global `trendFreq` dict and
    additionally excludes `currItem` (the feature whose co-occurring
    "trend" features are being scored)."""
    global trendFreq
    items = [x.strip() for x in entry.split(",") if ("err" not in x and "lim" not in x and x != 'pl_st_nref' and x != 'pl_locale' and x != currItem)]
    for item in items:
        if item not in trendFreq:
            trendFreq[item] = Set.score[Set.features.str.contains(item)].sum() / len(Set.score[Set.features.str.contains(item)])

# For each top feature, score the features that co-occur with it and keep
# the numTrend best as its "trends".
for n in bestOverall:
    selection = q[q.features.str.contains(n)]
    trendFreq = {}
    selection.features.apply(lambda x: ScoreAndFreq(x, selection, n))
    trendBest = sorted(trendFreq, key=trendFreq.get)
    trendBest.reverse()
    trendBest = trendBest[:numTrend]
    out = {}
    for feat in trendBest:
        out[feat] = trendFreq[feat]
    trends[n] = out
# Line plot of the 15 best features by mean score.
plt.figure(figsize=(10, 8))
print(Frequency[:10])
y = [freq[x] for x in Frequency[0:15]]
fig = plt.plot(Frequency[0:15], y)
plt.xticks(rotation=90)
# plt.savefig("mostImportant.jpg")

# Print each top feature together with its ranked trend features.
for n in Frequency[:15]:
    print('# Important feature being considered:', "__" + n + "__", '\n')
    print("Trend Feature: Trend Score\n")
    num = 1
    for m in trends[n]:
        print(str(num) + ".", end=" ")
        print(m, ":", trends[n][m])
        num += 1
    num = 1

# Scatter the trend scores of the 7 best features on a shared set of axes.
Fig = 1
fig = plt.figure(figsize=(15, 15))
# fig.set_width=10
# fig.set_height=40
# fig.add_subplot(2,5)
labels = Frequency[0:7]
for n in labels:
    c = pd.DataFrame({"feats": list(trends[n].keys())})
    c['score'] = c.feats.apply(lambda x: trends[n][x])
    plt.subplot().scatter(c.feats, c.score, s=200)
    # plt.title(n,fontsize=20)
plt.xlabel("Trend Feature", fontsize=27)
plt.ylabel("Trend Score", fontsize=27)
plt.xticks(rotation=90, fontsize=17)
plt.yticks(fontsize=17)
plt.rcParams['legend.title_fontsize'] = 17
plt.legend([x for x in labels], title="Important\n Feature", loc=1, fontsize=17, fancybox=True)
plt.savefig("TOP7Features.jpg")
filterwarnings('ignore')
# --- Second pass: identical pipeline, but the scoring functions are
# redefined to use the *sum* of scores (the mean division is commented
# out), i.e. a frequency-weighted variant of the analysis above.
AllCombos = pd.read_csv("40Missing.csv")
AllCombos['index'] = AllCombos.index
mini = pd.read_csv("scoring2.csv")
mini = pd.DataFrame(mini)
mini['index'] = mini.index
q = mini.merge(AllCombos, on='index')
q['score'] = q["score_x"]
q = q.drop(["score_y", "score_x", "index"], axis=1)
# q["score"]=q.score.apply(lambda x: x*100)
q = q.dropna()
q = q.sort_values(by="score", ascending=False)
q.head(10)

top = q[q.score > 0.70]

freq = {}

def addToFreq(entry, Set=q):
    """Redefinition of addToFreq: accumulates the *summed* score of rows
    mentioning each feature into the global `freq` (no mean division)."""
    global freq
    items = [x.strip() for x in entry.split(",") if ("err" not in x and "lim" not in x and x != 'pl_st_nref' and x != 'pl_locale')]
    for item in items:
        if item not in freq:
            freq[item] = Set.score[Set.features.str.contains(item)].sum()  # /len(Set.score[Set.features.str.contains(item)])

top.features.apply(lambda x: addToFreq(x))
freq
numTop = 20
numTrend = 10

# Rank features by summed score, best first.
Frequency = sorted(freq, key=freq.get)
# print(Frequency)
Frequency.reverse()
bestOverall = Frequency[:numTop]

trendFreq = {}
trends = {}
# print(Frequency)
# sortedFreq={}

def ScoreAndFreq(entry, Set, currItem):
    """Redefinition of ScoreAndFreq: accumulates summed (not mean) scores
    of co-occurring features into the global `trendFreq`, excluding
    `currItem` itself."""
    global trendFreq
    items = [x.strip() for x in entry.split(",") if ("err" not in x and "lim" not in x and x != 'pl_st_nref' and x != 'pl_locale' and x != currItem)]
    for item in items:
        if item not in trendFreq:
            trendFreq[item] = Set.score[Set.features.str.contains(item)].sum()  # /len(Set.score[Set.features.str.contains(item)])

# Same trend extraction as pass 1, now with sum-based scores.
for n in bestOverall:
    selection = q[q.features.str.contains(n)]
    trendFreq = {}
    selection.features.apply(lambda x: ScoreAndFreq(x, selection, n))
    trendBest = sorted(trendFreq, key=trendFreq.get)
    trendBest.reverse()
    trendBest = trendBest[:numTrend]
    out = {}
    for feat in trendBest:
        out[feat] = trendFreq[feat]
    trends[n] = out
# Same plots as pass 1, now for the sum-based ("Frequency") scores.
plt.figure(figsize=(10, 8))
print(Frequency[:10])
y = [freq[x] for x in Frequency[0:15]]
fig = plt.plot(Frequency[0:15], y)
plt.xticks(rotation=90)
plt.title("Frequency")
# plt.savefig("mostImportant.jpg")

for n in Frequency[:15]:
    print('# Important feature being considered:', "__" + n + "__", '\n')
    print("Trend Feature: Trend Score\n")
    num = 1
    for m in trends[n]:
        print(str(num) + ".", end=" ")
        print(m, ":", trends[n][m])
        num += 1
    num = 1

Fig = 1
fig = plt.figure(figsize=(10, 10))
# fig.set_width=10
# fig.set_height=40
# fig.add_subplot(2,5)
labels = Frequency[0:7]
for n in labels:
    c = pd.DataFrame({"feats": list(trends[n].keys())})
    c['score'] = c.feats.apply(lambda x: trends[n][x])
    # NOTE(review): fillstyle=None falls back to the default fill style --
    # confirm 'none' (the string) was not intended for unfilled markers.
    plt.subplot().scatter(c.feats, c.score, marker=MarkerStyle(marker="x", fillstyle=None), s=200)
    # plt.title(n,fontsize=20)
plt.xlabel("feature", fontsize=20)
plt.ylabel("score", fontsize=20)
plt.xticks(rotation=90, fontsize="x-large")
plt.yticks(fontsize="x-large")
plt.subplot().legend(labels)
plt.savefig("TOP7Features.jpg")
filterwarnings('ignore')
```
| github_jupyter |
cf. [`ComputationalPhysics/doc/Programs/LecturePrograms/programs/StatPhys/python/ising2dim.py`](https://github.com/CompPhysics/ComputationalPhysics/blob/master/doc/Programs/LecturePrograms/programs/StatPhys/python/ising2dim.py)
```
import numpy
import numpy as np
import sys
import math
import matplotlib.pyplot as plt
```
## Periodic boundary conditions
```
def periodic(i, limit, add):
    """Return the index reached by stepping `add` from `i` on a ring of
    size `limit` (periodic boundary conditions).

    Input:
    - i:     base index
    - limit: highest "legal" index (ring size)
    - add:   offset to apply; may be negative
    """
    shifted = i + add
    # Adding `limit` before the modulo keeps the operand non-negative for
    # the common single-step offsets.
    return (shifted + limit) % limit
```
Set up spin matrix, initialize to ground state
```
size = 256  # L_x: lattice is size x size sites
temp = 10.  # temperature T
# All spins up (+1): the ferromagnetic ground state; int8 keeps it compact.
spin_matrix = np.zeros( (size,size), np.int8) + 1
spin_matrix
```
Create and initialize variables
```
# Running energy/magnetization and their Monte Carlo accumulators
# (averages of E, E^2, M, M^2, |M|).
E = M = 0
E_av = E2_av = M_av = M2_av = Mabs_av = 0
```
Setup array for possible energy changes
```
# Precompute Boltzmann factors exp(-dE/T) for the possible energy changes
# dE in {-8, -4, 0, 4, 8} (only multiples of 4 occur on the 2D square
# lattice); the +8 offset keeps the array index non-negative.
w = np.zeros(17, np.float64)
for de in range(-8, 9, 4):  # range/print() forms run on Python 2 and 3
    print(de)
    w[de + 8] = math.exp(-de / temp)
print(w)
```
Calculate initial magnetization
```
# Initial magnetization: sum over all spins (every spin is +1 here).
M = spin_matrix.sum()
print(M)  # print() call form runs on both Python 2 and 3
```
Calculate initial energy
```
# In Python 2, range builds a whole list while xrange evaluates lazily;
# plain range also runs under Python 3, so it is used here.
# Initial energy: visit every site and add only its left (x-1) and up (y+1)
# bonds, so each nearest-neighbour bond is counted exactly once
# (periodic() supplies the wrap-around).
for j in range(size):
    for i in range(size):
        E -= spin_matrix.item(i,j) * (spin_matrix.item(periodic(i,size,-1),j) + spin_matrix.item(i,periodic(j,size,1)))
```
Metropolis MonteCarlo computation, 1 single step or iteration, done explicitly:
```
# One explicit Metropolis trial: pick a random site (x, y) ...
x = int(np.random.random()*size)
print(x)
y = int(np.random.random()*size)
print(y)
# Energy change for flipping spin (x, y): 2 * s_xy * (sum of its 4 neighbours).
# BUG FIX: the original read spin_matrix.item(i, j) -- stale indices left
# over from the energy-initialization loop -- although the flip candidate
# (and the spin actually flipped below) is (x, y).
deltaE = 2*spin_matrix.item(x,y) * \
    (spin_matrix.item(periodic(x,size,-1),y) + spin_matrix.item(periodic(x,size,1),y) + \
     spin_matrix.item(x,periodic(y,size,-1)) + spin_matrix.item(x,periodic(y,size,1)))
print(deltaE)
print( w[deltaE + 8] )
np.random.random()
# Metropolis criterion: accept with probability min(1, exp(-deltaE/T)).
print( np.random.random() <= w[deltaE+8])
```
Accept (if True)!
```
# Accepted move: flip the spin and update M and E incrementally.
print( spin_matrix[x,y] )
print( spin_matrix.item(x,y) )
spin_matrix[x,y] *= -1
# A flip changes M by twice the *new* spin value.
M += 2*spin_matrix[x,y]
E += deltaE
print(spin_matrix.item(x,y))
print(M)
print(E)
# NOTE(review): pygame is imported here but not used in the visible code.
import pygame
```
## Initialize (all spins up), explicitly shown
```
# Re-initialize the lattice explicitly: all spins up on a 256x256 grid.
Lx=256; Ly=256
spin_matrix = np.zeros((Lx,Ly),np.int8)
print(spin_matrix.shape)
spin_matrix.fill(1)
spin_matrix
def initialize_allup(spin_matrix, J=1.0):
    """Set every spin to +1 and return the initial (E, M) of the lattice.

    E is the ferromagnetic Ising energy -J * sum_<ij> s_i s_j, counting each
    bond once via the +x and +y neighbors under periodic boundaries; M is the
    total magnetization (sum of all spins).

    Python 3 fix: `xrange` -> `range`, print-statement -> print().
    """
    Lx, Ly = spin_matrix.shape
    spin_matrix.fill(1)
    M = spin_matrix.sum()
    # Calculate initial energy
    E = 0
    for j in range(Ly):
        for i in range(Lx):
            E += (-J) * spin_matrix.item(i, j) * \
                (spin_matrix.item(periodic(i, Lx, +1), j) + spin_matrix.item(i, periodic(j, Ly, 1)))
    print("M: ", M, " E: ", E)
    return E, M
E, M = initialize_allup(spin_matrix)
def initialize_allup1(spin_matrix, J=1.0):
    """Variant of initialize_allup that pairs each site with its -x and +y
    neighbors instead of +x and +y; for a uniform lattice the two bond
    conventions give the same energy.  Returns the initial (E, M).

    Python 3 fix: `xrange` -> `range`, print-statement -> print().
    """
    Lx, Ly = spin_matrix.shape
    spin_matrix.fill(1)
    M = spin_matrix.sum()
    # Calculate initial energy
    E = 0
    for j in range(Ly):
        for i in range(Lx):
            E -= J * spin_matrix.item(i, j) * \
                (spin_matrix.item(periodic(i, Lx, -1), j) + spin_matrix.item(i, periodic(j, Ly, 1)))
    print("M: ", M, " E: ", E)
    return E, M
E, M = initialize_allup(spin_matrix)
# Repeat the initialization on a 512x512 lattice; both conventions should
# print the same E and M for the uniform (all-up) state.
Lx=512; Ly=512
spin_matrix = np.zeros((Lx,Ly),np.int8)
E,M = initialize_allup1( spin_matrix)
E,M = initialize_allup( spin_matrix)
# ... and on a 1024x1024 lattice (2^20 sites).
Lx=1024; Ly=1024
print(Lx*Ly)
spin_matrix = np.zeros((Lx,Ly),np.int8)
E,M = initialize_allup1( spin_matrix)
E,M = initialize_allup( spin_matrix)
# 2^31: the signed 32-bit overflow threshold, shown for comparison with the
# energy/magnetization magnitudes above.
math.pow(2,31)
```
## Setup array for possible energy changes
```
# Boltzmann weights for the possible single-flip energy changes
# dE in {-8, -4, 0, 4, 8} at T = 1.0, stored at index dE + 8.
# (Python 3 fix: `xrange` -> `range`.)
temp = 1.0
w = np.zeros(17, np.float32)
for de in range(-8, 9, 4):  # include +8
    w[de + 8] = math.exp(-de / temp)
print(w)
```
## Importing from the script `ising2dim.py`
```
import os
print(os.getcwd())
print(os.listdir( os.getcwd() ))
# Make the current directory importable (so `ising2dim.py` can be imported).
sys.path.append('./')
```
# Reading out data from `./IsingGPU/FileIO/output.h`
Data is generated by the parallel Metropolis algorithm in CUDA C++ in the subdirectory `./IsingGPU/data/`, which is done by the function `process_avgs` in `./IsingGPU/FileIO/output.h`. The values are saved as a character array, which can then be read in as a NumPy array of `float32`'s. Be sure to explicitly declare the `dtype` as `float32`.
```
# Read the raw float32 binary written by process_avgs in
# ./IsingGPU/FileIO/output.h; the dtype must match what the CUDA code wrote.
avgsresults_GPU = np.fromfile("./IsingGPU/data/IsingMetroGPU.bin",dtype=np.float32)
print(avgsresults_GPU.shape)
print(avgsresults_GPU.size)
# 201 temperature points x 7 recorded averages per point.
avgsresults_GPU = avgsresults_GPU.reshape(201,7) # 7 different averages
print(avgsresults_GPU.shape)
print(avgsresults_GPU.size)
avgsresults_GPU
# NOTE(review): pyplot is already imported at the top of the notebook; this
# re-import is redundant but harmless.
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
# Column layout: T, <E>, Var(E), <M>, Var(M), <|M|>, <M^4>.
T = avgsresults_GPU[:,0]
E_avg = avgsresults_GPU[:,1]
ax.scatter( T, E_avg)
plt.show()
Evar_avg = avgsresults_GPU[:,2]
plt.scatter( T, Evar_avg)
plt.show()
M_avg = avgsresults_GPU[:,3]
Mvar_avg = avgsresults_GPU[:,4]
absM_avg = avgsresults_GPU[:,5]
M4_avg = avgsresults_GPU[:,6]
#fig = plt.figure()
#ax = fig.add_subplot(4,1,1)
plt.scatter( T, M_avg)
#fig.add_subplot(4,1,2)
#plt.scatter(T,Mvar_avg)
#fig.add_subplot(4,1,3)
#plt.scatter(T,absM_avg)
#fig.add_subplot(4,1,4)
#plt.scatter(T,M4_avg)
plt.show()
plt.scatter(T,Mvar_avg)
plt.show()
plt.scatter(T,absM_avg)
plt.show()
plt.scatter(T,M4_avg)
plt.show()
```
For
2^10 x 2^10 or 1024 x 1024 grid; 50000 trials, temperature T = 1.0, 1.005,...3. (temperature step of 0.005), so 400 different temperatures, 32 x 32 thread block,
```
# Load the 1024x1024 run: flat float32 binary, 7 averages per temperature.
avgsresults_GPU = np.fromfile("./IsingGPU/data/IsingMetroGPU.bin",dtype=np.float32)
print(avgsresults_GPU.shape)
print(avgsresults_GPU.size)
# BUG FIX: use integer division — in Python 3 `size/7` is a float and
# numpy.reshape rejects non-integer dimensions.
avgsresults_GPU = avgsresults_GPU.reshape( avgsresults_GPU.size//7 ,7) # 7 different averages
print(avgsresults_GPU.shape)
print(avgsresults_GPU.size)
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
# Column layout: T, <E>, Var(E), <M>, Var(M), <|M|>, <M^4>.
T = avgsresults_GPU[:,0]
E_avg = avgsresults_GPU[:,1]
Evar_avg = avgsresults_GPU[:,2]
M_avg = avgsresults_GPU[:,3]
Mvar_avg = avgsresults_GPU[:,4]
absM_avg = avgsresults_GPU[:,5]
M4_avg = avgsresults_GPU[:,6]
ax.scatter( T, E_avg)
plt.show()
plt.scatter( T, Evar_avg)
plt.show()
plt.scatter( T, M_avg)
plt.show()
plt.scatter(T,Mvar_avg)
plt.show()
plt.scatter(T,absM_avg)
plt.show()
plt.scatter(T,M4_avg)
plt.show()
```
### From drafts
```
# Draft runs: same layout (7 float32 averages per temperature point).
# BUG FIX (applied to all three reshape calls below): use integer division —
# in Python 3 `size/7` is a float and numpy.reshape rejects it.
avgsresults_GPU = np.fromfile("./IsingGPU/drafts/data/IsingMetroGPU.bin",dtype=np.float32)
print(avgsresults_GPU.shape)
print(avgsresults_GPU.size)
avgsresults_GPU = avgsresults_GPU.reshape( avgsresults_GPU.size//7 ,7) # 7 different averages
print(avgsresults_GPU.shape)
print(avgsresults_GPU.size)
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
T = avgsresults_GPU[:,0]
E_avg = avgsresults_GPU[:,1]
Evar_avg = avgsresults_GPU[:,2]
M_avg = avgsresults_GPU[:,3]
Mvar_avg = avgsresults_GPU[:,4]
absM_avg = avgsresults_GPU[:,5]
M4_avg = avgsresults_GPU[:,6]
ax.scatter( T, E_avg)
plt.show()
# 10-run averaged draft data.
avgsresults_GPU = np.fromfile("./IsingGPU/drafts/IsingGPU/data/IsingMetroGPU_runs10.bin",dtype=np.float32)
print(avgsresults_GPU.shape)
print(avgsresults_GPU.size)
avgsresults_GPU = avgsresults_GPU.reshape( avgsresults_GPU.size//7 ,7) # 7 different averages
print(avgsresults_GPU.shape)
print(avgsresults_GPU.size)
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
T = avgsresults_GPU[:,0]
E_avg = avgsresults_GPU[:,1]
Evar_avg = avgsresults_GPU[:,2]
M_avg = avgsresults_GPU[:,3]
Mvar_avg = avgsresults_GPU[:,4]
absM_avg = avgsresults_GPU[:,5]
M4_avg = avgsresults_GPU[:,6]
ax.scatter( T, E_avg)
plt.show()
plt.scatter( T, Evar_avg)
plt.show()
plt.scatter( T, M_avg)
plt.show()
plt.scatter(T,Mvar_avg)
plt.show()
plt.scatter(T,absM_avg)
plt.show()
plt.scatter(T,M4_avg)
plt.show()
# Single-run draft data.
avgsresults_GPU = np.fromfile("./IsingGPU/drafts/IsingGPU/data/IsingMetroGPU.bin",dtype=np.float32)
print(avgsresults_GPU.shape)
print(avgsresults_GPU.size)
avgsresults_GPU = avgsresults_GPU.reshape( avgsresults_GPU.size//7 ,7) # 7 different averages
print(avgsresults_GPU.shape)
print(avgsresults_GPU.size)
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
T = avgsresults_GPU[:,0]
E_avg = avgsresults_GPU[:,1]
Evar_avg = avgsresults_GPU[:,2]
M_avg = avgsresults_GPU[:,3]
Mvar_avg = avgsresults_GPU[:,4]
absM_avg = avgsresults_GPU[:,5]
M4_avg = avgsresults_GPU[:,6]
ax.scatter( T, E_avg)
plt.show()
plt.scatter( T, Evar_avg)
plt.show()
plt.scatter( T, M_avg)
plt.show()
plt.scatter(T,Mvar_avg)
plt.show()
plt.scatter(T,absM_avg)
plt.show()
plt.scatter(T,M4_avg)
plt.show()
```
### From [CLaigit](https://github.com/CLaigit/isingmodel/blob/master/ising2d.cu)
```
# One float64 file per temperature; the filename suffix encodes 10*T, so
# range(10, 31, 2) covers T = 1.0, 1.2, ..., 3.0.
avgsresults_GPU = []
for temp in range(10,31,2):
    avgsresults_GPU.append( np.fromfile("./data/ising2d_CLaigit" + str(temp) + ".bin",dtype=np.float64) )
avgsresults_GPU = np.array( avgsresults_GPU)
print( avgsresults_GPU.shape, avgsresults_GPU.size)
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
# Column layout: T, <E>, <M>, heat capacity, magnetic susceptibility.
T = avgsresults_GPU[:,0]
E_avg = avgsresults_GPU[:,1]
M_avg = avgsresults_GPU[:,2]
heat_cap_avg = avgsresults_GPU[:,3]
mag_sus_avg = avgsresults_GPU[:,4]
ax.scatter( T, E_avg)
plt.show()
plt.scatter( T, M_avg)
plt.show()
plt.scatter( T, heat_cap_avg)
plt.show()
plt.scatter( T, mag_sus_avg)
plt.show()
```
| github_jupyter |
# Generative models - variational auto-encoders
### Author: Philippe Esling (esling@ircam.fr)
In this course we will cover
1. A [quick recap](#recap) on simple probability concepts
2. A formal introduction to [Variational Auto-Encoders](#vae) (VAEs)
3. An explanation of the [implementation](#implem) of VAEs
4. Some [modifications and tips to improve the reconstruction](#improve) of VAEs **(exercise)**
<a id="recap"> </a>
## Quick recap on probability
The field of probability aims to model random or uncertain events. Hence, a random variable $X$ denotes a quantity that is uncertain, such as the result of an experiment (flipping a coin) or the measurement of an uncertain property (measuring the temperature). If we observe several occurrences of the variable $\{\mathbf{x}_{i}\}_{i=1}$, it might take different values on each occasion, but some values may occur more often than others. This information is captured by the _probability distribution_ $p(\mathbf{x})$ of the random variable.
To understand these concepts graphically, we will rely on PyTorch's `torch.distributions` package.
```
import torch
import torch.nn as nn
import torch.distributions as distrib
import torchvision
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from helper_plot import hdr_plot_style
hdr_plot_style()
```
### Probability distributions
#### Discrete distributions
Let $\mathbf{x}$ be a discrete random variable with range $R_{X}=\{x_1,\cdots,x_n\}$ (finite or countably infinite). The function
\begin{equation}
p_{X}(x_{i})=p(X=x_{i}), \forall i\in\{1,\cdots,n\}
\end{equation}
is called the probability mass function (PMF) of $X$.
Hence, the PMF defines the probabilities of all possible values for a random variable. The above notation allows to express that the PMF is defined for the random variable $X$, so that $p_{X}(1)$ gives the probability that $X=1$. For discrete random variables, the PMF is also called the \textit{probability distribution}. The PMF is a probability measure, therefore it satisfies all the corresponding properties
- $0 \leq p_{X}(x_i) < 1, \forall x_i$
- $\sum_{x_i\in R_{X}} p_{X}(x_i) = 1$
- $\forall A \subset R_{X}, p(X \in A)=\sum_{x_a \in A}p_{X}(x_a)$
A very simple example of discrete distribution is the `Bernoulli` distribution. With this distribution, we can model a coin flip. If we throw the coin a very large number of times, we hope to see on average an equal amount of _heads_ and _tails_.
```
# Fair coin: Bernoulli distribution with p = 0.5.
bernoulli = distrib.Bernoulli(0.5)
samples = bernoulli.sample((10000,))
plt.figure(figsize=(10,8))
# NOTE(review): sns.distplot is deprecated in recent seaborn releases —
# consider sns.histplot when upgrading.
sns.distplot(samples)
plt.title("Samples from a Bernoulli (coin toss)")
plt.show()
```
However, we can also _sample_ from the distribution to have individual values of a single throw. In that case, we obtain a series of separate events that _follow_ the distribution
```
vals = ['heads', 'tails']
samples = bernoulli.sample((10,))
for s in samples:
    # Each sample is 0. or 1.; use it to pick a side of the coin.
    print('Coin is tossed on ' + vals[int(s)])
```
#### Continuous distributions
The same ideas apply to _continuous_ random variables, which can model for instance the height of human beings. If we try to guess the height of someone that we do not know, there is a higher probability that this person will be around 1m70, instead of 20cm or 3m. For the rest of this course, we will use the shorthand notation $p(\mathbf{x})$ for the distribution $p(\mathbf{x}=x_{i})$, which expresses for a real-valued random variable $\mathbf{x}$, evaluated at $x_{i}$, the probability that $\mathbf{x}$ takes the value $x_i$.
One notorious example of such distributions is the Gaussian (or Normal) distribution, which is defined as
\begin{equation}
p(x)=\mathcal{N}(\mu,\sigma)=\frac{1}{\sqrt{2\pi\sigma^{2}}}e^{-\frac{(x-\mu)^{2}}{2\sigma^{2}}}
\end{equation}
Similarly as before, we can observe the behavior of this distribution with the following code
```
# Standard Normal: mean 0, standard deviation 1.
normal = distrib.Normal(loc=0., scale=1.)
samples = normal.sample((10000,))
plt.figure(figsize=(10,8))
sns.distplot(samples)
plt.title("Samples from a standard Normal")
plt.show()
```
### Comparing distributions (KL divergence)
$
\newcommand{\R}{\mathbb{R}}
\newcommand{\bb}[1]{\mathbf{#1}}
\newcommand{\bx}{\bb{x}}
\newcommand{\by}{\bb{y}}
\newcommand{\bz}{\bb{z}}
\newcommand{\KL}[2]{\mathcal{D}_{\text{KL}}\left[#1 \| #2\right]}$
Originally defined in the field of information theory, the _Kullback-Leibler (KL) divergence_ (usually noted $\KL{p(\bx)}{q(\bx)}$) is a dissimilarity measure between two probability distributions $p(\bx)$ and $q(\bx)$. In the view of information theory, it can be understood as the cost in number of bits necessary for coding samples from $p(\bx)$ by using a code optimized for $q(\bx)$ rather than the code optimized for $p(\bx)$. In the view of probability theory, it represents the amount of information lost when we use $q(\bx)$ to approximate the true distribution $p(\bx)$. %that explicit the cost incurred if events were generated by $p(\bx)$ but charged under $q(\bx)$
Given two probability distributions $p(\bx)$ and $q(\bx)$, the Kullback-Leibler divergence of $q(\bx)$ _from_ $p(\bx)$ is defined to be
\begin{equation}
\KL{p(\bx)}{q(\bx)}=\int_{\R} p(\bx) \log \frac{p(\bx)}{q(\bx)}d\bx
\end{equation}
Note that this dissimilarity measure is \textit{asymmetric}, therefore, we have
\begin{equation}
\KL{p(\bx)}{q(\bx)}\neq \KL{q(\bx)}{p(\bx)}
\end{equation}
This asymmetry also describes an interesting behavior of the KL divergence, depending on the order in which it is evaluated. The KL divergence can either be a _mode-seeking_ or a _mode-covering_ measure.
<a id="vae"></a>
## Variational auto-encoders
As we have seen in the previous AE course, VAEs are also a form of generative models. However, they are defined from a more sound probabilistic perspective: the goal is to find the underlying probability distribution of the data $p(\mathbf{x})$ based on a set of examples $\mathbf{x}\in\mathbb{R}^{d_{x}}$. To do so, we consider *latent variables* defined in a lower-dimensional space $\mathbf{z}\in\mathbb{R}^{d_{z}}$ ($d_{z} \ll d_{x}$) with the joint probability distribution $p(\mathbf{x}, \mathbf{z}) = p(\mathbf{x} \vert \mathbf{z})p(\mathbf{z})$. Unfortunately, for complex distributions the marginalization integral $p(\mathbf{x})=\int p(\mathbf{x} \vert \mathbf{z})p(\mathbf{z})d\mathbf{z}$ is intractable and cannot be found in closed form.
### Variational inference
The idea of *variational inference* (VI) allows to solve this problem through *optimization* by assuming a simpler approximate distribution $q_{\phi}(\mathbf{z}\vert\mathbf{x})\in\mathcal{Q}$ from a family $\mathcal{Q}$ of approximate densities. Hence, the goal is to minimize the difference between this approximation and the real distribution. Therefore, this turns into the optimization problem of minimizing the Kullback-Leibler (KL) divergence between the parametric approximation and the original density
$$
q_{\phi}^{*}(\mathbf{z}\vert \mathbf{x})=\text{argmin}_{q_{\phi}(\mathbf{z} \vert \mathbf{x})\in\mathcal{Q}} \mathcal{D}_{KL} \big[ q_{\phi}\left(\mathbf{z} \vert \mathbf{x}\right) \parallel p\left(\mathbf{z} \vert \mathbf{x}\right) \big]
\tag{2}
$$
By developing this KL divergence and re-arranging terms (the detailed development can be found in [3](#reference1)), we obtain
$$
\log{p(\mathbf{x})} - D_{KL} \big[ q_{\phi}(\mathbf{z} \vert \mathbf{x}) \parallel p(\mathbf{z} \vert \mathbf{x}) \big] =
\mathbb{E}_{\mathbf{z}} \big[ \log{p(\mathbf{x} \vert \mathbf{z})}\big] - D_{KL} \big[ q_{\phi}(\mathbf{z} \vert \mathbf{x}) \parallel p(\mathbf{z}) \big]
\tag{3}
$$
This formulation describes the quantity we want to maximize $\log p(\mathbf{x})$ minus the error we make by using an approximate $q$ instead of $p$. Therefore, we can optimize this alternative objective, called the *evidence lower bound* (ELBO)
$$
\begin{equation}
\mathcal{L}_{\theta, \phi} = \mathbb{E} \big[ \log{ p_\theta (\mathbf{x|z}) } \big] - \beta \cdot D_{KL} \big[ q_\phi(\mathbf{z|x}) \parallel p_\theta(\mathbf{z}) \big]
\end{equation}
\tag{4}
$$
We can see that this equation involves $q_{\phi}(\mathbf{z} \vert \mathbf{x})$ which *encodes* the data $\mathbf{x}$ into the latent representation $\mathbf{z}$ and a *decoder* $p(\mathbf{x} \vert \mathbf{z})$, which allows generating a data vector $\mathbf{x}$ given a latent configuration $\mathbf{z}$. Hence, this structure defines the *Variational Auto-Encoder* (VAE).
The VAE objective can be interpreted intuitively. The first term increases the likelihood of the data generated given a configuration of the latent, which amounts to minimize the *reconstruction error*. The second term represents the error made by using a simpler posterior distribution $q_{\phi}(\mathbf{z} \vert \mathbf{x})$ compared to the true prior $p_{\theta}(\mathbf{z})$. Therefore, this *regularizes* the choice of approximation $q$ so that it remains close to the true posterior distribution [3].
### Reparametrization trick
Now, while this formulation has some very interesting properties, it involves sampling operations, where we need to draw the latent point $\mathbf{z}$ from the distribution $q_{\phi}(\mathbf{z}\vert\mathbf{x})$. The simplest choice for this variational approximate posterior is a multivariate Gaussian with a diagonal covariance structure (which leads to independent Gaussians on every dimension, called the *mean-field* family) so that
$$
\text{log}q_\phi(\mathbf{z}\vert\mathbf{x}) = \text{log}\mathcal{N}(\mathbf{z};\mathbf{\mu}^{(i)},\mathbf{\sigma}^{(i)})
\tag{5}
$$
where the mean $\mathbf{\mu}^{(i)}$ and standard deviation $\mathbf{\sigma}^{(i)}$ of the approximate posterior are different for each input point and are produced by our encoder parametrized by its variational parameters $\phi$. Now the KL divergence between this distribution and a simple prior $\mathcal{N}(\mathbf{0}, \mathbf{I})$ can be very simply obtained with
$$
D_{KL} \big[ q_\phi(\mathbf{z|x}) \parallel \mathcal{N}(\mathbf{0}, \mathbf{I}) \big] = -\frac{1}{2}\sum_{j=1}^{D}\left(1+\text{log}((\sigma^{(i)}_j)^2)-(\mu^{(i)}_j)^2-(\sigma^{(i)}_j)^2\right)
\tag{6}
$$
While this looks convenient, we will still have to perform gradient descent through a sampling operation, which is non-differentiable. To solve this issue, we can use the *reparametrization trick*, which takes the sampling operation outside of the gradient flow by considering $\mathbf{z}^{(i)}=\mathbf{\mu}^{(i)}+\mathbf{\sigma}^{(i)}\odot\mathbf{\epsilon}^{(l)}$ with $\mathbf{\epsilon}^{(l)}\sim\mathcal{N}(\mathbf{0}, \mathbf{I})$
<a id="implem"> </a>
## VAE implementation
As we have seen, VAEs can be simply implemented by decomposing the above series of operations into an `encoder` which represents the distribution $q_\phi(\mathbf{z}\vert\mathbf{x})$, from which we will sample some values $\tilde{\mathbf{z}}$ (using the reparametrization trick) and compute the Kullback-Leibler (KL) divergence. Then, we use these values as input to a `decoder` which represents the distribution $p_\theta(\mathbf{x}\vert\mathbf{z})$ so that we can produce a reconstruction $\tilde{\mathbf{x}}$ and compute the reconstruction error.
Therefore, we can define the VAE based on our previous implementation of the AE that we recall here
```
class AE(nn.Module):
    """Plain auto-encoder: x -> encoder -> latent code -> decoder -> x_tilde."""

    def __init__(self, encoder, decoder, encoding_dim):
        super(AE, self).__init__()
        # Size of the intermediate code produced by the encoder.
        self.encoding_dims = encoding_dim
        self.encoder = encoder
        self.decoder = decoder

    def forward(self, x):
        """Encode the input, then decode the code back into data space."""
        latent_code = self.encoder(x)
        reconstruction = self.decoder(latent_code)
        return reconstruction
```
In order to move to a probabilistic version, we need to add the latent space sampling mechanism, and change the behavior of our `call` function. This process is implemented in the following `VAE` class.
Note that we purposely rely on an implementation of the `encode` function where the `encoder` first produces an intermediate representation of size `encoder_dims`. Then, this representation goes through two separate functions for encoding $\mathbf{\mu}$ and $\mathbf{\sigma}$. This provides a clearer implementation but also the added bonus that we can ensure that $\mathbf{\sigma} > 0$
```
class VAE(AE):
    """Variational auto-encoder built on the plain AE.

    The encoder produces an intermediate code of size `encoding_dims`; two
    linear heads map it to the posterior parameters (mu, sigma) of
    q_phi(z|x).  `latent` draws z via the reparametrization trick and also
    returns the KL regularizer used in the ELBO.
    """
    def __init__(self, encoder, decoder, encoding_dims, latent_dims):
        super(VAE, self).__init__(encoder, decoder, encoding_dims)
        self.latent_dims = latent_dims
        # Posterior mean head.  NOTE(review): the trailing ReLU clamps mu to
        # be non-negative — confirm this is intended.
        self.mu = nn.Sequential(nn.Linear(self.encoding_dims, self.latent_dims), nn.ReLU())
        # Posterior scale head; Softplus keeps the output strictly positive.
        self.sigma = nn.Sequential(nn.Linear(self.encoding_dims, self.latent_dims), nn.Softplus())
    def encode(self, x):
        ######################
        # YOUR CODE GOES HERE
        ######################
        ######################
        # Solution: intermediate code, then the two posterior heads.
        x = self.encoder(x)
        mu = self.mu(x)
        sigma = self.sigma(x)
        ######################
        return mu, sigma
    def decode(self, z):
        return self.decoder(z)
    def forward(self, x):
        # Encode the inputs
        z_params = self.encode(x)
        # Obtain latent samples and latent loss
        z_tilde, kl_div = self.latent(x, z_params)
        # Decode the samples; output is reshaped to 28x28 single-channel
        # images (MNIST-like data assumed).
        x_tilde = self.decode(z_tilde)
        return x_tilde.reshape(-1, 1, 28, 28), kl_div
    def latent(self, x, z_params):
        ######################
        # YOUR CODE GOES HERE
        ######################
        ######################
        # Solution
        n_batch = x.shape[0]
        # Retrieve mean and var
        mu, sigma = z_params
        # Re-parametrize: z = mu + sigma * eps with eps ~ N(0, I).
        q = distrib.Normal(torch.zeros(mu.shape[1]), torch.ones(sigma.shape[1]))
        z = (sigma * q.sample((int(n_batch), ))) + mu
        # Compute KL divergence.
        # NOTE(review): this closed form treats `sigma` as a log-variance
        # (it uses exp(sigma)), while the reparametrization above uses it as
        # a standard deviation — confirm which convention is intended.
        kl_div = 0.5 * torch.sum(1 + sigma - torch.pow(mu, 2) - torch.exp(sigma))
        kl_div = kl_div / n_batch
        ######################
        return z, kl_div
```
Now the interesting aspect of VAEs is that we can define any parametric function as `encoder` and `decoder`, as long as we can optimize them. Here, we will rely on simple feed-forward neural networks, but these can be largely more complex (with limitations that we will discuss later in the tutorial).
```
def construct_encoder_decoder(nin, n_latent = 16, n_hidden = 512, n_classes = 1):
    """Build a fully-connected encoder/decoder pair for flattened inputs.

    The encoder flattens its input and maps `nin` features to an `n_hidden`
    code through three ReLU layers; the decoder maps an `n_latent` vector
    back to `nin * n_classes` sigmoid outputs in [0, 1].
    """
    # Encoder network: Flatten, then three hidden ReLU layers.
    encoder_layers = [nn.Flatten()]
    for d_in, d_out in ((nin, n_hidden), (n_hidden, n_hidden), (n_hidden, n_hidden)):
        encoder_layers.append(nn.Linear(d_in, d_out))
        encoder_layers.append(nn.ReLU())
    encoder = nn.Sequential(*encoder_layers)
    # Decoder network: two hidden ReLU layers, sigmoid output.
    decoder = nn.Sequential(
        nn.Linear(n_latent, n_hidden), nn.ReLU(),
        nn.Linear(n_hidden, n_hidden), nn.ReLU(),
        nn.Linear(n_hidden, nin * n_classes), nn.Sigmoid(),
    )
    return encoder, decoder
```
### Evaluating the error
In the definition of the `VAE` class, we directly included the computation of the $D_{KL}$ term to regularize our latent space. However, remember that the complete loss of equation (4) also contains a *reconstruction loss* which compares our reconstructed output to the original data.
While there are several options to compare the error between two elements, there are usually two preferred choices among the generative literature depending on how we consider our problem
1. If we consider each dimension (pixel) to be a binary unit (following a Bernoulli distribution), we can rely on the `binary cross entropy` between the two distributions
2. If we turn our problem to a set of classifications, where each dimension can belong to a given set of *intensity classes*, then we can compute the `multinomial loss` between the two distributions
In the following, we define both error functions and regroup them in the `reconstruction_loss` call (depending on the `num_classes` considered). However, as the `multinomial loss` requires a large computational overhead, and for the sake of simplicity, we will train all our first models by relying on the `binary cross entropy`
```
# Reconstruction criterion (sum-reduced MSE over all elements).
recons_criterion = torch.nn.MSELoss(reduction='sum')

def compute_loss(model, x):
    """Full VAE loss: reconstruction error minus the (signed) KL term
    returned by the model's forward pass."""
    x_tilde, kl_div = model(x)
    recon_err = recons_criterion(x_tilde, x)
    full_loss = torch.mean(recon_err - kl_div)
    return full_loss

def train_step(model, x, optimizer):
    """One optimization step: loss, zero gradients, backward, update."""
    loss = compute_loss(model, x)
    # Zero all network gradients before the backward pass.
    optimizer.zero_grad()
    # Backward pass: gradients of the loss w.r.t. the parameters.
    loss.backward()
    # Apply the parameter update.
    optimizer.step()
    return loss
```
### Optimizing a VAE on a real dataset
For this tutorial, we are going to take a quick shot at a real-life problem by trying to train our VAEs on the `FashionMNIST` dataset. This dataset can be natively used in PyTorch by relying on the `torchvision.datasets` classes as follows
```
dataset_dir = './data'
# Going to use 80%/20% split for train/valid
valid_ratio = 0.2
# Load the dataset for the training/validation sets (downloads on first run).
train_valid_dataset = torchvision.datasets.FashionMNIST(root=dataset_dir, train=True, transform=torchvision.transforms.ToTensor(), download=True)
# Split it into training and validation sets
nb_train = int((1.0 - valid_ratio) * len(train_valid_dataset))
nb_valid = int(valid_ratio * len(train_valid_dataset))
train_dataset, valid_dataset = torch.utils.data.dataset.random_split(train_valid_dataset, [nb_train, nb_valid])
# Load the test set
test_dataset = torchvision.datasets.FashionMNIST(root=dataset_dir, transform=torchvision.transforms.ToTensor(),train=False)
# Prepare the data loaders.
num_threads = 4 # Loading the dataset is using 4 CPU threads
batch_size = 128 # Using minibatches of 128 samples
train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=batch_size, num_workers=num_threads)
valid_loader = torch.utils.data.DataLoader(dataset=valid_dataset, batch_size=batch_size, shuffle=False, num_workers=num_threads)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,batch_size=batch_size,shuffle=False,num_workers=num_threads)
```
The `FashionMNIST` dataset is composed of simple 28x28 black and white images of different items of clothings (such as shoes, bags, pants and shirts). We put a simple function here to display one batch of the test set (note that we keep a fixed batch from the test set in order to evaluate the different variations that we will try in this tutorial).
```
print("The train set contains {} images, in {} batches".format(len(train_loader.dataset), len(train_loader)))
print("The validation set contains {} images, in {} batches".format(len(valid_loader.dataset), len(valid_loader)))
print("The test set contains {} images, in {} batches".format(len(test_loader.dataset), len(test_loader)))
# Display the first `nsamples` test images with their class names; this
# fixed test batch is reused to compare models throughout the tutorial.
nsamples = 10
classes_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal','Shirt', 'Sneaker', 'Bag', 'Ankle boot']
imgs_test, labels = next(iter(test_loader))
fig = plt.figure(figsize=(20,5))
for i in range(nsamples):
    ax = plt.subplot(1,nsamples, i+1)
    plt.imshow(imgs_test[i, 0, :, :], vmin=0, vmax=1.0, cmap=matplotlib.cm.gray)
    ax.set_title("{}".format(classes_names[labels[i]]), fontsize=15)
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
```
Now based on our proposed implementation, the optimization aspects are defined in a very usual way
```
# Using Bernoulli or Multinomial loss
num_classes = 1
# Number of hidden and latent
n_hidden = 512
n_latent = 8
# Compute input dimensionality (28 * 28 = 784 pixels per image)
nin = imgs_test.shape[2] * imgs_test.shape[3]
# Construct encoder and decoder
encoder, decoder = construct_encoder_decoder(nin, n_hidden = n_hidden, n_latent = n_latent, n_classes = num_classes)
# Build the VAE model
model = VAE(encoder, decoder, n_hidden, n_latent)
# Construct the optimizer
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
```
Now all that is left to do is train the model. We define here a `train_vae` function that we will reuse along the future implementations and variations of VAEs and flows. Note that this function is set to run for only a very few number of `epochs` and also most importantly, *only considers a subsample of the full dataset at each epoch*. This option is just here so that you can test the different models very quickly on any CPU or laptop.
```
def generate_and_save_images(model, epoch, test_sample):
    """Plot the model's reconstructions of `test_sample` in a 4x4 grid."""
    predictions, _ = model(test_sample)
    fig = plt.figure(figsize=(4, 4))
    for i in range(predictions.shape[0]):
        plt.subplot(4, 4, i + 1)
        plt.imshow(predictions[i, 0, :, :].detach(), cmap='gray')
        plt.axis('off')
    # Tight_layout minimizes the overlap between 2 sub-plots
    #plt.savefig('image_at_epoch_{:04d}.png'.format(epoch))
    plt.show()
# Train for a fixed number of epochs, accumulating the loss over batches
# and plotting reconstructions of the fixed test sample after each epoch.
epochs=50
test_sample = imgs_test[0:16, :, :, :]
for epoch in range(1, epochs + 1):
    full_loss = torch.Tensor([0])
    # Forward pass: compute predicted y by passing x to the model.
    for i, (x, _) in enumerate(train_loader):
        full_loss += train_step(model, x, optimizer)
    #for i, (x, _) in enumerate(valid_loader):
    #    train_step(model, x, optimizer)
    print('Epoch: {}, Test set ELBO: {}'.format(epoch, full_loss))
    generate_and_save_images(model, epoch, test_sample)
```
### Evaluating generative models
In order to evaluate our upcoming generative models, we will rely on the computation of the Negative Log-Likelihood. This code for the following `evaluate_nll_bpd` is inspired by the [Sylvester flow repository](https://github.com/riannevdberg/sylvester-flows)
```
from scipy.special import logsumexp
def evaluate_nll_bpd(data_loader, model, batch = 500, R = 5):
    """Estimate the negative log-likelihood (and bits-per-dim) by importance
    sampling: each test point is repeated `batch` times, evaluated R times,
    and the per-sample likelihoods are combined with logsumexp.

    NOTE(review): relies on the globals `reconstruction_loss` and `nin`
    defined elsewhere in the notebook — confirm they are in scope.
    """
    # Set of likelihood tests
    likelihood_test = []
    # Go through dataset
    for batch_idx, (x, _) in enumerate(data_loader):
        for j in range(x.shape[0]):
            a = []
            for r in range(0, R):
                cur_x = x[j].unsqueeze(0)
                # BUG FIX: the original assigned the expanded batch back to
                # `x`, clobbering the loop variable so every subsequent
                # x[j] indexed the wrong tensor.
                x_rep = cur_x.expand(batch, *cur_x.size()[1:]).contiguous()
                x_rep = x_rep.view(batch, -1)
                x_tilde, kl_div = model(x_rep)
                rec = reconstruction_loss(x_tilde, x_rep, average=False)
                a_tmp = (rec + kl_div)
                a.append(- a_tmp.cpu().data.numpy())
            # calculate max via logsumexp over all R * batch evaluations
            a = np.asarray(a)
            a = np.reshape(a, (a.shape[0] * a.shape[1], 1))
            likelihood_x = logsumexp(a)
            likelihood_test.append(likelihood_x - np.log(len(a)))
    likelihood_test = np.array(likelihood_test)
    nll = - np.mean(likelihood_test)
    # Compute the bits per dim (but irrelevant for binary data)
    bpd = nll / (np.prod(nin) * np.log(2.))
    return nll, bpd
```
Now we can evaluate our VAE model more formally as follows.
```
# Plot final loss
plt.figure()
plt.plot(losses_kld[:, 0].numpy());
# Evaluate log-likelihood and bits per dim
nll, _ = evaluate_nll_bpd(test_loader, model)
print('Negative Log-Likelihood : ' + str(nll))
```
We can also evaluate the latent space of our model, which should be organized (being the overall point of using a VAE instead of a common AE).
```
# Decode an 8x8 grid of latent codes, sweeping latent dimensions 0 and 3
# over [-3, 3] while all other latent dimensions stay at zero.
x = np.linspace(-3, 3, 8)
y = np.linspace(-3, 3, 8)
fig = plt.figure(figsize=(10, 8))
for i in range(8):
    for j in range(8):
        plt.subplot(8, 8, (i * 8) + j + 1)
        final_tensor = torch.zeros(n_latent)
        final_tensor[0] = x[i]
        final_tensor[3] = y[j]
        plt.imshow(model.decode(final_tensor).detach().reshape(28, 28), cmap='gray')
        plt.axis('off')
```
### Limitations of VAEs - (**exercise**)
Although VAEs are extremely powerful tools, they still have some limitations. Here we list the three most important and known limitations (all of them are still debated and topics of active research).
1. **Blurry reconstructions.** As can be witnessed directly in the results of the previous vanilla VAE implementation, the reconstructions appear to be blurry. The precise origin of this phenomenon is still debated, but the proposed explanation are
1. The use of the KL regularization
2. High variance regions of the latent space
3. The reconstruction criterion (expectation)
4. The use of simplistic latent distributions
2. **Posterior collapse.** The previous *blurry reconstructions* issue can be mitigated by using a more powerful decoder. However, relying on a decoder with a large capacity causes the phenomenon of *posterior collapse* where the latent space becomes useless. A nice intuitive explanation can be found [here](https://ermongroup.github.io/blog/a-tutorial-on-mmd-variational-autoencoders/)
3. **Simplistic Gaussian approximation**. In the derivation of the VAE objective, recall that the KL divergence term needs to be computed analytically. Therefore, this forces us to rely on quite simplistic families. However, the Gaussian family might be too simplistic to model real world data
In the present tutorial, we show how normalizing flows can be used to mostly solve the third limitation, while also adressing the two first problems. Indeed, we will see that normalizing flows also lead to sharper reconstructions and also act on preventing posterior collapse
<a id="improve"></a>
## Improving the quality of VAEs
As we discussed in the previous section, several known issues have been reported when using the vanilla VAE implementation. We listed some of the major issues as being
1. **Blurry reconstructions.**
2. **Posterior collapse.**
3. **Simplistic Gaussian approximation**.
Here, we discuss some recent developments that were proposed in the VAE literature and simple adjustments that can be made to (at least partly) alleviate these issues. However, note that some more advanced proposals such as PixelVAE [5](#reference1) and VQ-VAE [6](#reference1) can lead to wider increases in quality
### Reducing the bluriness of reconstructions
In this tutorial, we relied on extremely simple decoder functions, to show how we could easily define VAEs and normalizing flows together. However, the capacity of the decoder obviously directly influences the quality of the final reconstruction. Therefore, we could address this issue naively by using deep networks and of course convolutional layers as we are currently dealing with images.
First you need to construct a more complex encoder and decoder
```
def construct_encoder_decoder_complex(nin, n_latent = 16, n_hidden = 512, n_params = 0, n_classes = 1):
    """Exercise stub: build a higher-capacity encoder/decoder pair.

    Args:
        nin: dimensionality of the (flattened) input.
        n_latent: size of the latent space.
        n_hidden: number of hidden units per layer.
        n_params: number of extra distribution parameters to output
            (presumably for the flow/Gaussian parameters -- confirm
            against the rest of the tutorial).
        n_classes: 1 for a Bernoulli output, >1 for a multinomial output
            (see the "Putting it all together" cell below).

    Returns:
        (encoder, decoder) -- both left as `...` placeholders to fill in,
        e.g. with convolutional layers as discussed in the text above.
    """
    # Encoder network (TODO: deeper, convolutional architecture)
    encoder = ...
    # Decoder network (TODO: mirror of the encoder)
    decoder = ...
    return encoder, decoder
```
### Preventing posterior collapse with Wasserstein-VAE-MMD (InfoVAE)
As we discussed earlier, the reason behind posterior collapse mostly relates to the KL divergence criterion (a nice intuitive explanation can be found [here](https://ermongroup.github.io/blog/a-tutorial-on-mmd-variational-autoencoders/)). This can be mitigated by relying on a different criterion, such as regularizing the latent distribution by using the *Maximum Mean Discrepancy* (MMD) instead of the KL divergence. This model was independently proposed as the *InfoVAE* and later also as the *Wasserstein-VAE*.
Here we provide a simple implementation of the `InfoVAEMMD` class based on our previous implementations.
```
def compute_kernel(x, y):
    # TODO(exercise): kernel evaluation between two batches of samples
    # (e.g. an RBF kernel), used as the building block of the MMD below.
    return ...

def compute_mmd(x, y):
    # TODO(exercise): Maximum Mean Discrepancy between samples x and y,
    # assembled from compute_kernel terms.
    return ...

class InfoVAEMMD(VAE):
    """Exercise stub: VAE variant that regularizes the latent space with
    the MMD instead of the KL divergence (InfoVAE / Wasserstein-AE)."""

    def __init__(self, encoder, decoder):
        super(InfoVAEMMD, self).__init__(encoder, decoder)

    def latent(self, x, z_params):
        # TODO(exercise): sample the latent variables and return the
        # MMD-based regularization term in place of the KL term.
        return ...
```
### Putting it all together
Here we combine all these ideas (except for the MMD, which is not adequate as the flow definition already regularizes the latent space without the KL divergence) to perform a more advanced optimization of the dataset. Hence, we will rely on the complex encoder and decoder with gated convolutions, the multinomial loss and the normalizing flows in order to improve the overall quality of our reconstructions.
```
# --- Exercise: combine the complex encoder/decoder, the multinomial
# --- loss and the normalizing flows into one training run.

# Size of latent space
n_latent = 16
# Number of hidden units per layer
n_hidden = 256
# Rely on Bernoulli (num_classes == 1) or multinomial (> 1) output
num_classes = 128
# Construct encoder and decoder (TODO: use the complex, convolutional version)
encoder, decoder = ...
# Create VAE or (InfoVAEMMD - WAE) model (TODO: attach the normalizing flow)
model_flow_p = ...
# Create optimizer algorithm (TODO: e.g. Adam)
optimizer = ...
# Add learning rate scheduler (TODO)
scheduler = ...
# Launch our optimization (TODO: training loop returning per-epoch losses)
losses_flow_param = ...
```
*NB*: It seems that the multinomial version has a hard time converging. Although I only let this run for 200 epochs and only for a subsampling of 5000 examples, it might need more time, but this might also come from a mistake somewhere in my code ... If you spot something odd please let me know :)
### References
<a id="reference1"></a>
[1] Rezende, Danilo Jimenez, and Shakir Mohamed. "Variational inference with normalizing flows." _arXiv preprint arXiv:1505.05770_ (2015). [link](http://arxiv.org/pdf/1505.05770)
[2] Kingma, Diederik P., Tim Salimans, and Max Welling. "Improving Variational Inference with Inverse Autoregressive Flow." _arXiv preprint arXiv:1606.04934_ (2016). [link](https://arxiv.org/abs/1606.04934)
[3] Kingma, D. P., & Welling, M. Auto-encoding variational bayes. arXiv preprint arXiv:1312.6114. (2013). [link](https://arxiv.org/pdf/1312.6114)
[4] Rezende, D. J., Mohamed, S., & Wierstra, D. Stochastic backpropagation and approximate inference in deep generative models. arXiv preprint arXiv:1401.4082. (2014). [link](https://arxiv.org/pdf/1401.4082)
[5] Gulrajani, I., Kumar, K., Ahmed, F., Taiga, A. A., Visin, F., Vazquez, D., & Courville, A. (2016). Pixelvae: A latent variable model for natural images. arXiv preprint arXiv:1611.05013. [link](https://arxiv.org/pdf/1611.05013)
[6] Van den Oord, A., & Vinyals, O. (2017). Neural discrete representation learning. In NIPS 2017 (pp. 6306-6315). [link](http://papers.nips.cc/paper/7210-neural-discrete-representation-learning.pdf)
### Inspirations and resources
https://blog.evjang.com/2018/01/nf1.html
https://github.com/ex4sperans/variational-inference-with-normalizing-flows
https://akosiorek.github.io/ml/2018/04/03/norm_flows.html
https://github.com/abdulfatir/normalizing-flows
https://github.com/riannevdberg/sylvester-flows
| github_jupyter |
<a href="https://colab.research.google.com/github/noobs-creation/pyimagesearch-stuffs/blob/main/keras_regression.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# Regression with Keras
### by [PyImageSearch.com](http://www.pyimagesearch.com)
## Welcome to **[PyImageSearch Plus](http://pyimg.co/plus)** Jupyter Notebooks!
This notebook is associated with the [Regression with Keras](https://www.pyimagesearch.com/2019/01/21/regression-with-keras/) blog post published on 2019-01-21.
Only the code for the blog post is here. Most codeblocks have a 1:1 relationship with what you find in the blog post with two exceptions: (1) Python classes are not separate files as they are typically organized with PyImageSearch projects, and (2) Command Line Argument parsing is replaced with an `args` dictionary that you can manipulate as needed.
We recommend that you execute (press ▶️) the code block-by-block, as-is, before adjusting parameters and `args` inputs. Once you've verified that the code is working, you are welcome to hack with it and learn from manipulating inputs, settings, and parameters. For more information on using Jupyter and Colab, please refer to these resources:
* [Jupyter Notebook User Interface](https://jupyter-notebook.readthedocs.io/en/stable/notebook.html#notebook-user-interface)
* [Overview of Google Colaboratory Features](https://colab.research.google.com/notebooks/basic_features_overview.ipynb)
As a reminder, these PyImageSearch Plus Jupyter Notebooks are not for sharing; please refer to the **Copyright** directly below and **Code License Agreement** in the last cell of this notebook.
Happy hacking!
*Adrian*
<hr>
***Copyright:*** *The contents of this Jupyter Notebook, unless otherwise indicated, are Copyright 2020 Adrian Rosebrock, PyimageSearch.com. All rights reserved. Content like this is made possible by the time invested by the authors. If you received this Jupyter Notebook and did not purchase it, please consider making future content possible joining PyImageSearch Plus at [http://pyimg.co/plus/](http://pyimg.co/plus) today.*
### Download the code zip file
```
!wget https://s3-us-west-2.amazonaws.com/static.pyimagesearch.com/keras-regression/keras-regression.zip
!unzip -qq keras-regression.zip
%cd keras-regression
```
### Downloading the House Prices Dataset
```
!git clone https://github.com/emanhamed/Houses-dataset
```
## Blog Post Code
### Import Packages
```
# import the necessary packages
from sklearn.preprocessing import LabelBinarizer
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras.layers import Activation
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from sklearn.model_selection import train_test_split
import pandas as pd
import numpy as np
import argparse
import locale
import glob
import cv2
import os
```
### Loading the House Prices Dataset
```
def load_house_attributes(inputPath):
    """Load the house-attributes file and drop under-represented zip codes.

    Args:
        inputPath: path (or file-like object) of the space-separated
            HousesInfo.txt file.

    Returns:
        pandas.DataFrame with columns bedrooms, bathrooms, area, zipcode,
        price, keeping only rows whose zip code occurs at least 25 times.
    """
    # initialize the list of column names in the CSV file and then
    # load it using Pandas
    cols = ["bedrooms", "bathrooms", "area", "zipcode", "price"]
    df = pd.read_csv(inputPath, sep=" ", header=None, names=cols)

    # the zip code counts for our housing dataset are *extremely*
    # unbalanced (some zip codes only have 1 or 2 houses), so sanitize
    # the data by removing any zip code with fewer than 25 houses.
    # Compute value_counts() once (the original called it twice and then
    # issued one in-place drop per unique zip code).
    counts = df["zipcode"].value_counts()
    sparse_zipcodes = counts[counts < 25].index
    df.drop(df[df["zipcode"].isin(sparse_zipcodes)].index, inplace=True)

    # return the data frame
    return df
def process_house_attributes(df, train, test):
    """Scale continuous features and one-hot encode zip codes.

    Args:
        df: full data frame (used only to fit the zip-code binarizer so
            train and test share the same category columns).
        train, test: the two splits to transform.

    Returns:
        (trainX, testX) -- feature matrices with the one-hot zip-code
        columns followed by the scaled continuous columns.
    """
    # names of the continuous columns
    continuous = ["bedrooms", "bathrooms", "area"]

    # min-max scale each continuous feature column to [0, 1];
    # the scaler is fit on the training split only
    scaler = MinMaxScaler()
    trainContinuous = scaler.fit_transform(train[continuous])
    testContinuous = scaler.transform(test[continuous])

    # one-hot encode the zip code categorical data (by definition of
    # one-hot encoding, all output features are already in [0, 1])
    binarizer = LabelBinarizer().fit(df["zipcode"])
    trainCategorical = binarizer.transform(train["zipcode"])
    testCategorical = binarizer.transform(test["zipcode"])

    # categorical features first, continuous features second
    return (
        np.hstack([trainCategorical, trainContinuous]),
        np.hstack([testCategorical, testContinuous]),
    )
```
### Implementing a Neural Network for Regression
```
def create_mlp(dim, regress=False):
    """Build a small two-layer MLP.

    Args:
        dim: number of input features.
        regress: when True, append a single linear output node so the
            network can be used directly as a regressor.

    Returns:
        an uncompiled keras Sequential model.
    """
    layers = [
        Dense(8, input_dim=dim, activation="relu"),
        Dense(4, activation="relu"),
    ]
    # optionally add the regression head
    if regress:
        layers.append(Dense(1, activation="linear"))
    return Sequential(layers)
```
### Implementing our Keras Regression Script
```
# # construct the argument parser and parse the arguments
# ap = argparse.ArgumentParser()
# ap.add_argument("-d", "--dataset", type=str, required=True,
# help="path to input dataset of house images")
# args = vars(ap.parse_args())

# since we are using Jupyter Notebooks we can replace our argument
# parsing code with *hard coded* arguments and values
args = {
    "dataset": "Houses-dataset/Houses Dataset",
}

# construct the path to the input .txt file that contains information
# on each house in the dataset and then load the dataset
print("[INFO] loading house attributes...")
inputPath = os.path.sep.join([args["dataset"], "HousesInfo.txt"])
df = load_house_attributes(inputPath)

# construct a training and testing split with 75% of the data used
# for training and the remaining 25% for evaluation
print("[INFO] constructing training/testing split...")
(train, test) = train_test_split(df, test_size=0.25, random_state=42)

# find the largest house price in the *training* set and use it to
# scale our house prices to the range [0, 1] (this will lead to
# better training and convergence)
maxPrice = train["price"].max()
trainY = train["price"] / maxPrice
testY = test["price"] / maxPrice

# process the house attributes data by performing min-max scaling
# on continuous features, one-hot encoding on categorical features,
# and then finally concatenating them together
print("[INFO] processing data...")
(trainX, testX) = process_house_attributes(df, train, test)

# create our MLP and then compile the model using mean absolute
# percentage error as our loss, implying that we seek to minimize
# the absolute percentage difference between our price *predictions*
# and the *actual prices*.
# NOTE: the `lr` keyword is deprecated in tf.keras; `learning_rate`
# is the supported spelling.
model = create_mlp(trainX.shape[1], regress=True)
opt = Adam(learning_rate=1e-3, decay=1e-3 / 200)
model.compile(loss="mean_absolute_percentage_error", optimizer=opt)

# train the model
print("[INFO] training model...")
model.fit(x=trainX, y=trainY,
    validation_data=(testX, testY),
    epochs=200, batch_size=8)

# make predictions on the testing data
print("[INFO] predicting house prices...")
preds = model.predict(testX)

# compute the difference between the *predicted* house prices and the
# *actual* house prices, then compute the percentage difference and
# the absolute percentage difference
diff = preds.flatten() - testY
percentDiff = (diff / testY) * 100
absPercentDiff = np.abs(percentDiff)

# compute the mean and standard deviation of the absolute percentage
# difference
mean = np.mean(absPercentDiff)
std = np.std(absPercentDiff)

# finally, show some statistics on our model
locale.setlocale(locale.LC_ALL, "en_US.UTF-8")
print("[INFO] avg. house price: {}, std house price: {}".format(
    locale.currency(df["price"].mean(), grouping=True),
    locale.currency(df["price"].std(), grouping=True)))
print("[INFO] mean: {:.2f}%, std: {:.2f}%".format(mean, std))
```
For a detailed walkthrough of the concepts and code, be sure to refer to the full tutorial, [*Regression with Keras*](https://www.pyimagesearch.com/2019/01/21/regression-with-keras/) blog post published on 2019-01-21.
# Code License Agreement
```
Copyright (c) 2020 PyImageSearch.com
SIMPLE VERSION
Feel free to use this code for your own projects, whether they are
purely educational, for fun, or for profit. THE EXCEPTION BEING if
you are developing a course, book, or other educational product.
Under *NO CIRCUMSTANCE* may you use this code for your own paid
educational or self-promotional ventures without written consent
from Adrian Rosebrock and PyImageSearch.com.
LONGER, FORMAL VERSION
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files
(the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
Notwithstanding the foregoing, you may not use, copy, modify, merge,
publish, distribute, sublicense, create a derivative work, and/or
sell copies of the Software in any work that is designed, intended,
or marketed for pedagogical or instructional purposes related to
programming, coding, application development, or information
technology. Permission for such use, copying, modification, and
merger, publication, distribution, sub-licensing, creation of
derivative works, or sale is expressly withheld.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
```
| github_jupyter |
(sec:problog:ipynb)=
# Bayesian Networks (Jupyter NB) #
:::{note}
This page is based on the [*Bayesian networks*] ProbLog tutorial, which
is executed from within Python using the [*ProbLog library*].
:::
[*Bayesian networks*]: https://dtai.cs.kuleuven.be/problog/tutorial/basic/02_bayes.html
[*ProbLog library*]: https://dtai.cs.kuleuven.be/problog/tutorial/advanced/01_python_interface.html
:::{admonition} Content format
:class: attention
This page is written in the [*Jupyter Notebook*] format.
The ProbLog content is executed directly from Python.
Alternatively, see the [Markdown Notebook version][notebook] of the
*Bayesian Networks* tutorial for an example of executing ProbLog code
with [`ipywidgets`]; the [Percent Notebook version][percent] to see
how ProbLog code can be executed with a bespoke
[iPython cell magic command][magic]; or the
[Markdown Notebook version][sp-markdown] based on the [Sphinx-ProbLog]
extension, which uses *native*, interactive [ProbLog] code boxes.
You can execute the code blocks enclosed below by launching this page as a
Jupyter Notebook with MyBinder -- this option is available from the
*Launch Menu* that appears after hovering the mouse cursor over the
{fa}`rocket` icon shown in the top bar.
You can also **enable the code cells** to work directly on this page with
[Thebe] by activating {fa}`play` *Live Code* from the
{fa}`rocket` *Launch Menu*.
<!--https://sphinx-panels.readthedocs.io/en/latest/#link-badgeshttps://sphinx-panels.readthedocs.io/en/latest/#link-badges-->
<!--{link-badge}`https://problog-template.simply-logical.space/slides/bayesian_networks-mnb.slides.html,Static Slides,link,badge-info badge-pill text-white`-->
In addition to this book page and the corresponding Jupyter Notebook,
*static* and *interactive* [reveal.js] slides are built from the page source.
The static slides can be accessed with the [Static Slides (Jupyter NB)] link
listed in the left panel (the table of content) or with this button
[![View Slides][slides-badge]][slides-link].
To launch the interactive version of the slides (with executable code boxes),
you need to open this page as a Jupyter Notebook in Binder -- either via
the {fa}`rocket` *Launch Menu* or using this button
[![Open in Binder][binder-badge]][binder-link];
then open [RISE] by clicking the {fa}`chart-bar` button located in the top bar
of the Jupyter Notebook interface.
:::
[*Jupyter Notebook*]: https://jupyterbook.org/file-types/notebooks.html
[`ipywidgets`]: https://ipywidgets.readthedocs.io/
[notebook]: bayesian_networks-mnb
[percent]: bayesian_networks-pnb
[sp-markdown]: bayesian_networks-sp-mnb
[Sphinx-ProbLog]: https://github.com/simply-logical/sphinx-problog
[ProbLog]: https://dtai.cs.kuleuven.be/problog/
[magic]: https://ipython.readthedocs.io/en/stable/interactive/magics.html#cell-magics
[Thebe]: https://jupyterbook.org/interactive/launchbuttons.html#live-interactive-pages-with-thebelab
[reveal.js]: https://github.com/hakimel/reveal.js/
[Static Slides (Jupyter NB)]: https://problog-template.simply-logical.space/slides/bayesian_networks-jnb.slides.html
[binder-badge]: https://mybinder.org/badge_logo.svg
[binder-link]: https://mybinder.org/v2/gh/simply-logical/problog-book-template/master?urlpath=tree/src/text/bayesian_networks-jnb.ipynb
[slides-badge]: https://img.shields.io/badge/view-slides-blue.svg
[slides-link]: https://problog-template.simply-logical.space/slides/bayesian_networks-jnb.slides.html
[RISE]: https://rise.readthedocs.io/en/stable/
:::{note}
These slides are also available as a [book page][bp1], which explains how to
launch them as a Jupyter Notebook or interactive slides.
:::
[bp1]: ..
:::{note}
This Jupyter Notebook is also available as a [book page][bp2], which explains
how to launch this content as *static* and *interactive* slides.
:::
[bp2]: ..
:::{tip}
This page includes a number of Python cells holding code needed to set up
ProbLog ipywidgets.
You can reveal their content by clicking the {fa}`plus-circle` buttons, which
appear towards the right edge of this page.
:::
```
# Install dependencies if in Colab; the `google.colab` import only
# succeeds inside a Colab runtime, so locally this is a no-op.
try:
    import google.colab
    !pip install -r https://raw.githubusercontent.com/simply-logical/problog-book-template/master/requirements.txt
except ImportError:
    pass
import matplotlib.pyplot as plt
from problog.program import PrologString
from problog import get_evaluatable
def plot_outcome(pl_dict):
    """Draw a horizontal bar chart of the query marginals returned by ProbLog.

    Args:
        pl_dict: mapping from query term to its probability.
    """
    labels = [str(key) for key in pl_dict]
    values = list(pl_dict.values())
    plt.barh(labels, values, height=.5)
    # leave headroom on the right for the text annotations
    plt.xlim([0, 1.15])
    plt.ylim([-.5, len(labels) - .5])
    # annotate each bar with its probability
    for idx, prob in enumerate(values):
        plt.text(prob + .02, idx + .0, '{:.2f}'.format(prob), fontweight='bold')
def evaluate_problog(programme):
    """Parse and evaluate a ProbLog programme string, then plot the marginals.

    Args:
        programme: the ProbLog source code as a plain string.
    """
    parsed = PrologString(programme)
    marginals = get_evaluatable().create_from(parsed).evaluate()
    plot_outcome(marginals)
```
We illustrate the use of Bayesian networks in ProbLog using the famous [Earthquake] example.
[Earthquake]: http://www.bnlearn.com/bnrepository/#earthquake
Suppose there is a burglary in our house with probability 0.7 and an earthquake with probability 0.2. Whether our alarm will ring depends on both burglary and earthquake:
* if there is a burglary and an earthquake, the alarm rings with probability 0.9;
* if there is only a burglary, it rings with probability 0.8;
* if there is only an earthquake, it rings with probability 0.1;
* if there is neither a burglary nor an earthquake, the alarm doesn't ring.
To model this as a Bayesian network, one would use three random variables, *burglary*, *earthquake* and *alarm*, with *burglary* and *earthquake* being parents of *alarm*. To model this in ProbLog, there are two possible solutions: using 'plain' ProbLog or using some syntactic sugar called probabilistic clauses and annotated disjunctions. We now explain both solutions.
digraph alarm1 { burglary -> alarm; earthquake -> alarm; }
[ProbLog syntax documentation]
[ProbLog syntax documentation]: https://problog.readthedocs.io/en/latest/modeling_basic.html#problog
## Probabilistic facts ##
In 'plain' ProbLog, we can encode the Bayesian network as follows.
* Since *burglary* and *earthquake* are random variable without parents, we can simply encode them as probabilistic facts, with the proper probability.
* To express the dependence of the random variable *alarm* on its parents *burglary* and *earthquake*, we use one Prolog rule for every possible state of the parents.
- The first rule covers the case in which *burglary* and *earthquake* are both true. The required rule is `alarm :- burglary, earthquake, p_alarm1`, with `p_alarm1` an auxiliary atom defined by means of the probabilistic fact `0.9::p_alarm1`. The point of adding this atom is to ensure that the probability of *alarm* in this case will be 0.9 as required.
- The second rule covers the case that *burglary* is true but *earthquake* is false. Note that *earthquake* being false is encoded using the "\+" symbol for negation (as in regular Prolog).
- The third rule covers the case that *burglary* is false and *earthquake* is true.
- The fourth case (*burglary* and *earthquake* are both false) does not require a rule. This is because, according to our Bayesian network, the probability of *alarm* is 0 in this case.
We obtain the following ProbLog program.
```
# Raw string: "\+" (the ProbLog negation operator) is not a valid Python
# escape sequence and triggers a SyntaxWarning on Python >= 3.12; the
# r"""...""" prefix keeps the text byte-identical without the warning.
probabilistic_facts = (
    r"""0.7::burglary.
0.2::earthquake.
0.9::p_alarm1.
0.8::p_alarm2.
0.1::p_alarm3.
alarm :- burglary, earthquake, p_alarm1.
alarm :- burglary, \+earthquake, p_alarm2.
alarm :- \+burglary, earthquake, p_alarm3.
evidence(alarm,true).
query(burglary).
query(earthquake)."""
)

# Compute and plot P(burglary) and P(earthquake) given that the alarm rang.
evaluate_problog(probabilistic_facts)
```
When pressing 'Evaluate', ProbLog2 calculates the probability of there being a *burglary* or an *earthquake*, given the evidence that the *alarm* rang. The resulting marginals are: $P(burglary)=0.9896$ and $P(earthquake)=0.2275$.
## Probabilistic clauses ##
While the above is a correct encoding of the given Bayesian network, it is perhaps not very intuitive due to the auxiliary atoms. Fortunately, ProbLog2 offers some syntactic sugar called **probabilistic clauses** to encode this in a more readable way. Above, we encoded the information that the conditional probability of an *alarm* given a *burglary* and an *earthquake* equals 0.9 using the rule `alarm :- burglary, earthquake, p_alarm1`, plus the probabilistic fact `0.9::p_alarm1`. We can replace both with a single probabilistic clause of the form `0.9::alarm :- burglary, earthquake`. This should be read as: if *burglary* and *earthquake* are true, this causes *alarm* to become true with probability 0.9. As this example illustrates, a probabilistic clause has a body, just like regular ProbLog rules, and a head. The difference is that now, the head is annotated with a probability. By also using probabilistic clauses for the other rules in the ProbLog encoding of the Bayesian network, we get the following program.
```
# Raw string: "\+" (ProbLog negation) is an invalid Python escape sequence
# (SyntaxWarning on Python >= 3.12); r"""...""" preserves it literally.
probabilistic_clauses = (
    r"""0.7::burglary.
0.2::earthquake.
0.9::alarm :- burglary, earthquake.
0.8::alarm :- burglary, \+earthquake.
0.1::alarm :- \+burglary, earthquake.
evidence(alarm,true).
query(burglary).
query(earthquake)."""
)

# Same marginals as the 'plain' encoding above, but without auxiliary atoms.
evaluate_problog(probabilistic_clauses)
```
As you can verify by pressing 'Evaluate', this returns the same marginals as the 'plain' ProbLog encoding: $P(burglary)=0.9896$ and $P(earthquake)=0.2275$. This is not a coincidence: the two programs are equivalent (but the program with probabilistic clauses has the advantage of not needing any auxiliary atoms).
[Probabilistic clauses documentation]
[Probabilistic clauses documentation]: https://problog.readthedocs.io/en/latest/modeling_basic.html#probabilistic-clauses
## First-order ##
To illustrate the use of *first-order* ProbLog programs, we show below a first-order extension of the *Alarm* example.
digraph alarm2 { burglary -> alarm; earthquake -> alarm; alarm -> "calls(john)"; alarm -> "calls(...)"; alarm -> "calls(mary)"; }
Suppose there are $N$ people and each person independently *calls* the police with a certain probability, depending on the *alarm* ringing or not: if the *alarm* rings, the probability of *calling* is 0.8, otherwise it is 0.1. This can be modelled as follows. We again use probabilistic clauses and show the case $N=2$ (two people).
```
# Raw string: "\+" (ProbLog negation) is an invalid Python escape sequence
# (SyntaxWarning on Python >= 3.12); r"""...""" preserves it literally.
first_order = (
    r"""person(john).
person(mary).
0.7::burglary.
0.2::earthquake.
0.9::alarm :- burglary, earthquake.
0.8::alarm :- burglary, \+earthquake.
0.1::alarm :- \+burglary, earthquake.
0.8::calls(X) :- alarm, person(X).
0.1::calls(X) :- \+alarm, person(X).
evidence(calls(john),true).
evidence(calls(mary),true).
query(burglary).
query(earthquake)."""
)

# P(burglary) and P(earthquake) given that both john and mary called.
evaluate_problog(first_order)
```
When pressing 'Evaluate', ProbLog2 calculates the probability of there being a *burglary* or an *earthquake*, given the evidence that both *john* and *mary* *called*. We obtain $P(burglary)=0.981939$ and $P(earthquake)=0.226851$.
In general, any Boolean Bayesian network can be modeled in ProbLog using the above methodology. This can also be extended to non-Boolean Bayesian networks (in which some variables can take more than two possible values), by using annotated disjunctions with multiple atoms in the head.
## Annotated disjunctions: Dealing with multi-valued variables ##
Since the random variables in the Bayesian network are all Boolean, we only need a single literal in the head of the rules. We can extend the Bayesian network to have a multi-valued variable by indicating the severity of the *earthquake*. The literal `earthquake` now has three possible values `none`, `mild`, `heavy` instead of previously two (no or yes). The probabilities of each value is denoted by the **annotated disjunction** in `0.01::earthquake(heavy); 0.19::earthquake(mild); 0.8::earthquake(none)`. An annotated disjunction is similar to a probabilistic disjunction, but with a different head. Instead of it being one atom annotated with a probability, it is now a disjunction of atoms each annotated with a probability.
```
# Raw string: "\+" (ProbLog negation) is an invalid Python escape sequence
# (SyntaxWarning on Python >= 3.12); r"""...""" preserves it literally.
annotated_disjunctions = (
    r"""person(john).
person(mary).
0.7::burglary.
0.01::earthquake(heavy); 0.19::earthquake(mild); 0.8::earthquake(none).
0.90::alarm :- burglary, earthquake(heavy).
0.85::alarm :- burglary, earthquake(mild).
0.80::alarm :- burglary, earthquake(none).
0.10::alarm :- \+burglary, earthquake(mild).
0.30::alarm :- \+burglary, earthquake(heavy).
0.8::calls(X) :- alarm, person(X).
0.1::calls(X) :- \+alarm, person(X).
evidence(calls(john),true).
evidence(calls(mary),true).
query(burglary).
query(earthquake(_))."""
)

# Multi-valued earthquake severity via an annotated disjunction.
evaluate_problog(annotated_disjunctions)
```
[Annotated disjunctions documentation]
[Annotated disjunctions documentation]: https://problog.readthedocs.io/en/latest/modeling_basic.html#annotated-disjunctions
| github_jupyter |
```
"""
Please run notebook locally (if you have all the dependencies and a GPU).
Technically you can run this notebook on Google Colab but you need to set up microphone for Colab.
Instructions for setting up Colab are as follows:
1. Open a new Python 3 notebook.
2. Import this notebook from GitHub (File -> Upload Notebook -> "GITHUB" tab -> copy/paste GitHub URL)
3. Connect to an instance with a GPU (Runtime -> Change runtime type -> select "GPU" for hardware accelerator)
4. Run this cell to set up dependencies.
5. Set up microphone for Colab
"""
# If you're using Google Colab and not running locally, run this cell.
## Install dependencies
!pip install wget
!apt-get install sox libsndfile1 ffmpeg portaudio19-dev
!pip install unidecode
!pip install pyaudio
# ## Install NeMo
BRANCH = 'v1.0.0b3'
!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[asr]
## Install TorchAudio
!pip install torchaudio>=0.6.0 -f https://download.pytorch.org/whl/torch_stable.html
## Grab the config we'll use in this example
!mkdir configs
```
This notebook demonstrates automatic speech recognition (ASR) from a microphone's stream in NeMo.
It is **not a recommended** way to do inference in production workflows. If you are interested in
production-level inference using NeMo ASR models, please sign-up to Jarvis early access program: https://developer.nvidia.com/nvidia-jarvis
The notebook requires PyAudio library to get a signal from an audio device.
For Ubuntu, please run the following commands to install it:
```
sudo apt-get install -y portaudio19-dev
pip install pyaudio
```
This notebook requires the `torchaudio` library to be installed for MatchboxNet. Please follow the instructions available at the [torchaudio Github page](https://github.com/pytorch/audio#installation) to install the appropriate version of torchaudio.
If you would like to install the latest version, please run the following command to install it:
```
conda install -c pytorch torchaudio
```
```
import numpy as np
import pyaudio as pa
import os, time
import nemo
import nemo.collections.asr as nemo_asr
# sample rate, Hz
SAMPLE_RATE = 16000
```
## Restore the model from NGC
```
# Download (or load from local cache) the pretrained QuartzNet 15x5
# English CTC model from NGC.
asr_model = nemo_asr.models.EncDecCTCModel.from_pretrained('QuartzNet15x5Base-En')
```
## Observing the config of the model
```
from omegaconf import OmegaConf
import copy

# Preserve a deep copy of the full config so the streaming tweaks made
# below do not mutate the model's own configuration object.
cfg = copy.deepcopy(asr_model._cfg)
print(OmegaConf.to_yaml(cfg))
```
### Modify preprocessor parameters for inference
```
# Make config overwrite-able (OmegaConf struct mode forbids edits)
OmegaConf.set_struct(cfg.preprocessor, False)

# some changes for streaming scenario: disable dithering noise and
# spectrogram padding, which are not wanted on short live audio chunks
cfg.preprocessor.dither = 0.0
cfg.preprocessor.pad_to = 0

# spectrogram normalization constants: fixed per-feature mean/std instead
# of per-utterance statistics, which cannot be computed on streaming frames.
# NOTE(review): these 64 values are presumably precomputed over the model's
# training data -- their provenance is not shown in this notebook.
normalization = {}
normalization['fixed_mean'] = [
     -14.95827016, -12.71798736, -11.76067913, -10.83311182,
     -10.6746914, -10.15163465, -10.05378331, -9.53918999,
     -9.41858904, -9.23382904, -9.46470918, -9.56037,
     -9.57434245, -9.47498732, -9.7635205, -10.08113074,
     -10.05454561, -9.81112681, -9.68673603, -9.83652977,
     -9.90046248, -9.85404766, -9.92560366, -9.95440354,
     -10.17162966, -9.90102482, -9.47471025, -9.54416855,
     -10.07109475, -9.98249912, -9.74359465, -9.55632283,
     -9.23399915, -9.36487649, -9.81791084, -9.56799225,
     -9.70630899, -9.85148006, -9.8594418, -10.01378735,
     -9.98505315, -9.62016094, -10.342285, -10.41070709,
     -10.10687659, -10.14536695, -10.30828702, -10.23542833,
     -10.88546868, -11.31723646, -11.46087382, -11.54877829,
     -11.62400934, -11.92190509, -12.14063815, -11.65130117,
     -11.58308531, -12.22214663, -12.42927197, -12.58039805,
     -13.10098969, -13.14345864, -13.31835645, -14.47345634]
normalization['fixed_std'] = [
     3.81402054, 4.12647781, 4.05007065, 3.87790987,
     3.74721178, 3.68377423, 3.69344, 3.54001005,
     3.59530412, 3.63752368, 3.62826417, 3.56488469,
     3.53740577, 3.68313898, 3.67138151, 3.55707266,
     3.54919572, 3.55721289, 3.56723346, 3.46029304,
     3.44119672, 3.49030548, 3.39328435, 3.28244406,
     3.28001423, 3.26744937, 3.46692348, 3.35378948,
     2.96330901, 2.97663111, 3.04575148, 2.89717604,
     2.95659301, 2.90181116, 2.7111687, 2.93041291,
     2.86647897, 2.73473181, 2.71495654, 2.75543763,
     2.79174615, 2.96076456, 2.57376336, 2.68789782,
     2.90930817, 2.90412004, 2.76187531, 2.89905006,
     2.65896173, 2.81032176, 2.87769857, 2.84665271,
     2.80863137, 2.80707634, 2.83752184, 3.01914511,
     2.92046439, 2.78461139, 2.90034605, 2.94599508,
     2.99099718, 3.0167554, 3.04649716, 2.94116777]
cfg.preprocessor.normalize = normalization

# Disable config overwriting (restore struct mode)
OmegaConf.set_struct(cfg.preprocessor, True)
```
## Setup preprocessor with these settings
```
# Rebuild the preprocessor from the modified config so the streaming
# settings above take effect.
asr_model.preprocessor = asr_model.from_config_dict(cfg.preprocessor)
# Set model to inference mode (disables dropout / batch-norm updates)
asr_model.eval();
asr_model = asr_model.to(asr_model.device)
```
## Setting up data for Streaming Inference
```
from nemo.core.classes import IterableDataset
from nemo.core.neural_types import NeuralType, AudioSignal, LengthsType
import torch
from torch.utils.data import DataLoader
# simple data layer to pass audio signal
# simple data layer to pass audio signal
class AudioDataLayer(IterableDataset):
    """One-shot data layer: yields the signal set via set_signal() exactly once."""

    def __init__(self, sample_rate):
        super().__init__()
        self._sample_rate = sample_rate
        self.output = True

    @property
    def output_types(self):
        signal_type = NeuralType(('B', 'T'), AudioSignal(freq=self._sample_rate))
        length_type = NeuralType(tuple('B'), LengthsType())
        return {'audio_signal': signal_type, 'a_sig_length': length_type}

    def __iter__(self):
        return self

    def __next__(self):
        # Emit the stored signal once, then stop until set_signal() re-arms us.
        if self.output:
            self.output = False
            return (torch.as_tensor(self.signal, dtype=torch.float32),
                    torch.as_tensor(self.signal_shape, dtype=torch.int64))
        raise StopIteration

    def set_signal(self, signal):
        # Scale 16-bit PCM samples into floats in [-1, 1).
        self.signal = signal.astype(np.float32) / 32768.
        self.signal_shape = self.signal.size
        self.output = True

    def __len__(self):
        return 1
# Single-item data layer/loader pair; collate_fn is inherited from the NeMo
# IterableDataset base class.
data_layer = AudioDataLayer(sample_rate=cfg.preprocessor.sample_rate)
data_loader = DataLoader(data_layer, batch_size=1, collate_fn=data_layer.collate_fn)
# inference method for audio signal (single instance)
def infer_signal(model, signal):
    """Run one raw audio signal through *model* and return its log-probabilities.

    Uses the module-level data_layer/data_loader pair to package the raw
    signal into an (audio, length) batch of size 1.
    """
    data_layer.set_signal(signal)
    batch = next(iter(data_loader))
    audio_signal, audio_signal_len = batch
    # Fix: move tensors to the device of the model actually passed in.
    # The original used the global asr_model's device, which silently breaks
    # if a different model instance is ever supplied.
    audio_signal = audio_signal.to(model.device)
    audio_signal_len = audio_signal_len.to(model.device)
    log_probs, encoded_len, predictions = model.forward(
        input_signal=audio_signal, input_signal_length=audio_signal_len
    )
    return log_probs
# class for streaming frame-based ASR
# 1) use reset() method to reset FrameASR's state
# 2) call transcribe(frame) to do ASR on
#    contiguous signal's frames
class FrameASR:
    def __init__(self, model_definition,
                 frame_len=2, frame_overlap=2.5,
                 offset=10):
        '''
        Args:
          frame_len: frame's duration, seconds
          frame_overlap: duration of overlaps before and after current frame, seconds
          offset: number of symbols to drop for smooth streaming
        '''
        # '_' is appended as the CTC blank symbol.
        self.vocab = list(model_definition['labels'])
        self.vocab.append('_')
        self.sr = model_definition['sample_rate']
        self.frame_len = frame_len
        self.n_frame_len = int(frame_len * self.sr)
        self.frame_overlap = frame_overlap
        self.n_frame_overlap = int(frame_overlap * self.sr)
        # Duration of one encoder output timestep: preprocessor window stride
        # multiplied by the product of all encoder block strides.
        timestep_duration = model_definition['AudioToMelSpectrogramPreprocessor']['window_stride']
        for block in model_definition['JasperEncoder']['jasper']:
            timestep_duration *= block['stride'][0] ** block['repeat']
        self.n_timesteps_overlap = int(frame_overlap / timestep_duration) - 2
        # Rolling audio buffer laid out as [overlap | current frame | overlap].
        self.buffer = np.zeros(shape=2*self.n_frame_overlap + self.n_frame_len,
                               dtype=np.float32)
        self.offset = offset
        self.reset()

    def _decode(self, frame, offset=0):
        # Shift the buffer left by one frame and append the new frame at the end.
        assert len(frame)==self.n_frame_len
        self.buffer[:-self.n_frame_len] = self.buffer[self.n_frame_len:]
        self.buffer[-self.n_frame_len:] = frame
        logits = infer_signal(asr_model, self.buffer).cpu().numpy()[0]
        # print(logits.shape)
        # Decode only the central timesteps; the overlap regions give the model
        # context but their predictions are discarded.
        decoded = self._greedy_decoder(
            logits[self.n_timesteps_overlap:-self.n_timesteps_overlap],
            self.vocab
        )
        # Drop the trailing `offset` symbols for smoother streaming output.
        return decoded[:len(decoded)-offset]

    @torch.no_grad()
    def transcribe(self, frame=None, merge=True):
        '''Transcribe one audio frame; missing/short frames are zero-padded.'''
        if frame is None:
            frame = np.zeros(shape=self.n_frame_len, dtype=np.float32)
        if len(frame) < self.n_frame_len:
            frame = np.pad(frame, [0, self.n_frame_len - len(frame)], 'constant')
        unmerged = self._decode(frame, self.offset)
        if not merge:
            return unmerged
        return self.greedy_merge(unmerged)

    def reset(self):
        '''
        Reset frame_history and decoder's state
        '''
        self.buffer=np.zeros(shape=self.buffer.shape, dtype=np.float32)
        self.prev_char = ''

    @staticmethod
    def _greedy_decoder(logits, vocab):
        # Argmax over the vocabulary at each timestep.
        s = ''
        for i in range(logits.shape[0]):
            s += vocab[np.argmax(logits[i])]
        return s

    def greedy_merge(self, s):
        # CTC collapse: drop repeated characters, then drop the blank '_'.
        # prev_char persists across calls so merging works across frames.
        s_merged = ''
        for i in range(len(s)):
            if s[i] != self.prev_char:
                self.prev_char = s[i]
                if self.prev_char != '_':
                    s_merged += self.prev_char
        return s_merged
```
# Streaming Inference
Streaming inference depends on a few factors, such as the frame length and buffer size. Experiment with a few values to see their effects in the below cells.
```
# duration of signal frame, seconds
FRAME_LEN = 1.0
# number of audio channels (expect mono signal)
CHANNELS = 1
# Samples per streamed frame.
CHUNK_SIZE = int(FRAME_LEN*SAMPLE_RATE)
asr = FrameASR(model_definition = {
                   'sample_rate': SAMPLE_RATE,
                   'AudioToMelSpectrogramPreprocessor': cfg.preprocessor,
                   'JasperEncoder': cfg.encoder,
                   'labels': cfg.decoder.vocabulary
               },
               frame_len=FRAME_LEN, frame_overlap=2,
               offset=4)
asr.reset()
p = pa.PyAudio()
print('Available audio input devices:')
input_devices = []
for i in range(p.get_device_count()):
    dev = p.get_device_info_by_index(i)
    if dev.get('maxInputChannels'):
        input_devices.append(i)
        print(i, dev.get('name'))
if len(input_devices):
    # Prompt until the user picks one of the listed input device IDs.
    dev_idx = -2
    while dev_idx not in input_devices:
        print('Please type input device ID:')
        dev_idx = int(input())
    empty_counter = 0
    def callback(in_data, frame_count, time_info, status):
        # PyAudio stream callback: transcribe each incoming 16-bit PCM frame.
        global empty_counter
        signal = np.frombuffer(in_data, dtype=np.int16)
        text = asr.transcribe(signal)
        if len(text):
            print(text,end='')
            empty_counter = asr.offset
        elif empty_counter > 0:
            # Emit one space after `offset` consecutive silent frames.
            empty_counter -= 1
            if empty_counter == 0:
                print(' ',end='')
        return (in_data, pa.paContinue)
    stream = p.open(format=pa.paInt16,
                    channels=CHANNELS,
                    rate=SAMPLE_RATE,
                    input=True,
                    input_device_index=dev_idx,
                    stream_callback=callback,
                    frames_per_buffer=CHUNK_SIZE)
    print('Listening...')
    stream.start_stream()
    # Interrupt kernel and then speak for a few more words to exit the pyaudio loop !
    try:
        while stream.is_active():
            time.sleep(0.1)
    finally:
        # Always release the audio device, even on KeyboardInterrupt.
        stream.stop_stream()
        stream.close()
        p.terminate()
        print()
        print("PyAudio stopped")
else:
    print('ERROR: No audio input device found.')
```
| github_jupyter |
# Capability Correlations
Let's start by importing all of the necessary libraries to conduct the analysis.
```
from py2neo import Graph
import numpy as np
from pandas import DataFrame
import itertools
import matplotlib.pyplot as plt
import seaborn as sns
import json
import math
import pandas as pd
import plotly
import plotly.graph_objs as go
import qgrid
from scipy import stats, spatial
from sklearn.cluster.bicluster import SpectralBiclustering
from matplotlib.colors import ListedColormap
import operator
import math
from IPython.display import display, HTML
import collections
# Load plotly API credentials from a local, non-committed config file.
# Create plotly_config.json on your own machine; see
# https://plot.ly/python/getting-started/
plotly_config = json.load(open('plotly_config.json'))
plotly.tools.set_credentials_file(username=plotly_config['username'], api_key=plotly_config['key'])
```
## Table of Contents
- [1. Patent and Publication differences on term pairs](#one)
- [1.1. Axis building](#one-one)
- [1.2. Function Design](#one-two)
- [1.3. Patents and Publication Matrixes](#one-three)
- [1.4. Analysing the differences](#one-four)
- [2. Patents and Publication Matrixes: Chronological Evolution](#two)
- [2.1. Absolute Evolution](#two-one)
- [2.2. Comparing the chronological evolution of asset types](#two-two)
- [2.2.1. Feedstocks](#two-two-one)
- [2.2.2. Outputs](#two-two-two)
- [3. Comparing the patenting and publication rates of individual terms](#three)
- [3.1. Function design](#three-one)
- [3.2. Feedstock](#three-two)
- [3.3. Processing Technologies](#three-three)
- [3.4. Output](#three-four)
- [3.5. Comparison](#three-five)
## 1. Patent and Publication differences on term pairs <a class="anchor" id="one"></a>
In order to establish a basis for the work, we wish to understand the occurrence of several feedstocks, processing technologies and outputs in our database.
For example, how many assets (patents, papers, etc) contain the mix of processing technology X for output Y?
To understand this in a more general way, the [AMICA](https://amica-pathfinder.net/) database will be transformed into a [co-occurrence matrix](https://en.wikipedia.org/wiki/Co-occurrence_matrix). This means, in the above-described example, that the number of assets that contain that mix will be an entry in a matrix, while the corresponding technology and output will be columns/rows of the matrix.
### 1.1. Axis building <a class="anchor" id="one-one"></a>
We start by creating, as previously, a list of terms (feedstocks, processing technologies and outputs) so that these become the axes of the capability matrix.
```
# Connect to the local Neo4j instance.
local_connection_url = "http://localhost:7474/db/data"
connection_to_graph = Graph(local_connection_url)
# Distinct term names per category. Column 1 of the result matrix holds the
# term: pandas orders the returned keys alphabetically, so "count(a)" is
# column 0 and "fs.term" column 1 — presumably; verify with the installed
# py2neo/pandas versions.
f_terms = list(set(DataFrame(connection_to_graph.data('MATCH (a:Asset)-[:CONTAINS]->(fs:Feedstock) RETURN fs.term, count(a)')).as_matrix()[:, 1]))
o_terms = list(set(DataFrame(connection_to_graph.data('MATCH (a:Asset)-[:CONTAINS]->(fs:Output) RETURN fs.term, count(a)')).as_matrix()[:, 1]))
pt_terms = list(set(DataFrame(connection_to_graph.data('MATCH (a:Asset)-[:CONTAINS]->(fs:ProcessingTech) RETURN fs.term, count(a)')).as_matrix()[:, 1]))
# Union of all terms becomes the shared axis of the co-occurrence matrices.
bbo = list(set(f_terms + pt_terms + o_terms))
print len(bbo)
matrix_axis_names = bbo
```
Our matrix will have a total of 342 rows and 342 columns.
### 1.2. Function Design <a class="anchor" id="one-two"></a>
We start by creating a function that, given an asset type (e.g. "Patent" or "Publication"), returns a matrix where each entry corresponds to the number of documents containing a certain pair of terms. For example:
matrix[i, j] = z
norm_matrix[i, j] = w
There are z documents containing the term i and j.
Where:
norm_matrix[i,j] = (matrix[i, j] - mean(matrix)) / standard_deviation(matrix)
```
def get_asset_matrix(asset, normalization=True):
    """Build the symmetric term co-occurrence matrix for one asset type.

    Args:
        asset: asset type string, e.g. "PATENT" or "PUBLICATION".
        normalization: if True, divide every entry by the total number of
            unique (upper-triangle) co-occurrences so entries become shares.

    Returns:
        Square numpy array indexed by matrix_axis_names; entry [i, j] counts
        assets containing both term i and term j.
    """
    # define queries
    # Cross-category co-occurrences (feedstock x processing tech x output).
    asset_no_interestions = """ MATCH (a:Asset)-[:CONTAINS]->(fs:Feedstock)
                                MATCH (a:Asset)-[:CONTAINS]->(out:Output)
                                MATCH (a:Asset)-[:CONTAINS]->(pt:ProcessingTech)
                                WHERE a.type = "{}"
                                RETURN fs.term, pt.term, out.term, count(a)
                            """.format(asset)
    process_variables = ['Feedstock', 'Output', 'ProcessingTech']
    # Within-category co-occurrences (two distinct terms of the same label).
    asset_intersections = """ MATCH (a:Asset)-[:CONTAINS]->(fs:{})
                              MATCH (a:Asset)-[:CONTAINS]->(t:{})
                              WHERE fs<>t AND a.type = "{}"
                              RETURN fs.term, t.term, count(a)
                          """
    # get data
    data_no_intersections = DataFrame(connection_to_graph.data(asset_no_interestions)).as_matrix()
    # create matrix
    asset_matrix = np.zeros([len(matrix_axis_names), len(matrix_axis_names)])
    # for no intersections data
    for row in data_no_intersections:
        # NOTE(review): pandas sorts the returned keys alphabetically, so
        # "count(a)" lands in column 0 and the terms in columns 1+ — confirm
        # against the installed py2neo/pandas versions.
        frequency = row[0]
        indexes = [matrix_axis_names.index(element) for element in row[1::]]
        # add frequency value to matrix position (kept symmetric)
        for pair in itertools.combinations(indexes, 2):
            asset_matrix[pair[0], pair[1]] += frequency
            asset_matrix[pair[1], pair[0]] += frequency
    # for intersecting data
    for category in process_variables:
        process_data = DataFrame(connection_to_graph.data(asset_intersections.format(category, category, asset))).as_matrix()
        for row in process_data:
            frequency = row[0]
            indexes = [matrix_axis_names.index(element) for element in row[1::]]
            # add frequency value to matrix position; the query returns every
            # unordered pair twice (fs,t and t,fs), hence the halving.
            for pair in itertools.combinations(indexes, 2):
                asset_matrix[pair[0], pair[1]] += frequency / 2  # Divided by two because query not optimized
                asset_matrix[pair[1], pair[0]] += frequency / 2  # Divided by two because query not optimized
    # normalize
    #normalized_asset_matrix = (asset_matrix - np.mean(asset_matrix)) / np.std(asset_matrix)
    # Normalize by the number of unique (unordered, off-diagonal) co-occurrences.
    no_duplicates = np.triu(asset_matrix, 1)
    total_documents = np.sum(no_duplicates)
    normalized_asset_matrix = asset_matrix / total_documents
    # dynamic return
    if normalization == True:
        return normalized_asset_matrix
    else:
        return asset_matrix
```
### 1.3. Patents and Publication Matrixes <a class="anchor" id="one-three"></a>
We can now create the capability matrixes of publications and patents.
First, a query is created:
```
# Build share-normalized co-occurrence matrices for both asset types.
norm_mode = True
publication_matrix = get_asset_matrix('PUBLICATION', normalization=norm_mode)
patent_matrix = get_asset_matrix('PATENT', normalization=norm_mode)
```
Consequently, both matrixes are plotted.
```
def borders(width, color):
    """Draw frame lines around the current heatmap axes.

    Args:
        width: line width in points.
        color: colour of the far (top/right) border lines.
    """
    # Perf fix: reuse the already-computed publication_matrix for the axis
    # extents instead of rebuilding the whole co-occurrence matrix (two full
    # Neo4j query rounds) on every call. All matrices share the same shape,
    # since they are indexed by the common matrix_axis_names axis.
    n_rows, n_cols = publication_matrix.shape
    plt.axhline(y=0, color='k', linewidth=width)
    plt.axhline(y=n_cols, color=color, linewidth=width)
    plt.axvline(x=0, color='k', linewidth=width)
    plt.axvline(x=n_rows, color=color, linewidth=width)
# create subplots
plt.subplots(2,1,figsize=(17,17))
# Two-tone colormap: white below graph_holder, black above.
bwhite = ListedColormap(['white', 'black'])
graph_holder = 0.001
plt.subplot(121)
sns.heatmap(publication_matrix, cbar=None,cmap=bwhite, center=graph_holder, square=True, xticklabels=False, yticklabels=False)
borders(1.5, 'k')
plt.title('Publications Heatmap')
plt.subplot(122)
sns.heatmap(patent_matrix, cbar=None,cmap=bwhite, center=graph_holder, square=True, xticklabels=False, yticklabels=False)
borders(1.5, 'k')
plt.title('Patents Heatmap')
plt.show()
# create subplots
# Second rendering: continuous grayscale with a shared value range so the
# two heatmaps are directly comparable.
vmin = 0.0000
vmax = 0.001
bwhite = 'binary'
plt.subplots(2,1,figsize=(17,17))
plt.subplot(121)
sns.heatmap(publication_matrix, cbar=True, cbar_kws={"shrink": .2}, cmap=bwhite, square=True, xticklabels=False, yticklabels=False, vmin=vmin, vmax=vmax)
borders(1.5, 'k')
plt.title('Publications Heatmap')
plt.subplot(122)
sns.heatmap(patent_matrix, cbar=True, cbar_kws={"shrink": .2}, cmap=bwhite, square=True, xticklabels=False, yticklabels=False, vmin=vmin, vmax=vmax)
borders(1.5, 'k')
plt.title('Patents Heatmap')
plt.show()
```
Due to the scarcity of documents in certain intersecting categories, the matrices are rather hard to read.
We analyse some basic stats of these two matrixes..
```
# Summary statistics of both normalized matrices (Python 2 print syntax).
print 'PATENTS:'
print 'Rows:', patent_matrix.shape[0]
print 'Columns:', patent_matrix.shape[1]
print 'Mean: ', np.mean(patent_matrix)
print 'Standart Deviation', np.std(patent_matrix)
print 'Max: ', np.amax(patent_matrix)
print 'Min: ', np.amin(patent_matrix)
print 'PUBLICATIONS:'
print 'Rows:', publication_matrix.shape[0]
print 'Columns:', publication_matrix.shape[1]
print 'Mean: ', np.mean(publication_matrix)
print 'Standart Deviation', np.std(publication_matrix)
print 'Max: ', np.amax(publication_matrix)
print 'Min: ', np.amin(publication_matrix)
```
Clearly, there are more publications; for instance, the average cell in the matrix has a value of 1.96, which compared to patents (0.7) is more than double.
Moreover, due to very high max values, (883, 2169) the matrixes are very irregular.
### 1.4. Analysing the differences <a class="anchor" id="one-four"></a>
Now, we create a matrix where every entry corresponds to the following:
difference[i,j] = patents[i,j] - publications[i,j].
This difference matrix, will give us a feel for what combination of terms are more patented Vs. researched or vice-versa.
We subtract the matrixes:
```
# Element-wise difference: positive entries lean toward patents, negative
# toward publications.
differences = patent_matrix - publication_matrix
```
And we plot the `differences` matrix.
```
# Visualize the patent-minus-publication difference matrix.
plt.subplots(1,1,figsize=(9,9))
plt.subplot(111)
sns.heatmap(differences, square=True, xticklabels=False, yticklabels=False)
plt.title('The heatmap of differences')
plt.show()
```
Due to the high volume of entries, it can be hard to visualize what is happening.
Therefore, let's create a table with the combinations of terms that are the most discrepant.
```
# list where all the values and indexes of matrix are stored
values = []
indexes = []
# Upper triangle only (k=1): drops the symmetric duplicates and the diagonal.
no_duplicates = np.abs(np.triu(differences, 1))
# loop through the matrix
for row_n in range(differences.shape[0]):
    for col_n in range(differences.shape[1]):
        values.append(no_duplicates[row_n, col_n])
        indexes.append((row_n, col_n))
# Index pairs sorted by ascending absolute difference.
Z = [indexes for _,indexes in sorted(zip(values,indexes))]
```
Let us create a dataframe of the most negative and positive relations for easy visualization.
```
# Table of the most patent- vs publication-biased term pairs (absolute counts).
term_Dataframe = pd.DataFrame(
    {'First Term': [matrix_axis_names[e[0]] for e in Z],
     'Second Term': [matrix_axis_names[e[1]] for e in Z],
     'Patents': [patent_matrix[e[0], e[1]] for e in Z],
     'Publications': [publication_matrix[e[0], e[1]] for e in Z],
     'Difference': [no_duplicates[e[0], e[1]] for e in Z]
    })
term_Dataframe = term_Dataframe[['First Term', 'Second Term', 'Patents', 'Publications', 'Difference']]
term_Dataframe = term_Dataframe.sort_values('Difference', ascending=False).head(n=15)
print 'Absolute:'
display(HTML(term_Dataframe.to_html(index=False)))
# Re-normalize each matrix by its asset-type total so the comparison is
# relative rather than absolute.
# NOTE(review): totals are hard-coded — confirm they match the database.
total_patents = 4585
total_publications = 5313
patent_matrix = patent_matrix / total_patents
publication_matrix = publication_matrix / total_publications
differences = patent_matrix - publication_matrix
# list where all the values and indexes of matrix are stored
values = []
indexes = []
no_duplicates = np.abs(np.triu(differences, 1))
# loop through the matrix
for row_n in range(differences.shape[0]):
    for col_n in range(differences.shape[1]):
        values.append(no_duplicates[row_n, col_n])
        indexes.append((row_n, col_n))
Z = [indexes for _,indexes in sorted(zip(values,indexes))]
# Same ranking table, now on the re-normalized values.
term_Dataframe = pd.DataFrame(
    {'First Term': [matrix_axis_names[e[0]] for e in Z],
     'Second Term': [matrix_axis_names[e[1]] for e in Z],
     'Patents': [patent_matrix[e[0], e[1]] for e in Z],
     'Publications': [publication_matrix[e[0], e[1]] for e in Z],
     'Difference': [no_duplicates[e[0], e[1]] for e in Z]
    })
term_Dataframe = term_Dataframe[['First Term', 'Second Term', 'Patents', 'Publications', 'Difference']]
term_Dataframe = term_Dataframe.sort_values('Difference', ascending=False).head(n=15)
print 'Absolute:'
display(HTML(term_Dataframe.to_html(index=False)))
```
## 2. Patents and Publication Matrixes: Chronological Evolution <a class="anchor" id="two"></a>
### 2.1. Absolute Evolution <a class="anchor" id="two-one"></a>
We start by creating a function that given a certain asset type and a timeline, returns the number of documents of that type for every year.
```
def getTotalDocuments(assetType, startYear, endYear):
    """Count assets of one type per year over [startYear, endYear].

    Returns a dict with 'Years' (the year axis) and 'Quantity' (asset counts
    per year; years absent from the database count as 0).
    """
    assetQuery = """MATCH (a:Asset)
                    WHERE a.type="{}"
                    AND toInteger(a.year)>={} AND toInteger(a.year)<={}
                    AND NOT a.year = "Null"
                    RETURN a.year, count(a)
                    ORDER BY a.year""".format(assetType, startYear, endYear)
    dataReturn = DataFrame(connection_to_graph.data(assetQuery)).as_matrix()
    timeLine = np.arange(startYear, endYear + 1)
    # Build a year -> count lookup once, then fill the timeline from it
    # (missing years stay at 0).
    countsByYear = {}
    for record in dataReturn:
        countsByYear[int(record[0])] = record[1]
    quantities = np.array([countsByYear.get(int(year), 0) for year in timeLine])
    toReturn = {}
    toReturn['Years'] = timeLine
    toReturn['Quantity'] = quantities
    return toReturn
```
We now test the function
```
# define data
startYear = 1990
endYear = 2017
patentTimeline = getTotalDocuments('PATENT', startYear, endYear)
publicationTimeline = getTotalDocuments('PUBLICATION', startYear, endYear)
# NOTE(review): these label variables are unused — the plt calls below
# restate the strings literally.
title = 'Evolution of asset quantity over time'
x_label = 'Years'
y_label = 'Number of Records'
# plot evolution
plt.subplots(1,1,figsize=(16, 5))
plt.subplot(111)
plt.plot(patentTimeline['Years'], patentTimeline['Quantity'], label='Patents')
plt.plot(publicationTimeline['Years'], publicationTimeline['Quantity'], label='Publications')
plt.legend()
plt.xticks(publicationTimeline['Years'])
plt.title('Evolution of asset quantity over time')
plt.xlabel('Years')
plt.ylabel('Number of Records')
plt.show()
```
There is a clear relationship between the volume of patents and publications. Moreover, the number of patents seems to be inferior on average to the number of publications.
However, there is a period where the number of patents is superior to the number of publications. Particularly the period between 2005 and 2011.
### 2.2. Comparing the chronological evolution of asset types <a class="anchor" id="two-two"></a>
A function that gives the chronological evolution of a certain asset.
```
def getDocuments(processType, processTerm, assetType, startYear, endYear):
    """Yearly counts of assets of `assetType` that mention `processTerm`.

    Returns a dict with:
        'Years'        - year axis,
        'Quantity'     - raw per-year counts for the term,
        'NormQuantity' - counts divided by the total number of assets of that
                         type in the same year (0 when the yearly total is 0).
    """
    # Assets of the given type containing the given term, counted per year.
    assetQuery = """MATCH (a:Asset)-[:CONTAINS]->(fs:{})
                    WHERE fs.term = "{}" AND a.type="{}"
                    AND toInteger(a.year)>={} AND toInteger(a.year)<={}
                    AND NOT a.year = "Null"
                    RETURN a.year, count(a)
                    ORDER BY a.year""".format(processType, processTerm, assetType, startYear, endYear)
    # All assets of the given type per year (normalization denominator).
    assetTotalQuery = """ MATCH (a:Asset)
                          WHERE a.type="{}"
                          AND toInteger(a.year)>={} AND toInteger(a.year)<={}
                          AND NOT a.year = "Null"
                          RETURN a.year, count(a)
                          ORDER BY a.year""".format(assetType, startYear, endYear)
    dataReturn = DataFrame(connection_to_graph.data(assetQuery)).as_matrix()
    dataNormReturn = DataFrame(connection_to_graph.data(assetTotalQuery)).as_matrix()
    timeLine = np.arange(startYear, endYear + 1)
    # Columns: 0 = year, 1 = term count, 2 = total count (1 and 2 reset to 0).
    finalMatrix = np.transpose(np.vstack((timeLine, timeLine, timeLine)))
    for i in range(finalMatrix.shape[0]):
        finalMatrix[i, 1] = 0
        finalMatrix[i, 2] = 0
        for j in range(dataReturn.shape[0]):
            if finalMatrix[i, 0] == int(dataReturn[j, 0]):
                finalMatrix[i, 1] = dataReturn[j, 1]
        for k in range(dataNormReturn.shape[0]):
            if finalMatrix[i, 0] == int(dataNormReturn[k, 0]):
                finalMatrix[i, 2] = dataNormReturn[k, 1]
    toReturn = {}
    toReturn['Years'] = finalMatrix[:, 0]
    toReturn['Quantity'] = finalMatrix[:, 1]
    # Guard against division by zero for years with no assets at all.
    toReturn['NormQuantity'] = [finalMatrix[e, 1] / float(finalMatrix[e, 2]) if finalMatrix[e, 2] != 0 else 0 for e in range(finalMatrix.shape[0])]
    return toReturn
```
#### 2.2.1. Feedstocks <a class="anchor" id="two-two-one"></a>
We then test the function for several different feedstock values — in this case, the ones that are most prominent in the database.
```
# Chronological patent vs publication counts for prominent feedstock terms.
feedstockList = ['waste', 'algae', 'cellulose', 'sugar', 'paper', 'wood', 'residues', 'corn']
palette = plt.get_cmap('tab20')
plotCounter = 1
colorCounter = 0
plt.subplots(1,1,figsize=(30, 10))
for term in feedstockList:
    termPat = getDocuments('Feedstock', term, 'PATENT', 1990, 2017)
    termPub = getDocuments('Feedstock', term, 'PUBLICATION', 1990, 2017)
    plt.subplot(2,4, plotCounter)
    # Adjacent palette colours pair each term's patent/publication curves.
    plt.plot(termPat['Years'], termPat['Quantity'], label = 'Patents', color = palette(colorCounter))
    plt.plot(termPub['Years'], termPub['Quantity'], label = 'Publications', color = palette(colorCounter + 1))
    plt.xlim(1990,2017)
    plt.ylim(-10,310)
    plt.title(term.upper())
    plt.legend()
    plotCounter += 1
    colorCounter += 2
plt.show()
```
It appears that the number of publications is on average, far superior to the number of patents. However, the behaviour of these assets appears to follow the behaviour of the general dataset.(e.g. Small period where patents are more important.)
#### 2.2.2. Outputs <a class="anchor" id="two-two-two"></a>
What about outputs as process variables?
```
# Same chronological comparison, now for the most prominent output terms.
outputList = ["ethanol", "biodiesel", "biogas", "bioethanol", "bio-oil", "gasoline", "methanol", "butanol"]
palette = plt.get_cmap('tab20')
plotCounter = 1
colorCounter = 0
plt.subplots(1,1,figsize=(30, 10))
for term in outputList:
    termPat = getDocuments('Output', term, 'PATENT', 1990, 2017)
    termPub = getDocuments('Output', term, 'PUBLICATION', 1990, 2017)
    plt.subplot(2,4, plotCounter)
    plt.plot(termPat['Years'], termPat['Quantity'], label = 'Patents', color = palette(colorCounter))
    plt.plot(termPub['Years'], termPub['Quantity'] , label = 'Publications', color = palette(colorCounter + 1))
    plt.xlim(1990,2017)
    plt.ylim(-10,310)
    plt.title(term.upper())
    plt.legend()
    plotCounter += 1
    colorCounter += 2
plt.show()
```
The behaviour does not appear to differ greatly from the feedstocks.
#### 2.2.3. Normalized Feedstocks <a class="anchor" id="two-two-one"></a>
```
# Feedstock timelines again, but normalized by the yearly asset totals
# ('NormQuantity') to remove the overall growth trend.
feedstockList = ['waste', 'algae', 'cellulose', 'sugar', 'paper', 'wood', 'residues', 'corn']
palette = plt.get_cmap('tab20')
plotCounter = 1
colorCounter = 0
plt.subplots(1,1,figsize=(30, 10))
for term in feedstockList:
    termPat = getDocuments('Feedstock', term, 'PATENT', 2000, 2017)
    termPub = getDocuments('Feedstock', term, 'PUBLICATION', 2000, 2017)
    plt.subplot(2,4, plotCounter)
    plt.plot(termPat['Years'], termPat['NormQuantity'], label = 'Patents', color = palette(colorCounter))
    plt.plot(termPub['Years'], termPub['NormQuantity'], label = 'Publications', color = palette(colorCounter + 1))
    plt.grid()
    plt.xlim(2000,2017)
    plt.ylim(-0.05,0.5)
    plt.title(term.upper())
    plt.legend()
    plotCounter += 1
    colorCounter += 2
plt.show()
```
## 3. Comparing the patenting and publication rates of individual terms <a class="anchor" id="three"></a>
### 3.1. Function design <a class="anchor" id="three-one"></a>
We create a function that given a certain process type (e.g. Output, ProcTech or Feedstock), returns the total assets in terms of patents and publications.
```
def get_asset_distribution(processType):
    """
    This function takes a process type, say Feedstocks and returns the total assets in terms of Patents
    and Publications for that same asset.

    Returns a dict with the term list and, per term, the raw and
    total-normalized patent/publication counts (parallel lists).
    """
    # Per-term counts split by asset type.
    q = """ MATCH (a:Asset)-[:CONTAINS]->(fs:{})
            WHERE a.type="PATENT" OR a.type="PUBLICATION"
            RETURN fs.term,a.type, count(a)
            ORDER BY fs.term""".format(processType)
    # Overall totals per asset type (normalization denominators).
    q_total = """ MATCH (a:Asset)-[:CONTAINS]->(fs:{})
                  WHERE a.type="PATENT" OR a.type="PUBLICATION"
                  RETURN a.type, count(a)""".format(processType)
    data = DataFrame(connection_to_graph.data(q)).as_matrix()
    # NOTE(review): assumes the totals query yields the PATENT row first and
    # PUBLICATION second — confirm, since the query has no ORDER BY.
    patent_total = DataFrame(connection_to_graph.data(q_total)).as_matrix()[0, 1]
    publication_total = DataFrame(connection_to_graph.data(q_total)).as_matrix()[1, 1]
    # Columns are sorted alphabetically by pandas: 0 = a.type, 1 = count(a),
    # 2 = fs.term — presumably; verify with the installed py2neo version.
    terms = list(set(data[:, 2]))
    patents = []
    publications = []
    for term in terms:
        # Seed each term with 0, then overwrite the last slot (the current
        # term's slot) whenever a matching data row is found.
        publications.append(0)
        patents.append(0)
        for data_row in data:
            if data_row[2] == term and data_row[0] == "PUBLICATION":
                publications = publications[:-1] + [data_row[1]]
            if data_row[2] == term and data_row[0] == "PATENT":
                patents = patents[:-1] + [data_row[1]]
    distribution = {}
    distribution['terms'] = terms
    distribution['publications'] = publications
    distribution['patents'] = patents
    distribution['publications_norm'] = [e / float(publication_total) for e in distribution['publications']]
    distribution['patents_norm'] = [e / float(patent_total) for e in distribution['patents']]
    return distribution
```
### 3.2. Feedstock <a class="anchor" id="three-two"></a>
We start by analysing Feedstock terms.
```
# Feedstock terms: normalized patent share vs publication share.
processType = 'Feedstock'
distribution = get_asset_distribution(processType)
def get_closest_in_line(i):
    """Orthogonal projection of point i onto the parity line y = m*x + k (m=1, k=0)."""
    px = distribution['publications_norm'][i]
    py = distribution['patents_norm'][i]
    slope = 1
    intercept = 0
    proj_x = (px + slope * py - slope * intercept) / ((slope ** 2) + 1)
    proj_y = slope * proj_x + intercept
    return [proj_x, proj_y]
fig, ax1 = plt.subplots(figsize=(9,9))
plt.scatter(np.asarray(distribution['publications_norm']), np.asarray(distribution['patents_norm']), marker=".", color='purple')
# Diagonal y = x marks equal patent and publication shares.
plt.plot([-0.1, 0.11], [-0.1, 0.11], ls='--', color='black')
for index, term in enumerate(distribution['terms']):
    other_ = get_closest_in_line(index)
    # Red connector: patent-biased term; blue: publication-biased.
    if distribution['publications_norm'][index] < distribution['patents_norm'][index]:
        color = 'red'
    else:
        color = 'blue'
    plt.plot([distribution['publications_norm'][index], other_[0]], [distribution['patents_norm'][index], other_[1]], color=color, lw=0.7)
plt.title('{} asset distribution.'.format(processType))
plt.xlabel('{} Publications'.format(processType))
plt.ylabel('{} Patents'.format(processType))
plt.xlim([-0.01, 0.11])
plt.ylim([-0.01, 0.11])
plt.show()
```
There appears to be a high positive correlation, the more patented a term is, the more researched it is.
Every term has been normalized.
**Outlier Detection**
We create a function that returns the outliers of a given list.
```
def distance_to_mean(i):
    """Euclidean distance from point i to its projection on the y = x line."""
    px = distribution['publications_norm'][i]
    py = distribution['patents_norm'][i]
    # Single projection call instead of two (the function is deterministic).
    qx, qy = get_closest_in_line(i)
    return math.sqrt(((px - qx) ** 2) + ((py - qy) ** 2))
def winner(i):
    """Label point i by the axis with the larger normalized share."""
    pub_share = distribution['publications_norm'][i]
    pat_share = distribution['patents_norm'][i]
    return 'Publications' if pub_share > pat_share else 'Patents'
# create dataframe
term_Dataframe = pd.DataFrame(
    {'Name': distribution['terms'],
     'Patent Percentage': distribution['patents_norm'],
     'Publications Percentage': distribution['publications_norm'],
     'Distance to Mean': [distance_to_mean(i) for i in range(len(distribution['terms']))],
     'Bias': [winner(i) for i in range(len(distribution['terms']))]
    })
# prepare dataframe
# The ten terms furthest from the y = x parity line are the outliers.
term_Dataframe = term_Dataframe[['Name', 'Patent Percentage','Publications Percentage', 'Distance to Mean', 'Bias']]
term_Dataframe = term_Dataframe.sort_values('Distance to Mean', ascending=False).head(n=10)
display(HTML(term_Dataframe.to_html(index=False)))
counter=collections.Counter([winner(i) for i in range(len(distribution['terms']))])
print 'In {} terms, {} appear more in patents and {} appear more in publications.'.format(len(distribution['terms']), counter['Patents'], counter['Publications'])
```
We then plot the above lists's boxplot.
### 3.3. Processing Technologies <a class="anchor" id="three-three"></a>
```
# Processing-technology terms: same parity scatter as for feedstocks.
processType = 'ProcessingTech'
distribution = get_asset_distribution(processType)
fig, ax1 = plt.subplots(figsize=(9,9))
plt.scatter(np.asarray(distribution['publications_norm']), np.asarray(distribution['patents_norm']), marker=".", color='green')
plt.plot([-0.1, 0.15], [-0.1, 0.15], ls='--', color='black')
for index, term in enumerate(distribution['terms']):
    other_ = get_closest_in_line(index)
    # Red connector: patent-biased term; blue: publication-biased.
    if distribution['publications_norm'][index] < distribution['patents_norm'][index]:
        color = 'red'
    else:
        color = 'blue'
    plt.plot([distribution['publications_norm'][index], other_[0]], [distribution['patents_norm'][index], other_[1]], color=color, lw=0.7)
plt.title('{} asset distribution.'.format(processType))
plt.xlabel('{} Publications'.format(processType))
plt.ylabel('{} Patents'.format(processType))
plt.xlim([-0.01, 0.15])
plt.ylim([-0.01, 0.15])
plt.show()
```
On average, processing technologies are more researched than patented. Which makes sense because they concern technologies and not processes.
**Outliers**
```
# create dataframe
# Outlier table for processing technologies (same recipe as feedstocks).
term_Dataframe = pd.DataFrame(
    {'Name': distribution['terms'],
     'Patent Percentage': distribution['patents_norm'],
     'Publications Percentage': distribution['publications_norm'],
     'Distance to Mean': [distance_to_mean(i) for i in range(len(distribution['terms']))],
     'Bias': [winner(i) for i in range(len(distribution['terms']))]
    })
# prepare dataframe
term_Dataframe = term_Dataframe[['Name', 'Patent Percentage','Publications Percentage', 'Distance to Mean', 'Bias']]
term_Dataframe = term_Dataframe.sort_values('Distance to Mean', ascending=False).head(n=10)
display(HTML(term_Dataframe.to_html(index=False)))
counter=collections.Counter([winner(i) for i in range(len(distribution['terms']))])
print 'In {} terms, {} appear more in patents and {} appear more in publications.'.format(len(distribution['terms']), counter['Patents'], counter['Publications'])
```
### 3.4. Output <a class="anchor" id="three-four"></a>
We take a look at the outputs.
```
# Output terms: parity scatter with bias-labelled connector lines.
processType = 'Output'
distribution = get_asset_distribution(processType)
fig, ax1 = plt.subplots(figsize=(9,9))
plt.scatter(np.asarray(distribution['publications_norm']), np.asarray(distribution['patents_norm']), marker=".", color='brown')
plt.plot([-0.1, 0.32], [-0.1, 0.32], ls='--', color='black')
for index, term in enumerate(distribution['terms']):
    other_ = get_closest_in_line(index)
    if distribution['publications_norm'][index] < distribution['patents_norm'][index]:
        color = 'red'
        label = 'Patent Bias'
    else:
        color = 'blue'
        label = 'Publication Bias'
    # NOTE(review): labelling inside the loop duplicates legend entries.
    plt.plot([distribution['publications_norm'][index], other_[0]], [distribution['patents_norm'][index], other_[1]], color=color, lw=0.7, label=label)
plt.title('{} asset distribution.'.format(processType))
plt.xlabel('{} Publications'.format(processType))
plt.ylabel('{} Patents'.format(processType))
plt.xlim([-0.01, 0.31])
plt.ylim([-0.01, 0.31])
plt.show()
```
Outputs appear to have the same behaviour as processing technologies, a higher tendency to be published rather than patented.
**Outliers**
```
# create dataframe
# Outlier table for outputs (same recipe as feedstocks).
term_Dataframe = pd.DataFrame(
    {'Name': distribution['terms'],
     'Patent Percentage': distribution['patents_norm'],
     'Publications Percentage': distribution['publications_norm'],
     'Distance to Mean': [distance_to_mean(i) for i in range(len(distribution['terms']))],
     'Bias': [winner(i) for i in range(len(distribution['terms']))]
    })
# prepare dataframe
term_Dataframe = term_Dataframe[['Name', 'Patent Percentage','Publications Percentage', 'Distance to Mean', 'Bias']]
term_Dataframe = term_Dataframe.sort_values('Distance to Mean', ascending=False).head(n=10)
display(HTML(term_Dataframe.to_html(index=False)))
counter=collections.Counter([winner(i) for i in range(len(distribution['terms']))])
print 'In {} terms, {} appear more in patents and {} appear more in publications.'.format(len(distribution['terms']), counter['Patents'], counter['Publications'])
```
### 3.5. Comparison <a class="anchor" id="three-five"></a>
```
# Overlay all three process types on one normalized parity scatter.
processTypes = ['Feedstock', 'Output', 'ProcessingTech']
colors = ['purple', 'green', 'red']
fig, ax1 = plt.subplots(figsize=(9,9))
for master_idx, process in enumerate(processTypes):
    distribution = get_asset_distribution(process)
    plt.scatter(distribution['publications_norm'], distribution['patents_norm'], color=colors[master_idx], lw=0.7, marker='.', label=process)
plt.plot([-0.1, 0.32], [-0.1, 0.32], ls='--', color='black')
plt.legend()
plt.xlabel('Publications')
plt.ylabel('Patents')
plt.show()
```
| github_jupyter |
```
import holey.rasterize
import numpy as np
# Outline of Great Britain as (x, y) coordinate pairs; np.degrees below
# treats them as radians, so presumably (longitude, latitude) — TODO confirm.
uk_xy = np.asarray([
    [-0.101, 0.872], [-0.080, 0.883], [-0.069, 0.888], [-0.054, 0.890],
    [-0.045, 0.897], [-0.057, 0.895], [-0.073, 0.900], [-0.087, 0.898],
    [-0.090, 0.904], [-0.069, 0.907], [-0.069, 0.921], [-0.080, 0.919],
    [-0.073, 0.928], [-0.052, 0.930], [-0.048, 0.942], [-0.062, 0.949],
    [-0.054, 0.958], [-0.069, 0.954], [-0.087, 0.952], [-0.087, 0.959],
    [-0.080, 0.966], [-0.085, 0.973], [-0.087, 0.965], [-0.097, 0.965],
    [-0.097, 0.975], [-0.092, 0.984], [-0.101, 0.980], [-0.108, 0.980],
    [-0.104, 0.987], [-0.102, 0.993], [-0.115, 1.001], [-0.099, 0.996],
    [-0.101, 1.007], [-0.090, 1.010], [-0.087, 1.021], [-0.069, 1.021],
    [-0.052, 1.022], [-0.052, 1.017], [-0.069, 1.010], [-0.064, 1.005],
    [-0.048, 1.005], [-0.031, 1.005], [-0.031, 0.996], [-0.040, 0.987],
    [-0.045, 0.980], [-0.052, 0.975], [-0.040, 0.973], [-0.026, 0.968],
    [-0.020, 0.954], [-0.006, 0.947], [ 0.003, 0.935], [ 0.006, 0.926],
    [ 0.005, 0.921], [ 0.022, 0.923], [ 0.033, 0.912], [ 0.029, 0.905],
    [ 0.017, 0.900], [ 0.012, 0.895], [ 0.027, 0.893], [ 0.019, 0.886],
    [ 0.001, 0.883], [-0.012, 0.884], [-0.029, 0.883], [-0.038, 0.879],
    [-0.057, 0.881], [-0.062, 0.876], [-0.078, 0.876], [-0.087, 0.872],
    [-0.030, 0.907], [-0.007, 0.905], [-0.057, 0.916], [-0.025, 0.933],
    [-0.077, 0.990], [-0.059, 0.993]])
# Convert from radians to degrees.
uk_xy = np.degrees(uk_xy)
# Triangulation of the outline: each row is a triple of indices into uk_xy.
uk_triangles = np.asarray([
    [67, 66, 1], [65, 2, 66], [ 1, 66, 2], [64, 2, 65], [63, 3, 64],
    [60, 59, 57], [ 2, 64, 3], [ 3, 63, 4], [ 0, 67, 1], [62, 4, 63],
    [57, 59, 56], [59, 58, 56], [61, 60, 69], [57, 69, 60], [ 4, 62, 68],
    [ 6, 5, 9], [61, 68, 62], [69, 68, 61], [ 9, 5, 70], [ 6, 8, 7],
    [ 4, 70, 5], [ 8, 6, 9], [56, 69, 57], [69, 56, 52], [70, 10, 9],
    [54, 53, 55], [56, 55, 53], [68, 70, 4], [52, 56, 53], [11, 10, 12],
    [69, 71, 68], [68, 13, 70], [10, 70, 13], [51, 50, 52], [13, 68, 71],
    [52, 71, 69], [12, 10, 13], [71, 52, 50], [71, 14, 13], [50, 49, 71],
    [49, 48, 71], [14, 16, 15], [14, 71, 48], [17, 19, 18], [17, 20, 19],
    [48, 16, 14], [48, 47, 16], [47, 46, 16], [16, 46, 45], [23, 22, 24],
    [21, 24, 22], [17, 16, 45], [20, 17, 45], [21, 25, 24], [27, 26, 28],
    [20, 72, 21], [25, 21, 72], [45, 72, 20], [25, 28, 26], [44, 73, 45],
    [72, 45, 73], [28, 25, 29], [29, 25, 31], [43, 73, 44], [73, 43, 40],
    [72, 73, 39], [72, 31, 25], [42, 40, 43], [31, 30, 29], [39, 73, 40],
    [42, 41, 40], [72, 33, 31], [32, 31, 33], [39, 38, 72], [33, 72, 38],
    [33, 38, 34], [37, 35, 38], [34, 38, 35], [35, 37, 36]])
class Tri:
    """Minimal triangulation container holding just the two attributes
    (`points`, `simplices`) that `holey.rasterize.pixels` reads."""

    def __init__(self, points, simplices):
        self.points = points
        self.simplices = simplices
# Rasterise the triangulation onto a 300x400 grid and display the
# per-pixel triangle indices.
tri = Tri(uk_xy, uk_triangles)
%matplotlib inline
import matplotlib.pyplot as plt
extent, target = holey.rasterize.pixels(tri, (300, 400))
plt.imshow(target, extent=extent)
plt.title('Indices of triangles')
plt.colorbar();
```
| github_jupyter |
```
from math import exp
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
%matplotlib inline
%config InlineBackend.figure_format = 'svg'
# sklearn.datasets
def create_data():
    """Return (X, y): the first two features and the binary labels of the
    first 100 iris samples (classes 0 and 1 only)."""
    iris = load_iris()
    frame = pd.DataFrame(iris.data, columns=iris.feature_names)
    frame['label'] = iris.target
    frame.columns = ['sepal length', 'sepal width', 'petal length', 'petal width', 'label']
    # 100 samples, keeping sepal length, sepal width and the label column.
    subset = np.array(frame.iloc[:100, [0, 1, -1]])
    # 2 features
    return subset[:, :2], subset[:, -1]
# split the dataset into the Train set and Test set
X, y = create_data()
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
#
# Scatter the two features coloured by class and save the figure to ./fig/.
plt.figure(figsize=(6,4))
plt.tick_params(direction='in')
plt.scatter(X[:,0],X[:,1],c=y)
plt.savefig('./fig/fig1',dpi=300)
class LogisticReressionClassifier:
    """Binary logistic regression trained by per-sample (stochastic)
    gradient updates on the log-likelihood.

    Note: the misspelled class name ("Reression") is kept as-is so that
    existing callers keep working.
    """

    def __init__(self, max_iter=200, learning_rate=0.01):
        # Number of passes over the data and the gradient step size.
        self.max_iter = max_iter
        self.learning_rate = learning_rate

    def sigmoid(self, x):
        """Logistic function 1 / (1 + e^-x)."""
        return 1 / (1 + np.exp(-x))

    def data_matrix(self, X):
        """Prepend a constant 1.0 bias term to every sample."""
        return [[1.0, *row] for row in X]

    def fit(self, X, y):
        """Learn `self.weights` (shape (n_features + 1, 1)) from X, y."""
        design = self.data_matrix(X)
        # initialise all weights (including bias) to zero
        self.weights = np.zeros((len(design[0]), 1), dtype=np.float32)
        for _ in range(self.max_iter):
            for i in range(len(X)):
                pred = self.sigmoid(np.dot(design[i], self.weights))
                err = y[i] - pred  # gradient of the log-likelihood wrt z
                self.weights += self.learning_rate * err * np.transpose([design[i]])
        print('LogisticRegression Model(learning_rate={},iter={})'.format(
            self.learning_rate, self.max_iter))

    def score(self, X_test, y_test):
        """Return the accuracy on (X_test, y_test).

        A sample counts as correct when the raw score is strictly positive
        for label 1 or strictly negative for label 0 (exact zeros count as
        wrong, matching the original behaviour).
        """
        correct = 0
        rows = self.data_matrix(X_test)
        for row, label in zip(rows, y_test):
            z = np.dot(row, self.weights)
            if (z > 0 and label == 1) or (z < 0 and label == 0):
                correct += 1
        return correct / len(rows)
# Train on the split data, evaluate on the held-out set and show the weights.
clf = LogisticReressionClassifier()
clf.fit(X_train, y_train)
clf.score(X_test, y_test)
clf.weights
```
$$
\begin{equation}
\begin{aligned}
decision\space boundary:\\
h_\theta & =sigmoid(\theta^{T}x) \\
& = sigmoid(\theta_0+\theta_1x_1+\theta_2x_2)\\
& = sigmoid(z)\\
& = \frac{1}{1+e^{-\theta^{T}x}}\\
& = \frac{1}{1+e^{-z}}\\
\\
z & =\theta^{T}x\\
\\
h_\theta & = \frac{1}{2} = \frac{1}{1+e^{-z}}\\
1+e^{-z} & = 2\\
e^{-z} & = 1\\
z & = 0 \\
\\
z & = \theta^{T}x\\
z & = \theta_0+\theta_1x_1+\theta_2x_2\\
\theta_0+\theta_1x_1+\theta_2x_2 & = 0\\
x_2 &= -\frac{\theta_0+\theta_1x_1}{\theta_2}
\end{aligned}
\end{equation}
$$
```
# decision boundary
# Plot the learned boundary x2 = -(w0 + w1*x1)/w2 over the data.
# (NOTE: 'x_ponits' is a typo for 'x_points'; kept as-is.)
x_ponits = np.arange(4, 8)
y_ = -(clf.weights[1]*x_ponits + clf.weights[0])/clf.weights[2]
plt.figure(figsize=(6,4))
plt.plot(x_ponits, y_,c='r')
plt.tick_params(direction='in')
plt.scatter(X[:,0],X[:,1],c=y)
plt.savefig('./fig/fig2',dpi=300)
```
| github_jupyter |
```
import string
import re
from os import listdir
from collections import Counter
from nltk.corpus import stopwords
def load_doc(filename):
    """Read a text file and return its entire contents as one string."""
    # `with` guarantees the handle is closed even if read() raises;
    # the original open/read/close leaked the handle on error.
    with open(filename, 'r') as file:
        return file.read()
def clean_doc(doc):
    """Tokenise a document: split on whitespace, strip punctuation, and keep
    only alphabetic, non-stopword tokens longer than one character."""
    punct_re = re.compile('[%s]' % re.escape(string.punctuation))
    stop_words = set(stopwords.words('english'))
    cleaned = []
    for token in doc.split():
        token = punct_re.sub('', token)
        if token.isalpha() and token not in stop_words and len(token) > 1:
            cleaned.append(token)
    return cleaned
def add_doc_to_vocab(filename, vocab):
    """Load and clean `filename`, then count its tokens into `vocab`
    (a collections.Counter, per the commented usage below)."""
    vocab.update(clean_doc(load_doc(filename)))
# vocab = Counter()
# process_docs('txt_sentoken/neg', vocab)
# process_docs('txt_sentoken/pos', vocab)
# print(len(vocab))
def save_list(lines, filename):
    """Write `lines` to `filename`, one item per line."""
    # `with` ensures the file is flushed and closed even on error.
    with open(filename, 'w') as file:
        file.write('\n'.join(lines))

# NOTE(review): the original called save_list(tokens, 'vocab.txt') here, but
# `tokens` is never defined at this point in the notebook (the call raised
# NameError), so it is disabled.
# save_list(tokens, 'vocab.txt')
def doc_to_line(filename, vocab):
    """Load a document, clean it, keep only in-vocab tokens, and return them
    as a single space-separated string."""
    tokens = [w for w in clean_doc(load_doc(filename)) if w in vocab]
    # Join with spaces: the original used ''.join, which glued every word
    # into one unsplittable token and made the downstream
    # Tokenizer.texts_to_matrix encoding meaningless.
    return ' '.join(tokens)
def process_docs(directory, vocab):
    """Convert every .txt file in `directory` to a cleaned line of text."""
    lines = list()
    for filename in listdir(directory):
        # Skip non-.txt files. The original used the bare expression `next`,
        # which is a no-op (it neither skips the file nor advances anything);
        # `continue` is what was intended.
        if not filename.endswith(".txt"):
            continue
        path = directory + '/' + filename
        line = doc_to_line(path, vocab)
        lines.append(line)
    return lines
# Build the vocabulary set from vocab.txt, then convert the negative and
# positive review folders into one-review-per-line files.
vocab_filename = 'vocab.txt'
vocab = load_doc(vocab_filename)
vocab = vocab.split()
vocab = set(vocab)
negative_lines = process_docs('txt_sentoken/neg', vocab)
save_list(negative_lines, 'negative.txt')
positive_lines = process_docs('txt_sentoken/pos', vocab)
save_list(positive_lines, 'positive.txt')
def load_clean_dataset(vocab):
    """Return (docs, labels): all negative reviews (label 0) followed by all
    positive reviews (label 1), each as a cleaned line of text."""
    neg = process_docs('txt_sentoken/neg', vocab)
    pos = process_docs('txt_sentoken/pos', vocab)
    labels = [0] * len(neg) + [1] * len(pos)
    return neg + pos, labels
# Reload the vocabulary and build the full (unsplit) dataset.
vocab_filename = 'vocab.txt'
vocab = load_doc(vocab_filename)
vocab = vocab.split()
vocab = set(vocab)
docs, labels = load_clean_dataset(vocab)
print(len(docs), len(labels))
from keras.preprocessing.text import Tokenizer
def create_tokenizer(lines):
    """Fit a Keras Tokenizer on `lines` and return it."""
    # The original assigned to a misspelled local (`tokenzier`) and then
    # fit and returned the name `tokenizer`, which only resolved to a
    # *global* of that name (or raised NameError). Use one correctly
    # spelled local throughout.
    tokenizer = Tokenizer()
    tokenizer.fit_on_texts(lines)
    return tokenizer
def process_docs(directory, vocab, is_train):
    """Convert reviews in `directory` to cleaned lines of text.

    Files named cv9* form the held-out test split: they are skipped when
    `is_train` is True and are the only ones kept when it is False.
    """
    lines = list()
    for filename in listdir(directory):
        in_test_split = filename.startswith('cv9')
        if is_train == in_test_split:
            continue
        path = directory + '/' + filename
        lines.append(doc_to_line(path, vocab))
    return lines
def load_clean_dataset(vocab, is_train):
    """Return (docs, labels) for the requested split: negative reviews
    (label 0) followed by positive reviews (label 1)."""
    neg = process_docs('txt_sentoken/neg', vocab, is_train)
    pos = process_docs('txt_sentoken/pos', vocab, is_train)
    labels = [0] * len(neg) + [1] * len(pos)
    return neg + pos, labels
# Build the train/test splits, fit the tokenizer on training texts only,
# then encode both splits as term-frequency document matrices.
vocab_filename = 'vocab.txt'
vocab = load_doc(vocab_filename)
vocab = vocab.split()
vocab = set(vocab)
train_docs, ytrain = load_clean_dataset(vocab, True)
test_docs, ytest = load_clean_dataset(vocab, False)
tokenizer = create_tokenizer(train_docs)
# docs, labels = load_clean_dataset(vocab)
# print(len(docs), len(labels))
Xtrain = tokenizer.texts_to_matrix(train_docs, mode = 'freq')
Xtest = tokenizer.texts_to_matrix(test_docs, mode = 'freq')
```
| github_jupyter |
#### Comment Installer notre Data
Première étape : télécharger le .rar
avoir les différents fichiers comme : (une liste...)
*Train:
** files
** Image
** Label
** Model
http://www.robots.ox.ac.uk/~vgg/data/vgg_face/
Fonction pour récupérer une image et un fichier CSV à partir d'une URL.
```
import requests
def recup(url, df, x, Folder_Images, Folder_Labels, new_name):
    """Download the image at `url` to <Folder_Images>/<new_name>.jpg and write
    the label string (columns 2..5 of row `x` of `df`, space-separated) to
    <Folder_Labels>/<new_name>.csv."""
    response = requests.get(url, allow_redirects=True, timeout=1)
    image_path = os.path.join(Folder_Images, str(new_name) + ".jpg")
    label_path = os.path.join(Folder_Labels, str(new_name) + ".csv")
    # Four space-separated values in a single unnamed CSV column.
    values = " ".join(str(df[c][x]) for c in (2, 3, 4, 5))
    labels = pd.DataFrame({'': [values]})
    with open(image_path, 'wb') as img:
        img.write(response.content)
    labels.to_csv(label_path, index=False)
```
On teste la fonction.
```
import os
import pandas as pd
# Smoke test: download the first URL of the first annotation file into the
# Image/ and Label/ folders next to the notebook.
Folder_Images = os.path.join(os.path.dirname(os.path.abspath('Image')),'Image')
Folder_Labels = os.path.join(os.path.dirname(os.path.abspath('Label')),'Label')
path = "files"
i = os.listdir(path)[0]
df = pd.read_csv(os.path.join(path,i), sep=" ",header=None)
url = df[1][0]
recup(url, df, 0, Folder_Images, Folder_Labels, 0)
```
On parcourt toute la liste des URL.
```
# Walk every annotation file in `files/` and download each .jpg URL it
# lists, numbering the saved image/label pairs with a running counter.
# (The original also set path = "Image" first, a dead assignment that was
# immediately overwritten; removed.)
counter = 0
Folder_Images = os.path.join(os.path.dirname(os.path.abspath('Image')), 'Image')
Folder_Labels = os.path.join(os.path.dirname(os.path.abspath('Label')), 'Label')
path = "files"
for e, i in enumerate(os.listdir(path)):
    try:
        df = pd.read_csv(os.path.join(path, i), sep=" ", header=None)
        for x in range(len(df)):
            url = df[1][x]
            if url.endswith(".jpg"):
                print(url)
                recup(url, df, x, Folder_Images, Folder_Labels, counter)
                counter += 1
    except Exception:
        # Best-effort scrape: a bad file or a failed download must not stop
        # the whole crawl. (Was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit.)
        print("Error")
        continue
```
On nettoie les données reçues.
```
#Try with 2 image not 100 xD, it for Clear my databass
# Delete every image in which the Haar cascade finds no face, along with its
# matching label CSV. The cascade classifier is loop-invariant, so it is
# loaded once instead of being re-read for every image.
# NOTE(review): `label` (the label folder path) is defined outside this
# cell — confirm it is set before running.
detector = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
for e, i in enumerate(os.listdir(path)):
    print(i)
    try:
        img = cv2.imread(os.path.join(path, i))
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        faces = detector.detectMultiScale(gray, 1.3, 5)
        if len(faces) == 0:
            print("Remove")
            os.remove(os.path.join(path, i))
            os.remove(os.path.join(label, i[:-4] + ".csv"))
        else:
            print("pass")
    except Exception:
        # Unreadable or corrupt image: delete the pair.
        # (Was a bare `except:`, which also swallowed KeyboardInterrupt.)
        print("Remove")
        os.remove(os.path.join(path, i))
        os.remove(os.path.join(label, i[:-4] + ".csv"))
```
| github_jupyter |
```
#export
from fastai2.torch_basics import *
from fastai2.data.all import *
from fastai2.text.core import *
#hide
from nbdev.showdoc import *
#default_exp text.data
#default_cls_lvl 3
```
# Text data
> Functions and transforms to help gather text data in a `Datasets`
## Backwards
Reversing the text can provide higher accuracy with an ensemble with a forward model. All that is needed is a `type_tfm` that will reverse the text as it is brought in:
```
#export
def reverse_text(x):
    "Reverse `x` along its first dimension."
    return x.flip(0)
# Quick check: reversing [0, 1, 2] gives [2, 1, 0].
t = tensor([0,1,2])
r = reverse_text(t)
test_eq(r, tensor([2,1,0]))
```
## Numericalizing
Numericalization is the step in which we convert tokens to integers. The first step is to build a correspondence token to index that is called a vocab.
```
#export
def make_vocab(count, min_freq=3, max_vocab=60000, special_toks=None):
    "Create a vocab of `max_vocab` size from `Counter` `count` with items present more than `min_freq`"
    tokens = [tok for tok, freq in count.most_common(max_vocab) if freq >= min_freq]
    special_toks = ifnone(special_toks, defaults.text_spec_tok)
    # Special tokens always come first, in their given order, without duplicates.
    for special in reversed(special_toks):
        if special in tokens:
            tokens.remove(special)
        tokens.insert(0, special)
    tokens = tokens[:max_vocab]
    # Pad with 'xxfake' so the length is a multiple of 8 (note: this appends
    # 8 fakes even when the length already is one, matching the original).
    padding = ['xxfake' for _ in range(0, 8 - len(tokens) % 8)]
    return tokens + padding
```
If there are more than `max_vocab` tokens, the ones kept are the most frequent.
> Note: For performance when using mixed precision, the vocabulary is always made of size a multiple of 8, potentially by adding `xxfake` tokens.
```
# The vocab keeps tokens with freq >= min_freq plus all special tokens,
# padded with xxfake to a multiple of 8.
count = Counter(['a', 'a', 'a', 'a', 'b', 'b', 'c', 'c', 'd'])
test_eq(set([x for x in make_vocab(count) if not x.startswith('xxfake')]),
    set(defaults.text_spec_tok + 'a'.split()))
test_eq(len(make_vocab(count))%8, 0)
test_eq(set([x for x in make_vocab(count, min_freq=1) if not x.startswith('xxfake')]),
    set(defaults.text_spec_tok + 'a b c d'.split()))
test_eq(set([x for x in make_vocab(count,max_vocab=12, min_freq=1) if not x.startswith('xxfake')]),
    set(defaults.text_spec_tok + 'a b c'.split()))
#export
# Semantic tensor subtypes: they carry no extra behaviour but let
# `@typedispatch`-ed functions (e.g. show_batch below) pick the right overload.
class TensorText(TensorBase): pass
class LMTensorText(TensorText): pass
TensorText.__doc__ = "Semantic type for a tensor representing text"
LMTensorText.__doc__ = "Semantic type for a tensor representing text in language modeling"
#export
class Numericalize(Transform):
    "Reversible transform of tokenized texts to numericalized ids"
    def __init__(self, vocab=None, min_freq=3, max_vocab=60000, special_toks=None, pad_tok=None):
        store_attr(self, 'vocab,min_freq,max_vocab,special_toks,pad_tok')
        # o2i: token -> id lookup; defaultdict(int) maps unknown tokens to id 0.
        self.o2i = None if vocab is None else defaultdict(int, {v:k for k,v in enumerate(vocab)})

    def setups(self, dsets):
        # Build the vocab from the (training) items when none was given.
        if dsets is None: return
        if self.vocab is None:
            count = dsets.counter if getattr(dsets, 'counter', None) is not None else Counter(p for o in dsets for p in o)
            if self.special_toks is None and hasattr(dsets, 'special_toks'):
                self.special_toks = dsets.special_toks
            self.vocab = make_vocab(count, min_freq=self.min_freq, max_vocab=self.max_vocab, special_toks=self.special_toks)
        # Exclude the 'xxfake' padding tokens from the reverse mapping.
        self.o2i = defaultdict(int, {v:k for k,v in enumerate(self.vocab) if v != 'xxfake'})

    def encodes(self, o): return TensorText(tensor([self.o2i [o_] for o_ in o]))
    def decodes(self, o): return L(self.vocab[o_] for o_ in o if self.vocab[o_] != self.pad_tok)
```
If no `vocab` is passed, one is created at setup from the data, using `make_vocab` with `min_freq` and `max_vocab`.
```
# With min_freq=1 every token makes it into the vocab.
start = 'This is an example of text'
num = Numericalize(min_freq=1)
num.setup(L(start.split(), 'this is another text'.split()))
test_eq(set([x for x in num.vocab if not x.startswith('xxfake')]),
    set(defaults.text_spec_tok + 'This is an example of text this another'.split()))
test_eq(len(num.vocab)%8, 0)
t = num(start.split())
test_eq(t, tensor([11, 9, 12, 13, 14, 10]))
test_eq(num.decode(t), start.split())
# With min_freq=2, rare tokens map to the unknown id 0 (xxunk).
num = Numericalize(min_freq=2)
num.setup(L('This is an example of text'.split(), 'this is another text'.split()))
test_eq(set([x for x in num.vocab if not x.startswith('xxfake')]),
    set(defaults.text_spec_tok + 'is text'.split()))
test_eq(len(num.vocab)%8, 0)
t = num(start.split())
test_eq(t, tensor([0, 9, 0, 0, 0, 10]))
test_eq(num.decode(t), f'{UNK} is {UNK} {UNK} {UNK} text'.split())
#hide
df = pd.DataFrame({'texts': ['This is an example of text', 'this is another text']})
tl = TfmdLists(df, [attrgetter('text'), Tokenizer.from_df('texts'), Numericalize(min_freq=2)])
test_eq(tl, [tensor([2, 8, 9, 10, 0, 0, 0, 11]), tensor([2, 9, 10, 0, 11])])
```
## LM_DataLoader -
```
#export
def _maybe_first(o): return o[0] if isinstance(o, tuple) else o
#export
def _get_tokenizer(ds):
    "Find a `Tokenizer` on `ds.tokenizer` (directly or inside a list), else None."
    tok = getattr(ds, 'tokenizer', None)
    if isinstance(tok, Tokenizer):
        return tok
    if isinstance(tok, (list, L)):
        # Return the first Tokenizer in the collection, if any.
        return next((t for t in tok if isinstance(t, Tokenizer)), None)
#export
def _get_lengths(ds):
    "Return cached text lengths for `ds.items` when its tokenizer provides them."
    tok = _get_tokenizer(ds)
    return None if tok is None else tok.get_lengths(ds.items)
#export
#TODO: add backward
@log_args(but_as=TfmdDL.__init__)
@delegates()
class LMDataLoader(TfmdDL):
    "A `DataLoader` suitable for language modeling"
    def __init__(self, dataset, lens=None, cache=2, bs=64, seq_len=72, num_workers=0, **kwargs):
        # All texts are treated as one big stream; `_maybe_first` drops any label.
        self.items = ReindexCollection(dataset, cache=cache, tfm=_maybe_first)
        self.seq_len = seq_len
        if lens is None: lens = _get_lengths(dataset)
        if lens is None: lens = [len(o) for o in self.items]  # fallback: full pass over the data
        self.lens = ReindexCollection(lens, idxs=self.items.idxs)
        # The "-1" is to allow for final label, we throw away the end that's less than bs
        corpus = round_multiple(sum(lens)-1, bs, round_down=True)
        self.bl = corpus//bs #bl stands for batch length
        self.n_batches = self.bl//(seq_len) + int(self.bl%seq_len!=0)
        self.last_len = self.bl - (self.n_batches-1)*seq_len  # length of the final (possibly short) batch
        self.make_chunks()
        super().__init__(dataset=dataset, bs=bs, num_workers=num_workers, **kwargs)
        self.n = self.n_batches*bs

    def make_chunks(self): self.chunks = Chunks(self.items, self.lens)

    def shuffle_fn(self,idxs):
        # Shuffle the underlying texts, then rebuild the chunked stream.
        self.items.shuffle()
        self.make_chunks()
        return idxs

    def create_item(self, seq):
        if seq>=self.n: raise IndexError
        # The last row of each column may be shorter than seq_len.
        sl = self.last_len if seq//self.bs==self.n_batches-1 else self.seq_len
        st = (seq%self.bs)*self.bl + (seq//self.bs)*self.seq_len
        txt = self.chunks[st : st+sl+1]
        # Input is txt[:-1]; the target txt[1:] is shifted by one token.
        return LMTensorText(txt[:-1]),txt[1:]

    @delegates(TfmdDL.new)
    def new(self, dataset=None, seq_len=None, **kwargs):
        # Reuse cached lengths only when the dataset is unchanged.
        lens = self.lens.coll if dataset is None else None
        seq_len = self.seq_len if seq_len is None else seq_len
        return super().new(dataset=dataset, lens=lens, seq_len=seq_len, **kwargs)

show_doc(LMDataLoader, title_level=2)
```
`dataset` should be a collection of numericalized texts for this to work. `lens` can be passed for optimizing the creation, otherwise, the `LMDataLoader` will do a full pass of the `dataset` to compute them. `cache` is used to avoid reloading items unnecessarily.
The `LMDataLoader` will concatenate all texts (maybe `shuffle`d) in one big stream, split it in `bs` contiguous sentences, then go through those `seq_len` at a time.
```
#hide
# Batches read the concatenated stream in bs parallel columns, seq_len at a time.
bs,sl = 4,3
ints = L([0,1,2,3,4],[5,6,7,8,9,10],[11,12,13,14,15,16,17,18],[19,20],[21,22]).map(tensor)
dl = LMDataLoader(ints, bs=bs, seq_len=sl)
list(dl)
test_eq(list(dl),
    [[tensor([[0, 1, 2], [5, 6, 7], [10, 11, 12], [15, 16, 17]]),
      tensor([[1, 2, 3], [6, 7, 8], [11, 12, 13], [16, 17, 18]])],
     [tensor([[3, 4], [8, 9], [13, 14], [18, 19]]),
      tensor([[4, 5], [9, 10], [14, 15], [19, 20]])]])
bs,sl = 4,3
ints = L([0,1,2,3,4],[5,6,7,8,9,10],[11,12,13,14,15,16,17,18],[19,20],[21,22,23],[24]).map(tensor)
dl = LMDataLoader(ints, bs=bs, seq_len=sl)
test_eq(list(dl),
    [[tensor([[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]]),
      tensor([[1, 2, 3], [7, 8, 9], [13, 14, 15], [19, 20, 21]])],
     [tensor([[3, 4, 5], [ 9, 10, 11], [15, 16, 17], [21, 22, 23]]),
      tensor([[4, 5, 6], [10, 11, 12], [16, 17, 18], [22, 23, 24]])]])
#hide
#Check lens work
dl = LMDataLoader(ints, lens=ints.map(len), bs=bs, seq_len=sl)
test_eq(list(dl),
    [[tensor([[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]]),
      tensor([[1, 2, 3], [7, 8, 9], [13, 14, 15], [19, 20, 21]])],
     [tensor([[3, 4, 5], [ 9, 10, 11], [15, 16, 17], [21, 22, 23]]),
      tensor([[4, 5, 6], [10, 11, 12], [16, 17, 18], [22, 23, 24]])]])
# Even when shuffled, the target is always the input shifted by one.
dl = LMDataLoader(ints, bs=bs, seq_len=sl, shuffle=True)
for x,y in dl: test_eq(x[:,1:], y[:,:-1])
((x0,y0), (x1,y1)) = tuple(dl)
#Second batch begins where first batch ended
test_eq(y0[:,-1], x1[:,0])
test_eq(type(x0), LMTensorText)
#hide
#test new works
dl = LMDataLoader(ints, bs=bs, seq_len=sl, shuffle=True)
dl1 = dl.new()
test_eq(dl1.seq_len, sl)
dl2 = dl.new(seq_len=2)
test_eq(dl2.seq_len, 2)
```
### Showing -
```
#export
@typedispatch
def show_batch(x: TensorText, y, samples, ctxs=None, max_n=10, trunc_at=150, **kwargs):
    # Render a batch of texts as a dataframe, truncating each to `trunc_at`.
    if ctxs is None: ctxs = get_empty_df(min(len(samples), max_n))
    if trunc_at is not None: samples = L((s[0].truncate(trunc_at),*s[1:]) for s in samples)
    # Delegate the per-sample rendering to the generic implementation.
    ctxs = show_batch[object](x, y, samples, max_n=max_n, ctxs=ctxs, **kwargs)
    display_df(pd.DataFrame(ctxs))
    return ctxs
#export
@typedispatch
def show_batch(x: LMTensorText, y, samples, ctxs=None, max_n=10, trunc_at=150, **kwargs):
    # LM batches truncate input and target here, then reuse the TensorText overload.
    samples = L((s[0].truncate(trunc_at), s[1].truncate(trunc_at)) for s in samples)
    return show_batch[TensorText](x, None, samples, ctxs=ctxs, max_n=max_n, trunc_at=None, **kwargs)
```
## Classification
For classification, we deal with the fact that texts don't all have the same length by using padding.
```
#export
def pad_input(samples, pad_idx=1, pad_fields=0, pad_first=False, backwards=False):
    "Function that collect `samples` and adds padding"
    pad_fields = L(pad_fields)
    # Longest length per padded field across the batch.
    max_len_l = pad_fields.map(lambda f: max([len(s[f]) for s in samples]))
    if backwards: pad_first = not pad_first
    def _f(field_idx, x):
        if field_idx not in pad_fields: return x
        idx = pad_fields.items.index(field_idx) #TODO: remove items if L.index is fixed
        # NOTE(review): `sl` is computed but never used below.
        sl = slice(-len(x), sys.maxsize) if pad_first else slice(0, len(x))
        pad = x.new_zeros(max_len_l[idx]-x.shape[0])+pad_idx
        x1 = torch.cat([pad, x] if pad_first else [x, pad])
        if backwards: x1 = x1.flip(0)
        return retain_type(x1, x)
    return [tuple(map(lambda idxx: _f(*idxx), enumerate(s))) for s in samples]
```
`pad_idx` is used for the padding, and the padding is applied to the `pad_fields` of the samples. The padding is applied at the beginning if `pad_first` is `True`, and if `backwards` is added, the tensors are flipped.
```
# pad_input pads the selected fields to the batch max, optionally at the
# front and/or flipped; the semantic tensor type is retained.
test_eq(pad_input([(tensor([1,2,3]),1), (tensor([4,5]), 2), (tensor([6]), 3)], pad_idx=0),
    [(tensor([1,2,3]),1), (tensor([4,5,0]),2), (tensor([6,0,0]), 3)])
test_eq(pad_input([(tensor([1,2,3]), (tensor([6]))), (tensor([4,5]), tensor([4,5])), (tensor([6]), (tensor([1,2,3])))], pad_idx=0, pad_fields=1),
    [(tensor([1,2,3]),(tensor([6,0,0]))), (tensor([4,5]),tensor([4,5,0])), ((tensor([6]),tensor([1, 2, 3])))])
test_eq(pad_input([(tensor([1,2,3]),1), (tensor([4,5]), 2), (tensor([6]), 3)], pad_idx=0, pad_first=True),
    [(tensor([1,2,3]),1), (tensor([0,4,5]),2), (tensor([0,0,6]), 3)])
test_eq(pad_input([(tensor([1,2,3]),1), (tensor([4,5]), 2), (tensor([6]), 3)], pad_idx=0, backwards=True),
    [(tensor([3,2,1]),1), (tensor([5,4,0]),2), (tensor([6,0,0]), 3)])
x = test_eq(pad_input([(tensor([1,2,3]),1), (tensor([4,5]), 2), (tensor([6]), 3)], pad_idx=0, backwards=True),
    [(tensor([3,2,1]),1), (tensor([5,4,0]),2), (tensor([6,0,0]), 3)])
#hide
#Check retain type
x = [(TensorText([1,2,3]),1), (TensorText([4,5]), 2), (TensorText([6]), 3)]
y = pad_input(x, pad_idx=0)
for s in y: test_eq(type(s[0]), TensorText)
#export
def pad_input_chunk(samples, pad_idx=1, pad_first=True, seq_len=72):
    "Pad `samples` by adding padding by chunks of size `seq_len`"
    # Pad every first field up to the batch max, splitting the padding into
    # a seq_len-multiple block and a remainder; the block goes to the far
    # side, the remainder stays next to the text.
    target_len = max(len(s[0]) for s in samples)
    def _pad_one(x):
        missing = target_len - x.shape[0]
        bulk = x.new_zeros((missing // seq_len) * seq_len) + pad_idx
        rest = x.new_zeros(missing % seq_len) + pad_idx
        if pad_first:
            padded = torch.cat([bulk, x, rest])
        else:
            padded = torch.cat([x, rest, bulk])
        return retain_type(padded, x)
    return [(_pad_one(s[0]), *s[1:]) for s in samples]
```
The difference with the base `pad_input` is that most of the padding is applied first (if `pad_first=True`) or at the end (if `pad_first=False`) but only by a round multiple of `seq_len`. The rest of the padding is applied to the end (or the beginning if `pad_first=False`). This is to work with `SequenceEncoder` with recurrent models.
```
# The padding splits into a seq_len-multiple chunk plus a remainder.
test_eq(pad_input_chunk([(tensor([1,2,3,4,5,6]),1), (tensor([1,2,3]), 2), (tensor([1,2]), 3)], pad_idx=0, seq_len=2),
    [(tensor([1,2,3,4,5,6]),1), (tensor([0,0,1,2,3,0]),2), (tensor([0,0,0,0,1,2]), 3)])
test_eq(pad_input_chunk([(tensor([1,2,3,4,5,6]),), (tensor([1,2,3]),), (tensor([1,2]),)], pad_idx=0, seq_len=2),
    [(tensor([1,2,3,4,5,6]),), (tensor([0,0,1,2,3,0]),), (tensor([0,0,0,0,1,2]),)])
test_eq(pad_input_chunk([(tensor([1,2,3,4,5,6]),), (tensor([1,2,3]),), (tensor([1,2]),)], pad_idx=0, seq_len=2, pad_first=False),
    [(tensor([1,2,3,4,5,6]),), (tensor([1,2,3,0,0,0]),), (tensor([1,2,0,0,0,0]),)])
#export
def _default_sort(x): return len(x[0])
@delegates(TfmdDL)
class SortedDL(TfmdDL):
    "A `DataLoader` that goes throught the item in the order given by `sort_func`"
    def __init__(self, dataset, sort_func=None, res=None, **kwargs):
        super().__init__(dataset, **kwargs)
        self.sort_func = _default_sort if sort_func is None else sort_func
        if res is None and self.sort_func == _default_sort: res = _get_lengths(dataset)
        # res[i] is the sort key of item i; computing it may require a full
        # pass over the dataset, so callers can pass it precomputed.
        self.res = [self.sort_func(self.do_item(i)) for i in range_of(self.dataset)] if res is None else res
        if len(self.res) > 0: self.idx_max = np.argmax(self.res)

    def get_idxs(self):
        # Without shuffling, iterate strictly from largest to smallest key.
        idxs = super().get_idxs()
        if self.shuffle: return idxs
        return sorted(idxs, key=lambda i: self.res[i], reverse=True)

    def shuffle_fn(self, idxs):
        # Shuffle, then sort within large chunks so each batch holds items of
        # roughly similar size; the item with the largest key is placed first
        # (presumably so the first batch exercises peak size — NOTE(review)).
        idxs = np.random.permutation(len(self.dataset))
        idx_max = np.where(idxs==self.idx_max)[0][0]
        idxs[0],idxs[idx_max] = idxs[idx_max],idxs[0]
        sz = self.bs*50
        chunks = [idxs[i:i+sz] for i in range(0, len(idxs), sz)]
        chunks = [sorted(s, key=lambda i: self.res[i], reverse=True) for s in chunks]
        sort_idx = np.concatenate(chunks)
        sz = self.bs
        batches = [sort_idx[i:i+sz] for i in range(0, len(sort_idx), sz)]
        # `np.int` was removed in NumPy 1.24; the builtin `int` is what it aliased.
        sort_idx = np.concatenate(np.random.permutation(batches[1:-1])) if len(batches) > 2 else np.array([], dtype=int)
        sort_idx = np.concatenate((batches[0], sort_idx) if len(batches)==1 else (batches[0], sort_idx, batches[-1]))
        return iter(sort_idx)

    @delegates(TfmdDL.new)
    def new(self, dataset=None, **kwargs):
        # `val_res` lets callers supply precomputed keys for a new dataset.
        if 'val_res' in kwargs and kwargs['val_res'] is not None: res = kwargs['val_res']
        else: res = self.res if dataset is None else None
        return super().new(dataset=dataset, res=res, **kwargs)
```
`res` is the result of `sort_func` applied on all elements of the `dataset`. You can pass it if available to make the init much faster by avoiding an initial pass over the whole dataset. For example if sorting by text length (as in the default `sort_func`, called `_default_sort`) you should pass a list with the length of each element in `dataset` to `res` to take advantage of this speed-up.
To get the same init speed-up for the validation set, `val_res` (a list of text lengths for your validation set) can be passed to the `kwargs` argument of `SortedDL`. Below is an example to reduce the init time by passing a list of text lengths for both the training set and the validation set:
```
# Illustrative snippet only — the `...` are placeholders, not runnable code.
# Pass the training dataset text lengths to SortedDL
srtd_dl=partial(SortedDL, res = train_text_lens)
# Pass the validation dataset text lengths
dl_kwargs = [{},{'val_res': val_text_lens}]
# init our Datasets
dsets = Datasets(...)
# init our Dataloaders
dls = dsets.dataloaders(...,dl_type = srtd_dl, dl_kwargs = dl_kwargs)
```
If `shuffle` is `True`, this will shuffle a bit the results of the sort to have items of roughly the same size in batches, but not in the exact sorted order.
```
# Without shuffle, batches come out sorted by decreasing text length.
ds = [(tensor([1,2]),1), (tensor([3,4,5,6]),2), (tensor([7]),3), (tensor([8,9,10]),4)]
dl = SortedDL(ds, bs=2, before_batch=partial(pad_input, pad_idx=0))
test_eq(list(dl), [(tensor([[ 3, 4, 5, 6], [ 8, 9, 10, 0]]), tensor([2, 4])),
    (tensor([[1, 2], [7, 0]]), tensor([1, 3]))])
# With shuffle, the first batch still contains the longest item.
ds = [(tensor(range(random.randint(1,10))),i) for i in range(101)]
dl = SortedDL(ds, bs=2, create_batch=partial(pad_input, pad_idx=-1), shuffle=True, num_workers=0)
batches = list(dl)
max_len = len(batches[0][0])
for b in batches:
    assert(len(b[0])) <= max_len
    test_ne(b[0][-1], -1)
```
## TransformBlock for text
To use the data block API, you will need this build block for texts.
```
#export
class TextBlock(TransformBlock):
    "A `TransformBlock` for texts"
    @delegates(Numericalize.__init__)
    def __init__(self, tok_tfm, vocab=None, is_lm=False, seq_len=72, backwards=False, **kwargs):
        # Tokenize, then numericalize; optionally reverse for backwards models.
        type_tfms = [tok_tfm, Numericalize(vocab, **kwargs)]
        if backwards: type_tfms += [reverse_text]
        # The original wrote `return super().__init__(...)`; `__init__` should
        # not return a value, and that form only worked because the call
        # returns None. Plain call instead.
        super().__init__(type_tfms=type_tfms,
                         dl_type=LMDataLoader if is_lm else SortedDL,
                         dls_kwargs={'seq_len': seq_len} if is_lm else {'before_batch': partial(pad_input_chunk, seq_len=seq_len)})

    @classmethod
    @delegates(Tokenizer.from_df, keep=True)
    def from_df(cls, text_cols, vocab=None, is_lm=False, seq_len=72, backwards=False, min_freq=3, max_vocab=60000, **kwargs):
        "Build a `TextBlock` from a dataframe using `text_cols`"
        return cls(Tokenizer.from_df(text_cols, **kwargs), vocab=vocab, is_lm=is_lm, seq_len=seq_len,
                   backwards=backwards, min_freq=min_freq, max_vocab=max_vocab)

    @classmethod
    @delegates(Tokenizer.from_folder, keep=True)
    def from_folder(cls, path, vocab=None, is_lm=False, seq_len=72, backwards=False, min_freq=3, max_vocab=60000, **kwargs):
        "Build a `TextBlock` from a `path`"
        return cls(Tokenizer.from_folder(path, **kwargs), vocab=vocab, is_lm=is_lm, seq_len=seq_len,
                   backwards=backwards, min_freq=min_freq, max_vocab=max_vocab)
```
For efficient tokenization, you probably want to use one of the factory methods. Otherwise, you can pass your custom `tok_tfm` that will deal with tokenization (if your texts are already tokenized, you can pass `noop`), a `vocab`, or leave it to be inferred on the texts using `min_freq` and `max_vocab`.
`is_lm` indicates if we want to use texts for language modeling or another task, `seq_len` is only necessary to tune if `is_lm=False`, and is passed along to `pad_input_chunk`.
```
# Render the documentation for the `from_df` factory method.
show_doc(TextBlock.from_df)
```
Here is an example using a sample of IMDB stored as a CSV file:
```
# Sample of IMDB stored as CSV: build a classification DataBlock and show a batch.
path = untar_data(URLs.IMDB_SAMPLE)
df = pd.read_csv(path/'texts.csv')
imdb_clas = DataBlock(
    blocks=(TextBlock.from_df('text', seq_len=72), CategoryBlock),
    get_x=ColReader('text'), get_y=ColReader('label'), splitter=ColSplitter())
dls = imdb_clas.dataloaders(df, bs=64)
dls.show_batch(max_n=2)
```
`vocab`, `is_lm`, `seq_len`, `min_freq` and `max_vocab` are passed to the main init, the other argument to `Tokenizer.from_df`.
```
# Render the documentation for the `from_folder` factory method.
show_doc(TextBlock.from_folder)
```
`vocab`, `is_lm`, `seq_len`, `min_freq` and `max_vocab` are passed to the main init, the other argument to `Tokenizer.from_folder`.
## TextDataLoaders -
```
#export
class TextDataLoaders(DataLoaders):
"Basic wrapper around several `DataLoader`s with factory methods for NLP problems"
@classmethod
@delegates(DataLoaders.from_dblock)
def from_folder(cls, path, train='train', valid='valid', valid_pct=None, seed=None, vocab=None, text_vocab=None, is_lm=False,
tok_tfm=None, seq_len=72, backwards=False, **kwargs):
"Create from imagenet style dataset in `path` with `train` and `valid` subfolders (or provide `valid_pct`)"
splitter = GrandparentSplitter(train_name=train, valid_name=valid) if valid_pct is None else RandomSplitter(valid_pct, seed=seed)
blocks = [TextBlock.from_folder(path, text_vocab, is_lm, seq_len, backwards) if tok_tfm is None else TextBlock(tok_tfm, text_vocab, is_lm, seq_len, backwards)]
if not is_lm: blocks.append(CategoryBlock(vocab=vocab))
get_items = partial(get_text_files, folders=[train,valid]) if valid_pct is None else get_text_files
dblock = DataBlock(blocks=blocks,
get_items=get_items,
splitter=splitter,
get_y=None if is_lm else parent_label)
return cls.from_dblock(dblock, path, path=path, seq_len=seq_len, **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_df(cls, df, path='.', valid_pct=0.2, seed=None, text_col=0, label_col=1, label_delim=None, y_block=None,
text_vocab=None, is_lm=False, valid_col=None, tok_tfm=None, seq_len=72, backwards=False, **kwargs):
"Create from `df` in `path` with `valid_pct`"
blocks = [TextBlock.from_df(text_col, text_vocab, is_lm, seq_len, backwards) if tok_tfm is None else TextBlock(tok_tfm, text_vocab, is_lm, seq_len, backwards)]
if y_block is None and not is_lm:
blocks.append(MultiCategoryBlock if is_listy(label_col) and len(label_col) > 1 else CategoryBlock)
if y_block is not None and not is_lm: blocks += (y_block if is_listy(y_block) else [y_block])
splitter = RandomSplitter(valid_pct, seed=seed) if valid_col is None else ColSplitter(valid_col)
dblock = DataBlock(blocks=blocks,
get_x=ColReader("text"),
get_y=None if is_lm else ColReader(label_col, label_delim=label_delim),
splitter=splitter)
return cls.from_dblock(dblock, df, path=path, seq_len=seq_len, **kwargs)
@classmethod
def from_csv(cls, path, csv_fname='labels.csv', header='infer', delimiter=None, **kwargs):
    "Create from `csv` file in `path/csv_fname`"
    # Read the csv into a DataFrame, then defer all remaining work to `from_df`.
    csv_path = Path(path)/csv_fname
    frame = pd.read_csv(csv_path, header=header, delimiter=delimiter)
    return cls.from_df(frame, path=path, **kwargs)
# Re-apply `delegates` so `from_csv` also advertises `from_df`'s keyword arguments in its signature.
TextDataLoaders.from_csv = delegates(to=TextDataLoaders.from_df)(TextDataLoaders.from_csv)
show_doc(TextDataLoaders, title_level=2)
```
You should not use the init directly but one of the following factory methods. All those factory methods accept as arguments:
- `text_vocab`: the vocabulary used for numericalizing texts (if not passed, it's inferred from the data)
- `tok_tfm`: if passed, uses this `tok_tfm` instead of the default
- `seq_len`: the sequence length used for batch
- `bs`: the batch size
- `val_bs`: the batch size for the validation `DataLoader` (defaults to `bs`)
- `shuffle_train`: if we shuffle the training `DataLoader` or not
- `device`: the PyTorch device to use (defaults to `default_device()`)
```
show_doc(TextDataLoaders.from_folder)
```
If `valid_pct` is provided, a random split is performed (with an optional `seed`) by setting aside that percentage of the data for the validation set (instead of looking at the grandparents folder). If a `vocab` is passed, only the folders with names in `vocab` are kept.
Here is an example on a sample of the IMDB movie review dataset:
```
#slow
path = untar_data(URLs.IMDB)
dls = TextDataLoaders.from_folder(path)
dls.show_batch(max_n=3)
show_doc(TextDataLoaders.from_df)
```
`seed` can optionally be passed for reproducibility. `text_col`, `label_col` and optionally `valid_col` are indices or names of columns for texts/labels and the validation flag. `label_delim` can be passed for a multi-label problem if your labels are in one column, separated by a particular char. `y_block` should be passed to indicate your type of targets, in case the library did not infer it properly.
Here are examples on subsets of IMDB:
```
dls = TextDataLoaders.from_df(df, path=path, text_col='text', label_col='label', valid_col='is_valid')
dls.show_batch(max_n=3)
dls = TextDataLoaders.from_df(df, path=path, text_col='text', is_lm=True, valid_col='is_valid')
dls.show_batch(max_n=3)
show_doc(TextDataLoaders.from_csv)
```
Opens the csv file with `header` and `delimiter`, then pass all the other arguments to `TextDataLoaders.from_df`.
```
dls = TextDataLoaders.from_csv(path=path, csv_fname='texts.csv', text_col='text', label_col='label', valid_col='is_valid')
dls.show_batch(max_n=3)
```
## Export -
```
#hide
from nbdev.export import notebook2script
notebook2script()
```
| github_jupyter |
##### Copyright 2019 The TensorFlow Hub Authors.
Licensed under the Apache License, Version 2.0 (the "License");
```
# Copyright 2019 The TensorFlow Hub Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
```
# Multilingual Universal Sentence Encoder Q&A 检索
<table class="tfo-notebook-buttons" align="left">
<td><a target="_blank" href="https://tensorflow.google.cn/hub/tutorials/retrieval_with_tf_hub_universal_encoder_qa"><img src="https://tensorflow.google.cn/images/tf_logo_32px.png">在 TensorFlow.org 查看</a></td>
<td><a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/zh-cn/hub/tutorials/retrieval_with_tf_hub_universal_encoder_qa.ipynb"><img src="https://tensorflow.google.cn/images/colab_logo_32px.png">在 Google Colab 中运行 </a></td>
<td><a target="_blank" href="https://github.com/tensorflow/docs-l10n/blob/master/site/zh-cn/hub/tutorials/retrieval_with_tf_hub_universal_encoder_qa.ipynb"><img src="https://tensorflow.google.cn/images/GitHub-Mark-32px.png">在 GitHub 中查看源代码</a></td>
<td><a href="https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/zh-cn/hub/tutorials/retrieval_with_tf_hub_universal_encoder_qa.ipynb"><img src="https://tensorflow.google.cn/images/download_logo_32px.png">下载笔记本</a></td>
<td data-parent-segment-id="12900598"><a href="https://tfhub.dev/s?q=google%2Funiversal-sentence-encoder-multilingual-qa%2F3%20OR%20google%2Funiversal-sentence-encoder-qa%2F3"><img src="https://tensorflow.google.cn/images/hub_logo_32px.png">查看 TF Hub 模型</a></td>
</table>
这是使用 [Univeral Encoder Multilingual Q&A 模型](https://tfhub.dev/google/universal-sentence-encoder-multilingual-qa/3)进行文本问答检索的演示,其中对模型的 **question_encoder** 和 **response_encoder** 的用法进行了说明。我们使用来自 [SQuAD](https://rajpurkar.github.io/SQuAD-explorer/) 段落的句子作为演示数据集,每个句子及其上下文(句子周围的文本)都使用 **response_encoder** 编码为高维嵌入向量。这些嵌入向量存储在使用 [simpleneighbors](https://pypi.org/project/simpleneighbors/) 库构建的索引中,用于问答检索。
检索时,从 [SQuAD](https://rajpurkar.github.io/SQuAD-explorer/) 数据集中随机选择一个问题,并使用 **question_encoder** 将其编码为高维嵌入向量,然后查询 simpleneighbors 索引会返回语义空间中最近邻的列表。
### 更多模型
您可以在[此处](https://tfhub.dev/s?module-type=text-embedding)找到所有当前托管的文本嵌入向量模型,还可以在[此处](https://tfhub.dev/s?dataset=squad)找到所有在 SQuAD 上训练过的模型。
## 设置
```
%%capture
#@title Setup Environment
# Install the latest Tensorflow version.
!pip install -q tensorflow_text
!pip install -q simpleneighbors[annoy]
!pip install -q nltk
!pip install -q tqdm
#@title Setup common imports and functions
import json
import nltk
import os
import pprint
import random
import simpleneighbors
import urllib
from IPython.display import HTML, display
from tqdm.notebook import tqdm
import tensorflow.compat.v2 as tf
import tensorflow_hub as hub
from tensorflow_text import SentencepieceTokenizer
nltk.download('punkt')
def download_squad(url):
    """Download a SQuAD JSON file from `url` and return it parsed into a dict.

    Parameters
    ----------
    url : str
        Location of the SQuAD-format JSON file.

    Returns
    -------
    dict
        The parsed JSON payload.
    """
    # Use a context manager so the HTTP response is always closed;
    # the original left the connection open (resource leak).
    with urllib.request.urlopen(url) as response:
        return json.load(response)
def extract_sentences_from_squad_json(squad):
    """Return unique (sentence, context) pairs from a parsed SQuAD dataset.

    Each paragraph context is split into sentences with NLTK, and every
    sentence is paired with the full paragraph text it came from.
    """
    pairs = []
    for article in squad['data']:
        for paragraph in article['paragraphs']:
            context = paragraph['context']
            for sentence in nltk.tokenize.sent_tokenize(context):
                pairs.append((sentence, context))
    # Deduplicate; note this does not preserve insertion order.
    return list(set(pairs))
def extract_questions_from_squad_json(squad):
    """Return unique (question, first-answer-text) pairs from a parsed SQuAD dataset.

    Questions without any answers are skipped.
    """
    qa_pairs = [
        (qa['question'], qa['answers'][0]['text'])
        for article in squad['data']
        for paragraph in article['paragraphs']
        for qa in paragraph['qas']
        if qa['answers']
    ]
    # Deduplicate; note this does not preserve insertion order.
    return list(set(qa_pairs))
def output_with_highlight(text, highlight):
    """Wrap `text` in an HTML list item, bolding every occurrence of `highlight`.

    Parameters
    ----------
    text : str
        Sentence to render.
    highlight : str
        Substring to wrap in <b></b> tags wherever it occurs.

    Returns
    -------
    str
        ``"<li> ...</li>\\n"`` with all matches bolded.
    """
    # Guard: the original looped forever for an empty `highlight`,
    # because str.find('') always returns 0 and `text` never shrank.
    if not highlight:
        return "<li> " + text + "</li>\n"
    # str.replace bolds each non-overlapping occurrence left-to-right,
    # exactly like the original find/advance loop.
    bolded = text.replace(highlight, '<b>' + highlight + '</b>')
    return "<li> " + bolded + "</li>\n"
def display_nearest_neighbors(query_text, answer_text=None):
    """Render the nearest indexed sentences for `query_text` as HTML.

    Relies on module-level globals: `model` (TF-Hub QA encoder), `index`
    (the simpleneighbors index) and `num_results`. When `answer_text` is
    given, it is displayed and highlighted inside each retrieved sentence.
    """
    # Encode the question and query the nearest-neighbor index with it.
    query_embedding = model.signatures['question_encoder'](tf.constant([query_text]))['outputs'][0]
    search_results = index.nearest(query_embedding, n=num_results)
    if answer_text:
        result_md = '''
<p>Random Question from SQuAD:</p>
<p> <b>%s</b></p>
<p>Answer:</p>
<p> <b>%s</b></p>
''' % (query_text , answer_text)
    else:
        result_md = '''
<p>Question:</p>
<p> <b>%s</b></p>
''' % query_text
    result_md += '''
<p>Retrieved sentences :
<ol>
'''
    if answer_text:
        # Bold the known answer wherever it appears in a retrieved sentence.
        for s in search_results:
            result_md += output_with_highlight(s, answer_text)
    else:
        for s in search_results:
            result_md += '<li>' + s + '</li>\n'
    result_md += "</ol>"
    display(HTML(result_md))
```
运行以下代码块,下载并将 SQuAD 数据集提取为:
- **句子**是(文本, 上下文)元组的列表,SQuAD 数据集中的每个段落都用 NLTK 库拆分成句子,并且句子和段落文本构成(文本, 上下文)元组。
- **问题**是(问题, 答案)元组的列表。
注:您可以选择下面的 **squad_url**,使用本演示为 SQuAD 训练数据集或较小的 dev 数据集(1.1 或 2.0)建立索引。
```
#@title Download and extract SQuAD data
squad_url = 'https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v1.1.json' #@param ["https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v2.0.json", "https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v2.0.json", "https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v1.1.json", "https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v1.1.json"]
squad_json = download_squad(squad_url)
sentences = extract_sentences_from_squad_json(squad_json)
questions = extract_questions_from_squad_json(squad_json)
print("%s sentences, %s questions extracted from SQuAD %s" % (len(sentences), len(questions), squad_url))
print("\nExample sentence and context:\n")
sentence = random.choice(sentences)
print("sentence:\n")
pprint.pprint(sentence[0])
print("\ncontext:\n")
pprint.pprint(sentence[1])
print()
```
以下代码块使用 <a>Universal Encoder Multilingual Q&A 模型</a>的 **question_encoder** 和 <strong>response_encoder</strong> 签名对 TensorFlow 计算图 **g** 和**会话**进行设置。
```
#@title Load model from tensorflow hub
module_url = "https://tfhub.dev/google/universal-sentence-encoder-multilingual-qa/3" #@param ["https://tfhub.dev/google/universal-sentence-encoder-multilingual-qa/3", "https://tfhub.dev/google/universal-sentence-encoder-qa/3"]
model = hub.load(module_url)
```
以下代码块计算所有文本的嵌入向量和上下文元组,并使用 **response_encoder** 将它们存储在 [simpleneighbors](https://pypi.org/project/simpleneighbors/) 索引中。
```
#@title Compute embeddings and build simpleneighbors index
batch_size = 100
# Encode a single sentence first only to discover the embedding dimensionality.
encodings = model.signatures['response_encoder'](
    input=tf.constant([sentences[0][0]]),
    context=tf.constant([sentences[0][1]]))
index = simpleneighbors.SimpleNeighbors(
    len(encodings['outputs'][0]), metric='angular')
print('Computing embeddings for %s sentences' % len(sentences))
# Group the (sentence, context) pairs into fixed-size batches.
# NOTE(review): the zip(*(iter(...),)*batch_size) idiom silently drops the
# final partial batch, so up to batch_size-1 sentences are never indexed —
# confirm this is acceptable for the demo.
slices = zip(*(iter(sentences),) * batch_size)
num_batches = int(len(sentences) / batch_size)
for s in tqdm(slices, total=num_batches):
    response_batch = list([r for r, c in s])  # sentence texts
    context_batch = list([c for r, c in s])   # their paragraph contexts
    encodings = model.signatures['response_encoder'](
        input=tf.constant(response_batch),
        context=tf.constant(context_batch)
    )
    # Store each sentence keyed by its embedding vector.
    for batch_index, batch in enumerate(response_batch):
        index.add_one(batch, encodings['outputs'][batch_index])
index.build()
print('simpleneighbors index for %s sentences built.' % len(sentences))
```
检索时,使用 **question_encoder** 对问题进行编码,而问题嵌入向量用于查询 simpleneighbors 索引。
```
#@title Retrieve nearest neighbors for a random question from SQuAD
num_results = 25 #@param {type:"slider", min:5, max:40, step:1}
query = random.choice(questions)
display_nearest_neighbors(query[0], query[1])
```
| github_jupyter |
<h1 style="color: #0f49c4 ;"><center>Correlaid - Machine Learning Spring School</center></h1>
<h1 style="color: #0f49c4 ;"><center>Introduction to Python</center></h1>
<h2>Content</h2>
<ol>
<li> Python and Jupyter </li>
<li> Basics </li>
<li> Control Flow and Functions </li>
<li> Import and Export </li>
<li> NumPy and Pandas </li>
<li> Visualization </li>
</ol>
### Literature and further sources
Joel Grus: *Data Science from Scratch*, O'Reilly, 2015
Of particular interest is Chapter 2 *A Crash Course in Python*, which forms the basis for this notebook:
http://proquest.tech.safaribooksonline.de/book/databases/9781491901410/2dot-a-crash-course-in-python/python_html
J.R. Johansson: *Scientific Python Lectures*
https://github.com/jrjohansson/scientific-python-lectures/blob/master/Lecture-1-Introduction-to-Python-Programming.ipynb
Python Tutorials
http://www.tutorialspoint.com/python/
https://www.python-kurs.eu
<h1><center> 1 Python and Jupyter
### 1.1 Getting started with Jupyter
Jupyter is based on [iPython](https://ipython.org), an interactive Python shell. Jupyter itself is a notebook application in which executable Python code and explanations (like this one) can be mixed into documents called notebooks. The individual blocks in the notebook are called Notes or Cells. Blocks containing explanations are written in [Markdown Language](https://de.wikipedia.org/wiki/Markdown).
Code blocks contain executable Python code. The type of a block can be changed using the menu bar and `Esc-M` (Markdown) or `Esc-Y` (Code).
Each block is executable. To execute a block, the following options are available:
- `Ctrl-Enter`: Execute
- `Shift-Enter`: Execute and jump to the next block
- Run-Cell command in the menu bar: like `Shift-Enter`.
<div class="alert alert-info"><b>Try it!</b> Double-click once in this text and then press Shift-Enter.</div>
Other shortcuts: by clicking over the keyboard icon in the menu bar or by [cheat-sheet](https://www.google.de/search?q=jupyter+cheat+sheet).
### 1.2 The first Python program
Python is an interpreter language. That is, every command is given to the interpreter (running in the background) and executed.
<div class="alert alert-info"><b>Try it!</b> Press Shift-Enter in the next code cell.</div>
```
print("Hello World!")
print(3+2)
# Variable assignment
x = 3+5
# Displaying a value within a Python session even without print
x
# Calculator 2^8
2**8
```
### 1.3 The Zen of Python
As you will soon realize, Python has its own peculiarities and it seems a bit unusual at first glance. At second glance, you'll notice that many things in Java or C# are very awkward to formulate compared to Python. Good Python code is described as "Pythonic". Some basic principles of Python programming are these:
*Beautiful is better than ugly.
Explicit is better than implicit.
Simple is better than complex.
Complex is better than complicated.
Readability counts.
There should be one-- and preferably only one--obvious way to do it.*
All 19 aphorisms can be found at [python.org](http://legacy.python.org/dev/peps/pep-0020/). You can also display them via `import this` in the Python interpreter.
<div class="alert alert-info"><b>Try it!</b> Create a new code cell and execute the above mentioned command.</div>
### 1.4 Python 2.7 vs. Python 3.x
The [history of Python](https://en.wikipedia.org/wiki/History_of_Python) dates back to the early 90s. Over the years, a lot of messiness had accumulated, which was cleaned up in 2009 with Python 3.0. It was consciously accepted that Python 3 programs are incompatible with Python 2.7, the final 2.x version. Since many libraries are based on Python 2.7, this version is still used today.
We work with Python 3, so when copying 2.7 code examples from the Internet, adjustments may be necessary. The most important differences are briefly presented below. More information can be found [here](https://docs.python.org/3.0/whatsnew/3.0.html).
#### Print Function
An important difference from Python 3.x to Python 2.7: `print` is a function and must be written with parentheses:
print "Hello" # allowed in Python 2.7
print ("Hello") # Python 3.x
#### Type conversions
In Python 3, type checking has been tightened so that number types in particular must be explicitly cast for combination with strings.
sum = 3+5
print "Sum: "+sum # Python 2.7
print("Summe: "+str(summe)) # Python 3.x
#### Generator objects instead of list, dictionaries etc.
In Python 3, many functions like `range` (which creates a sequence of numbers) were changed to return generator objects instead of lists. Generators are more efficient because they produce values only when they are needed.
### 1.5 Indentation and line breaks
Code blocks in Python are not defined by curly braces, but by indentation alone. The head of a code block (branch, loop, function, etc.) is always terminated with a colon `:`, after which it must be indented.
An `if` statement, which in C-based languages looks like this:
if (a < b)
{
statement;
statement;
}
is thus formulated in Python like this:
if a < b:
statement
statement
Be careful: The number of spaces must be correct and tabs should be avoided. Jupyter does the latter automatically.
A line break always introduces a new command. However, inside parentheses, spaces and line breaks are ignored.
list_of_lists = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
easier_to_read_list_of_lists = [ [1, 2, 3],
[4, 5, 6],
[7, 8, 9] ]
If a long command is to be wrapped, a backslash can be used to insert a newline:
very_long_variable_name = module_a.very_long_function_name() \
.concatenated_function_call()
### 1.6 Comments and Docstrings
In Python a **comment** starts with <code>#</code>. The rest of the line is ignored. In case of a multi-line comment, each line must be marked with a <code>#</code>.
```
# This is a comment.
# This is a
# multi-line comment.
```
A **docstring** is usually the first statement of a Python module (class, method, function). A docstring has the syntax <code>"""This is my documentation"""</code>.
With <code>help</code> you can see the documentation.
```
def myFunction():
"""Documentation of the function"""
help(myFunction)
help(print)
```
<h1><center> 2 Basics
### 2.1 Operators
**Arithmetic Operators**
Python's got - like a calculator - arithmetic operators, functions and constants.
<div class="alert alert-danger"><strong>Attention:</strong> The decimal separator is a point and not a comma!!</div>
| **Operator, function, constant** | **description** |
|:---------------------------|:------------------------------------------|
| +, -, *, /, // | addition, substraction, multiplication, division |
| `**`, % | power, modulo (remainder) |
| abs() | absolut value |
| round() | round |
<div class="alert alert-info"><b>Try it!</b> Execute the following cells.</div>
```
1.1 + 1.1
5*3
print(10/3)
print(10//3)
abs(-3.33334)
```
The module `math` provides more functions and constants. It is included by default in every Python installation. Therefore, we do not need to install it through our Anaconda Navigator.
```
import math as m
```
| **Operator, function, constant** | **description** |
|:---------------------------|:--------------------------------------|
| m.exp() | exponential function |
| m.log(), m.log10() | logarithm |
| m.sin(), m.cos(), m.tan() | trigonometric functions |
| m.sqrt() | Square root |
| m.pi | π |
<div class="alert alert-info"><b>Try it!</b> Execute the following cells.</div>
```
m.exp(4)
m.sqrt(4)
m.pi
```
**Assignment Operator**
With the assignment operator `=` it is possible to name values or results in order to reuse them.
```
a = 2+3
print(a)
A # Python is case sensitive
a, b = 1, 2 # Multiple assignment:
# a and b are assigned simultaneously
a
b
```
**Comparison Operator**\
Are used to compare two values. A comparison returns either `True` or `False`.
| **Operator** | **description** |
|:---------------------------|:------------------------------------------------|
| >, >=, <, <= | greater, greater-than-or-equal-to, less, less-than-or-equal-to |
| ==, != | equal, not equal |
| not | NOT (negation) |
| & , and | AND |
| \|, or | OR |
<div class="alert alert-info"><b>Try it!</b> Execute the following cells.</div>
```
4 >= 3
a = 3 # Assignment
a == 10
a != 10
(3 < 2) & (4 == (3 + 1))
```
<div class="alert alert-success"><b>Exercise:</b> Assign <code>a</code> the value 10 and <code>b</code> the value 5. Is <code>b</code> greater-than-equal to <code>a</code> satisfied?</div>
### 2.2 Data types
Each variable has a data type. In this chapter, we look at the four most common types:
* **strings**: Strings of characters, spaces, or other symbols such as special characters, numbers, etc. To define strings, all characters are written inside single or double quotes, e.g. "hello", 'house'.
* **integers**: Positive or negative numbers, e.g. -1, 4, 7, -10.
* **floats**: Decimal numbers or floating point numbers, e.g., 1.34, -3.33.
* **boolean**: Boolean variables can take the values `True` and `False`
```
print(9)
print(3.76)
print(3.00)
print("Hello world!")
print(True)
print(False)
```
The data type is given using the function `type()`. The output `int` stands for an integer object, `float` for a decimal number, `str` for a string and `bool` for a truth value.
```
print(9, type(9))
print(3.76, type(3.76))
print(3.00, type(3.00))
print("Hello world!", type("Hello world!"))
print(True, type(True))
print(False, type(False))
```
<div class="alert alert-danger"><strong>Attention:</strong> We cannot caluclate with strings.</div>
```
3 + "2" # Integer + String
3.76 + "2" # Float + String
```
#### 2.2.1 Typecasting
The data type of a variable can be converted to another data type. This conversion is called *typecasting*. For example, an integer object can be converted to a float object:
```
float(100)
```
<div class="alert alert-success"><b>Exercise:</b> What happens if we typecast a float into an integer? And what happens if we typecast a boolean into an integer or float?</div>
#### 2.2.2 String Operations
We can access individual elements (characters) of a string. This is called indexing. An index denotes a certain position and is specified with *square* brackets and the index number.
<div class="alert alert-danger"><strong>Attention:</strong> The index position count starts with value 0. </div>
The indexing follows the logic: <code>[Start:Stop:Interval]</code>. However, these parameters are optional.
```
alphabet = "abcdefg"
```
| **a** | **b** | **c** | **d** | **e** | **f** | **g** |
|-------|-------|-------|-------|-------|-------|-------|
| 0 | 1 | 2 | 3 | 4 | 5 | 6 |
```
alphabet[0]
alphabet[0:4]
```
<div class="alert alert-danger"><strong>Attention:</strong> The interval excludes the right endpoint, i.e. the interval contains the element on position 0, but not the element on position 4: (0, 4]</div>
```
alphabet[4:]
alphabet[::2]
alphabet[-1] # last element
len(alphabet) # number of elements
len("Hello World") # Blank character
```
| **H** | **e** | **l** | **l** | **o** | | **W** | **o** | **r** | **l** | **d** |
|-------|-------|-------|-------|-------|-------|-------|-------|-------|-------|------|
| 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |10 |
<div class="alert alert-success"><b>Exercise:</b> You have given the string: <b>s1="house"</b>. What expressions give us the last character <b>e</b> in <b>s1</b>?
<ol>
<li> s1[len(s1)-1]</li>
<li> s1[len(s1)+1]</li>
<li> s1[-1]</li>
<li> s1[len(s1)]</li>
</ol>
</div>
### 2.3 Data collections
#### 2.3.1 Tuples
Tuples can contain objects of different data types. They are special due to their immutability. While you can overwrite values for other data containers, this is not possible for tuples. Tuples are instantiated with **round brackets** and the elements are separated by a comma.
```
tuple1 = ("Hello", "World", 123)
```
| **"Hello"** | **"World"** | **123** |
|-------------|-------------|---------|
| <center>0</center> | <center>1</center> | <center>2</center> |
```
type(tuple1)
tuple1[0]
type(tuple1[0])
print(tuple1[2])
print(type(tuple1[2]))
# Immutable
tuple1[0] = 3 # Error
# Concatenate
tuple2 = tuple1 + ("456", 789)
tuple2
# Nest
tuple_nest = (1, 4, ("House", "Bread"), "Disco", (4, 9))
len(tuple_nest)
```
| **1** | **4** | **("House", "Bread")** | **"Disco"** | **(4, 9)** |
|-------|-------|----------------------|-------------|------------|
| 0 | 1 | <center>2</center> | <center>3</center> | <center>4</center> |
```
print("3rd element: ", tuple_nest[2])
print("First element of the third element: ", tuple_nest[2][0])
```
#### 2.3.2 Lists
Lists are often used data containers that can contain different data types, but also empty sets. Lists can be instantiated by **square brackets** or by the function `list()`.
```
# Empty list
list1 = []
list1
type(list1)
list2 = [1, 4, ("House", "Bread"), "Disco", [4, 9]]
print("3rd element of list: ", list2[2])
print("Last element of list: ", list2[-1])
# Mutable
list2[0] = 1.43
print(list2)
```
Methods for data operations are available to lists.
<div class="alert alert-danger"><strong>Attention:</strong> Both functions and methods are called with their respective names and <b>round</b> brackets. However, functions and methods differ: methods are functions that are bound to an object. Functions (e.g. print()) can be called directly — methods only in conjunction with a linked object (e.g. a.append()).</div>
```
list3 = [4, 3, 1]
list3.append(2)
list3
list2.extend(list3)
print(list2)
list3.pop()
print(list3)
list3.sort()
list3
list3.remove(3)
list3
```
#### 2.3.3 Dictionaries
The instantiation of a dictionary is done by **curly braces** or by the function `dict()`. It consists of an arbitrary number of *keys* and *values*. Instead of numeric indexes, as is the case with lists, a dictionary is indexed by its *keys*.
| Key 1 | Key 2 | Key 3 |
|--------- |--------- |--------- |
| Value 1 | Value 2 | Value 3 |
```
dict1 = {"strongly disagree": 1, "disagree": 2, "neutral": 3, "agree": 4, "strongly agree": 5}
dict1
dict1["neutral"]
dict1["neutral"] = -1
dict1["neutral"]
dict1.keys()
dict1.values()
dict1['do not know'] = 0
dict1
del(dict1['do not know'])
dict1
"strongly agree" in dict1
"do not know" in dict1
```
#### 2.3.4 Sets
In a set, each element occurs exactly once. Sets are *mutable* like dictionaries or lists - that means values can be removed, added or changed. The instantiation is done like with dictionaries using **curly brackets**, but without the *key* values or with 'set()'.
```
set1 = {"tomato", "zucchini", "potato", "bell pepper"}
set1 #ordered
# convert list to set
list6 = [3, 3, 4, 5, 5, 6]
set2 = set(list6)
print(set2) # each element occurs once
set1.add("apple")
set1
set1.remove("potato")
set1
"apple" in set1
```
set1 | apple | bell pepper | tomato | zucchini
---- | ----- | -------- | ------- | -------
**set3** | **pear** | **raspberry** | **bell pepper** | **tomato**
```
# intersection
set3 = {"pear", "raspberry", "bell pepper", "tomato"}
set1 & set3
set1.intersection(set3)
# difference set
set1.difference(set3)
set3.difference(set1)
set1.union(set3)
set4 = {"apple", "zucchini"}
set4.issubset(set1)
set1.issuperset(set4)
set1.isdisjoint(set4)
```
<div class="alert alert-success"><b>Exercise:</b> <code>a = (3, 1, 2)</code> is given. Sort the numbers using the function <b>sorted</b>. How does the data type changes?</div>
<div class="alert alert-success"><b>Exercise:</b> The tuple <code>b = ("a", "1", 2, ("b", 3, "c"), ("4", "d"))</code> is given. Index both nested tuples <code>("b", 3, "c"), ("4", "d")</code> and the character <b>c</b>.</div>
### 2.4 Counter and Errors
[Counters](https://docs.python.org/3/library/collections.html?highlight=counter#counter-objects) are used to quickly determine the frequency of occurrence of different keys in a list. Counters must be imported from the built-in Python module **collections**.
```
from collections import Counter
num_list = [0, 1,2, 0, 1, 1, 1, 2, 2, 3]
c = Counter(num_list)
print(c)
doc = ['I', 'I','I', 'am','am','here','who','who','who','who','else']
word_count = Counter(doc)
print(word_count)
print(word_count.most_common(3))
```
We already know two different error types:
* **Type Error**: Wrong data type
* **Name Error**: Not assigned variable name
However, there are many more errors which may occur.
Python gives us not only the error type, but also the line where the error is. In this example we see that the error happened in line 6:
```
int1 = 5
int2 = 8
string1 = "3"
print(int1 + int2)
print(int1 + string1)
print(int1 * int2)
```
<div class="alert alert-danger"><strong>Attention</strong>: Python executes a script line by line. This means that the lines are not executed further once an error occurs. In the previous example, Python does not execute the seventh line (int1 * int2).</div>
**Dealing with errors**\
Errors cannot always be avoided. However, we can create scripts that are prepared for errors and can react to them. \
A **try-except** statement can be helpful:
```
# example 1
try:
print(3 / 0)
except:
print("Do not divide by zero!")
# example 2
try:
print(3 / 1)
except:
print("Do not divide by zero!")
```
First the **try** block is executed. If no error occurs, the **except** block is skipped (example 2). If an error occurs during the execution of the **try** block, the rest of the block is skipped and the **except** block is executed.
```
# example 3
x = 0
try:
print(3 / x)
except NameError:
print("What is x?")
# example 4
try:
print(3 / x)
except NameError:
print("What is x?")
except ZeroDivisionError:
print("Do not divide by zero!")
# example 5
try:
print(3 / x)
except (NameError, ZeroDivisionError):
print("Not assigned or divided by zero!")
# example 6
try:
print(3 / x)
except:
pass
```
The **pass** instruction does not result in any operation. This means that nothing happens when **pass** is executed. While comments (#) are ignored and not executed, the **pass** instruction is executed, but nothing happens.
<h1><center> 3 Control Flow and Functions
The control flow denotes the sequence of commands in our program. In this learning unit we will get to know three control structures (case distinctions: `if`, `elif`, `else`; condition-controlled loops: `while` and collection-controlled loops: `for`), which allow us to deviate from the sequential processing of commands and to make the sequence of commands dependent on individual expressions. In addition to control structures, we will also familiarize ourselves with functions.
### 3.1 Conditions
**if-statement**
```
num = 1
if num > 0:
print("The number is positive.")
if num > 0:
print("The number is positive.") # No indentation Error
if num > 0: print("The number is positive.")
num = -1
if num > 0:
print("The number is positive.")
```
**if-else-statement**
```
if num > 0:
print("The number is positive.")
else:
print("The number is not positive.")
```
**elif-statement**
The indented block after elif is only checked if the expression of the previous **if condition** has taken the truth value `False`. If the condition of elif also takes the truth value `False`, either the next **elif** condition is checked or the statement in the **else** block is executed. <br>
There can be multiple **elif** blocks, but only one **else** block. This must be placed at the end of the statement. Additional **elif** blocks after the **else** block are not allowed.
```
num = 3
if num > 5:
print("{} is a positive number greater 5.".format(num))
elif 0 < num <= 5:
print("{} is a positive number, but less than 5.".format(num))
elif num == 0:
print("{} is zero.".format(num))
else:
print("{} is a negative number".format(num))
```
<div class="alert alert-success"><b>Exercise:</b> Write a statement that for all numbers greater than 0 and less than or equal to 10 outputs the sentence: The number {x} is greater than 0 and less than or equal to 10. <br>
Test your statement using the number 11.</div>
### 3.2 While loops
Loops allow us to execute code iteratively, changing variables systematically. `while` loops are executed as long as a certain condition is met.<br>
<div class="alert alert-danger"><strong>Attention</strong>: If the condition cannot take the truth value False, the process is not aborted and the computer must be restarted!.</div>
```
idx = 1 # Define an index
while idx < 5: # The loop is executed until i is no longer less than 5
print(idx)
idx += 1 # Short for idx = idx + 1
idx = 1
while idx < 5:
print(idx)
idx += 1
else:
print(f"The varaible takes value {idx}. The condition is not met anymore.")
```
### 3.3 For loops
Before we look at `for` loops, we will learn about the `range()` function. This dynamically generates a sequence of numbers within the desired parameters. The general syntax is:
range(start, end, increment).
If no start value is given, Python starts at the number 0. If no number range (steps) is defined, the interpreter generates a list of consecutive numbers (e.g. ones steps). The final value must always be specified and is **not** included in the generated list.
Examples:
range(5) # generates the values 0, 1, 2, 3, 4
range(3, 10) # generates the values 3, 4, 5, 6, 7, 8, 9
range(4, 10, 2) # generates the values 4, 6, 8
```
a = range(10)
list(a)
list(range(3, 10))
list(range(-3, 10))
list(range(0, 10, 2)) # Steps
# Argument-unpacking Operator *
[1, 2, *range(7, 15)]
```
We know that each element in a data container has a specific index value. `for` loops take advantage of this and thus make it possible to operate on every element in a data container.
```
# very very inefficient
x = [1, 2, 3, 4, 5]
print(x[0])
print(x[1])
print(x[2])
print(x[3])
print(x[4])
# better
for i in x:
print(i)
for x in "banane":
print(x)
k = range(100, 200, 10)
for i, x in enumerate(k): # enumerate gives back the values + indexes of the data container
print(i, x)
```
<div class="alert alert-success"><b>Exercise:</b> The list <code>l = list(range(1, 13))*4</code> is given. Assign the value 2 to even-numbered entries that are also divisible by 5, the value 1 to all other even-numbered entries, and the value 0 to odd-numbered entries. <br>
<b>Hint</b>: Use a for-loop!</div>
<div class="alert alert-success"><b>Exercise:</b> Display all values of the Fibonacci series up to 1000. <br>
<b>Hint</b>: Use a while-loop and multiple assignments!</div>
<div class="alert alert-success"><b>Exercise:</b> The list <code>col = ["rot", "blau", "gelb", "rot", "blau"]</code> is given. Copy all elements of col to col_new using a while-loop. Stop the loop, if the color is not blue or red. <br>
<b>Hint</b>: Use the method append! </div>
**List comprehension**\
List comprehensions allow us to easily and efficiently generate lists in Python. Every list comprehension can be written as a for-loop; however, not every for-loop can be written as a list comprehension. We distinguish between conditional (containing an if-statement) and unconditional (no if-statement) list comprehensions. The syntax looks like this: \
`[expression for item in iterable if condition == True]`
```
numbers = [4, 7, 23, 76, 103]
[i + 1 for i in numbers] # New list
[i + 1 for i in numbers if i < 100]
[i + 1 if i < 100 else i *10 for i in numbers] # If-else-Statement
[(i, i**2) for i in numbers] # Tuples
```
<div class="alert alert-success"><b>Exercise:</b> Logarithmize as sparingly as possible all values in <code>numbers = [15, 100, 30, 43, 80]</code> using the function <b>log</b> from the package <b>math</b>.</div>
### 3.4 Functions
```
# First function
def hello(name):
    """Print a personalised greeting for *name*."""
    greeting = "Hello, " + name + "!"
    print(greeting)

hello("Johanna")
```
**Local and global variables** \
Each variable which is defined **inside** a function, is only valid locally.
```
# Demo 1: reading a local variable before it is assigned inside the
# function raises UnboundLocalError (a subclass of NameError), because
# the assignment below makes 'a' local to the whole function body.
def ex_function():
    print(a)
    a = 500
    print("within function:", a)

a = 3
print("before function:", a)
# Bug fix: the original called `bsp_funktion()` — the untranslated German
# name of this function — which raised an unrelated NameError instead of
# demonstrating the intended error.
ex_function() # UnboundLocalError (a NameError subclass)
print("after function:", a)

# Demo 2: an assignment inside the function creates a *local* 'a';
# the global 'a' is untouched.
def ex_function():
    a = 500
    print("within function:", a)

a = 3
print("before function:", a)
ex_function() # a = 500 only within the function
print("after function:", a) # a is 3 globally

# Demo 3: 'global' makes the assignment target the module-level 'a'.
def ex_function():
    global a # variable is defined globally
    a = 500
    print("function:", a)

a = 3
print("before function:", a)
ex_function()
print("after function:", a) # a = 500 globally
```
**Print and Return**
```
def ex_print(a):
    # Prints the doubled value; implicitly returns None.
    b = a * 2
    print(b)

output_print = ex_print(5)
print(output_print)  # None, because ex_print returns nothing
output_print*2 # TypeError (error message): None cannot be multiplied

def ex_return(a):
    # Returns the doubled value so the caller can keep working with it.
    b = a * 2
    return b

output_return = ex_return(5)
print(output_return)
output_return*2
```
**Parameters**
* Positional: Are obligatory and do not have any standardized value
* Keyword: Do have a standardized value
```
def hello(name, weather="sunny"):
    """Return a greeting for *name* that mentions the current *weather*.

    *name* is positional (obligatory); *weather* is a keyword parameter
    with the default value "sunny".
    """
    return "Hello, {}! The weather is {}.".format(name, weather)

print(hello("Johanna"))
print(hello("Felix", "rainy"))
print(hello("rainy", "Felix"))  # positional order matters!
print(hello(weather="rainy", name="Felix"))
```
<div class="alert alert-success"><b>Exercise:</b> Write the function <b>max_two</b>, which displays the maximum value of two numbers. Test your function with random numbers.</div>
<h1><center> 4 Import and Export with Pandas
**.csv**
```
import pandas as pd
df = pd.read_csv("Datensatz_Herzinfarkt.csv", sep=",")
df.dtypes # Pandas recognize data types
pd.read_csv("Datensatz_Herzinfarkt.csv", header= None)
pd.read_csv("Datensatz_Herzinfarkt.csv", header = None, names=["A", "B", "C", "D", "E", "F", "G", "H"])
pd.read_csv("Datensatz_Herzinfarkt.csv", usecols=["Blutdruck", "Cholesterin", "Herzinfarkt"])
pd.read_csv("Datensatz_Herzinfarkt.csv", nrows=100)
```
**Git**
```
url = "https://raw.githubusercontent.com/datasets/rio2016/master/athletes.csv"
df2 = pd.read_csv(url)
df2
```
**Libraries**
```
import seaborn as sns
sns.get_dataset_names()
df_iris = sns.load_dataset("iris")
df_iris
```
**Export to .csv**
```
df_iris.to_csv("df_neu.csv")
```
<h1><center> 5 NumPy and Pandas
### 5.1 NumPy
Numerical Python (`NumPy`) is a standard library for mathematical and numerical functions in Python. With `NumPy` you can create efficient multidimensional arrays, perform fast arithmetic operations (without using loops) and generate random numbers.
```
import numpy as np
```
**Generate Arrays**\
NumPy extends Python with additional data structures like the NumPy array. This looks like a list at first sight, but only data of the same data type can be stored in a `NumPy` array.
```
x = np.array([1, 2, 3, 4, 5, 6])
print(x, type(x))
print(x.dtype) # Integers
list1 = [1, 2, 3, 4, 5, 6]
print(list1, type(list1))
print(x.ndim)
print(x.shape)
print(x.size)
# Arange() function
np.arange(1, 14)
# reshape() function
print(x)
print(x.shape)
print("---------------")
print(x.reshape(2,3))
print(x.reshape(2,3).shape)
np.zeros([3, 5, 2])
np.full([3,5], 10)
np.eye(5) # 5x5 Matrix
np.diag([7, 8, 9])
np.linspace(start=0, stop=20, num=5)
np.linspace(start=0, stop=20, num=5, dtype=int)
```
**Indexing**\
Like lists. However, indexing with lists and booleans is also possible
```
print(x[0])
print(x[0:4])
list2 = [2, 3]
print(x[list2])
list3 = [True, False, False, False, False, True]
print(x[list3])
# mutable
x[0] = 100.3
print(x) # Float to Integer
b = np.arange(10, 19).reshape(3,3)
print(b)
```
| | j = 0 | j = 1 | j = 2 |
|---------|---------|---------|---------|
|**i = 0**| B[0, 0] | B[0, 1] | B[0, 2] |
|**i = 1**| B[1, 0] | B[1, 1] | B[1, 2] |
|**i = 2**| B[2, 0] | B[2, 1] | B[2, 2] |
```
print(b[1, 2])
print(b[1][2])
print(b[1, 1:3])
print(b[1][1:3])
```
**Arithmetic operations in contrast to lists easy possible**
```
print(b + 3)
list2 + 3 # TypeError
print([j + 3 for j in list2])
```
<div class="alert alert-success"><b>Exercise:</b> Create the array <b>a</b> with values -15, -14, -13, ... 13, 14. Select the elements $a_i$ from the vector <b>a</b> which fulfill the following conditions:
<ol>
<li> a_i is less than 10.</li>
<li> a_i is a negative number. </li>
<li> a_i is less than ten and greater than -10.</li>
<li> a_i is a multiple of 3. </li>
</div>
### 5.2 Pandas
Pandas (acronym for Python and data analysis) provides additional functions and data structures for data analysis. The two most important data structures are called `DataFrame` and `Series`. The first denotes a data table, the second a column of a data table (i.e. a `DataFrame` consists of `Series`).
```
#import pandas as pd
```
#### 5.2.1 Series
Series' objects contain an additional index in addition to the values. In the default setting, this is numeric and starts at 0. However, it can also be customized.
```
a = pd.Series([15, 16, 17, 18])
print(a)
print(a.index)
print(a.values)
b = pd.Series([15, 16, 17, 18], index=["a", "b", "c", "d"])
b
print(b["c"]) # Indexing using string
print(b.c) # dot notation
print(b[2]) # Indexing with position
```
#### 5.2.2 DataFrames
A DataFrame contains multiple Series objects and is the "default" object when analyzing multidimensional data. Usually, the columns contain different types of data (our *variables*) and the rows contain the corresponding values (our *observations*).
```
a = [21, 22, 23, 24]
b = [1000, 2000, 1500, 1700]
table1 = pd.DataFrame({"a":a, "b":b}) # key is column name
table1
table2 = pd.DataFrame({
"age":a,
"income":b,
"faculty": "economics",
"studies": pd.Categorical(["A", "A", "B", "B"])},
index=("Person1", "Person2", "Person3", "Person4"))
table2
table2.head(2)
table2.tail(n = 2)
table2.index
table2.columns
table2.describe()
table2.describe(include = "all")
```
**Indexing**
* []: Indexing with numpy.notation
* .: Attribute access operator
* .loc: label-based
* .iloc: integer-position
* .at: one value
```
table2[0:1]
table2.age
table2.loc["Person1"]
table2.iloc[3]
table2.at["Person2", "income"]
```
<div class="alert alert-success"><b>Exercise:</b> The dataframe <b>e</b> is given. Get an overview of <b>e</b> by
<ol>
<li> looking at the indexes.</li>
<li> output the column names. </li>
<li> output the first two rows.</li>
<li> sort <b>e</b> descending by ranking using the method <b>sort_values</b>. </li>
</ol>
</div>
```
e = pd.DataFrame({
"Title":["Pulp Fiction", "Die Verurteilten", "Der Pate", "Fight Club", "The Dark Knight"],
"Year": [1994, 1994, 1972, 1999, 2008],
"Ranking": [1, 5, 3, 2, 4]})
e
```
<h1><center> 6 Visualization </h1>
Numerous plots can be implemented in Python. It is crucial to choose the right plot for the right data. First, we need to ask ourselves what type of data we are dealing with:
* categorical
* numeric
* categorical + numeric
* spatial data
* etc.
The website https://www.data-to-viz.com/index.html provides information to different visualization techniques.
### 6.1 First graphic
```
import matplotlib.pyplot as plt
#import seaborn as sns
y = [1, 4, 2, 3]
plt.plot(y)
plt.show()
```
**High-Level-Parameters:**
| Argument | Value | Description |
|:------------|:---------------------------------|:-----------------------------------------|
| linestyle | "--", "-.", "", ... | line type |
| marker | "o", "v", "1", ... | Type of data point symbols |
| color | "Name", "RGB", "Hex-Code" | Colour of data point symbols |
| linewidth | Number | Line width |
| label | "Legend" | Legend |
```
x = [0, 3, 4, 5]
plt.plot(x, y, color="green", marker="o", linestyle="--")
plt.show()
```
**Low-Level-Functions:**
| Function | Description |
|:---------------------------------------------------------------------------------|:------------------------------------|
| plt.title("Text") | Adds title |
| plt.xlabel("Text") | Adds title for the x-axis |
| plt.ylabel("Text") | Adds title for the y-axis |
| plt.legend(loc="Ort", fontsize=Zahl, ...) | Characteristics of the legend |
| plt.grid(b=bool, ...) | Characteristics of the grid |
| plt.axis(xmin=Zahl, xmax=Zahl, ymin=Zahl, ymax=Zahl, option=bool,...) | Characteristics of the axes |
| plt.axvline() | Vertical line |
| plt.axhline() | Horizontal line |
```
# Plot
plt.plot(x, y, label="min.Temp")
# Layout
plt.title("Grafik1 $\sim N(\mu, \sigma^2)$", loc="left", fontsize=20)
plt.xlabel("days", style="italic")
plt.ylabel("temp", color="red")
plt.xlim([1,9])
plt.ylim([0,5])
plt.legend(loc = "upper right")
# Show plot
plt.show()
```
### 6.2 Visualization of one variable
```
df = pd.read_csv("Bestsellers.csv")
df.head()
```
**Barplot**
```
# absolute values
data4bar = df["Genre"].value_counts()
data4bar
# Bars
plt.bar(data4bar.index, data4bar.values)
# Layout
plt.title("Säulendiagramm")
plt.xlabel("Genre")
plt.ylabel("Anzahl")
# Show plot
plt.show()
plt.bar(data4bar.index, data4bar.values, color=["Red", "Yellow"])
plt.show()
plt.bar(data4bar.index, data4bar.values, color=(0.2, 0.4, 0.6, 0.2), edgecolor = "red")
plt.show()
sns.barplot(x = data4bar.index, y = data4bar.values, color="#69b3a2")
plt.show()
```
**Histogram**
```
plt.hist(df["Reviews"], density = True)
plt.show()
sns.set(style="white")
sns.displot(df["Reviews"])
plt.show()
sns.histplot(data=df["Reviews"], bins=10)
plt.show()
```
**Boxplot**
```
sns.boxplot(x=df["Reviews"])
plt.show()
```
**Kernel**
```
sns.kdeplot(df["Reviews"])
plt.show()
sns.kdeplot(df["Reviews"], shade=True, color="olive")
plt.show()
```
<div class="alert alert-success"><b>Exercise:</b> Import the dataset <b>company</b>. Create a line chart with <i>total_profit</i> and <i>month_number</i>. Thereby, the line color should be green, the title <i>Profit per month</i>, the x-axis title <i>Month</i>, the y-axis title <i>Profit in US-Dollars</i>, the data point symbols triangles and the line type: "-.-.-.-.-.-.-.-.". Furthermore, there should be a red, horizontal line, which marks the arithmetic mean of <i>total_profit</i>.<br>
<b>Hint:</b> Use the function np.mean!</div>
### 6.3 Visualization of two variables
```
# Scatterplot
plt.plot(df["Reviews"], df["Price"], marker="o", linestyle="")
# Layout
plt.title("Scatterplot")
plt.xlabel("User Rating")
plt.ylabel("Price")
# Show plot
plt.show()
# Scatterplot
plt.plot(df["Reviews"], df["Price"], marker="o", alpha=0.2, linestyle="")
# Layout
plt.title("Scatterplot")
plt.xlabel("User Rating")
plt.ylabel("Price")
# Show plot
plt.show()
sns.boxplot(y=df["Reviews"], x=df["Genre"])
plt.show()
```
<div class="alert alert-success"><b>Exercise:</b> Import the dataset <b>HappinessReport2019</b>. Create a scatterplot with <i>Overall rank</i> and <i>Social support</i> with green dots. </div>
| github_jupyter |
# Appendix A
# Data processing and feature engineering
This notebook describes the preprocessing/features engineering which is done on the raw data of the titanic dataset. The data are from the kaggle challenge "Titanic: Machine Learning from Disaster". Some data are missing, the features are hardly relevant...These data are not really useful as they are provided. Therefore many challengers suggested to perform some features engineering first, to increase the efficiency of the models.
As our main interested is not in getting the maximal score, but explore the feasibility of a full-privacy-preserving method based on homomorphic encryption, I did not spend a lot of time on data completion and feature engineering. The method developed below is widely inspired from other challengers' methods which are referenced in the notebook.
All the method described and performed below is implemented in src.features as build_features.processing().
The other notebooks will call this function to performed the operations shown below.
## Imports
```
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
sns.set
```
## Setting Parameters
```
DATA_PATH="../data/raw/"
WRITE_PATH = "../data/processed/"
train_set_df = pd.read_csv(DATA_PATH + "train.csv")
test_set_df = pd.read_csv(DATA_PATH + "test.csv")
train_set_df.head()
```
We decide to perform data completion and feature engineering on all the data we have, without making any distinction between the test and train dataset. It is a contestable decision, as we will use the test set for data processing, so a bias will be introduced into the model evaluation. However, the decision is motivated by the size of the dataset, which is really small. The idea is to work on a more representative dataset when processing the data.
```
all_data_df=pd.concat([train_set_df,test_set_df], sort=True).reset_index(drop=True)
all_data_df.head()
```
## Missing values
The first issue to tackle is the lack of some values in the dataset.
```
# Per-column missing-value counts. Note: all_data_df is the concatenated
# train+test frame, so the first label must not say 'Training Set' as the
# original did — that mislabelled the output.
print('Combined Set\n')
print(all_data_df.isnull().sum())
print('-'*40)
print('Test Set\n')
print(test_set_df.isnull().sum())
```
We first look at the correlations between features, looking for some models which can use to predict the missing values, by interpolations. Here by default, we seek for linear models.
```
sns.pairplot(all_data_df, hue = 'Survived', kind='reg', diag_kind='kde')
```
Thanks to Güneş Evitan's efforts here: https://www.kaggle.com/gunesevitan/titanic-advanced-feature-engineering-tutorial#1.-Exploratory-Data-Analysis
When I googled Stone, Mrs. George Nelson (Martha Evelyn), I found that she embarked from S (Southampton) with her maid Amelie Icard, in this page Martha Evelyn Stone: Titanic Survivor.
We know that two missing values are actually S. Lets fill those.
```
all_data_df.Embarked = all_data_df.Embarked.fillna('S')
all_data_df.isnull().sum()
```
For Age and Fare, we are going to fit two multivariate linear regressions on the test set, before using them to predict the missing ages and fare
```
sns.pairplot(all_data_df.groupby(['Sex', 'Pclass','Embarked']).mean(),kind='reg', diag_kind='kde')
```
In the end, the regressions do not seem to perform very well... The solution suggested at https://www.kaggle.com/elifapaydn/logistic-regression-with-titanic-data looks quite relevant and shows good results.
```
all_data_df["Age"]=all_data_df.groupby(['Sex','Pclass','Embarked'])["Age"].apply(lambda x : x.fillna(x.median()))
all_data_df.isnull().sum()
```
The identical idea is developed for Fare
```
all_data_df.groupby(['Pclass', 'Parch']).median().Fare
all_data_df.Fare=all_data_df.Fare.fillna(all_data_df.groupby(['Pclass', 'Parch']).median().Fare[3][0])
all_data_df.isnull().sum()
```
At this point, remains the problem of missing cabin numbers.
We use the solution to classified people by desks, suggested and detailed here : https://www.kaggle.com/gunesevitan/titanic-advanced-feature-engineering-tutorial#1.-Exploratory-Data-Analysis
For those where the cabin number is not specified, we create a "M" deck label, for missing.
More details are provided by the author.
```
all_data_df['Deck'] = all_data_df.Cabin.fillna('M').apply(lambda x : str(x)[0])
all_data_df.groupby(['Deck','Pclass']).Pclass.count()
df = pd.DataFrame(all_data_df.groupby(['Deck']).Survived.sum())
df['Count'] = all_data_df.groupby(['Deck']).Survived.count()
df
all_data_df.isnull().sum()
```
Even if the cabin feature still contains null values, we will drop it later, and will only use the deck feature.
## Features engineering
Now that we filled the missing values, we can do better, and add some relevant features, built from the original one.
We first add the feature Family_Size, built from the SibSp, and Parch.
```
all_data_df['Family_Size'] = all_data_df['SibSp'] + all_data_df['Parch'] + 1
all_data_df
```
We also use the feature Title, which will reveal insides on the social background, the welfare, and so on.
It allows us to use the native feature Name, and it is inspired by https://www.kaggle.com/elifapaydn/logistic-regression-with-titanic-data
```
all_data_df['Title']=all_data_df.Name.str.split(',', expand=True)[1].str.split('.', expand=True)[0].str.replace(" ","")
all_data_df['Title'] = all_data_df['Title'].replace(['Lady', 'theCountess','Capt', 'Col','Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare')
all_data_df['Title'] = all_data_df['Title'].replace('Mlle', 'Miss')
all_data_df['Title'] = all_data_df['Title'].replace('Ms', 'Miss')
all_data_df['Title'] = all_data_df['Title'].replace('Mme', 'Mrs')
```
## Encoding the data
We will use logistic regression, so we want to work only with numbers. Therefore we encode the nonnumerical features with numbers. Moreover, for categorical features, we create as many features as there are possible classes, and use the labels 0 or 1 in these 'class-features'.
```
all_data_df.head()
all_data_df.info()
pd.get_dummies(all_data_df.Deck, prefix="Deck")
categorical_col = ["Pclass",'Embarked','SibSp','Deck', "Title"]
numerical_col = ["Age", "Fare", "Parch", "Family_Size"]
# Binary-encode Sex. Bug fix: the original called the unbound method as
# `LabelEncoder.fit_transform(series, series)`, abusing the Series itself
# as `self`; an encoder instance must be created and given the Series once.
all_data_df.Sex = LabelEncoder().fit_transform(all_data_df.Sex)
# One-hot encode each categorical column, then drop the original column.
for col in categorical_col:
    dummies = pd.get_dummies(all_data_df[col], prefix=col)
    all_data_df = pd.concat([all_data_df, dummies], axis=1)
    all_data_df = all_data_df.drop(col , axis=1)
all_data_df.info()
```
We finally got our data processed, we drop the useless columns
```
all_data_df.drop(['Name','Cabin','Ticket', "PassengerId"], axis='columns', inplace=True)
all_data_df.head()
```
We will use the homomorphic encryption scheme CKKS to ensure private computations. As this scheme allows only multiplication and addition, we have to approximate non-linear function (as sigmoid and log) with polynomials. We use a minmax approximation, which is developed in the notebook XX.
The sigmoid approximation is relevant between -5 and 5. To ensure that we stay between these values, we normalize all the features.
```
col_to_reg = ['Age' , 'Fare', 'Family_Size']
for col in col_to_reg:
all_data_df[col] = (all_data_df[col] - all_data_df[col].mean())/all_data_df[col].std()
all_data_df
```
## Saving the preprocessed data
```
train_set_df=all_data_df.iloc[:train_set_df.shape[0]]
test_set_df=all_data_df.iloc[train_set_df.shape[0]:].drop('Survived',axis=1)
train_set_df.to_csv(WRITE_PATH + "processed_train.csv")
test_set_df.to_csv(WRITE_PATH + "processed_test.csv")
```
| github_jupyter |
# Program1 : Implement and demonstrate the FIND-S algorithm for finding the most specific hypothesis based on a given set of training data samples. Read the training data from a .CSV file.
```
import random
import csv
from pprint import pprint

# Domain of each attribute in the EnjoySport-style training data;
# the order must match the columns of ws.csv.
attributes = [['Sunny','Rainy'],
              ['Warm','Cold'],
              ['Normal','High'],
              ['Strong','Weak'],
              ['Warm','Cool'],
              ['Same','Change']]
num_attributes = len(attributes)

print("The most general hypothesis : ['?','?','?','?','?','?']")
print(" The most specific hypothesis : ['0','0','0','0','0','0']")

# Load the training examples. Each row is expected to hold the attribute
# values followed by the target label ('Yes'/'No') in the last column.
a = []
with open('ws.csv', 'r') as csvFile:
    reader = csv.reader(csvFile)
    for row in reader:
        a.append(row)
        #print(row)

print(" The initial value of hypothesis: ")
print()
hypothesis = ['0'] * num_attributes
print(hypothesis)

# Seed the hypothesis with the first training example.
for j in range(0,num_attributes):
    hypothesis[j] = a[0][j];
print(hypothesis)
print()

# FIND-S: for every positive example, generalise each attribute that
# disagrees with the current hypothesis to '?'.
for i in range(0,len(a)):
    if a[i][num_attributes]=='Yes':
        for j in range(0,num_attributes):
            if a[i][j]!=hypothesis[j]:
                hypothesis[j]='?'
            else :
                hypothesis[j]= a[i][j]
#find S algorithm's resulting (most specific consistent) hypothesis
print(hypothesis)
#test_case = list(map(str, input("Enter a test case with elements comma seperated ").split(',')))
def is_case_valid(attributes, case):
    """Return True iff *case* supplies one legal value (case-insensitive)
    for every attribute domain in *attributes*, in order."""
    domains = [[value.lower() for value in domain] for domain in attributes]
    values = [value.lower() for value in case]
    if len(values) != len(domains):
        return False
    return all(value in domain for value, domain in zip(values, domains))
```
```
def outcome_finder(hypothesis, case):
    """Classify *case* against *hypothesis*: 'Yes' iff every constrained
    (non-'?') attribute of the hypothesis matches the case value."""
    for idx, expected in enumerate(hypothesis):
        if expected == '?':
            continue
        if case[idx] != expected:
            return "No"
    return 'Yes'
```
```
import random, pandas

# Evaluate the learned hypothesis on 100 randomly sampled cases.
records= []
for _ in range(100):
    case = [ random.choice(i) for i in attributes]
    if is_case_valid(attributes, case):
        records.append( [ ','.join(case), outcome_finder(hypothesis,case)] )
data_frame = pandas.DataFrame(records)
data_frame

is_case_valid(attributes, ['Sunny', 'Warm', 'Normal', 'Strong', 'Warm', 'Same'])

#all possible cases possible: the cartesian product of the attribute domains
from itertools import product
all_cases = [combination for combination in product(*attributes)]
records= []
for case in all_cases:
    if is_case_valid(attributes, case):
        records.append( [ ','.join(case), outcome_finder(hypothesis,case)] )
data_frame = pandas.DataFrame(records)
data_frame
```
| github_jupyter |
# Collaboration and Competition
---
You are welcome to use this coding environment to train your agent for the project. Follow the instructions below to get started!
### 1. Start the Environment
Run the next code cell to install a few packages. This line will take a few minutes to run!
```
!pip -q install ./python
```
The environment is already saved in the Workspace and can be accessed at the file path provided below.
```
from unityagents import UnityEnvironment
import numpy as np
env = UnityEnvironment(file_name="/data/Tennis_Linux_NoVis/Tennis")
```
Environments contain **_brains_** which are responsible for deciding the actions of their associated agents. Here we check for the first brain available, and set it as the default brain we will be controlling from Python.
```
# get the default brain
brain_name = env.brain_names[0]
brain = env.brains[brain_name]
```
### 2. Examine the State and Action Spaces
Run the code cell below to print some information about the environment.
```
# reset the environment
env_info = env.reset(train_mode=True)[brain_name]
# number of agents
num_agents = len(env_info.agents)
print('Number of agents:', num_agents)
# size of each action
action_size = brain.vector_action_space_size
print('Size of each action:', action_size)
# examine the state space
states = env_info.vector_observations
state_size = states.shape[1]
print('There are {} agents. Each observes a state with length: {}'.format(states.shape[0], state_size))
print('The state for the first agent looks like:', states[0])
```
### 3. Take Random Actions in the Environment
In the next code cell, you will learn how to use the Python API to control the agent and receive feedback from the environment.
Note that **in this coding environment, you will not be able to watch the agents while they are training**, and you should set `train_mode=True` to restart the environment.
```
for i in range(5):                                         # play game for 5 episodes
    env_info = env.reset(train_mode=False)[brain_name]     # reset the environment
    states = env_info.vector_observations                  # get the current state (for each agent)
    scores = np.zeros(num_agents)                          # initialize the score (for each agent)
    while True:
        actions = np.random.randn(num_agents, action_size) # select an action (for each agent)
        actions = np.clip(actions, -1, 1)                  # all actions between -1 and 1
        env_info = env.step(actions)[brain_name]           # send all actions to the environment
        next_states = env_info.vector_observations         # get next state (for each agent)
        rewards = env_info.rewards                         # get reward (for each agent)
        dones = env_info.local_done                        # see if episode finished
        scores += env_info.rewards                         # update the score (for each agent)
        states = next_states                               # roll over states to next time step
        if np.any(dones):                                  # exit loop if episode finished
            break
    print('Total score (averaged over agents) this episode: {}'.format(np.mean(scores)))
```
When finished, you can close the environment.
```
#env.close()
```
### 4. It's Your Turn!
Now it's your turn to train your own agent to solve the environment! A few **important notes**:
- When training the environment, set `train_mode=True`, so that the line for resetting the environment looks like the following:
```python
env_info = env.reset(train_mode=True)[brain_name]
```
- To structure your work, you're welcome to work directly in this Jupyter notebook, or you might like to start over with a new file! You can see the list of files in the workspace by clicking on **_Jupyter_** in the top left corner of the notebook.
- In this coding environment, you will not be able to watch the agents while they are training. However, **_after training the agents_**, you can download the saved model weights to watch the agents on your own machine!
```
def train(n_episodes=5000, max_t=1000, print_every=100):
    """Run MADDPG training episodes until the environment is solved.

    Args:
        n_episodes: maximum number of episodes to play.
        max_t: maximum number of time steps per episode.
        print_every: episode interval at which a progress line is persisted.

    Returns:
        (scores_list, window_avgs): per-episode max score over both agents,
        and the running 100-episode average after each episode.

    Relies on the notebook globals `env`, `brain_name`, `num_agents` and
    `agents`. Removed the unused `max_score` local from the original.
    """
    scores_deque = deque(maxlen=100)  # sliding window for the solve criterion
    scores_list = []
    window_avgs = []
    for i_episode in range(1, n_episodes + 1):
        env_info = env.reset(train_mode=True)[brain_name]
        states = env_info.vector_observations
        scores = np.zeros(num_agents)
        agents.reset()
        for _ in range(max_t):
            actions = agents.act(states)
            env_info = env.step(actions)[brain_name]
            next_states = env_info.vector_observations  # get all next states for each agent
            rewards = env_info.rewards                  # get all rewards for each agent
            dones = env_info.local_done                 # get all finished statuses for each agent
            agents.step(states, actions, rewards, next_states, dones)
            states = next_states
            scores += rewards
            if np.any(dones):
                break
        # The episode score is the better of the two agents' scores
        # (computed once, instead of twice as in the original).
        episode_score = np.max(scores)
        scores_deque.append(episode_score)
        scores_list.append(episode_score)
        window_avg = np.mean(scores_deque)  # calculate average from score window
        window_avgs.append(window_avg)
        print('\rEpisode {}\tAverage Score: {:.4f}'.format(i_episode, window_avg), end="")
        if i_episode % print_every == 0:
            print('\rEpisode {}\tAverage Score: {:.4f}'.format(i_episode, window_avg))
        if window_avg >= 0.5:  # Tennis counts as solved at +0.5 over 100 episodes
            print('\nProblem Solved in {} episodes.\tAverage Score: {:.4f}'.format(i_episode, window_avg))
            torch.save(agents.actor_local.state_dict(), 'checkpoint_actor.pth')
            torch.save(agents.critic_local.state_dict(), 'checkpoint_critic.pth')
            break
    return scores_list, window_avgs
from collections import deque
import matplotlib.pyplot as plt
import torch
from maddpg_agent import Agent

# Agent(state_size, action_size, num_agents, seed) — presumably 24/2/2
# with seed 42, matching the sizes printed above; confirm in maddpg_agent.
agents = Agent(24,2,2,42)
scores, window_avgs = train()

# finally we can plot the scores as shown in the lectures
fig = plt.figure()
ax = fig.add_subplot(111)
plt.plot(np.arange(1, len(scores)+1), scores)
plt.plot(np.arange(1, len(window_avgs)+1), window_avgs)
plt.ylabel('Score')
plt.xlabel('Episode #')
plt.savefig('maddqn_scores.png', bbox_inches='tight')
plt.show()
env.close()
```
| github_jupyter |
```
# from google.colab import drive
# drive.mount('/content/drive')
import torch.nn as nn
import torch.nn.functional as F
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import torch
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
from matplotlib import pyplot as plt
import copy
# Ignore warnings
import warnings
warnings.filterwarnings("ignore")
transform = transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform)
testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=10, shuffle=True)
testloader = torch.utils.data.DataLoader(testset, batch_size=10, shuffle=False)
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
foreground_classes = {'plane', 'car', 'bird'}
background_classes = {'cat', 'deer', 'dog', 'frog', 'horse','ship', 'truck'}
# Split the CIFAR-10 training set into foreground (plane/car/bird) and
# background images.
fg1,fg2,fg3 = 0,1,2  # class indices of the three foreground classes
dataiter = iter(trainloader)
background_data=[]
background_label=[]
foreground_data=[]
foreground_label=[]
batch_size=10
for i in range(5000):  # 5000 batches x 10 images = the full 50000-image set
    # Bug fix: `dataiter.next()` is the Python-2-style call that newer
    # PyTorch iterators no longer expose; use the builtin next().
    images, labels = next(dataiter)
    for j in range(batch_size):
        img = images[j].tolist()  # hoisted: identical in both branches
        if classes[labels[j]] in background_classes:
            background_data.append(img)
            background_label.append(labels[j])
        else:
            foreground_data.append(img)
            foreground_label.append(labels[j])
foreground_data = torch.tensor(foreground_data)
foreground_label = torch.tensor(foreground_label)
background_data = torch.tensor(background_data)
background_label = torch.tensor(background_label)
def create_mosaic_img(bg_idx,fg_idx,fg):
    """
    Build one 9-tile mosaic containing a single foreground image.

    bg_idx : list of indexes of background_data[] to be used as background images in mosaic
    fg_idx : index of image to be used as foreground image from foreground data
    fg : at what position/index foreground image has to be stored out of 0-8

    Returns (image_list, label): the stacked tiles as one double tensor,
    and the foreground class label remapped to start at 0.
    """
    image_list=[]
    j=0
    for i in range(9):
        if i != fg:
            image_list.append(background_data[bg_idx[j]].type("torch.DoubleTensor"))
            j+=1
        else:
            image_list.append(foreground_data[fg_idx].type("torch.DoubleTensor"))
    # subtract fg1 so the foreground labels (fg1..fg3, here 0..2) start at 0
    # (the original comment said "minus 7 because classes are 7,8,9" — stale)
    label = foreground_label[fg_idx]- fg1
    #image_list = np.concatenate(image_list ,axis=0)
    image_list = torch.stack(image_list)
    return image_list,label
desired_num = 30000
mosaic_list_of_images =[] # list of mosaic images, each mosaic image is saved as list of 9 images
fore_idx =[] # list of indexes at which foreground image is present in a mosaic image i.e from 0 to 8
mosaic_label=[] # label of mosaic image = foreground class present in that mosaic
for i in range(desired_num):
    # assumes the split above produced >= 35000 background and >= 15000
    # foreground images — TODO confirm against the actual class counts
    bg_idx = np.random.randint(0,35000,8)  # 8 random background tiles
    fg_idx = np.random.randint(0,15000)    # one random foreground image
    fg = np.random.randint(0,9)            # tile position of the foreground
    fore_idx.append(fg)
    image_list,label = create_mosaic_img(bg_idx,fg_idx,fg)
    mosaic_list_of_images.append(image_list)
    mosaic_label.append(label)
class MosaicDataset(Dataset):
    """Dataset of 9-tile mosaic images.

    Each item is the triple (mosaic, label, fore_idx): the mosaic image,
    its foreground class label, and the tile position (0-8) at which the
    foreground image sits.
    """

    def __init__(self, mosaic_list_of_images, mosaic_label, fore_idx):
        # Store the parallel sequences; __getitem__ indexes all three.
        self.mosaic = mosaic_list_of_images
        self.label = mosaic_label
        self.fore_idx = fore_idx

    def __len__(self):
        return len(self.label)

    def __getitem__(self, idx):
        return self.mosaic[idx], self.label[idx], self.fore_idx[idx]
batch = 250
msd = MosaicDataset(mosaic_list_of_images, mosaic_label , fore_idx)
train_loader = DataLoader( msd,batch_size= batch ,shuffle=True)
class Focus(nn.Module):
    """Attention module: scores each of the 9 tiles of a mosaic with a
    shared CNN and returns the softmax weights plus the weighted average
    image. Note: `forward` relies on the notebook global `batch` and moves
    its buffers to CUDA, so it requires a GPU at call time."""

    def __init__(self):
        super(Focus, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=12, kernel_size=3, padding=0)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(in_channels=12, out_channels=6, kernel_size=3, padding=0)
        self.fc1 = nn.Linear(1014, 512)  # 6 channels * 13 * 13 after the conv/pool stack
        self.fc2 = nn.Linear(512, 64)
        self.fc3 = nn.Linear(64, 1)  # one scalar attention score per tile

    def forward(self, z):
        """z: batch of mosaics, presumably (batch, 9, 3, 32, 32) — confirm.

        Returns (x, y): per-tile softmax weights (batch, 9) and the
        attention-weighted average image (batch, 3, 32, 32).
        """
        y = torch.zeros([batch, 3, 32, 32], dtype=torch.float64)
        x = torch.zeros([batch, 9], dtype=torch.float64)
        y = y.to("cuda")
        x = x.to("cuda")
        # Score every tile with the shared CNN, then normalise the scores.
        for i in range(9):
            x[:, i] = self.helper(z[:, i])[:, 0]
        x = F.softmax(x, dim=1)
        # Weighted average of the tiles using the attention weights.
        # (Removed two dead statements from the original that computed
        # x[:,0] and a torch.mul(...) whose result was discarded.)
        for i in range(9):
            x1 = x[:, i]
            y = y + torch.mul(x1[:, None, None, None], z[:, i])
        return x, y

    def helper(self, x):
        """Shared per-tile scorer: (n, 3, 32, 32) -> (n, 1)."""
        x = self.pool(F.relu(self.conv1(x)))
        x = F.relu(self.conv2(x))
        x = x.view(x.size(0), -1)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x
focus_net = Focus().double()
focus_net = focus_net.to("cuda")
class Classification(nn.Module):
    """Classifier head: maps a 3x32x32 (averaged) image to 3 class logits."""

    def __init__(self):
        super(Classification, self).__init__()
        # Layer names are kept identical so existing state_dicts still load.
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=12, kernel_size=3, padding=0)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(in_channels=12, out_channels=6, kernel_size=3, padding=0)
        self.fc1 = nn.Linear(1014, 512)
        self.fc2 = nn.Linear(512, 64)
        self.fc3 = nn.Linear(64, 3)

    def forward(self, x):
        # Convolutional feature extractor.
        features = self.pool(F.relu(self.conv1(x)))
        features = F.relu(self.conv2(features))
        # Flatten to (n, 1014) and run the fully connected head.
        flat = features.view(features.size(0), -1)
        hidden = F.relu(self.fc1(flat))
        hidden = F.relu(self.fc2(hidden))
        return self.fc3(hidden)
# Classifier in double precision, moved to the GPU.
classify = Classification().double().to("cuda")
# Build a held-out set of 10000 mosaics with fresh random fore/background picks.
test_images =[] # list of mosaic images, each mosaic saved as a list of 9 patches
fore_idx_test =[] # position (0-8) of the foreground patch in each mosaic
test_label=[] # label of mosaic image = class of the foreground patch
for i in range(10000):
    # 8 background patches from the 35000-image background pool,
    # 1 foreground patch from the 15000-image foreground pool.
    bg_idx = np.random.randint(0,35000,8)
    fg_idx = np.random.randint(0,15000)
    fg = np.random.randint(0,9)
    fore_idx_test.append(fg)
    image_list,label = create_mosaic_img(bg_idx,fg_idx,fg)
    test_images.append(image_list)
    test_label.append(label)
test_data = MosaicDataset(test_images,test_label,fore_idx_test)
# No shuffling: keeps test ordering deterministic across evaluation passes.
test_loader = DataLoader( test_data,batch_size= batch ,shuffle=False)
import torch.optim as optim
# Cross-entropy over the classifier logits; both networks are trained jointly
# with separate Adam optimizers at the same learning rate (default betas/eps).
criterion = nn.CrossEntropyLoss()
optimizer_classify = optim.Adam(classify.parameters(), lr=0.001)#, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, amsgrad=False)
optimizer_focus = optim.Adam(focus_net.parameters(), lr=0.001)#, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, amsgrad=False)
# Per-epoch statistics tables: col1..col7 feed the train table
# (epoch, argmax>0.5, argmax<0.5, FTPT, FFPT, FTPF, FFPF);
# col8..col13 hold the same counts for the test set.
(col1, col2, col3, col4, col5, col6, col7,
 col8, col9, col10, col11, col12, col13) = ([] for _ in range(13))

# Running counters for the pre-training evaluation pass.
correct = 0
total = 0
count = 0
flag = 1
focus_true_pred_true = 0
focus_false_pred_true = 0
focus_true_pred_false = 0
focus_false_pred_false = 0
argmax_more_than_half = 0
argmax_less_than_half = 0
# ---- Pre-training evaluation (epoch 0 baseline) ----
# For each sample we record (a) whether the attention argmax matches the true
# foreground patch ("focus true/false"), (b) whether the classifier predicts
# the right label ("pred true/false"), and (c) how peaked the attention is.
with torch.no_grad():
    for data in train_loader:
        inputs, labels , fore_idx = data
        inputs, labels , fore_idx = inputs.to("cuda"),labels.to("cuda"), fore_idx.to("cuda")
        alphas, avg_images = focus_net(inputs)
        outputs = classify(avg_images)
        _, predicted = torch.max(outputs.data, 1)
        for j in range(labels.size(0)):
            count += 1
            # Patch receiving the largest attention weight.
            focus = torch.argmax(alphas[j])
            if alphas[j][focus] >= 0.5 :
                argmax_more_than_half += 1
            else:
                argmax_less_than_half += 1
            if(focus == fore_idx[j] and predicted[j] == labels[j]):
                focus_true_pred_true += 1
            elif(focus != fore_idx[j] and predicted[j] == labels[j]):
                focus_false_pred_true += 1
            elif(focus == fore_idx[j] and predicted[j] != labels[j]):
                focus_true_pred_false += 1
            elif(focus != fore_idx[j] and predicted[j] != labels[j]):
                focus_false_pred_false += 1
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
print('Accuracy of the network on the 30000 train images: %d %%' % ( 100 * correct / total))
print("total correct", correct)
print("total train set images", total)
print("focus_true_pred_true %d =============> FTPT : %d %%" % (focus_true_pred_true , (100 * focus_true_pred_true / total) ) )
print("focus_false_pred_true %d =============> FFPT : %d %%" % (focus_false_pred_true, (100 * focus_false_pred_true / total) ) )
print("focus_true_pred_false %d =============> FTPF : %d %%" %( focus_true_pred_false , ( 100 * focus_true_pred_false / total) ) )
print("focus_false_pred_false %d =============> FFPF : %d %%" % (focus_false_pred_false, ( 100 * focus_false_pred_false / total) ) )
print("argmax_more_than_half ==================> ",argmax_more_than_half)
print("argmax_less_than_half ==================> ",argmax_less_than_half)
print(count)
print("="*100)
# Epoch-0 row of the train statistics table.
col1.append(0)
col2.append(argmax_more_than_half)
col3.append(argmax_less_than_half)
col4.append(focus_true_pred_true)
col5.append(focus_false_pred_true)
col6.append(focus_true_pred_false)
col7.append(focus_false_pred_false)
# Reset counters, then repeat the same bookkeeping on the test set.
correct = 0
total = 0
count = 0
flag = 1
focus_true_pred_true =0
focus_false_pred_true =0
focus_true_pred_false =0
focus_false_pred_false =0
argmax_more_than_half = 0
argmax_less_than_half =0
with torch.no_grad():
    for data in test_loader:
        inputs, labels , fore_idx = data
        inputs, labels , fore_idx = inputs.to("cuda"),labels.to("cuda"), fore_idx.to("cuda")
        alphas, avg_images = focus_net(inputs)
        outputs = classify(avg_images)
        _, predicted = torch.max(outputs.data, 1)
        for j in range(labels.size(0)):
            focus = torch.argmax(alphas[j])
            if alphas[j][focus] >= 0.5 :
                argmax_more_than_half += 1
            else:
                argmax_less_than_half += 1
            if(focus == fore_idx[j] and predicted[j] == labels[j]):
                focus_true_pred_true += 1
            elif(focus != fore_idx[j] and predicted[j] == labels[j]):
                focus_false_pred_true += 1
            elif(focus == fore_idx[j] and predicted[j] != labels[j]):
                focus_true_pred_false += 1
            elif(focus != fore_idx[j] and predicted[j] != labels[j]):
                focus_false_pred_false += 1
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
print('Accuracy of the network on the 10000 test images: %d %%' % (
    100 * correct / total))
print("total correct", correct)
print("total train set images", total)
print("focus_true_pred_true %d =============> FTPT : %d %%" % (focus_true_pred_true , (100 * focus_true_pred_true / total) ) )
print("focus_false_pred_true %d =============> FFPT : %d %%" % (focus_false_pred_true, (100 * focus_false_pred_true / total) ) )
print("focus_true_pred_false %d =============> FTPF : %d %%" %( focus_true_pred_false , ( 100 * focus_true_pred_false / total) ) )
print("focus_false_pred_false %d =============> FFPF : %d %%" % (focus_false_pred_false, ( 100 * focus_false_pred_false / total) ) )
print("argmax_more_than_half ==================> ",argmax_more_than_half)
print("argmax_less_than_half ==================> ",argmax_less_than_half)
# Epoch-0 row of the test statistics table (shares col1 with the train table).
col8.append(argmax_more_than_half)
col9.append(argmax_less_than_half)
col10.append(focus_true_pred_true)
col11.append(focus_false_pred_true)
col12.append(focus_true_pred_false)
col13.append(focus_false_pred_false)
# ---- Joint training of focus_net and classify ----
nos_epochs = 200
focus_true_pred_true =0
focus_false_pred_true =0
focus_true_pred_false =0
focus_false_pred_false =0
argmax_more_than_half = 0
argmax_less_than_half =0
for epoch in range(nos_epochs): # loop over the dataset multiple times
    # Fresh FTPT/FFPT/... counters for this epoch.
    focus_true_pred_true =0
    focus_false_pred_true =0
    focus_true_pred_false =0
    focus_false_pred_false =0
    argmax_more_than_half = 0
    argmax_less_than_half =0
    running_loss = 0.0
    epoch_loss = []
    cnt=0
    # NOTE(review): `iteration` is never used and `desired_num` is not defined
    # in this chunk — presumably a leftover from an earlier cell; confirm.
    iteration = desired_num // batch
    #training data set
    for i, data in enumerate(train_loader):
        inputs , labels , fore_idx = data
        # NOTE(review): fore_idx stays on CPU here while `focus` below is a
        # CUDA tensor — the comparison relies on torch's cross-device scalar
        # compare; verify on the torch version in use.
        inputs, labels = inputs.to("cuda"), labels.to("cuda")
        # zero the parameter gradients of both networks
        optimizer_focus.zero_grad()
        optimizer_classify.zero_grad()
        # Forward: attention weights + weighted average image, then classify.
        alphas, avg_images = focus_net(inputs)
        outputs = classify(avg_images)
        _, predicted = torch.max(outputs.data, 1)
        # Single cross-entropy loss backpropagates through BOTH networks.
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer_focus.step()
        optimizer_classify.step()
        running_loss += loss.item()
        mini = 60
        if cnt % mini == mini-1: # print every 60 mini-batches
            print('[%d, %5d] loss: %.3f' %(epoch + 1, cnt + 1, running_loss / mini))
            epoch_loss.append(running_loss/mini)
            running_loss = 0.0
        cnt=cnt+1
        # Track the focus/prediction statistics every 5th epoch only.
        if epoch % 5 == 0:
            for j in range (batch):
                focus = torch.argmax(alphas[j])
                if(alphas[j][focus] >= 0.5):
                    argmax_more_than_half +=1
                else:
                    argmax_less_than_half +=1
                if(focus == fore_idx[j] and predicted[j] == labels[j]):
                    focus_true_pred_true += 1
                elif(focus != fore_idx[j] and predicted[j] == labels[j]):
                    focus_false_pred_true +=1
                elif(focus == fore_idx[j] and predicted[j] != labels[j]):
                    focus_true_pred_false +=1
                elif(focus != fore_idx[j] and predicted[j] != labels[j]):
                    focus_false_pred_false +=1
    # Early stopping once the smoothed loss is essentially zero.
    if(np.mean(epoch_loss) <= 0.005):
        break;
    if epoch % 5 == 0:
        # focus_net.eval()
        # classify.eval()
        # Append this epoch's train-set statistics row.
        col1.append(epoch+1)
        col2.append(argmax_more_than_half)
        col3.append(argmax_less_than_half)
        col4.append(focus_true_pred_true)
        col5.append(focus_false_pred_true)
        col6.append(focus_true_pred_false)
        col7.append(focus_false_pred_false)
        #************************************************************************
        #testing data set
        with torch.no_grad():
            focus_true_pred_true =0
            focus_false_pred_true =0
            focus_true_pred_false =0
            focus_false_pred_false =0
            argmax_more_than_half = 0
            argmax_less_than_half =0
            for data in test_loader:
                inputs, labels , fore_idx = data
                inputs, labels = inputs.to("cuda"), labels.to("cuda")
                alphas, avg_images = focus_net(inputs)
                outputs = classify(avg_images)
                _, predicted = torch.max(outputs.data, 1)
                for j in range (batch):
                    focus = torch.argmax(alphas[j])
                    if(alphas[j][focus] >= 0.5):
                        argmax_more_than_half +=1
                    else:
                        argmax_less_than_half +=1
                    if(focus == fore_idx[j] and predicted[j] == labels[j]):
                        focus_true_pred_true += 1
                    elif(focus != fore_idx[j] and predicted[j] == labels[j]):
                        focus_false_pred_true +=1
                    elif(focus == fore_idx[j] and predicted[j] != labels[j]):
                        focus_true_pred_false +=1
                    elif(focus != fore_idx[j] and predicted[j] != labels[j]):
                        focus_false_pred_false +=1
            # Append this epoch's test-set statistics row.
            col8.append(argmax_more_than_half)
            col9.append(argmax_less_than_half)
            col10.append(focus_true_pred_true)
            col11.append(focus_false_pred_true)
            col12.append(focus_true_pred_false)
            col13.append(focus_false_pred_false)
print('Finished Training')
# torch.save(focus_net.state_dict(),"/content/drive/My Drive/Research/Cheating_data/16_experiments_on_cnn_3layers/"+name+"_focus_net.pt")
# torch.save(classify.state_dict(),"/content/drive/My Drive/Research/Cheating_data/16_experiments_on_cnn_3layers/"+name+"_classify.pt")
# Assemble the per-epoch statistics into train/test tables.
columns = ["epochs", "argmax > 0.5" ,"argmax < 0.5", "focus_true_pred_true", "focus_false_pred_true", "focus_true_pred_false", "focus_false_pred_false" ]
df_train = pd.DataFrame()
df_test = pd.DataFrame()
# Train table: epoch column plus the six train-set counters.
for header, series in zip(columns, (col1, col2, col3, col4, col5, col6, col7)):
    df_train[header] = series
# Test table shares the epoch column (col1) with the train table.
for header, series in zip(columns, (col1, col8, col9, col10, col11, col12, col13)):
    df_test[header] = series
df_train
# Attention peakedness on the training set over epochs.
plt.plot(col1, col2, label='argmax > 0.5')
plt.plot(col1, col3, label='argmax < 0.5')
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.xlabel("epochs")
plt.ylabel("training data")
plt.title("On Training set")
plt.show()
# FTPT/FFPT/FTPF/FFPF counts on the training set over epochs.
for series, tag in ((col4, "focus_true_pred_true "),
                    (col5, "focus_false_pred_true "),
                    (col6, "focus_true_pred_false "),
                    (col7, "focus_false_pred_false ")):
    plt.plot(col1, series, label=tag)
plt.title("On Training set")
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.xlabel("epochs")
plt.ylabel("training data")
plt.savefig("train_ftpt.pdf", bbox_inches='tight')
plt.show()
df_test
# Attention peakedness on the test set over epochs.
plt.plot(col1, col8, label='argmax > 0.5')
plt.plot(col1, col9, label='argmax < 0.5')
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.xlabel("epochs")
plt.ylabel("Testing data")
plt.title("On Testing set")
plt.show()
# FTPT/FFPT/FTPF/FFPF counts on the test set over epochs.
for series, tag in ((col10, "focus_true_pred_true "),
                    (col11, "focus_false_pred_true "),
                    (col12, "focus_true_pred_false "),
                    (col13, "focus_false_pred_false ")):
    plt.plot(col1, series, label=tag)
plt.title("On Testing set")
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.xlabel("epochs")
plt.ylabel("Testing data")
plt.savefig("test_ftpt.pdf", bbox_inches='tight')
plt.show()
# ---- Post-training evaluation with full FTPT/FFPT bookkeeping ----
correct = 0
total = 0
count = 0
flag = 1
focus_true_pred_true =0
focus_false_pred_true =0
focus_true_pred_false =0
focus_false_pred_false =0
argmax_more_than_half = 0
argmax_less_than_half =0
with torch.no_grad():
    for data in train_loader:
        inputs, labels , fore_idx = data
        inputs, labels , fore_idx = inputs.to("cuda"),labels.to("cuda"), fore_idx.to("cuda")
        alphas, avg_images = focus_net(inputs)
        outputs = classify(avg_images)
        _, predicted = torch.max(outputs.data, 1)
        for j in range(labels.size(0)):
            # Patch receiving the largest attention weight.
            focus = torch.argmax(alphas[j])
            if alphas[j][focus] >= 0.5 :
                argmax_more_than_half += 1
            else:
                argmax_less_than_half += 1
            if(focus == fore_idx[j] and predicted[j] == labels[j]):
                focus_true_pred_true += 1
            elif(focus != fore_idx[j] and predicted[j] == labels[j]):
                focus_false_pred_true += 1
            elif(focus == fore_idx[j] and predicted[j] != labels[j]):
                focus_true_pred_false += 1
            elif(focus != fore_idx[j] and predicted[j] != labels[j]):
                focus_false_pred_false += 1
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
print('Accuracy of the network on the 30000 train images: %d %%' % (
    100 * correct / total))
print("total correct", correct)
print("total train set images", total)
print("focus_true_pred_true %d =============> FTPT : %d %%" % (focus_true_pred_true , (100 * focus_true_pred_true / total) ) )
print("focus_false_pred_true %d =============> FFPT : %d %%" % (focus_false_pred_true, (100 * focus_false_pred_true / total) ) )
print("focus_true_pred_false %d =============> FTPF : %d %%" %( focus_true_pred_false , ( 100 * focus_true_pred_false / total) ) )
print("focus_false_pred_false %d =============> FFPF : %d %%" % (focus_false_pred_false, ( 100 * focus_false_pred_false / total) ) )
print("argmax_more_than_half ==================> ",argmax_more_than_half)
print("argmax_less_than_half ==================> ",argmax_less_than_half)
# Reset and repeat on the held-out test set.
correct = 0
total = 0
count = 0
flag = 1
focus_true_pred_true =0
focus_false_pred_true =0
focus_true_pred_false =0
focus_false_pred_false =0
argmax_more_than_half = 0
argmax_less_than_half =0
with torch.no_grad():
    for data in test_loader:
        inputs, labels , fore_idx = data
        inputs, labels , fore_idx = inputs.to("cuda"),labels.to("cuda"), fore_idx.to("cuda")
        alphas, avg_images = focus_net(inputs)
        outputs = classify(avg_images)
        _, predicted = torch.max(outputs.data, 1)
        for j in range(labels.size(0)):
            focus = torch.argmax(alphas[j])
            if alphas[j][focus] >= 0.5 :
                argmax_more_than_half += 1
            else:
                argmax_less_than_half += 1
            if(focus == fore_idx[j] and predicted[j] == labels[j]):
                focus_true_pred_true += 1
            elif(focus != fore_idx[j] and predicted[j] == labels[j]):
                focus_false_pred_true += 1
            elif(focus == fore_idx[j] and predicted[j] != labels[j]):
                focus_true_pred_false += 1
            elif(focus != fore_idx[j] and predicted[j] != labels[j]):
                focus_false_pred_false += 1
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
print('Accuracy of the network on the 10000 test images: %d %%' % (
    100 * correct / total))
print("total correct", correct)
print("total train set images", total)
print("focus_true_pred_true %d =============> FTPT : %d %%" % (focus_true_pred_true , (100 * focus_true_pred_true / total) ) )
print("focus_false_pred_true %d =============> FFPT : %d %%" % (focus_false_pred_true, (100 * focus_false_pred_true / total) ) )
print("focus_true_pred_false %d =============> FTPF : %d %%" %( focus_true_pred_false , ( 100 * focus_true_pred_false / total) ) )
print("focus_false_pred_false %d =============> FFPF : %d %%" % (focus_false_pred_false, ( 100 * focus_false_pred_false / total) ) )
print("argmax_more_than_half ==================> ",argmax_more_than_half)
print("argmax_less_than_half ==================> ",argmax_less_than_half)
# Plain accuracy sweep over the training set (no FTPT bookkeeping).
correct = 0
total = 0
with torch.no_grad():
    for inputs, labels, fore_idx in train_loader:
        inputs, labels = inputs.to("cuda"), labels.to("cuda")
        alphas, avg_images = focus_net(inputs)
        outputs = classify(avg_images)
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
print('Accuracy of the network on the 30000 train images: %d %%' % ( 100 * correct / total))
print("total correct", correct)
print("total train set images", total)
# Plain accuracy sweep over the test set.
correct = 0
total = 0
with torch.no_grad():
    for inputs, labels, fore_idx in test_loader:
        inputs, labels = inputs.to("cuda"), labels.to("cuda")
        alphas, avg_images = focus_net(inputs)
        outputs = classify(avg_images)
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
print('Accuracy of the network on the 10000 test images: %d %%' % ( 100 * correct / total))
print("total correct", correct)
print("total train set images", total)
# ---- Distribution of the attention weights on the test set ----
max_alpha = []   # max attention weight per test image
alpha_ftpt = []  # attention weight on the true patch, FTPT samples only
argmax_more_than_half = 0
argmax_less_than_half = 0
with torch.no_grad():  # evaluation only; no gradients needed
    for i, data in enumerate(test_loader):
        inputs, labels, fore_idx = data
        inputs = inputs.double()
        inputs, labels = inputs.to("cuda"), labels.to("cuda")
        alphas, avg = focus_net(inputs)
        outputs = classify(avg)
        # BUG FIX: `predicted` was never computed in this loop and silently
        # reused a stale global from an earlier cell's last batch; derive it
        # from this batch's logits.
        _, predicted = torch.max(outputs.data, 1)
        mx, _ = torch.max(alphas, 1)
        max_alpha.append(mx.cpu().detach().numpy())
        for j in range(labels.size(0)):
            focus = torch.argmax(alphas[j])
            if alphas[j][focus] >= 0.5:
                argmax_more_than_half += 1
            else:
                argmax_less_than_half += 1
            # NOTE(review): fore_idx is a CPU tensor compared against a CUDA
            # scalar here, mirroring the surrounding cells — confirm on the
            # torch version in use.
            if (focus == fore_idx[j] and predicted[j] == labels[j]):
                alpha_ftpt.append(alphas[j][focus].item())
max_alpha = np.concatenate(max_alpha, axis=0)
print(max_alpha.shape)

plt.figure(figsize=(6,6))
_, bins, _ = plt.hist(max_alpha, bins=50, color="c")
plt.title("alpha values histogram")
plt.savefig("alpha_hist.pdf")

plt.figure(figsize=(6,6))
_, bins, _ = plt.hist(np.array(alpha_ftpt), bins=50, color="c")
plt.title("alpha values in ftpt")
plt.savefig("alpha_hist_ftpt.pdf")
```
| github_jupyter |
```
import numpy as np
import scipy as sp
import pandas as pd
import matplotlib.pyplot as plt
# warfarin data import
wd = pd.read_csv('../warfarin_data.csv', header=None)
# format data
X = wd.to_numpy()
# correct dosage bucket (arm) — second-to-last column
y = X[:,-2]
# correct dosage exact amount (continuous var) — last column
y_val = X[:,-1]
# features: everything except the two target columns
X = X[:,:-2]
N = X.shape[0]
#X = np.concatenate((np.ones((N,1)),X),axis=1)
k = X.shape[1]
# number of arms = number of distinct dosage buckets
arms = np.unique(y).astype(int)
n_arms = arms.shape[0]
# standardize X (z-score each feature)
X = (X - X.mean(axis=0))/X.std(axis=0)
# NOTE: computed after standardization, so xbar is ~0 by construction.
xbar = X.mean(axis=0)
def make_col_vec(x):
    """Reshape a length-N 1-D array into an (N, 1) column vector."""
    return x.reshape((x.shape[0], 1))
# 0-1 Loss
def loss01(y_pull, y_true):
    """0.0 where the pulled arm matches the true arm, 1.0 otherwise."""
    return 1.0 - (y_pull == y_true)
# empirical losses: 0-1 loss each arm would have incurred on each patient
yloss = np.zeros((N,n_arms))
for y_pull in arms:
    yloss[:,y_pull] = loss01(y_pull,y)
# linear arm priors: prior mean loss of each arm = 1 - its empirical accuracy
mu = 1 - (-1*yloss + 1).sum(axis=0)/len(yloss)
# additional Bayesian parameters
rho2 =.5 # strictness of prior (intercept prior variance term)
s2 = .0001 # variance of errors
p = 1 # precision (1/variance) of prior betas
# Gaussian posterior and sampling
def hard_post(Omega, nu, s2, xbar, mu, y, x):
    """Posterior update for the 'hard' (centered) parameterization.

    Rank-one precision update with the centered covariate (x - xbar); the
    mean update uses a linear solve instead of forming an explicit inverse
    (np.linalg.inv), which is cheaper and numerically more stable.
    """
    xc = x - xbar
    Omega_tilde = Omega + np.outer(xc, xc)/s2
    nu_tilde = np.linalg.solve(Omega_tilde, Omega @ nu - (y - mu)/s2 * xc)
    return [Omega_tilde, nu_tilde]
def soft_post(Omega, nu, s2, y, x):
    """Posterior update for the 'soft' parameterization (intercept prepended
    to x). Uses a linear solve rather than an explicit matrix inverse for
    numerical stability and speed.
    """
    xa = np.concatenate([np.array([1]), x])
    Omega_tilde = Omega + np.outer(xa, xa)/s2
    nu_tilde = np.linalg.solve(Omega_tilde, Omega @ nu - y/s2 * xa)
    return [Omega_tilde, nu_tilde]
def post(Omega, nu, s2, xbar, mu, y, x, soft=True):
    """Dispatch to the soft or hard posterior update."""
    if not soft:
        return hard_post(Omega, nu, s2, xbar, mu, y, x)
    return soft_post(Omega, nu, s2, y, x)
def draw_norm(Omega, nu):
    """Draw one sample from N(nu, Omega^{-1}) given precision matrix Omega.

    With Omega = L L^T (L lower-triangular Cholesky), x = nu + L^{-T} z has
    covariance L^{-T} L^{-1} = Omega^{-1} as required.  The previous version
    used L^{-1} z, whose covariance is (L^T L)^{-1}, which differs from
    Omega^{-1} whenever Omega is not diagonal.
    """
    L = np.linalg.cholesky(Omega)
    z = np.random.normal(size=Omega.shape[0])
    return nu + np.linalg.solve(L.T, z)
# initial priors
# Hard parameterization: precision p*I over the k slopes, zero prior mean,
# one (initially identical) prior per arm.
Omega0_hard = p*np.eye(k)
nu0_hard = np.zeros((k,))
OmegaH = [Omega0_hard for i in range(n_arms)]
nuH = [nu0_hard for i in range(n_arms)]
# Soft parameterization: intercept prepended. Its prior precision entry is
# xbar' Omega xbar + rho2, with cross terms Omega @ xbar, giving a
# (k+1) x (k+1) block matrix.
Omega0_soft = np.concatenate(
    [make_col_vec(np.concatenate(
        [np.array([xbar @ Omega0_hard @ xbar + rho2]), Omega0_hard @ xbar])),
     np.concatenate(
        [make_col_vec(Omega0_hard @ xbar).T, Omega0_hard], axis=0)],
    axis=1)
OmegaS = [Omega0_soft for i in range(n_arms)]
# Soft prior means encode each arm's prior mean loss mu[i] in the intercept.
nuS = [np.concatenate([np.array([xbar @ nu0_hard - mu[i]]), nu0_hard])
       for i in range(n_arms)]
# Vanilla: plain ridge-style prior over intercept + slopes.
Omega0_vanilla = p*np.eye(k+1)
nu0_vanilla = np.zeros((k+1,))
OmegaV = [Omega0_vanilla for i in range(n_arms)]
nuV = [nu0_vanilla for i in range(n_arms)]
def Thompson(y, x, Omega, nu, xbar, mu, n_arms, soft=True):
    """One Thompson-sampling step.

    Samples a coefficient vector per arm from its posterior, predicts each
    arm's loss at covariate x, pulls the arm with the smallest predicted
    loss, and reports that arm's realized loss as regret.
    """
    fits = []
    for a in range(n_arms):
        beta = draw_norm(Omega[a], nu[a])
        if soft:
            # Soft parameterization carries its own intercept in beta[0].
            fits.append(beta[0] + np.dot(x, beta[1:]))
        else:
            # Hard parameterization: intercept reconstructed from mu and xbar.
            intercept = mu[a] - np.dot(xbar, beta)
            fits.append(intercept + np.dot(x, beta))
    arm = np.argmin(fits)
    return [arm, y[arm]]
# Sanity check: one draw from the soft prior of arm 0.
draw_norm(OmegaS[0], nuS[0])
# Monte-Carlo estimate of the hard-prior pull probabilities at the mean
# covariate (xbar), using patient 0's loss row.
ss = 500
aa = np.zeros((ss,))
for i in range(ss):
    aa[i] = Thompson(yloss[0,:],xbar,OmegaH, nuH, xbar, mu, n_arms, soft=False)[0]
print((aa==0).sum()/ss)
print((aa==1).sum()/ss)
print((aa==2).sum()/ss)
# Soft-prior intercept components for the three arms.
[nuS[0][0], nuS[1][0], nuS[2][0]]
def RunThompson(y, X, Omega, nu, s2, xbar, mu, n_arms, soft=True):
    """Run the bandit over every row of X in order.

    After each pull the chosen arm's posterior (Omega[arm], nu[arm]) is
    replaced in place, so the caller's prior lists are mutated.  Returns an
    (N, 2) array whose rows are [arm pulled, realized regret].
    """
    n_rows = X.shape[0]
    results = np.zeros((n_rows, 2))
    for t in range(n_rows):
        arm, regret = Thompson(y[t, :], X[t, :],
                               Omega, nu, xbar, mu, n_arms, soft=soft)
        Omega[arm], nu[arm] = post(Omega[arm], nu[arm], s2, xbar,
                                   mu[arm], y[t, arm], X[t, :], soft=soft)
        results[t, :] = [arm, regret]
    return results
def shuffle(N, replace=False):
    """Random ordering of range(N): a permutation by default, or a
    bootstrap sample when replace=True."""
    return np.random.choice(N, size=(N,), replace=replace)
# Run each bandit variant over nsim random permutations of the patients.
nsim = 1
res_hard = []
res_vanilla = []
res_soft = []
best_arm = []
for i in range(nsim):
    idx = shuffle(N)
    # NOTE(review): RunThompson replaces entries of the Omega*/nu* lists in
    # place, so posteriors carry over across permutations (no reset between
    # iterations of this loop) — confirm that is intended.
    res_hard.append(RunThompson(yloss[idx,:], X[idx,:], OmegaH, nuH, s2,
                                xbar, mu, n_arms, soft=False))
    res_vanilla.append(RunThompson(yloss[idx,:], X[idx,:], OmegaV, nuV, s2,
                                   xbar, mu, n_arms, soft=True))
    res_soft.append(RunThompson(yloss[idx,:], X[idx,:], OmegaS, nuS, s2,
                                xbar, mu, n_arms, soft=True))
    # Oracle arm (zero loss) for each patient, in permuted order.
    best_arm.append(yloss[idx,:].argmin(axis=1))
def make_frame(res, i, label='none'):
    """Tidy the i-th simulation of one bandit's results into a DataFrame.

    res: list of (N, 2) [arm, regret] arrays (one per permutation).
    i:   which permutation to format.
    label: bandit name recorded in the 'Bandit' column.

    Fix: the body previously indexed the global `res_hard` instead of the
    `res` argument, so the soft and vanilla frames silently duplicated the
    hard bandit's results.
    """
    x = pd.DataFrame(res[i])
    x.reset_index(inplace=True)
    x.columns = ['Iteration','Decision','Regret']
    x['Best'] = best_arm[i]
    x['Cumulative Regret'] = x['Regret'].cumsum()
    # Fraction of pulls so far that were incorrect.
    x['Frac Regret'] = x['Cumulative Regret']/(1+x['Iteration'])
    x['Permutation'] = i
    x['Bandit'] = label
    x = x[['Permutation','Bandit','Iteration','Decision','Best','Regret','Cumulative Regret','Frac Regret']]
    return x
# Long-format result tables, one per bandit, stacked over permutations.
th = pd.concat([make_frame(res_hard, i, label='Thompson Hard') for i in range(nsim)])
ts = pd.concat([make_frame(res_soft, i, label='Thompson Soft') for i in range(nsim)])
tv = pd.concat([make_frame(res_vanilla, i, label='Thompson Vanilla') for i in range(nsim)])
def thompsonagg(t):
    """Aggregate Frac Regret per iteration across permutations: the mean
    plus a 95% percentile band."""
    lower = lambda v: np.quantile(v, .025)
    upper = lambda v: np.quantile(v, .975)
    agg = t.groupby('Iteration')['Frac Regret'].agg(['mean', lower, upper]).reset_index()
    agg.columns = ['Iteration','Avg','Lower','Upper']
    return agg
# Per-iteration mean and 95% band for each bandit.
thagg = thompsonagg(th)
tsagg = thompsonagg(ts)
tvagg = thompsonagg(tv)
def thompsonplot(t, label):
    """Plot a bandit's mean Frac Regret curve with its shaded 95% band."""
    iters = t['Iteration']
    plt.plot(iters, t['Avg'], label=label)
    plt.fill_between(iters, t['Lower'], t['Upper'])
# Overlay the three bandits' fraction-incorrect curves.
thompsonplot(thagg, 'Hard')
thompsonplot(tsagg, 'Soft')
thompsonplot(tvagg, 'Vanilla')
plt.legend()
plt.xlabel('Iteration')
plt.ylabel('Frac Incorrect')
plt.show()
# First 30 rows of the hard-bandit result table.
th.head(30)
```
| github_jupyter |
# Introduction: Anomaly Detection
Anomaly detection is a technique used to identify unusual patterns that do not conform to expected behavior, called outliers. It has many applications in business, from intrusion detection (identifying strange patterns in network traffic that could signal a hack) to system health monitoring (spotting a malignant tumor in an MRI scan), and from fraud detection in credit card transactions to fault detection in operating environments.
In this jupyter notebook we are going to take the credit card fraud detection as the case study for understanding this concept in detail.
## What Are Anomalies?
In data mining, anomaly detection (also outlier detection) is the identification of rare items, events or observations which raise suspicions by differing significantly from the majority of the data.
Anomalies can be broadly categorized as:
***Point anomalies:*** A single instance of data is anomalous if it's too far off from the rest. Business use case: Detecting credit card fraud based on "amount spent."
***Contextual anomalies:*** The abnormality is context specific. This type of anomaly is common in time-series data. Business use case: Spending $100 on food every day during the holiday season is normal, but may be odd otherwise.
***Collective anomalies:*** A set of data instances collectively helps in detecting anomalies. Business use case: Someone is trying to copy data form a remote machine to a local host unexpectedly, an anomaly that would be flagged as a potential cyber attack.
- Anomaly detection is similar to — but not entirely the same as — noise removal and novelty detection.
- ***Novelty detection*** is concerned with identifying an unobserved pattern in new observations not included in training data like a sudden interest in a new channel on YouTube during Christmas, for instance.
- ***Noise removal (NR)*** is the process of removing noise from an otherwise meaningful signal.
## 1. Anomaly Detection Techniques
#### Simple Statistical Methods
The simplest approach to identifying irregularities in data is to flag the data points that deviate from common statistical properties of a distribution, including mean, median, mode, and quantiles. Let's say the definition of an anomalous data point is one that deviates by a certain standard deviation from the mean. Traversing mean over time-series data isn't exactly trivial, as it's not static. You would need a rolling window to compute the average across the data points. Technically, this is called a ***rolling average or a moving average***, and it's intended to smooth short-term fluctuations and highlight long-term ones. Mathematically, an n-period simple moving average can also be defined as a ***"low pass filter."***
#### Challenges with Simple Statistical Methods
The low pass filter allows you to identify anomalies in simple use cases, but there are certain situations where this technique won't work. Here are a few:
- The data contains noise which might be similar to abnormal behavior, because the boundary between normal and abnormal behavior is often not precise.
- The definition of abnormal or normal may frequently change, as malicious adversaries constantly adapt themselves. Therefore, the threshold based on moving average may not always apply.
- The pattern is based on seasonality. This involves more sophisticated methods, such as decomposing the data into multiple trends in order to identify the change in seasonality.
## 2. Machine Learning-Based Approaches
Below is a brief overview of popular machine learning-based techniques for anomaly detection.
#### a.Density-Based Anomaly Detection
Density-based anomaly detection is based on the k-nearest neighbors algorithm.
Assumption: Normal data points occur around a dense neighborhood and abnormalities are far away.
The nearest set of data points is evaluated using a score, which could be Euclidean distance or a similar measure dependent on the type of the data (categorical or numerical). They could be broadly classified into two algorithms:
***K-nearest neighbor***: k-NN is a simple, non-parametric lazy learning technique used to classify data based on similarities in distance metrics such as Euclidean, Manhattan, Minkowski, or Hamming distance.
***Relative density of data***: This is better known as local outlier factor (LOF). This concept is based on a distance metric called reachability distance.
#### b.Clustering-Based Anomaly Detection
Clustering is one of the most popular concepts in the domain of unsupervised learning.
Assumption: Data points that are similar tend to belong to similar groups or clusters, as determined by their distance from local centroids.
***K-means*** is a widely used clustering algorithm. It creates 'k' similar clusters of data points. Data instances that fall outside of these groups could potentially be marked as anomalies.
#### c.Support Vector Machine-Based Anomaly Detection
- A support vector machine is another effective technique for detecting anomalies.
- An SVM is typically associated with supervised learning, but there are extensions (OneClassSVM, for instance) that can be used to identify anomalies as an unsupervised problem (in which training data are not labeled).
- The algorithm learns a soft boundary in order to cluster the normal data instances using the training set, and then, using the testing instance, it tunes itself to identify the abnormalities that fall outside the learned region.
- Depending on the use case, the output of an anomaly detector could be numeric scalar values for filtering on domain-specific thresholds or textual labels (such as binary/multi labels).
In this jupyter notebook we are going to take the credit card fraud detection as the case study for understanding this concept in detail using the following Anomaly Detection Techniques namely
#### Isolation Forest Anomaly Detection Algorithm
#### Density-Based Anomaly Detection (Local Outlier Factor)Algorithm
#### Support Vector Machine Anomaly Detection Algorithm
## Credit Card Fraud Detection
## Problem Statement:
The Credit Card Fraud Detection Problem includes modeling past credit card transactions with the knowledge of the ones that turned out to be fraud. This model is then used to identify whether a new transaction is fraudulent or not. Our aim here is to detect 100% of the fraudulent transactions while minimizing the incorrect fraud classifications.
#### DataSet :
The dataset that is used for credit card fraud detection is derived from the following Kaggle URL :
https://www.kaggle.com/mlg-ulb/creditcardfraud
#### Observations
- The data set is highly skewed, consisting of 492 frauds in a total of 284,807 observations. This resulted in only 0.172% fraud cases. This skewed set is justified by the low number of fraudulent transactions.
- The dataset consists of numerical values from the 28 ‘Principal Component Analysis (PCA)’ transformed features, namely V1 to V28. Furthermore, there is no metadata about the original features provided, so pre-analysis or feature study could not be done.
- The ‘Time’ and ‘Amount’ features are not transformed data.
- There is no missing value in the dataset.
## Preprocessing
### Import Libraries
```
import numpy as np
import pandas as pd
import sklearn
import scipy
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import classification_report,accuracy_score
from sklearn.ensemble import IsolationForest
from sklearn.neighbors import LocalOutlierFactor
from sklearn.svm import OneClassSVM
from pylab import rcParams
rcParams['figure.figsize'] = 14, 8
RANDOM_SEED = 42
LABELS = ["Normal", "Fraud"]
import plotly.plotly as py
import plotly.graph_objs as go
import plotly
import plotly.figure_factory as ff
from plotly.offline import init_notebook_mode, iplot
# Load the Kaggle credit-card fraud dataset (284807 rows, PCA features V1-V28
# plus Time, Amount and the Class target).
data = pd.read_csv('../input/creditcard.csv',sep=',')
print(data.columns)
# Reproducible 10% sample used later for the unsupervised detectors.
data1= data.sample(frac = 0.1,random_state=1)
data1.shape
data.describe()
```
## Exploratory Data Analysis
```
data.shape
```
Let us now check the missing values in the dataset
```
# Confirm there are no missing values, then peek at the first rows.
data.isnull().values.any()
data.head()
# Bar chart of class counts: 0 = normal, 1 = fraud (highly imbalanced).
count_classes = pd.value_counts(data['Class'], sort = True)
count_classes.plot(kind = 'bar', rot=0)
plt.title("Transaction Class Distribution")
plt.xticks(range(2), LABELS)
plt.xlabel("Class")
plt.ylabel("Frequency");
```
Determine the number of fraud and valid transactions in the entire dataset.
```
# Split the full dataset by class and inspect the counts.
Fraud = data[data['Class']==1]
Normal = data[data['Class']==0]
Fraud.shape
Normal.shape
```
How different are the amount of money used in different transaction classes?
```
# Summary statistics of the transaction amounts per class.
Fraud.Amount.describe()
Normal.Amount.describe()
```
Let's have a more graphical representation of the data
```
# Histograms of transaction amount for each class, log-scaled counts so the
# rare fraud class stays visible.
f, (ax1, ax2) = plt.subplots(2, 1, sharex=True)
f.suptitle('Amount per transaction by class')
bins = 50
ax1.hist(Fraud.Amount, bins = bins)
ax1.set_title('Fraud')
ax2.hist(Normal.Amount, bins = bins)
ax2.set_title('Normal')
plt.xlabel('Amount ($)')
plt.ylabel('Number of Transactions')
plt.xlim((0, 20000))
plt.yscale('log')
plt.show();
```
Do fraudulent transactions occur more often during certain time frame ? Let us find out with a visual representation.
```
# Scatter of amount vs. time for each class.
f, (ax1, ax2) = plt.subplots(2, 1, sharex=True)
f.suptitle('Time of transaction vs Amount by class')
ax1.scatter(Fraud.Time, Fraud.Amount)
ax1.set_title('Fraud')
ax2.scatter(Normal.Time, Normal.Amount)
ax2.set_title('Normal')
plt.xlabel('Time (in Seconds)')
plt.ylabel('Amount')
plt.show()

init_notebook_mode(connected=True)
plotly.offline.init_notebook_mode(connected=True)
# Interactive plotly scatter of fraud amount over time.
trace = go.Scatter(
    x = Fraud.Time,
    y = Fraud.Amount,
    mode = 'markers'
)
# BUG FIX: the trace list was previously assigned to `data`, clobbering the
# DataFrame of the same name loaded earlier in the notebook; use a distinct
# variable so `data` keeps referring to the dataset.
plot_data = [trace]
plotly.offline.iplot({
    "data": plot_data
})
```
Doesn't seem like the time of transaction really matters here as per above observation.
Now let us take a sample of the dataset for our modelling and prediction
```
data1.shape
```
Plot histogram of each parameter
```
# Histogram of every feature in the sample.
data1.hist(figsize=(20,20))
plt.show()
```
Determine the number of fraud and valid transactions in the dataset.
```
# Split the 10% sample by class; the fraud/valid ratio becomes the
# `contamination` parameter of the detectors below.
Fraud = data1[data1['Class']==1]
Valid = data1[data1['Class']==0]
# NOTE(review): assumes the sample contains at least one valid transaction —
# len(Valid) == 0 would raise ZeroDivisionError.
outlier_fraction = len(Fraud)/float(len(Valid))
```
Now let us print the outlier fraction and the number of Fraud and Valid transaction cases.
```
print(outlier_fraction)
print("Fraud Cases : {}".format(len(Fraud)))
print("Valid Cases : {}".format(len(Valid)))
```
Correlation Matrix
```
# Pairwise correlation heatmap of the sampled features.
correlation_matrix = data1.corr()
fig = plt.figure(figsize=(12,9))
sns.heatmap(correlation_matrix,vmax=0.8,square = True)
plt.show()
```
The above correlation matrix shows that none of the V1 to V28 PCA components have any correlation to each other however if we observe Class has some form positive and negative correlations with the V components but has no correlation with Time and Amount.
Get all the columns from the dataframe
```
# Build the feature matrix X (everything except the label) and target Y.
columns = data1.columns.tolist()
# Filter the columns to remove data we do not want
columns = [c for c in columns if c not in ["Class"]]
# Store the variable we are predicting
target = "Class"
# Define a random state (reused by the detectors below for reproducibility)
state = np.random.RandomState(42)
X = data1[columns]
Y = data1[target]
# Synthetic uniform noise with the same shape as X; generated here but never
# used by the models below — presumably left over from an sklearn example.
X_outliers = state.uniform(low=0, high=1, size=(X.shape[0], X.shape[1]))
# Print the shapes of X & Y
print(X.shape)
print(Y.shape)
```
## Model Prediction
Now it is time to start building the model .The types of algorithms we are going to use to try to do anomaly detection on this dataset are as follows
#### 1. Isolation Forest Algorithm:
One of the newest techniques to detect anomalies is called Isolation Forests. The algorithm is based on the fact that anomalies are data points that are few and different. As a result of these properties, anomalies are susceptible to a mechanism called isolation.
This method is highly useful and is fundamentally different from all existing methods. It introduces the use of isolation as a more effective and efficient means to detect anomalies than the commonly used basic distance and density measures. Moreover, this method is an algorithm with a low linear time complexity and a small memory requirement. It builds a good performing model with a small number of trees using small sub-samples of fixed size, regardless of the size of a data set.
Typical machine learning methods tend to work better when the patterns they try to learn are balanced, meaning the same amount of good and bad behaviors are present in the dataset.
#### How Isolation Forests Work
The Isolation Forest algorithm isolates observations by randomly selecting a feature and then randomly selecting a split value between the maximum and minimum values of the selected feature. The logic argument goes: isolating anomaly observations is easier because only a few conditions are needed to separate those cases from the normal observations. On the other hand, isolating normal observations require more conditions. Therefore, an anomaly score can be calculated as the number of conditions required to separate a given observation.
The way that the algorithm constructs the separation is by first creating isolation trees, or random decision trees. Then, the score is calculated as the path length to isolate the observation.
#### 2. Local Outlier Factor(LOF) Algorithm
The LOF algorithm is an unsupervised outlier detection method which computes the local density deviation of a given data point with respect to its neighbors. It considers as outlier samples that have a substantially lower density than their neighbors.
The number of neighbors considered, (parameter n_neighbors) is typically chosen 1) greater than the minimum number of objects a cluster has to contain, so that other objects can be local outliers relative to this cluster, and 2) smaller than the maximum number of close by objects that can potentially be local outliers. In practice, such informations are generally not available, and taking n_neighbors=20 appears to work well in general.
Define the outlier detection methods
```
# Unsupervised outlier detectors, keyed by display name. Each is configured
# with the observed outlier_fraction as `contamination`, so every model flags
# roughly the same share of points as anomalous.
classifiers = {
    # Random-split isolation trees; max_samples=len(X) makes every tree see
    # the whole sample (no sub-sampling).
    "Isolation Forest":IsolationForest(n_estimators=100, max_samples=len(X),
                                       contamination=outlier_fraction,random_state=state, verbose=0),
    # Density-based: compares each point's local density to its 20 neighbours.
    "Local Outlier Factor":LocalOutlierFactor(n_neighbors=20, algorithm='auto',
                                              leaf_size=30, metric='minkowski',
                                              p=2, metric_params=None, contamination=outlier_fraction),
    # One-class SVM with RBF kernel; nu bounds the fraction of training errors.
    # NOTE(review): `degree` is ignored for the RBF kernel, and newer
    # scikit-learn versions removed OneClassSVM's random_state — confirm the
    # installed version.
    "Support Vector Machine":OneClassSVM(kernel='rbf', degree=3, gamma=0.1,nu=0.05,
                                         max_iter=-1, random_state=state)
}
```
Fit the model
```
# Fit each detector on the sampled features and score it against the labels.
n_outliers = len(Fraud)
for i, (clf_name,clf) in enumerate(classifiers.items()):
    #Fit the data and tag outliers
    if clf_name == "Local Outlier Factor":
        # LOF has no separate predict step: fit_predict labels the fit data.
        y_pred = clf.fit_predict(X)
        scores_prediction = clf.negative_outlier_factor_
    elif clf_name == "Support Vector Machine":
        clf.fit(X)
        y_pred = clf.predict(X)
    else:
        # Isolation Forest branch
        clf.fit(X)
        scores_prediction = clf.decision_function(X)  # NOTE(review): computed but never used
        y_pred = clf.predict(X)
    # sklearn's convention is +1 = inlier, -1 = outlier; remap to the dataset
    # labels: 0 for Valid transactions, 1 for Fraud transactions.
    y_pred[y_pred == 1] = 0
    y_pred[y_pred == -1] = 1
    n_errors = (y_pred != Y).sum()
    # Run Classification Metrics
    print("{}: {}".format(clf_name,n_errors))
    print("Accuracy Score :")
    print(accuracy_score(Y,y_pred))
    print("Classification Report :")
    print(classification_report(Y,y_pred))
```
#### Observations :
- Isolation Forest detected 73 errors versus Local Outlier Factor detecting 97 errors vs. SVM detecting 8516 errors
- Isolation Forest is 99.74% accurate, compared to LOF at 99.65% and SVM at 70.09%
- When comparing error precision & recall for 3 models , the Isolation Forest performed much better than the LOF as we can see that the detection of fraud cases is around 27 % versus LOF detection rate of just 2 % and SVM of 0%.
- So overall Isolation Forest Method performed much better in determining the fraud cases which is around 30%.
- We can also improve on this accuracy by increasing the sample size or use deep learning algorithms however at the cost of computational expense.We can also use complex anomaly detection models to get better accuracy in determining more fraudulent cases
Now let us look at one particular Deep Learning Algorithm called ***Autoencoders***
## Autoencoders
An autoencoder is a type of artificial neural network used to learn efficient data codings in an unsupervised manner.
The aim of an autoencoder is to learn a representation (encoding) for a set of data, typically for the purpose of dimensionality reduction.
An autoencoder learns to compress data from the input layer into a short code, and then uncompress that code into something that closely matches the original data. This forces the autoencoder to engage in dimensionality reduction, for example by learning how to ignore noise. Some architectures use stacked sparse autoencoder layers for image recognition. The first autoencoder might learn to encode easy features like corners, the second to analyze the first layer's output and then encode less local features like the tip of a nose, the third might encode a whole nose, etc., until the final autoencoder encodes the whole image into a code that matches (for example) the concept of "cat".An alternative use is as a generative model: for example, if a system is manually fed the codes it has learned for "cat" and "flying", it may attempt to generate an image of a flying cat, even if it has never seen a flying cat before.
The simplest form of an autoencoder is a feedforward, non-recurrent neural network very similar to the many single layer perceptrons which makes a multilayer perceptron (MLP) – having an input layer, an output layer and one or more hidden layers connecting them – but with the output layer having the same number of nodes as the input layer, and with the purpose of reconstructing its own inputs (instead of predicting the target value Y given inputs X). Therefore, autoencoders are unsupervised learning models.
### If you like this kernel Greatly Appreciate if you can UPVOTE .Thank you
| github_jupyter |
```
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
import numpy as np
import pandas as pd
```
# Estructuras de Datos
#### PANDAS MANEJA ESTRUCTURAS DE DATOS ESPECIALES PARA REPRESENTAR TABLAS Y OTROS TIPOS DE DATOS ORDENADOS
## Series
```
# A Series is a one-dimensional array of values paired with a labelled index.
obj = pd.Series([4, 7, -5, 3])
obj
# ATTRIBUTES OF A SERIES OBJECT
# The values it holds
obj.values
# The label identifying each value of the Series
obj.index
# The Index can be specified when the Series is defined
obj2 = pd.Series([4, 7, -5, 3], index=['d', 'b', 'a', 'c'])
obj2
# We can select/assign a value of a Series by referring to its index
obj2['a']  # Indices are strings...
obj2.iloc[2]  # or positions (plain obj2[2] is deprecated on labelled Series)
obj2['d'] = 2  # Assignment
obj2[['c', 'a', 'd']]  # Fancy indexing
# We can use the same operations as with NumPy ndarrays
obj2[obj2 > 0]  # Boolean indexing
obj2 * 2  # Scalar multiplication
np.exp(obj2)  # Applying ufuncs
# We can create Series from dictionaries
sdata = {'Ohio': 35000, 'Texas': 71000, 'Oregon': 16000, 'Utah': 5000}
obj3 = pd.Series(sdata)
obj3
# We can even specify the order we want the indices in
states = ['California', 'Ohio', 'Oregon', 'Texas']
obj4 = pd.Series(sdata, index=states)  # California shows up as NaN
obj4
# We can do operations between Series
obj3
obj4
obj3+obj4  # The indices are aligned automatically
# We can name both the values and the index labels
obj4.name = "population"  # What are we storing?
# BUG FIX: the original set the index name on `obj`, not on the `obj4`
# displayed below, so the "state" label never appeared in the output.
obj4.index.name = "state"  # What do these values belong to?
obj4
```
## DataFrame
```
# CREATING DATAFRAMES
# They represent tables of data
data = {'state': ['Ohio', 'Ohio', 'Ohio', 'Nevada', 'Nevada', 'Nevada'],
        'year': [2000, 2001, 2002, 2001, 2001, 2003],
        'pop': [1.5, 1.7, 3.6, 2.4, 2.9, 3.2]}
frame = pd.DataFrame(data)
frame
# We can specify the column order when defining a DataFrame
pd.DataFrame(data, columns=['year', 'state', 'pop'])
# Likewise we can specify the order of the indices
frame2 = pd.DataFrame(data, columns=['year', 'state', 'pop', 'debt'],
                      index=['one', 'two', 'three', 'four', 'five', 'six'])
frame2
# ACCESS AND ASSIGNMENT
# We can access a column in two ways:
# By label
frame2['state']
# As an attribute
frame2.state
# We can assign values to the DataFrame
frame2
frame2['debt'] = 16.6 # Assigning scalars...
frame2
frame2['debt'] = np.arange(6.) # or arrays (watch the length!)
frame2
# NEW COLUMNS
# Assigning to a non-existent column creates it
frame2['eastern'] = frame2.state == 'Ohio'
frame2
# We can remove columns using the "del" keyword
del frame2['eastern']
frame2.columns # DataFrame attribute (it is an Index)
# ATTRIBUTES OF A DATAFRAME
pop = {
    'Nevada': {2001: 2.4, 2002: 2.9},
    'Ohio': {2000: 1.5, 2001: 1.7, 2002: 3.6}
}
frame3 = pd.DataFrame(pop)
frame3
# "Index name" and "column name" attributes
frame3.index.name = 'year'; frame3.columns.name = 'state'
frame3.values # Just the values
frame3 # Values and labels
```
# Operaciones con Estructuras de Datos
## Indexing
```
obj = pd.Series(np.arange(4.), index=['a', 'b', 'c', 'd'])
obj
# THERE ARE MANY WAYS TO REFERENCE A VALUE
obj['b'] # By label
obj[1] # By integer (by position)
obj[1:3] # Positional slicing (the last value is NOT included)
obj['b':'d'] # Label slicing (the last value IS included)
obj[['b', 'a', 'd']] # By list of labels
obj[obj < 2] # By boolean indexing
# Assigning to a slice modifies the original Series
obj['b': 'c'] = 5
obj
# INDEXING IN DATAFRAMES
data = pd.DataFrame(np.arange(16).reshape((4,4)),
                    index=['Ohio', 'Colorado', 'Utah', 'New York'],
                    columns=['one', 'two', 'three', 'four'])
data
# It is basically the same...
data['two'] # By label
data[['three', 'one']] # By list of labels
data[:2] # Slicing
data[data['three']>5] # Boolean indexing (the mask is a Series)
# We can also assign using any of the previous methods
data[data < 5] = 0
data
```
## Selection (loc e iloc)
```
# THERE IS A PROBLEM: WHAT IF THE LABELS THEMSELVES ARE NUMERIC?
# DO I INDEX BY LABEL OR BY POSITION?
# A GOOD PRACTICE IS TO USE loc AND iloc
data
# loc selects by labels
data.loc['Colorado', ['two', 'three']]
# iloc selects by integers / by position
data.iloc[2, [3, 0, 1]]
data.iloc[[1, 2, 3], [3, 0, 1]] # Not fancy indexing
# WE CAN ALSO USE SLICES WITH THESE ACCESSORS
data.loc[:'Utah', 'two']
# We cannot use boolean indexing inside iloc
data.iloc[:, :3][data.three > 5] # Filter > 5 afterwards
```
## Reindexing
```
# REINDEX ON SERIES
# We can define the index at creation time
obj = pd.Series([4.5, 7.2, -5.3, 3.6], index=['d', 'b', 'a', 'c'])
obj
# reindex returns an object with the indices in a specific order
obj2 = obj.reindex(['a', 'b', 'c', 'd', 'e']) # Note: NaN because "e" does not exist
obj2
# We can deal with the NaN values as follows
obj3 = pd.Series(['blue', 'purple', 'yellow'], index=[0, 2, 4])
obj3
# The ffill option forward-fills from earlier non-NaN values
obj3.reindex(range(6), method='ffill')
# REINDEX ON DATAFRAMES
frame = pd.DataFrame(np.arange(9).reshape((3, 3)),
                     index=['a', 'c', 'd'],
                     columns=['Ohio', 'Texas', 'California'])
frame
# Reindexing on the index works the same way
frame2 = frame.reindex(['a', 'b', 'c', 'd'])
frame2
# Although we can also reindex by columns
states = ['Texas', 'Utah', 'California']
frame.reindex(columns=states)
```
## Dropping
```
# DROPPING ON SERIES
obj = pd.Series(np.arange(5), index=['a', 'b', 'c', 'd', 'e'])
obj
# drop creates a new object without the value whose index was given
new_obj = obj.drop('c') # Removes the value with index c
new_obj
obj.drop(['d', 'c']) # Removes the values with indices d and c
# DROPPING ON DATAFRAMES
data = pd.DataFrame(np.arange(16).reshape((4,4)),
                    index=['Ohio', 'Colorado', 'Utah', 'New York'],
                    columns=['one', 'two', 'three', 'four'])
data
# Drop rows (the default)
data.drop(['Colorado', 'Ohio'])
# Drop columns
data.drop('two', axis=1)
data.drop(['two', 'four'], axis='columns')
# DROP MAY NOT CREATE A NEW OBJECT!
obj # Before
obj.drop('c', inplace=True) # inplace=True mutates obj itself
obj # After
```
## Ordenamiento/Sorting
```
# SORTING ON SERIES
obj = pd.Series(range(4), index=['d', 'a', 'b', 'c'])
obj.sort_index() # Sorts lexicographically
# SORTING ON DATAFRAMES
frame = pd.DataFrame(np.arange(8).reshape((2,4)),
                     index=['three', 'one'],
                     columns=['d', 'a', 'b', 'c'])
frame.sort_index() # Sort rows (the default)
frame.sort_index(axis=1) # Sort columns
# Change the sort order (ascending by default)
frame.sort_index(axis=1, ascending=False)
# SORT BY VALUES, NOT BY INDEX
obj = pd.Series([4, 7, -3, 2])
obj.sort_values()
# In DataFrames, we sort using a column as the criterion
frame = pd.DataFrame({'b': [4, 7, -3, 2], 'a': [0, 1, 0, 1]})
frame
frame.sort_values(by='b')
```
## Aplicación de Funciones
```
# UFUNCS
# We can use NumPy ufuncs on Series and DataFrames
frame = pd.DataFrame(np.random.randn(4, 3), columns=list('bde'),
                     index=['Utah', 'Ohio', 'Texas', 'Oregon'])
frame
# Absolute value
np.abs(frame)
# APPLYING MAPPING FUNCTIONS
f = lambda x: x.max() - x.min() # Range of values
frame.apply(f) # Returns a Series
# Apply the function along columns (rows are the default)
frame.apply(f, axis='columns') # Useful for time series
# We can return DataFrames as well
# (note: this def rebinds the name f, shadowing the lambda above)
def f(x):
    return pd.Series([x.min(), x.max()], index=['min', 'max'])
frame.apply(f) # Returns a DataFrame
```
## Operaciones
```
## AUTOMATIC ALIGNMENT
# How does pandas line up the indices of two objects when adding them?
s1 = pd.Series([7.3, -2.5, 3.4, 1.5], index=['a', 'c', 'd', 'e'])
s2 = pd.Series([-2.1, 3.6, -1.5, 4, 3.1], index = ['a', 'c', 'e', 'f', 'g'])
s1
s2
# f and g sort after e.
s1 + s2 # NaN where an index is not common to both (d, f, g)
# DATAFRAMES
df1 = pd.DataFrame(np.arange(12.).reshape((3,4)),
                   columns=list('abcd'))
df2 = pd.DataFrame(np.arange(20.).reshape((4,5)),
                   columns=list('abcde'))
# BUG FIX: the original used chained indexing (df2.iloc[1]['b'] = np.nan),
# which assigns through a temporary and may not modify df2 at all
# (SettingWithCopyWarning); a single .loc indexer is the reliable form.
df2.loc[1, 'b'] = np.nan # Simulate NaN values
df1
df2
df1 + df2 # Many NaN values
# We can work around that with the fill_value option
df1.add(df2, fill_value=0)
```
#### En general, es una buena práctica utilizar funciones add, sub, div, etcétera en vez de operadores +, -, /, entre otros
```
# The arithmetic methods have 'r' variants that swap their arguments
df1.sub(df2) # df1 - df2
df1.rsub(df2) # df2 - df1
```
### Operaciones entre Series y DataFrames: Broadcasting
```
frame = pd.DataFrame(np.arange(12.).reshape((4,3)),
                     columns=list('bde'),
                     index=['Utah', 'Ohio', 'Texas', 'Oregon'])
series = frame.iloc[1]
series
frame
# How does a subtraction between 2D and 1D objects work?
frame - series # Broadcasting!
# Broadcasting along columns (rows are the default)
series3 = frame['d']
series3
frame
frame.sub(series3, axis='index') # or axis=0, same thing
```
# Calculando Estadísticas
```
df = pd.DataFrame([[1.4, np.nan], [7.1, -4.5],
                   [np.nan, np.nan], [0.75, -1.3]],
                  index=['a', 'b', 'c', 'd'],
                  columns=['one', 'two'])
df
# Sums
df.sum()
df.sum(axis='columns')
# Index labels of the maximum values
df.idxmax()
# Accumulations
df.cumsum()
```
## Descripción y más información
```
# describe reports a lot of summary information about a DataFrame
df.describe()
# Description of non-numeric values
obj = pd.Series(['a', 'a', 'b', 'c']*4)
obj
obj.describe()
```
## Valores únicos y Conteo
```
# Which values repeat?
obj = pd.Series(['c','a','d','a','a','b','b','c','c'])
uniques = obj.unique() # In order of first appearance
uniques
# How many times does each value repeat?
obj.value_counts() # Sorted descending
# These also exist as module-level pandas functions
# NOTE(review): pd.value_counts is deprecated in pandas 2.x — confirm version.
pd.value_counts(obj, sort=False)
# A SMALL EXAMPLE: A SURVEY
# Given a table with different people's answers...
data = pd.DataFrame({'Qu1': [1,3,4,3,4],
                     'Qu2': [2,3,1,2,3],
                     'Qu3': [1,5,2,4,4],})
data
# ...we want to know how many people gave each answer
result = data.apply(pd.value_counts).fillna(0)
result
```
## Correlación y Covarianza
Nota: Instalar pandas_datareader usando la consola:
python3 -m pip install pandas-datareader
```
# Load data from Yahoo (stock prices and volumes)
import pandas_datareader.data as web
# NOTE(review): get_data_yahoo has repeatedly been broken by Yahoo API
# changes — confirm it still works with the installed pandas-datareader.
all_data = {ticker: web.get_data_yahoo(ticker)
            for ticker in ['AAPL', 'IBM', 'MSFT', 'GOOG']}
price = pd.DataFrame({ticker: data['Adj Close']
                      for ticker, data in all_data.items()})
volume = pd.DataFrame({ticker: data['Volume']
                       for ticker, data in all_data.items()})
# Percentage change of the prices (useful for time series)
returns = price.pct_change()
returns.tail()
# Correlation and covariance (1 vs 1)
returns['MSFT'].corr(returns['IBM'])
returns['MSFT'].cov(returns['IBM'])
# Correlation matrix (all vs all)
returns.corr()
# Covariance matrix
returns.cov()
# Correlation of one column vs all
returns.corrwith(returns['IBM']) # Returns a Series
returns.corrwith(volume) # Returns a DataFrame
```
# Cargado y Guardado de Datos
## CSV
#### Es un formato de guardado de información en texto plano donde los valores se encuentran separados por comas. Es muy frecuente encontrar datasets con este formato.
Nota: En Linux, se utiliza el slash (/) para separar directorios. En Windows se utiliza el backslash (\\)
```
# Load some data ("!" runs a shell command from the notebook)
!cat ../examples/ex1.csv # Use !type on Windows
df = pd.read_csv('../examples/ex1.csv')
df
# We can specify the character used to separate values
pd.read_table('../examples/ex1.csv', sep=',')
# Files without a header row
!cat ../examples/ex2.csv
pd.read_csv('../examples/ex2.csv', header=None) # Columns will be numbered
# Provide the header explicitly
pd.read_csv('../examples/ex2.csv', names=['a','b','c','d','message'])
# Provide both the header and the index column
names = ['a','b','c','d','message']
pd.read_csv('../examples/ex2.csv', names=names, index_col='message')
# Data separated by whitespace
result = pd.read_table('../examples/ex3.txt', sep='\s+')
result # The first column is inferred as the Index
# HANDLING NAN VALUES
!cat ../examples/ex5.csv
result = pd.read_csv('../examples/ex5.csv')
# result
# We can use a boolean mask to locate the NaN values
pd.isnull(result)
# SAVING DATA
data = pd.read_csv('../examples/ex5.csv')
data
# We use the to_csv function
data.to_csv('../examples/out.csv')
!cat ../examples/out.csv
```
| github_jupyter |
```
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import math
```
# Math Concepts for Developers
## Exercise: High-school maths
### Problem 9: Perlin Noise
### Solution by: Lyubomir Tankishev
In this notebook I will show my solution for generating 2D Perlin Noise.
#### Contents
1. [What is Perlin Noise?](#what_is)
2. [How does it work?](#how_does_it_work)
3. [Solution](#my_solution)
1. [Defining the grid](#gen_the_grids)
2. [Generating the gradient vectors](#gen_gradient_vectors)
3. [Generating the distance matrix for the distance vectors](#gen_distance_vectors)
4. [Calculation of the dot products and gradient values](#calculate_gradient)
4. [Putting it all together](#perlin_noise)
5. [Playing around with the code](#lets_play)
6. Next steps
<a id='what_is'></a>
#### 1. What is Perlin Noise
If you are interested in the detailed definition, you can find a lot of detailed information on the internet. That said, in simple terms, Perlin Noise is an algorithm for generation of pseudo random noise with a more organic look & feel to it. This algorithm was created by Ken Perlin in the 1980s to solve the challenge of generating textures with computer algorithm that have a natural look to them. Such procedural generation of textures was needed for the increasing use of computer generated images in the movie industry. Later it was heavily adopted by the computer games as well.
To understand better the context lets first create a 2D fully random image.
```
def fully_random_generation(size):
    """Display a size x size image of uniform, uncorrelated noise.

    Serves as the visual baseline to contrast with Perlin noise below.
    """
    noise = np.random.rand(size, size)
    plt.imshow(noise)
    plt.gca().invert_yaxis()
    plt.title("Fig.1: Fully random noise")
    plt.show()

fully_random_generation(256)
```
Now, lets compare the image above with the next one which is generated using the 2D Perlin Noise algorithm that is described in this book. One can easily notice that although there is randomness in the image it is much more natural looking.
<p style="text-align: left;"><b>Fig.2:</b> Perlin Noise</p>
<img src="perlin_noise.png" width="256" height="256" align="left"/>
<a id='how_does_it_work'></a>
#### 2. How does it work?
The 2D Perlin Noise algorithm works by computing a value for each pixel (cell) in an image (grid of pixels) as a function of the pixel's location and a set of gradient vectors. The image is broken down into sub grids, and a gradient vector is assigned to each corner of each sub grid. Each sub grid contains a number of pixels. The smaller the sub grid, the higher the granularity in the final image. Setting a sub grid of 1 pixel will ultimately generate a result similar to random noise.
<img src="https://upload.wikimedia.org/wikipedia/commons/0/09/PerlinNoiseGradientGrid.png" width="512" height="256" align="center"/><p style="text-align: center;"><b>Fig.3:</b>Grid and random gradient vectors <a href="https://en.wikipedia.org/wiki/Perlin_noise">(source)</a></p>
After that, the dot products of the applicable gradient vectors and the distance vectors from that pixel to the corners of the sub grid are computed.
<img src="https://codelirium.com/public/images/perlin-noise/perlin-algo-1.png" width="512" height="256" align="center"/><p style="text-align: center;"><b>Fig.4:</b>Gradient and distance vectors <a href="https://codelirium.com/article/perlin-noise">(source)</a></p>
Finally, the resulting dot products are interpolated to get the gradient value of the selected pixel.
<img src="https://codelirium.com/public/images/perlin-noise/perlin-algo-2.png
" width="512" height="256" align="center"/><p style="text-align: center;"><b>Fig.5:</b>Interpolation of dot products <a href="https://codelirium.com/article/perlin-noise">(source)</a></p>
This process is then repeated for each pixel in the image to generate the final result.
<a id='my_solution'></a>
#### 3. Solution
Now that we know the basics lets try to recreate the algorithm. But first, its disclaimer time :-)
<br>DISCLAIMER: Although I understand the concept, my algorithm is likely going to be inefficient and could probably be written much better. Also, there might be some deviations due to uncertainty about how to implement some parts of the algorithm.
Pfff..., after I got this off my chest, let's start.
My personal preference is to code in sections defining several functions each implementing a step from the final code.
<a id='gen_the_grids'></a>
<u>A. Defining the grids</u><br>
We start with generating the main grid and splitting it in gradient vector sub grid.
```
# First we will generate the main grid
def gen_main_grid(grid_size: int) -> tuple:
    """Return (x, y) coordinate arrays for every pixel of a square grid.

    Pixels are enumerated row by row: x cycles through 0..grid_size-1 and
    y repeats each value grid_size times.
    """
    gx, gy = np.meshgrid(np.arange(grid_size), np.arange(grid_size))
    return gx.ravel(), gy.ravel()
# Test: plot the generated pixel coordinates
main_grid_size = 12
x, y = gen_main_grid(main_grid_size)
plt.scatter(x, y, marker='o')
plt.show()
# Next we select the gradient-vector grid size.
# The fewer pixels in each sub-grid, the higher the granularity of the output.
def gen_grad_grid(main_grid_size: int, granularity=1) -> tuple:
    """Snap `granularity` down to the nearest divisor of the main grid size.

    Returns:
        tuple: (granularity, grad_grid_size) where granularity evenly divides
        main_grid_size and grad_grid_size = main_grid_size / granularity.
    """
    # Guard: a non-positive granularity would otherwise loop forever or
    # divide by zero; fall back to the coarsest valid split.
    if granularity < 1:
        granularity = 1
    while main_grid_size % granularity != 0:
        granularity -= 1
    grad_grid_size = main_grid_size // granularity
    return granularity, grad_grid_size
# Test: plot the grid with the sub-grid boundaries overlaid in red
main_grid_size = 12
granularity = 3
x, y = gen_main_grid(main_grid_size)
plt.scatter(x, y, marker='o')
g, g_size = gen_grad_grid(main_grid_size, granularity)
line = np.linspace(0, main_grid_size - 1, 1000)
for i in range(0, g):
    # Boundary lines offset by 0.5 so they fall between pixel centres
    data = np.full(1000, g*(i+1) - 0.5)
    plt.plot(line, data, 'r')
    plt.plot(data, line, 'r')
plt.show()
```
<a id='gen_gradient_vectors'></a>
<u>B. Generating the gradient vectors</u><br>
Next we will generate our gradient vectors. For purpose we will first create a tuple of possible vectors. After that we will randomly select the number of vectors that we need in accordance with the size of our gradient sub grid. <br>
The usual implementation of Perlin Noise uses the following set of four or eight vectors:<br>
$\big\{(1, 1), (-1, 1), (1, -1), (-1, -1)\big\}$; or<br>
$\big\{(1, 1), (-1, 1), (1, -1), (-1, -1), (\sqrt{2}, 0), (0, \sqrt{2}), (-\sqrt{2}, 0), (0, -\sqrt{2})\big\}$<br>
That said, I wanted to define my own vectors, equally spaced on a circle of radius $1$ and with components between $-1$ and $1$. Using numpy.linspace we can easily achieve that.
```python
v = np.linspace(0, 2 * np.pi, number_of_unique_vectors + 1)
vx = np.round(np.cos(v), 5)
vy = np.round(np.sin(v), 5)
vectors = tuple(zip(vx, vy))
```
The `+ 1` in the first line is added because $\sin 0$ and $\sin 2\pi$ are the same (similary for $\cos x$). All that is left then is to randomly select a number of vectors for our grid.
```
def generate_gradient_vectors_matrix(number_of_unique_vectors: int, gradient_grid_size: int) -> np.ndarray:
    """Build a (g, g, 2) matrix of gradient vectors picked at random.

    The candidate vectors are `number_of_unique_vectors` unit vectors equally
    spaced around the circle (components rounded to 5 decimals).
    """
    angles = np.linspace(0, 2 * np.pi, number_of_unique_vectors + 1)
    ys = np.round(np.sin(angles), 5)
    xs = np.round(np.cos(angles), 5)
    unit_vectors = tuple(zip(xs, ys))
    candidate_indexes = list(range(number_of_unique_vectors))
    picks = np.random.choice(candidate_indexes, size=gradient_grid_size**2)
    chosen = [unit_vectors[i] for i in picks]
    return np.array(chosen).reshape([gradient_grid_size, gradient_grid_size, 2])
# Test: generate 8 unique unit vectors and fill the gradient-vector matrix (gvm).
# NOTE(review): the second argument is the grid SIDE, so this builds a 9x9
# matrix (81 vectors) — the original comment said "9 (for a grid of 3x3)".
# Run the cell a couple of times to see how the chosen vectors change.
gvm = generate_gradient_vectors_matrix(8 , 9)
print(gvm[0])
# print(gvm)
```
<a id='gen_distance_vectors'></a>
<u>C. Generating the distance matrix for the distance vectors</u><br>
In order to generate the distance vectors it will be helpful to create a matrix with the relative distance of each pixel from the gradient vectors that surround it. For that purpose we first need to find the relative index of that pixel and assign a distance value to it for both the $x$ and $y$ coordinates.
We generate the matrix only once as it is applicable for all pixels.
```python
dm = np.linspace(0, 1, grad_grid_size)
```
An then in the *calculate_gradient* fuction we will use it multiple times to get the distance to each corner of the gradient vector grid.
```python
# Calc the relative location of the cell in the dm
xloc, yloc = x % len(dm), y % len(dm)
# Calc the distance vectors (bottom left, bottom right, top left, top right)
dbl = np.array([dm[xloc], dm[yloc]])
dbr = np.array([dm[xloc] - 1, dm[yloc]])
dtl = np.array([dm[xloc], dm[yloc] - 1])
dtr = np.array([dm[xloc] - 1, dm[yloc] - 1])
```
<a id='calculate_gradient'></a>
<u>D. Calculation of the dot products and gradient values</u><br>
Now comes the part where we will compute the gradient values for each pixel. This is done in four steps:
* Find the relative position of the pixel in the gradient sub grid (see above);
* Calculate the distance vector to that position (see above);
* Get the correct gradient vectors from the precomputed set;
* Calculate the dot product of the gradient vectors and distance vectors; and
* Interpolate the results
We will pass to our function the coordinates of the `bottom left` gradient vector (`gvc`) and relative to that one, extract the `other three` vectors from the gradient vector matrix (`gvm`).
```python
gbl = gvm[gvc[0]][gvc[1]]
gbr = gvm[gvc[0] + 1][gvc[1]]
gtl = gvm[gvc[0]][gvc[1] + 1]
gtr = gvm[gvc[0] + 1][gvc[1] + 1]
```
Once we have done that we use the `numpy.dot` built in function to calculate the dot products of the vectors (where `_x` is the respective corner of the gradient sub grid.
```python
dp_x = np.dot(db_x, gb_x)
```
Finaly, we need to interpolate the results to get the gradient value of the pixel. To do that we will use the following equations (where $t_{x}$ is the relative position of the pixel on the x-axis and $t_{y}$ is the relative position of the pixel on the y-axis; we can get both from the `dm` matrix):
$$l_{0}=d_{00}+t_{x}*(d_{01}-d_{00})$$
$$l_{1}=d_{10}+t_{x}*(d_{11}-d_{10})$$
$$gradient=l_{0}+t_{y}*(l_{1}-l_{0})$$
We can use linear interpolation, but the Perlin Noise algorithm uses a S-curve one for better results. For this algorithm we will use the same fade function, namely:
$$6t^5-15t^4+10t^3$$
```
# Smoothing (fade) function used during interpolation
def fade_noise(x):
    """Perlin's quintic fade curve 6t^5 - 15t^4 + 10t^3 (Horner form).

    Maps [0, 1] onto [0, 1] with zero first and second derivatives at both
    ends, which removes the grid artefacts of plain linear interpolation.
    """
    return ((x * 6 - 15) * x + 10) * x ** 3
# Taking all of the above into account we get the following function for calulation of the gradent value:
def calc_gradient(x: int, y: int, dm: np.ndarray, gvc: np.ndarray, gvm: list):
    """Compute the interpolated gradient (noise) value for one pixel.

    Args:
        x, y (int): the pixel's coordinates in the main grid.
        dm (numpy.ndarray): relative-distance lookup table for a gradient
            sub grid (values in [0, 1]).
        gvc (numpy.ndarray): coordinates of the bottom-left gradient
            vector for this pixel in ``gvm``.
        gvm (list): the matrix of precomputed gradient vectors.
    """
    # Relative position of the pixel inside its gradient sub grid.
    ix, iy = x % len(dm), y % len(dm)
    tx, ty = dm[ix], dm[iy]

    # Distance vectors from the four corners of the sub grid
    # (bottom-left, bottom-right, top-left, top-right).
    d_bl = np.array([tx, ty])
    d_br = np.array([tx - 1, ty])
    d_tl = np.array([tx, ty - 1])
    d_tr = np.array([tx - 1, ty - 1])

    # Corresponding gradient vectors (gvc addresses the bottom-left one).
    g_bl = gvm[gvc[0]][gvc[1]]
    g_br = gvm[gvc[0] + 1][gvc[1]]
    g_tl = gvm[gvc[0]][gvc[1] + 1]
    g_tr = gvm[gvc[0] + 1][gvc[1] + 1]

    # Dot products of distance and gradient vectors at each corner.
    dp_bl = np.dot(d_bl, g_bl)
    dp_br = np.dot(d_br, g_br)
    dp_tl = np.dot(d_tl, g_tl)
    dp_tr = np.dot(d_tr, g_tr)

    # Smoothstep-weighted bilinear interpolation of the four corner values.
    fx = fade_noise(tx)
    bottom = dp_bl + (dp_br - dp_bl) * fx
    top = dp_tl + (dp_tr - dp_tl) * fx
    return bottom + (top - bottom) * fade_noise(ty)
```
<a id='perlin_noise'></a>
#### 4. Putting it all together
Finally, we can put everything together and we should get a nice image with pseudo random noise.
```
# --- Perlin noise driver: build the grids, compute one gradient value
# --- per pixel, and plot the resulting noise image.
# Output image size (pixels per side) and noise configuration.
main_grid_size = 256
set_granularity = 3
set_number_unique_vectors = 8
# Grid/vector builders — presumably defined in earlier notebook cells
# (not shown here); TODO confirm their return shapes.
x, y = gen_main_grid(main_grid_size)
granularity, grad_grid_size = gen_grad_grid(main_grid_size, set_granularity)
gvm = generate_gradient_vectors_matrix(set_number_unique_vectors, grad_grid_size)
# Relative position of each pixel inside its sub grid, evenly spaced in [0, 1].
dm = np.linspace(0, 1, grad_grid_size)
# Calc gradient vectors coordinates for each cell
g_c = np.vectorize(lambda n, s: math.floor(n / s))
gx = np.array(g_c(x, grad_grid_size))
gy = np.array(g_c(y, grad_grid_size))
gc = np.vstack((gx, gy)).T
# One gradient value per pixel; then fold the flat list into a square image.
gradient = [calc_gradient(x[i], y[i], dm, gc[i], gvm) for i in range(main_grid_size ** 2)]
gradient = np.reshape(gradient, [main_grid_size, main_grid_size])
plt.imshow(gradient)
# Flip the y-axis so the origin sits at the bottom-left, matching the grid convention.
plt.gca().invert_yaxis()
plt.show()
```
<a id='lets_play'></a>
#### 4. Playing around with the algorithm
The first thing to play around with is the granularity (trying the algorithm with different gradient grid sizes). We can notice how increasing the number of sub grids (and thus the number of gradient vectors), effectively reducing the number of pixels in each gradient grid, creates a more textured look.
<p style="text-align: center;"><b>Fig.5:</b>Adding more texture</p>
<img src="increasing_the_subgrid.png" align="center"/>
Another cool thing is to overlay several layers of Perlin noise on top of each other. By doing so we can add fine details to our main texture. First we create a base layer. We then create a number of layers, each consecutive one with higher granularity but also with lower amplitude of the output values. We can then sum all layers (flatten them) to get a more detailed texture. In the example below I have increased the granularity by 1 for each layer and have decreased the impact of that layer by $\frac{1}{2^{i}}$ (where $i$ is the level of granularity).
<p style="text-align: center;"><b>Fig.6:</b>Adding more detail</p>
<img src="adding_details.png" align="center"/>
Finally, we can combine the two effects above and apply the algorithm to generate a fine-detail terrain map. In the example below I have generated a 1024 by 1024 pixel grid populated with Perlin Noise and colored with matplotlib's terrain color map. The map is computed with a gradient grid of 4 x 4 and 3 additional layers of fine details.
<p style="text-align: center;"><b>Fig.7:</b>Terrain map</p>
<img src="final_map.png" align="center"/>
I think it is pretty cool!
#### 5. Next steps
There are several areas for improvement of the current algorithm, including:
* Refactoring the code to generate also 3D and 4D noise;
* Creating proper octaves of frequency and amplitude;
* Play around with rotating the preselected gradient vectors to create animated 2D noise.
| github_jupyter |
<a href="https://colab.research.google.com/github/Tessellate-Imaging/Monk_Object_Detection/blob/master/application_model_zoo/Example%20-%20Wildlife%20Localization%20-%20(Extended%20version%20with%20sea%20turtles).ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# About the network
1. Paper on EfficientDet: https://arxiv.org/abs/1911.09070
2. Blog 1 on EfficientDet: https://towardsdatascience.com/efficientdet-scalable-and-efficient-object-detection-review-4472ffc34fd9
3. Blog 2 on EfficientDet: https://medium.com/@nainaakash012/efficientdet-scalable-and-efficient-object-detection-ea05ccd28427
# Table of contents
## 1. Installation Instructions
## 2. Use trained model to detect traffic signs in images
## 3. How to train using Lisa Traffic Sign Dataset
# Installation
- Run these commands
- git clone https://github.com/Tessellate-Imaging/Monk_Object_Detection.git
- cd Monk_Object_Detection/10_pytorch_efficientdet/installation
- Select the right requirements file and run
- cat requirements_cuda9.0.txt | xargs -n 1 -L 1 pip install
```
! git clone https://github.com/Tessellate-Imaging/Monk_Object_Detection.git
# For colab use the command below
! cd Monk_Object_Detection/10_pytorch_efficientdet/installation && cat requirements_colab.txt | xargs -n 1 -L 1 pip install
# Restart colab runtime now
# For Local systems and cloud select the right CUDA version
# ! cd Monk_Object_Detection/10_pytorch_efficientdet/installation && cat requirements_cuda9.0.txt | xargs -n 1 -L 1 pip install
```
# Use already trained model for demo
```
import os
import sys
sys.path.append("Monk_Object_Detection/10_pytorch_efficientdet/lib/")
from infer_detector import Infer
gtf = Infer();
# Download the pretrained model
! wget --load-cookies /tmp/cookies.txt "https://docs.google.com/uc?export=download&confirm=$(wget --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1Wi_1CzKbByTMG8p6h9tLnZYL-v4YvcN-' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1\n/p')&id=1Wi_1CzKbByTMG8p6h9tLnZYL-v4YvcN-" -O wildlife_localization_extended_trained.zip && rm -rf /tmp/cookies.txt
! unzip -qq wildlife_localization_extended_trained.zip
# Read the class list. splitlines() strips line endings without
# clipping a real character from a final line that lacks a trailing
# newline (the original's [:len-1] slice did exactly that), and the
# `with` block guarantees the handle is closed.
with open("trained_weights/custom/classes.txt", 'r') as f:
    classes_list = f.read().splitlines()
model_path = "trained_weights/custom/efficientdet-d3_trained.pth";
gtf.load_model(model_path, classes_list, use_gpu=True);
gtf.predict("trained_weights/test/9.jpg", threshold=0.8);
from IPython.display import Image
Image(filename='output.jpg')
gtf.predict("trained_weights/test/10.jpg", threshold=0.8);
from IPython.display import Image
Image(filename='output.jpg')
gtf.predict("trained_weights/test/11.jpg", threshold=0.3);
from IPython.display import Image
Image(filename='output.jpg')
gtf.predict("trained_weights/test/12.jpg", threshold=0.3);
from IPython.display import Image
Image(filename='output.jpg')
gtf.predict("trained_weights/test/13.jpg", threshold=0.8);
from IPython.display import Image
Image(filename='output.jpg')
gtf.predict("trained_weights/test/14.jpg", threshold=0.8);
from IPython.display import Image
Image(filename='output.jpg')
gtf.predict("trained_weights/test/15.jpg", threshold=0.8);
from IPython.display import Image
Image(filename='output.jpg')
gtf.predict("trained_weights/test/16.jpg", threshold=0.8);
from IPython.display import Image
Image(filename='output.jpg')
```
# Train your own detector
## Dataset credits
- Download link: https://lev.cs.rpi.edu/public/datasets/wild.tar.gz
## Reformat Dataset
```
import os
import sys
# Create the reformatted dataset tree:
#   dataset/{train,val,test}/{images,annos}
# os.makedirs(..., exist_ok=True) creates missing parents and is
# idempotent, replacing ten repetitive isdir/mkdir pairs.
for split in ("train", "val", "test"):
    for sub in ("images", "annos"):
        os.makedirs(os.path.join("dataset", split, sub), exist_ok=True)
os.listdir("wild")
os.listdir("wild/ImageSets/Main")
from tqdm import tqdm
import shutil

# Copy every image/annotation listed in train.txt into the reformatted
# dataset tree. splitlines() drops line endings safely (the original's
# [:len-1] slice chopped the last character of a file without a trailing
# newline), and shutil.copy replaces the shelled-out `cp` commands —
# portable, no per-file subprocess, and it raises on a missing source
# instead of failing silently.
with open("wild/ImageSets/Main/train.txt", 'r') as f:
    names = f.read().splitlines()

for name in tqdm(names):
    shutil.copy("wild/JPEGImages/" + name + ".jpg", "dataset/train/images/")
    shutil.copy("wild/Annotations/" + name + ".xml", "dataset/train/annos/")
from tqdm import tqdm
import shutil

# Copy every image/annotation listed in val.txt into the reformatted
# dataset tree. splitlines() drops line endings safely (the original's
# [:len-1] slice chopped the last character of a file without a trailing
# newline), and shutil.copy replaces the shelled-out `cp` commands —
# portable, no per-file subprocess, and it raises on a missing source
# instead of failing silently.
with open("wild/ImageSets/Main/val.txt", 'r') as f:
    names = f.read().splitlines()

for name in tqdm(names):
    shutil.copy("wild/JPEGImages/" + name + ".jpg", "dataset/val/images/")
    shutil.copy("wild/Annotations/" + name + ".xml", "dataset/val/annos/")
from tqdm import tqdm
import shutil

# Copy every image/annotation listed in test.txt into the reformatted
# dataset tree. splitlines() drops line endings safely (the original's
# [:len-1] slice chopped the last character of a file without a trailing
# newline), and shutil.copy replaces the shelled-out `cp` commands —
# portable, no per-file subprocess, and it raises on a missing source
# instead of failing silently.
with open("wild/ImageSets/Main/test.txt", 'r') as f:
    names = f.read().splitlines()

for name in tqdm(names):
    shutil.copy("wild/JPEGImages/" + name + ".jpg", "dataset/test/images/")
    shutil.copy("wild/Annotations/" + name + ".xml", "dataset/test/annos/")
```
### VOC to coco type - Training
```
import os
import sys
import numpy as np
import pandas as pd
import xmltodict
import json
from tqdm.notebook import tqdm
from pycocotools.coco import COCO
root_dir = "dataset/train/";
img_dir = "images/";
anno_dir = "annos/";
files = os.listdir(root_dir + anno_dir)
combined = []


def _voc_object_to_str(obj):
    # Render one VOC <object> entry as "x1 y1 x2 y2 label"
    # (coordinates stay strings; the downstream converter re-parses them).
    bbox = dict(obj["bndbox"])
    return " ".join([bbox["xmin"], bbox["ymin"], bbox["xmax"], bbox["ymax"], obj["name"]])


# Flatten each VOC XML annotation into one CSV row: "filename, space-
# separated boxes". The `with` block closes each file promptly (the
# original opened every XML and never closed it).
for anno_name in tqdm(files):
    with open(root_dir + "/" + anno_dir + "/" + anno_name, 'r') as f:
        anno = dict(dict(xmltodict.parse(f.read()))["annotation"])
    objs = anno["object"]
    # xmltodict yields a dict for a single <object>, a list for several.
    if not isinstance(objs, list):
        objs = [objs]
    label_str = " ".join(_voc_object_to_str(dict(o)) for o in objs)
    combined.append([anno["filename"], label_str])

df = pd.DataFrame(combined, columns=['ID', 'Label'])
df.to_csv(root_dir + "/train_labels.csv", index=False)
! pip install dicttoxml
import os
import numpy as np
import cv2
import dicttoxml
import xml.etree.ElementTree as ET
from xml.dom.minidom import parseString
from tqdm import tqdm
import shutil
import json
import pandas as pd
root = "dataset/train/";
img_dir = "images/";
anno_file = "train_labels.csv";
dataset_path = root;
images_folder = root + "/" + img_dir;
annotations_path = root + "/annotations/";
if not os.path.isdir(annotations_path):
os.mkdir(annotations_path)
input_images_folder = images_folder;
input_annotations_path = root + "/" + anno_file;
output_dataset_path = root;
output_image_folder = input_images_folder;
output_annotation_folder = annotations_path;
tmp = img_dir.replace("/", "");
output_annotation_file = output_annotation_folder + "/instances_" + tmp + ".json";
output_classes_file = output_annotation_folder + "/classes.txt";
if not os.path.isdir(output_annotation_folder):
os.mkdir(output_annotation_folder);
# Collect the set of class labels appearing in this split's CSV.
df = pd.read_csv(input_annotations_path);
columns = df.columns
delimiter = " ";
list_dict = [];
anno = [];
for i in range(len(df)):
img_name = df[columns[0]][i];
labels = df[columns[1]][i];
# Each box is 5 space-separated tokens: x1 y1 x2 y2 label.
tmp = labels.split(delimiter);
for j in range(len(tmp)//5):
label = tmp[j*5+4];
if(label not in anno):
anno.append(label);
# NOTE(review): category ids below are assigned from this split's own
# sorted class list. If val/test lack a class that train has, their ids
# will not line up with the model trained on train's classes.txt — the
# splits should share a single class list. TODO confirm all splits
# contain every class.
anno = sorted(anno)
for i in tqdm(range(len(anno))):
tmp = {};
tmp["supercategory"] = "master";
tmp["id"] = i;
tmp["name"] = anno[i];
list_dict.append(tmp);
# Write classes.txt (one class name per line) for this split.
anno_f = open(output_classes_file, 'w');
for i in range(len(anno)):
anno_f.write(anno[i] + "\n");
anno_f.close();
coco_data = {};
coco_data["type"] = "instances";
coco_data["images"] = [];
coco_data["annotations"] = [];
coco_data["categories"] = list_dict;
image_id = 0;
annotation_id = 0;
for i in tqdm(range(len(df))):
img_name = df[columns[0]][i];
labels = df[columns[1]][i];
tmp = labels.split(delimiter);
image_in_path = input_images_folder + "/" + img_name;
#print(image_in_path)
img = cv2.imread(image_in_path, 1);
h, w, c = img.shape;
images_tmp = {};
images_tmp["file_name"] = img_name;
images_tmp["height"] = h;
images_tmp["width"] = w;
images_tmp["id"] = image_id;
coco_data["images"].append(images_tmp);
for j in range(len(tmp)//5):
x1 = int(tmp[j*5+0]);
y1 = int(tmp[j*5+1]);
x2 = int(tmp[j*5+2]);
y2 = int(tmp[j*5+3]);
label = tmp[j*5+4];
annotations_tmp = {};
annotations_tmp["id"] = annotation_id;
annotation_id += 1;
annotations_tmp["image_id"] = image_id;
annotations_tmp["segmentation"] = [];
annotations_tmp["ignore"] = 0;
annotations_tmp["area"] = (x2-x1)*(y2-y1);
annotations_tmp["iscrowd"] = 0;
annotations_tmp["bbox"] = [x1, y1, x2-x1, y2-y1];
annotations_tmp["category_id"] = anno.index(label);
coco_data["annotations"].append(annotations_tmp)
image_id += 1;
outfile = open(output_annotation_file, 'w');
json_str = json.dumps(coco_data, indent=4);
outfile.write(json_str);
outfile.close();
```
### VOC to coco type - Val
```
import os
import sys
import numpy as np
import pandas as pd
import xmltodict
import json
from tqdm.notebook import tqdm
from pycocotools.coco import COCO
root_dir = "dataset/val/";
img_dir = "images/";
anno_dir = "annos/";
files = os.listdir(root_dir + anno_dir)
combined = []


def _voc_object_to_str(obj):
    # Render one VOC <object> entry as "x1 y1 x2 y2 label"
    # (coordinates stay strings; the downstream converter re-parses them).
    bbox = dict(obj["bndbox"])
    return " ".join([bbox["xmin"], bbox["ymin"], bbox["xmax"], bbox["ymax"], obj["name"]])


# Flatten each VOC XML annotation into one CSV row: "filename, space-
# separated boxes". The `with` block closes each file promptly (the
# original opened every XML and never closed it).
for anno_name in tqdm(files):
    with open(root_dir + "/" + anno_dir + "/" + anno_name, 'r') as f:
        anno = dict(dict(xmltodict.parse(f.read()))["annotation"])
    objs = anno["object"]
    # xmltodict yields a dict for a single <object>, a list for several.
    if not isinstance(objs, list):
        objs = [objs]
    label_str = " ".join(_voc_object_to_str(dict(o)) for o in objs)
    combined.append([anno["filename"], label_str])

df = pd.DataFrame(combined, columns=['ID', 'Label'])
# NOTE(review): the file is named train_labels.csv even for the val
# split; the downstream cell reads the same name, so this is kept.
df.to_csv(root_dir + "/train_labels.csv", index=False)
import os
import numpy as np
import cv2
import dicttoxml
import xml.etree.ElementTree as ET
from xml.dom.minidom import parseString
from tqdm import tqdm
import shutil
import json
import pandas as pd
root = "dataset/val/";
img_dir = "images/";
anno_file = "train_labels.csv";
dataset_path = root;
images_folder = root + "/" + img_dir;
annotations_path = root + "/annotations/";
if not os.path.isdir(annotations_path):
os.mkdir(annotations_path)
input_images_folder = images_folder;
input_annotations_path = root + "/" + anno_file;
output_dataset_path = root;
output_image_folder = input_images_folder;
output_annotation_folder = annotations_path;
tmp = img_dir.replace("/", "");
output_annotation_file = output_annotation_folder + "/instances_" + tmp + ".json";
output_classes_file = output_annotation_folder + "/classes.txt";
if not os.path.isdir(output_annotation_folder):
os.mkdir(output_annotation_folder);
df = pd.read_csv(input_annotations_path);
columns = df.columns
delimiter = " ";
list_dict = [];
anno = [];
for i in range(len(df)):
img_name = df[columns[0]][i];
labels = df[columns[1]][i];
tmp = labels.split(delimiter);
for j in range(len(tmp)//5):
label = tmp[j*5+4];
if(label not in anno):
anno.append(label);
anno = sorted(anno)
for i in tqdm(range(len(anno))):
tmp = {};
tmp["supercategory"] = "master";
tmp["id"] = i;
tmp["name"] = anno[i];
list_dict.append(tmp);
anno_f = open(output_classes_file, 'w');
for i in range(len(anno)):
anno_f.write(anno[i] + "\n");
anno_f.close();
coco_data = {};
coco_data["type"] = "instances";
coco_data["images"] = [];
coco_data["annotations"] = [];
coco_data["categories"] = list_dict;
image_id = 0;
annotation_id = 0;
for i in tqdm(range(len(df))):
img_name = df[columns[0]][i];
labels = df[columns[1]][i];
tmp = labels.split(delimiter);
image_in_path = input_images_folder + "/" + img_name;
#print(image_in_path)
img = cv2.imread(image_in_path, 1);
h, w, c = img.shape;
images_tmp = {};
images_tmp["file_name"] = img_name;
images_tmp["height"] = h;
images_tmp["width"] = w;
images_tmp["id"] = image_id;
coco_data["images"].append(images_tmp);
for j in range(len(tmp)//5):
x1 = int(tmp[j*5+0]);
y1 = int(tmp[j*5+1]);
x2 = int(tmp[j*5+2]);
y2 = int(tmp[j*5+3]);
label = tmp[j*5+4];
annotations_tmp = {};
annotations_tmp["id"] = annotation_id;
annotation_id += 1;
annotations_tmp["image_id"] = image_id;
annotations_tmp["segmentation"] = [];
annotations_tmp["ignore"] = 0;
annotations_tmp["area"] = (x2-x1)*(y2-y1);
annotations_tmp["iscrowd"] = 0;
annotations_tmp["bbox"] = [x1, y1, x2-x1, y2-y1];
annotations_tmp["category_id"] = anno.index(label);
coco_data["annotations"].append(annotations_tmp)
image_id += 1;
outfile = open(output_annotation_file, 'w');
json_str = json.dumps(coco_data, indent=4);
outfile.write(json_str);
outfile.close();
```
### VOC to COCO Type - Test
```
import os
import sys
import numpy as np
import pandas as pd
import xmltodict
import json
from tqdm.notebook import tqdm
from pycocotools.coco import COCO
root_dir = "dataset/test/";
img_dir = "images/";
anno_dir = "annos/";
files = os.listdir(root_dir + anno_dir)
combined = []


def _voc_object_to_str(obj):
    # Render one VOC <object> entry as "x1 y1 x2 y2 label"
    # (coordinates stay strings; the downstream converter re-parses them).
    bbox = dict(obj["bndbox"])
    return " ".join([bbox["xmin"], bbox["ymin"], bbox["xmax"], bbox["ymax"], obj["name"]])


# Flatten each VOC XML annotation into one CSV row: "filename, space-
# separated boxes". The `with` block closes each file promptly (the
# original opened every XML and never closed it).
for anno_name in tqdm(files):
    with open(root_dir + "/" + anno_dir + "/" + anno_name, 'r') as f:
        anno = dict(dict(xmltodict.parse(f.read()))["annotation"])
    objs = anno["object"]
    # xmltodict yields a dict for a single <object>, a list for several.
    if not isinstance(objs, list):
        objs = [objs]
    label_str = " ".join(_voc_object_to_str(dict(o)) for o in objs)
    combined.append([anno["filename"], label_str])

df = pd.DataFrame(combined, columns=['ID', 'Label'])
# NOTE(review): the file is named train_labels.csv even for the test
# split; the downstream cell reads the same name, so this is kept.
df.to_csv(root_dir + "/train_labels.csv", index=False)
import os
import numpy as np
import cv2
import dicttoxml
import xml.etree.ElementTree as ET
from xml.dom.minidom import parseString
from tqdm import tqdm
import shutil
import json
import pandas as pd
root = "dataset/test/";
img_dir = "images/";
anno_file = "train_labels.csv";
dataset_path = root;
images_folder = root + "/" + img_dir;
annotations_path = root + "/annotations/";
if not os.path.isdir(annotations_path):
os.mkdir(annotations_path)
input_images_folder = images_folder;
input_annotations_path = root + "/" + anno_file;
output_dataset_path = root;
output_image_folder = input_images_folder;
output_annotation_folder = annotations_path;
tmp = img_dir.replace("/", "");
output_annotation_file = output_annotation_folder + "/instances_" + tmp + ".json";
output_classes_file = output_annotation_folder + "/classes.txt";
if not os.path.isdir(output_annotation_folder):
os.mkdir(output_annotation_folder);
df = pd.read_csv(input_annotations_path);
columns = df.columns
delimiter = " ";
list_dict = [];
anno = [];
for i in range(len(df)):
img_name = df[columns[0]][i];
labels = df[columns[1]][i];
tmp = labels.split(delimiter);
for j in range(len(tmp)//5):
label = tmp[j*5+4];
if(label not in anno):
anno.append(label);
anno = sorted(anno)
for i in tqdm(range(len(anno))):
tmp = {};
tmp["supercategory"] = "master";
tmp["id"] = i;
tmp["name"] = anno[i];
list_dict.append(tmp);
anno_f = open(output_classes_file, 'w');
for i in range(len(anno)):
anno_f.write(anno[i] + "\n");
anno_f.close();
coco_data = {};
coco_data["type"] = "instances";
coco_data["images"] = [];
coco_data["annotations"] = [];
coco_data["categories"] = list_dict;
image_id = 0;
annotation_id = 0;
for i in tqdm(range(len(df))):
img_name = df[columns[0]][i];
labels = df[columns[1]][i];
tmp = labels.split(delimiter);
image_in_path = input_images_folder + "/" + img_name;
#print(image_in_path)
img = cv2.imread(image_in_path, 1);
h, w, c = img.shape;
images_tmp = {};
images_tmp["file_name"] = img_name;
images_tmp["height"] = h;
images_tmp["width"] = w;
images_tmp["id"] = image_id;
coco_data["images"].append(images_tmp);
for j in range(len(tmp)//5):
x1 = int(tmp[j*5+0]);
y1 = int(tmp[j*5+1]);
x2 = int(tmp[j*5+2]);
y2 = int(tmp[j*5+3]);
label = tmp[j*5+4];
annotations_tmp = {};
annotations_tmp["id"] = annotation_id;
annotation_id += 1;
annotations_tmp["image_id"] = image_id;
annotations_tmp["segmentation"] = [];
annotations_tmp["ignore"] = 0;
annotations_tmp["area"] = (x2-x1)*(y2-y1);
annotations_tmp["iscrowd"] = 0;
annotations_tmp["bbox"] = [x1, y1, x2-x1, y2-y1];
annotations_tmp["category_id"] = anno.index(label);
coco_data["annotations"].append(annotations_tmp)
image_id += 1;
outfile = open(output_annotation_file, 'w');
json_str = json.dumps(coco_data, indent=4);
outfile.write(json_str);
outfile.close();
```
## Training
```
import os
import sys
sys.path.append("Monk_Object_Detection/10_pytorch_efficientdet/lib/")
from train_detector import Detector
gtf = Detector();
# Read the class list. splitlines() strips line endings without
# clipping a real character from a final line that lacks a trailing
# newline (the original's [:len-1] slice did exactly that), and the
# `with` block guarantees the handle is closed.
with open("dataset/train/annotations/classes.txt", 'r') as f:
    classes_list = f.read().splitlines()
len(classes_list)
root_dir = "dataset/";
coco_dir = "train/";
img_dir = "";
set_dir = "images";
gtf.set_train_dataset(root_dir, coco_dir, img_dir, set_dir, classes_list=classes_list, batch_size=2, num_workers=4)
root_dir = "dataset/";
coco_dir = "val/";
img_dir = "";
set_dir = "images";
gtf.set_val_dataset(root_dir, coco_dir, img_dir, set_dir)
#Available models
"efficientdet-d0.pth"
"efficientdet-d1.pth"
"efficientdet-d2.pth"
"efficientdet-d3.pth"
"efficientdet-d4.pth"
"efficientdet-d5.pth"
"efficientdet-d6.pth"
"efficientdet-d7.pth"
gtf.set_model(model_name="efficientdet-d3.pth", num_gpus=1, freeze_head=False)
#Available optimizers
#adamw
#sgd
gtf.set_hyperparams(optimizer="adamw", lr=0.001, es_min_delta=0.0, es_patience=0)
gtf.train(num_epochs=20, val_interval=1, save_interval=1)
```
# Inference on an image
```
import os
import sys
sys.path.append("Monk_Object_Detection/10_pytorch_efficientdet/lib/")
from infer_detector import Infer
gtf = Infer();
# Read the class list. splitlines() strips line endings without
# clipping a real character from a final line that lacks a trailing
# newline (the original's [:len-1] slice did exactly that), and the
# `with` block guarantees the handle is closed.
with open("dataset/train/annotations/classes.txt", 'r') as f:
    classes_list = f.read().splitlines()
classes_list
model_path = "trained_weights/custom/efficientdet-d3_trained.pth";
gtf.load_model(model_path, classes_list, use_gpu=True);
img_list = os.listdir("dataset/test/images/");
img_list[10]
gtf.predict("dataset/test/images/" + img_list[20], threshold=0.8);
from IPython.display import Image
Image(filename='output.jpg')
gtf.predict("dataset/test/images/" + img_list[30], threshold=0.8);
from IPython.display import Image
Image(filename='output.jpg')
gtf.predict("dataset/test/images/" + img_list[41], threshold=0.8);
from IPython.display import Image
Image(filename='output.jpg')
gtf.predict("dataset/test/images/" + img_list[51], threshold=0.8);
from IPython.display import Image
Image(filename='output.jpg')
gtf.predict("dataset/test/images/" + img_list[90], threshold=0.8);
from IPython.display import Image
Image(filename='output.jpg')
```
| github_jupyter |
<!--BOOK_INFORMATION-->
<img align="left" style="padding-right:10px;" src="figures/PDSH-cover-small.png">
*This notebook contains an excerpt from the [Python Data Science Handbook](http://shop.oreilly.com/product/0636920034919.do) by Jake VanderPlas; the content is available [on GitHub](https://github.com/jakevdp/PythonDataScienceHandbook).*
*The text is released under the [CC-BY-NC-ND license](https://creativecommons.org/licenses/by-nc-nd/3.0/us/legalcode), and code is released under the [MIT license](https://opensource.org/licenses/MIT). If you find this content useful, please consider supporting the work by [buying the book](http://shop.oreilly.com/product/0636920034919.do)!*
<!--NAVIGATION-->
< [IPython: Beyond Normal Python](01.00-IPython-Beyond-Normal-Python.ipynb) | [Contents](Index.ipynb) | [Keyboard Shortcuts in the IPython Shell](01.02-Shell-Keyboard-Shortcuts.ipynb) >
<a href="https://colab.research.google.com/github/jakevdp/PythonDataScienceHandbook/blob/master/notebooks/01.01-Help-And-Documentation.ipynb"><img align="left" src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab" title="Open and Execute in Google Colaboratory"></a>
# Help and Documentation in IPython
If you read no other section in this chapter, read this one: I find the tools discussed here to be the most transformative contributions of IPython to my daily workflow.
When a technologically-minded person is asked to help a friend, family member, or colleague with a computer problem, most of the time it's less a matter of knowing the answer as much as knowing how to quickly find an unknown answer.
In data science it's the same: searchable web resources such as online documentation, mailing-list threads, and StackOverflow answers contain a wealth of information, even (especially?) if it is a topic you've found yourself searching before.
Being an effective practitioner of data science is less about memorizing the tool or command you should use for every possible situation, and more about learning to effectively find the information you don't know, whether through a web search engine or another means.
One of the most useful functions of IPython/Jupyter is to shorten the gap between the user and the type of documentation and search that will help them do their work effectively.
While web searches still play a role in answering complicated questions, an amazing amount of information can be found through IPython alone.
Some examples of the questions IPython can help answer in a few keystrokes:
- How do I call this function? What arguments and options does it have?
- What does the source code of this Python object look like?
- What is in this package I imported? What attributes or methods does this object have?
Here we'll discuss IPython's tools to quickly access this information, namely the ``?`` character to explore documentation, the ``??`` characters to explore source code, and the Tab key for auto-completion.
## Accessing Documentation with ``?``
The Python language and its data science ecosystem is built with the user in mind, and one big part of that is access to documentation.
Every Python object contains the reference to a string, known as a *doc string*, which in most cases will contain a concise summary of the object and how to use it.
Python has a built-in ``help()`` function that can access this information and prints the results.
For example, to see the documentation of the built-in ``len`` function, you can do the following:
```ipython
In [1]: help(len)
Help on built-in function len in module builtins:
len(...)
len(object) -> integer
Return the number of items of a sequence or mapping.
```
Depending on your interpreter, this information may be displayed as inline text, or in some separate pop-up window.
Because finding help on an object is so common and useful, IPython introduces the ``?`` character as a shorthand for accessing this documentation and other relevant information:
```ipython
In [2]: len?
Type: builtin_function_or_method
String form: <built-in function len>
Namespace: Python builtin
Docstring:
len(object) -> integer
Return the number of items of a sequence or mapping.
```
This notation works for just about anything, including object methods:
```ipython
In [3]: L = [1, 2, 3]
In [4]: L.insert?
Type: builtin_function_or_method
String form: <built-in method insert of list object at 0x1024b8ea8>
Docstring: L.insert(index, object) -- insert object before index
```
or even objects themselves, with the documentation from their type:
```ipython
In [5]: L?
Type: list
String form: [1, 2, 3]
Length: 3
Docstring:
list() -> new empty list
list(iterable) -> new list initialized from iterable's items
```
Importantly, this will even work for functions or other objects you create yourself!
Here we'll define a small function with a docstring:
```ipython
In [6]: def square(a):
....: """Return the square of a."""
....: return a ** 2
....:
```
Note that to create a docstring for our function, we simply placed a string literal in the first line.
Because doc strings are usually multiple lines, by convention we used Python's triple-quote notation for multi-line strings.
Now we'll use the ``?`` mark to find this doc string:
```ipython
In [7]: square?
Type: function
String form: <function square at 0x103713cb0>
Definition: square(a)
Docstring: Return the square of a.
```
This quick access to documentation via docstrings is one reason you should get in the habit of always adding such inline documentation to the code you write!
## Accessing Source Code with ``??``
Because the Python language is so easily readable, another level of insight can usually be gained by reading the source code of the object you're curious about.
IPython provides a shortcut to the source code with the double question mark (``??``):
```ipython
In [8]: square??
Type: function
String form: <function square at 0x103713cb0>
Definition: square(a)
Source:
def square(a):
"Return the square of a"
return a ** 2
```
For simple functions like this, the double question-mark can give quick insight into the under-the-hood details.
If you play with this much, you'll notice that sometimes the ``??`` suffix doesn't display any source code: this is generally because the object in question is not implemented in Python, but in C or some other compiled extension language.
If this is the case, the ``??`` suffix gives the same output as the ``?`` suffix.
You'll find this particularly with many of Python's built-in objects and types, for example ``len`` from above:
```ipython
In [9]: len??
Type: builtin_function_or_method
String form: <built-in function len>
Namespace: Python builtin
Docstring:
len(object) -> integer
Return the number of items of a sequence or mapping.
```
Using ``?`` and/or ``??`` gives a powerful and quick interface for finding information about what any Python function or module does.
## Exploring Modules with Tab-Completion
IPython's other useful interface is the use of the tab key for auto-completion and exploration of the contents of objects, modules, and name-spaces.
In the examples that follow, we'll use ``<TAB>`` to indicate when the Tab key should be pressed.
### Tab-completion of object contents
Every Python object has various attributes and methods associated with it.
Like with the ``help`` function discussed before, Python has a built-in ``dir`` function that returns a list of these, but the tab-completion interface is much easier to use in practice.
To see a list of all available attributes of an object, you can type the name of the object followed by a period ("``.``") character and the Tab key:
```ipython
In [10]: L.<TAB>
L.append L.copy L.extend L.insert L.remove L.sort
L.clear L.count L.index L.pop L.reverse
```
To narrow-down the list, you can type the first character or several characters of the name, and the Tab key will find the matching attributes and methods:
```ipython
In [10]: L.c<TAB>
L.clear L.copy L.count
In [10]: L.co<TAB>
L.copy L.count
```
If there is only a single option, pressing the Tab key will complete the line for you.
For example, the following will instantly be replaced with ``L.count``:
```ipython
In [10]: L.cou<TAB>
```
Though Python has no strictly-enforced distinction between public/external attributes and private/internal attributes, by convention a preceding underscore is used to denote such methods.
For clarity, these private methods and special methods are omitted from the list by default, but it's possible to list them by explicitly typing the underscore:
```ipython
In [10]: L._<TAB>
L.__add__ L.__gt__ L.__reduce__
L.__class__ L.__hash__ L.__reduce_ex__
```
For brevity, we've only shown the first couple lines of the output.
Most of these are Python's special double-underscore methods (often nicknamed "dunder" methods).
### Tab completion when importing
Tab completion is also useful when importing objects from packages.
Here we'll use it to find all possible imports in the ``itertools`` package that start with ``co``:
```
In [10]: from itertools import co<TAB>
combinations compress
combinations_with_replacement count
```
Similarly, you can use tab-completion to see which imports are available on your system (this will change depending on which third-party scripts and modules are visible to your Python session):
```
In [10]: import <TAB>
Display all 399 possibilities? (y or n)
Crypto dis py_compile
Cython distutils pyclbr
... ... ...
difflib pwd zmq
In [10]: import h<TAB>
hashlib hmac http
heapq html husl
```
(Note that for brevity, I did not print here all 399 importable packages and modules on my system.)
### Beyond tab completion: wildcard matching
Tab completion is useful if you know the first few characters of the object or attribute you're looking for, but is little help if you'd like to match characters at the middle or end of the word.
For this use-case, IPython provides a means of wildcard matching for names using the ``*`` character.
For example, we can use this to list every object in the namespace that ends with ``Warning``:
```ipython
In [10]: *Warning?
BytesWarning RuntimeWarning
DeprecationWarning SyntaxWarning
FutureWarning UnicodeWarning
ImportWarning UserWarning
PendingDeprecationWarning Warning
ResourceWarning
```
Notice that the ``*`` character matches any string, including the empty string.
Similarly, suppose we are looking for a string method that contains the word ``find`` somewhere in its name.
We can search for it this way:
```ipython
In [10]: str.*find*?
str.find
str.rfind
```
I find this type of flexible wildcard search can be very useful for finding a particular command when getting to know a new package or reacquainting myself with a familiar one.
<!--NAVIGATION-->
< [IPython: Beyond Normal Python](01.00-IPython-Beyond-Normal-Python.ipynb) | [Contents](Index.ipynb) | [Keyboard Shortcuts in the IPython Shell](01.02-Shell-Keyboard-Shortcuts.ipynb) >
<a href="https://colab.research.google.com/github/jakevdp/PythonDataScienceHandbook/blob/master/notebooks/01.01-Help-And-Documentation.ipynb"><img align="left" src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab" title="Open and Execute in Google Colaboratory"></a>
| github_jupyter |
# Similarity Evaluation Analysis (SEA) Dataset C
```
# Import libraries for the similarity-evaluation analysis.
import warnings
warnings.filterwarnings("ignore")  # silence library warnings in notebook output
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import seaborn as sns
import os
print('Libraries imported!!')

# Define the directory holding the evaluation functions and remember the
# current working directory so we can return to it after importing.
HOME_PATH = ''  # home path of the project (empty: paths are relative to CWD)
FUNCTIONS_DIR = 'EVALUATION FUNCTIONS/PRIVACY'
ACTUAL_DIR = os.getcwd()

# Change directory to the functions directory so the module can be imported.
os.chdir(HOME_PATH + FUNCTIONS_DIR)

# Import the similarity-evaluation analysis functions.
from similarity_evaluation import scale_data
from similarity_evaluation import pairwise_euclidean_distance
from similarity_evaluation import hausdorff_distance
from similarity_evaluation import rts_similarity

# Change back to the original working directory.
os.chdir(ACTUAL_DIR)
print('Functions imported!!')
```
## 1. Read real and synthetic datasets
In this part real and synthetic datasets are read.
```
# Global configuration: dataset labels and the CSV file behind each label.
DATA_TYPES = ['Real','GM','SDV','CTGAN','WGANGP']
SYNTHESIZERS = ['GM','SDV','CTGAN','WGANGP']
FILEPATHS = {'Real' : HOME_PATH + 'REAL DATASETS/TRAIN DATASETS/C_Obesity_Data_Real_Train.csv',
             'GM' : HOME_PATH + 'SYNTHETIC DATASETS/GM/C_Obesity_Data_Synthetic_GM.csv',
             'SDV' : HOME_PATH + 'SYNTHETIC DATASETS/SDV/C_Obesity_Data_Synthetic_SDV.csv',
             'CTGAN' : HOME_PATH + 'SYNTHETIC DATASETS/CTGAN/C_Obesity_Data_Synthetic_CTGAN.csv',
             'WGANGP' : HOME_PATH + 'SYNTHETIC DATASETS/WGANGP/C_Obesity_Data_Synthetic_WGANGP.csv'}
categorical_columns = ['Gender','family_history_with_overweight','FAVC','CAEC','SMOKE','SCC','CALC','MTRANS','Obesity_level']

# Read every dataset and integer-encode its categorical columns via
# pandas category codes.
data = {}
for dataset_label, csv_path in FILEPATHS.items():
    frame = pd.read_csv(csv_path)
    for column in categorical_columns:
        frame[column] = frame[column].astype('category').cat.codes
    data[dataset_label] = frame
data
```
## 2. Normalize data
```
# Scale the numeric columns of each dataset to a common range.
num_cols = (data['Real'].select_dtypes(include=['int64','float64'])).columns
scaled_data = {label: scale_data(data[label][num_cols]) for label in DATA_TYPES}
scaled_data
```
## 3. Calculate the Euclidean distances between each pair of values
```
# Pairwise Euclidean distances between each synthetic dataset and the real one.
distances_values = {
    synth: pairwise_euclidean_distance(scaled_data[synth].values, scaled_data['Real'].values)
    for synth in SYNTHESIZERS
}
distances_values
```
## 4. Calculate the Hausdorff distance between synthetic data and real data
```
# Hausdorff distance between each synthetic dataset and the real one.
hausdorff_values = {
    synth: hausdorff_distance(scaled_data[synth].values, scaled_data['Real'].values)
    for synth in SYNTHESIZERS
}
hausdorff_values
```
## 5. Calculate maximum RTS similarity
```
# Maximum record-to-synthetic (RTS) similarity for each synthesizer.
str_values = {
    synth: rts_similarity(scaled_data[synth].values, scaled_data['Real'].values)
    for synth in SYNTHESIZERS
}
str_values
```
| github_jupyter |
# Pre-processing and training LDA
The purpose of this tutorial is to show you how to pre-process text data, and how to train the LDA model on that data. This tutorial will **not** explain you the LDA model, how inference is made in the LDA model, and it will not necessarily teach you how to use Gensim's implementation. There are plenty of resources for all of those things, but what is somewhat lacking is a hands-on tutorial that helps you train an LDA model with good results... so here is my contribution towards that.
I have used a corpus of NIPS papers in this tutorial, but if you're following this tutorial just to learn about LDA I encourage you to consider picking a corpus on a subject that you are familiar with. Qualitatively evaluating the output of an LDA model is challenging and can require you to understand the subject matter of your corpus (depending on your goal with the model).
I would also encourage you to consider each step when applying the model to your data, instead of just blindly applying my solution. The different steps will depend on your data and possibly your goal with the model.
In the following sections, we will go through pre-processing the data and training the model.
> **Note:**
>
> This tutorial uses the nltk library, although you can replace it with something else if you want. Python 3 is used, although Python 2.7 can be used as well.
In this tutorial we will:
* Load data.
* Pre-process data.
* Transform documents to a vectorized form.
* Train an LDA model.
If you are not familiar with the LDA model or how to use it in Gensim, I suggest you read up on that before continuing with this tutorial. Basic understanding of the LDA model should suffice. Examples:
* Gentle introduction to the LDA model: http://blog.echen.me/2011/08/22/introduction-to-latent-dirichlet-allocation/
* Gensim's LDA API documentation: https://radimrehurek.com/gensim/models/ldamodel.html
* Topic modelling in Gensim: http://radimrehurek.com/topic_modeling_tutorial/2%20-%20Topic%20Modeling.html
## Data
We will be using some papers from the NIPS (Neural Information Processing Systems) conference. NIPS is a machine learning conference so the subject matter should be well suited for most of the target audience of this tutorial.
You can download the data from Sam Roweis' website (http://www.cs.nyu.edu/~roweis/data.html).
Note that the corpus contains 1740 documents, and not particularly long ones. So keep in mind that this tutorial is not geared towards efficiency, and be careful before applying the code to a large dataset.
Below we are simply reading the data.
```
# Read data: load every NIPS paper as one plain-text string.
import os

# Folder containing all NIPS papers.
data_dir = 'nipstxt/'
# Folders containing individual NIPS papers (one sub-folder per conference year).
yrs = ['00', '01', '02', '03', '04', '05', '06', '07', '08', '09', '10', '11', '12']
dirs = ['nips' + yr for yr in yrs]

# Read all texts into a list, one entry per document.
docs = []
for yr_dir in dirs:
    files = os.listdir(data_dir + yr_dir)
    for filen in files:
        # Note: ignoring characters that cause encoding errors.
        with open(data_dir + yr_dir + '/' + filen, errors='ignore') as fid:
            txt = fid.read()
        docs.append(txt)
```
## Pre-process and vectorize the documents
Among other things, we will:
* Split the documents into tokens.
* Lemmatize the tokens.
* Compute bigrams.
* Compute a bag-of-words representation of the data.
First we tokenize the text using a regular expression tokenizer from NLTK. We remove numeric tokens and tokens that are only a single character, as they don't tend to be useful, and the dataset contains a lot of them.
```
# Tokenize the documents.
from nltk.tokenize import RegexpTokenizer

# One token per run of word characters; lowercase first so tokens are
# case-normalized.
tokenizer = RegexpTokenizer(r'\w+')
docs = [tokenizer.tokenize(text.lower()) for text in docs]

# Discard purely numeric tokens (words containing digits are kept) and
# single-character tokens, which carry little signal in this corpus.
docs = [[tok for tok in doc if not tok.isnumeric() and len(tok) > 1]
        for doc in docs]
```
We use the WordNet lemmatizer from NLTK. A lemmatizer is preferred over a stemmer in this case because it produces more readable words. Output that is easy to read is very desirable in topic modelling.
```
# Lemmatize the documents.
from nltk.stem.wordnet import WordNetLemmatizer

# Reduce every token to its WordNet lemma; lemmas stay readable words,
# which helps when inspecting topics later.
wnl = WordNetLemmatizer()
docs = [[wnl.lemmatize(tok) for tok in doc] for doc in docs]
```
We find bigrams in the documents. Bigrams are sets of two adjacent words. Using bigrams we can get phrases like "machine_learning" in our output (spaces are replaced with underscores); without bigrams we would only get "machine" and "learning".
Note that in the code below, we find bigrams and then add them to the original data, because we would like to keep the words "machine" and "learning" as well as the bigram "machine_learning".
Note that computing n-grams of a large dataset can be very computationally intensive and memory intensive.
```
# Compute bigrams.
from gensim.models import Phrases

# Detect bigrams that appear 20 times or more, then append each detected
# bigram (tokens containing '_') to its document so both the unigrams and
# the merged phrase are retained.
bigram = Phrases(docs, min_count=20)
for doc in docs:
    doc.extend(tok for tok in bigram[doc] if '_' in tok)
```
We remove rare words and common words based on their *document frequency*. Below we remove words that appear in less than 20 documents or in more than 50% of the documents. Consider trying to remove words only based on their frequency, or maybe combining that with this approach.
```
# Remove rare and common tokens.
from gensim.corpora import Dictionary

# Create a dictionary representation of the documents (token <-> id maps).
dictionary = Dictionary(docs)

# Filter out words that occur in fewer than 20 documents, or in more than
# 50% of the documents.
dictionary.filter_extremes(no_below=20, no_above=0.5)
```
Finally, we transform the documents to a vectorized form. We simply compute the frequency of each word, including the bigrams.
```
# Vectorize data: bag-of-words counts per document (bigrams included).
corpus = [dictionary.doc2bow(document) for document in docs]
```
Let's see how many tokens and documents we have to train on.
```
# Report the corpus dimensions we are about to train on.
print(f'Number of unique tokens: {len(dictionary)}')
print(f'Number of documents: {len(corpus)}')
```
## Training
We are ready to train the LDA model. We will first discuss how to set some of the training parameters.
First of all, the elephant in the room: how many topics do I need? There is really no easy answer for this, it will depend on both your data and your application. I have used 10 topics here because I wanted to have a few topics that I could interpret and "label", and because that turned out to give me reasonably good results. You might not need to interpret all your topics, so you could use a large number of topics, for example 100.
The `chunksize` controls how many documents are processed at a time in the training algorithm. Increasing chunksize will speed up training, at least as long as the chunk of documents easily fit into memory. I've set `chunksize = 2000`, which is more than the amount of documents, so I process all the data in one go. Chunksize can however influence the quality of the model, as discussed in Hoffman and co-authors [2], but the difference was not substantial in this case.
`passes` controls how often we train the model on the entire corpus. Another word for passes might be "epochs". `iterations` is somewhat technical, but essentially it controls how often we repeat a particular loop over each document. It is important to set the number of "passes" and "iterations" high enough.
I suggest the following way to choose iterations and passes. First, enable logging (as described in many Gensim tutorials), and set `eval_every = 1` in `LdaModel`. When training the model look for a line in the log that looks something like this:
2016-06-21 15:40:06,753 - gensim.models.ldamodel - DEBUG - 68/1566 documents converged within 400 iterations
If you set `passes = 20` you will see this line 20 times. Make sure that by the final passes, most of the documents have converged. So you want to choose both passes and iterations to be high enough for this to happen.
We set `alpha = 'auto'` and `eta = 'auto'`. Again this is somewhat technical, but essentially we are automatically learning two parameters in the model that we usually would have to specify explicitly.
```
# Train LDA model.
from gensim.models import LdaModel

# Set training parameters.
num_topics = 10    # number of topics to learn
chunksize = 2000   # documents per update chunk (> corpus size, so one chunk)
passes = 20        # full passes over the corpus ("epochs")
iterations = 400   # max per-document inference iterations
eval_every = None  # Don't evaluate model perplexity, takes too much time.

# Make an index-to-word dictionary.
temp = dictionary[0]  # This is only to "load" the dictionary.
id2word = dictionary.id2token

# alpha/eta='auto' lets gensim learn the Dirichlet priors from the data.
%time model = LdaModel(corpus=corpus, id2word=id2word, chunksize=chunksize, \
                       alpha='auto', eta='auto', \
                       iterations=iterations, num_topics=num_topics, \
                       passes=passes, eval_every=eval_every)
```
We can compute the topic coherence of each topic. Below we display the average topic coherence and print the topics in order of topic coherence.
Note that we use the "Umass" topic coherence measure here (see docs, https://radimrehurek.com/gensim/models/ldamodel.html#gensim.models.ldamodel.LdaModel.top_topics), Gensim has recently obtained an implementation of the "AKSW" topic coherence measure (see accompanying blog post, http://rare-technologies.com/what-is-topic-coherence/).
If you are familiar with the subject of the articles in this dataset, you can see that the topics below make a lot of sense. However, they are not without flaws. We can see that there is substantial overlap between some topics, others are hard to interpret, and most of them have at least some terms that seem out of place. If you were able to do better, feel free to share your methods on the blog at http://rare-technologies.com/lda-training-tips/ !
```
# Rank topics by UMass coherence and report the average coherence.
top_topics = model.top_topics(corpus, num_words=20)

# Average topic coherence is the sum of topic coherences of all topics,
# divided by the number of topics. A generator expression avoids building
# a throwaway intermediate list.
avg_topic_coherence = sum(t[1] for t in top_topics) / num_topics
print('Average topic coherence: %.4f.' % avg_topic_coherence)

from pprint import pprint
pprint(top_topics)
```
## Things to experiment with
* `no_above` and `no_below` parameters in `filter_extremes` method.
* Adding trigrams or even higher order n-grams.
* Consider whether using a hold-out set or cross-validation is the way to go for you.
* Try other datasets.
If you have other ideas, feel free to comment on http://rare-technologies.com/lda-training-tips/.
## Where to go from here
* Check out a RaRe blog post on the AKSW topic coherence measure (http://rare-technologies.com/what-is-topic-coherence/).
* pyLDAvis (https://pyldavis.readthedocs.io/en/latest/index.html).
* Read some more Gensim tutorials (https://github.com/RaRe-Technologies/gensim/blob/develop/tutorials.md#tutorials).
* If you haven't already, read [1] and [2] (see references).
## References
1. "Latent Dirichlet Allocation", Blei et al. 2003.
2. "Online Learning for Latent Dirichlet Allocation", Hoffman et al. 2010.
| github_jupyter |
```
from IPython.display import YouTubeVideo
```
# MSE 8900 / CHE 8450 - Multiscale Modeling
Rachel B. Getman, Sapna Sarupria, Ulf D. Schiller
Clemson University
## Lecture 3.6: Coupling Molecular Dynamics and Lattice Boltzmann
1. Hybrid LB-MD Coupling
2. Boundary Conditions
### References
1. Sauro Succi, _The Lattice Boltzmann Equation for Complex States of Flowing Matter_, Oxford University Press, 2018.
2. B. Dünweg and A. J. C. Ladd, Lattice Boltzmann Simulations of Soft Matter, _Adv. Polym. Sci._ **221**, 89, 2009.
### From Newton to Navier-Stokes

### Fluid-Particle Coupling
* bead-spring model
* bond potential (FENE)

* elastic surface with marker points
* force coupling at the boundary

### Molecular Dynamics (MD)
* evolution equation for phase space vector $\Gamma$
\begin{equation*}
\Gamma(t) = e^{i\mathcal{L} t} \Gamma(0)
\end{equation*}
* Liouville operator ($\vec{F}_i$ conservative forces)
\begin{equation*}
i \mathcal{L} = \sum_i \left( \frac{\vec{p}_i}{m_i} \cdot \frac{\partial}{\partial\vec{r}_i} + \vec{F}_i \cdot \frac{\partial}{\partial\vec{p}_i}\right) = i\mathcal{L}_{\vec{r}} + i\mathcal{L}_{\vec{p}}
\end{equation*}
* updates for positions and momenta
\begin{align*}
e^{ i\mathcal{L}_{\vec{r}} \Delta t } \vec{r}_i(t) &= \vec{r}_i(t) + \frac{\Delta t}{m_i} \vec{p}_i &
e^{ i\mathcal{L}_{\vec{p}} \Delta t } \vec{p}_i(t) &= \vec{p}_i(t) + \Delta t \vec{F}_i
\end{align*}
* Trotter formula
\begin{equation*}
e^{ i ( \mathcal{L}_1 + \mathcal{L}_2 ) t } = \lim_{n\rightarrow\infty} \left[ e^{ \frac{i\mathcal{L}_2 t}{2n} } e^{ \frac{i \mathcal{L}_1 t}{n} } e^{ \frac{i\mathcal{L}_2 t}{2n} } \right]^n
\end{equation*}
* Verlet splitting
\begin{equation*}
e^{ i(\mathcal{L}_1 + \mathcal{L}_2) \Delta t } = e^{ i\mathcal{L}_2 \frac{\Delta t}{2} } e^{ i\mathcal{L}_1 \Delta t} e^{ i\mathcal{L}_2 \frac{\Delta t}{2}} + \mathcal{O}(\Delta t^3)
\end{equation*}
### Mapping between particles and lattice Boltzmann

* interpolation scheme for velocity
\begin{equation*}\label{eq:IBM-interpolation}
\vec{u}(\vec{R}_i,t) = \mathcal{I}_a[\vec{R}_i(t)] \, \vec{u} (\vec{x},t) = \sum_{\vec{x}} \vec{u}(\vec{x},t) \delta_a(\vec{R}_i(t)-\vec{x})
\end{equation*}
* spreading of momentum transfer
\begin{equation*}\label{eq:IBM-spreading}
\vec{F}(\vec{x},t) = \mathcal{I}_a^*[\vec{R}_i(t)] \, \vec{F}_i (t) = a^{-3} \sum_{i} \vec{F}_i(t) \delta_a(\vec{R}_i(t)-\vec{x})
\end{equation*}
\begin{equation*}
-\frac{\Delta t}{a^3}\vec{F} = {\Delta\vec{j}} = \frac{\mu}{a^2h} \sum_{\vec{x}\in\text{Cell}}\sum_i \Delta f_i(\vec{x},t)\vec{c}_i
\end{equation*}
### Spatial interpolation functions
* three-dimensional discrete $\delta$ function
\begin{align*}
\sum_{\vec{x}} \delta_a(\vec{x} - \vec{R}) &= 1 , \\ %\quad \forall \vec{R} \\
\sum_{\vec{x}} \vec{x} \, \delta_a(\vec{x} - \vec{R}) &= \vec{R} ,%\quad \forall \vec{R} \\
\end{align*}
* force and torque conservation
\begin{align*}
\sum_{\vec{x}} a^3 \vec{F}(\vec{x},t)
&= \sum_{\vec{x}} \sum_i \vec{F}_i(t) \delta_a(\vec{x} - \vec{R}_i)
= \sum_i \vec{F}_i(t) , \\
\sum_{\vec{x}} \vec{x} \times a^3 \vec{F}(\vec{x},t)
&= \sum_{\vec{x}} \sum_i \vec{x} \times \vec{F}_i(t) \delta_a(\vec{x} - \vec{R}_i)
= \sum_i \vec{R}_i \times \vec{F}_i(t) .
\end{align*}
### Spatial interpolation functions
* three-dimensional $\delta$ product of one-dimensional functions
\begin{equation*}
\delta_a(\vec{x}) = \phi(\frac{x}{a}) \phi(\frac{y}{a}) \phi(\frac{z}{a}) ,
\end{equation*}
* three-point interpolation gives smooth $\nabla\vec{u}$
\begin{equation*}
\phi_3(r) =
\begin{cases}
\frac{1}{3} \left( 1 + \sqrt{1 - 3r^2} \right) & 0 \le |r| \le \frac12 \\
\frac{1}{6} \left( 5 - 3|r| - \sqrt{6 |r| - 2 - 3r^2} \right) & \frac12 \le |r| \le \frac32 \\
0 & \frac32 \le |r| .
\end{cases}
\end{equation*}
* compatible with halo thickness of $1$
### Viscous coupling of particles and fluid

* Idea: treat monomers as point particles and apply Stokesian drag
\begin{equation*}
\vec{F}=-\zeta_\text{bare} \left[\vec{V}-\vec{u}(\vec{R},t)\right] + \vec{F}_\text{stoch}
\end{equation*}
* ensure momentum conservation by transferring momentum to the fluid
* add stochastic force to fulfill fluctuation-dissipation relation
### Why do all parts need to be thermalized?
* Equations of motion are stochastic differential equations
* Fokker-Planck formalism
* Kramers-Moyal expansion
\begin{eqnarray*}
%
\text{Particle, conservative}: \quad \mathcal{L}_1 &=& -\sum_i \left( \frac{\partial}{\partial\vec{r}_i}\cdot\frac{\vec{p}_i}{m_i} + \frac{\partial}{\partial \vec{p}_i}\cdot \vec{F}_i \right) \\
%
\text{Particle, Langevin}: \quad \mathcal{L}_2 &=& \sum_i \frac{\Gamma_i}{m_i}\frac{\partial}{\partial \vec{p}_i} \vec{p}_i \\
%
\text{Particle, stochastic}: \quad \mathcal{L}_3 &=& k_BT \sum_i \Gamma_i \frac{\partial^2}{\partial\vec{p}_i^2}
%
\end{eqnarray*}
* Fluctuation-Dissipation relation
$$\left(\sum_i \mathcal{L}_i\right) \exp(-\beta \mathcal{H}) = 0$$
### Why do all parts need to be thermalized?
\begin{eqnarray*}
%
\text{Particle, conservative}: \quad \mathcal{L}_1 &=& -\sum_i \left( \frac{\partial}{\partial\vec{r}_i}\cdot\frac{\vec{p}_i}{m_i} + \frac{\partial}{\partial \vec{p}_i}\cdot \vec{F}_i \right) \\
%
\text{Fluid, conservative:} \quad \mathcal{L}_4 &=& \int d\vec{r} \left( \frac{\delta}{\delta\rho} \partial_\alpha j_\alpha + \frac{\delta}{\delta j_\alpha} \partial_\beta \Pi^\text{eq}_{\alpha\beta} \right) \\
%
\text{Fluid, viscous:} \quad \mathcal{L}_5 &=& \eta_{\alpha\beta\gamma\delta} \int d\vec{r} \frac{\delta}{\delta j_\alpha} \partial_\beta \partial_\gamma u_\delta \\
%
\text{Fluid, stochastic:} \quad \mathcal{L}_6 &=& k_BT\eta_{\alpha\beta\gamma\delta} \int d\vec{r} \frac{\delta}{\delta j_\alpha} \partial_\beta \partial_\gamma \frac{\delta}{\delta j_\delta}\\
%
\text{Particle, coupling:} \quad \mathcal{L}_7 &=& -\sum_i \zeta_i \frac{\partial}{\partial p_{i\alpha}} u_{i\alpha} \\
%
\text{Fluid, coupling:} \quad \mathcal{L}_8 &=& -\sum_i \zeta_i \int d\vec{r} \Delta(\vec{r},\vec{r}_i) \frac{\delta}{\delta j_\alpha(\vec{r})} \left( \frac{p_{i\alpha}}{m_i} - u_{i\alpha} \right) \\
%
\text{Fluid, stochastic:} \quad \mathcal{L}_9 &=& k_BT \sum_i \zeta_i \int d\vec{r} \Delta(\vec{r},\vec{r}_i) \frac{\delta}{\delta j_\alpha(\vec{r})} \int d\vec{r}' \Delta(\vec{r}',\vec{r}_i) \frac{\delta}{\delta j_\alpha(\vec{r}')} \\
%
\text{Particle, stochastic:} \quad \mathcal{L}_{10} &=& -2 k_BT \sum_i \zeta_i \frac{\partial}{\partial p_{i\alpha}} \int d\vec{r} \Delta(\vec{r},\vec{r}_i) \frac{\delta}{\delta j_\alpha(\vec{r})}
%
\end{eqnarray*}
### Coupled equations of motion
* all force based coupling methods can be unified
\begin{align*}
\frac{\partial}{\partial t} \vec{v}_i(t) &= - \frac{1}{m_i} \left[ \zeta \left( \vec{v}_i - \vec{u}(\vec{R}_i,t) \right) - \xi_i - (1-r) \vec{F}_i^\text{int} \right] \\
\frac{\partial}{\partial t} \vec{u}(\vec{R}_i,t) &= \frac{1}{\rho a^3} \left[ \zeta \left( \vec{v}_i - \vec{u}(\vec{R}_i,t) \right) - \xi_i + r \vec{F}_i^\text{int} \right]
\end{align*}
* second-order accurate force scheme $\alpha = \frac{h \zeta}{m_i}$, $\beta = \frac{h \zeta}{\rho a^3}$
\begin{align*}
\vec{v}_i(t+h) &= \vec{v}_i(t) -
\frac{\alpha}{1 + \frac{\alpha}{2}+\frac{\beta}{2}}
\left[ \vec{v}_i(t) - \vec{u}(\vec{R}_i,t) - \frac{1}{\zeta} \xi_i
- \frac{1-r+\frac{\beta}{2}}{\zeta} \vec{F}_i^\text{int} \right]
\\
\vec{u}(\vec{R}_i,t+h) &= \vec{u}(\vec{R}_i,t) +
\frac{\beta}{1 + \frac{\alpha}{2}+\frac{\beta}{2}}
\left[ \vec{v}_i(t) - \vec{u}(\vec{R}_i,t) - \frac{1}{\zeta} \xi_i
+ \frac{r + \frac{\alpha}{2}}{\zeta} \vec{F}_i^\text{int} \right]
\end{align*}
[UDS, Comp. Phys. Comm. 185, 2586-2597 (2014)]
### Unification of forcing schemes
* no-slip boundary condition can be satisfied by the choice
\begin{align*}
\zeta &= \frac{\rho a^3}{h} \frac{2}{1 + \frac{\rho a^3}{m_i}}
= \frac{m_i}{h} \frac{2}{1 + \frac{m_i}{\rho a^3}}
%\rightarrow
%\begin{cases}
%2 \frac{\rho a^3}{h} & \text{for } m_i \gg \rho a^3 \\
%2 \frac{m_i}{h} & \text{for } m_i \ll \rho a^3
%\end{cases}
&r &= \frac{1}{1 + \frac{m_i}{\rho a^3}}
\end{align*}
* $r$ is controlled by ratio of the particle mass $m_i$ and the fluid
mass $\rho a^3$ per unit cell of the lattice
\begin{equation*}
r = \frac{1}{1 + \frac{m_i}{\rho a^3}}
\end{equation*}
* $r$ can be called "immersion number"
\begin{equation*}
\begin{cases}
m_i \gg \rho a^3 \Rightarrow r \rightarrow 0:
& \text{external boundary force (EBF)}\\
m_i \ll \rho a^3 \Rightarrow r \rightarrow 1:
& \text{immersed boundary method (IBM)}
\end{cases}
\end{equation*}
[UDS, Comp. Phys. Comm. 185, 2586-2597 (2014)]
### "Bare" vs. effective friction constant
* the input friction $\zeta_\text{bare}$ is not the real friction
* $D_0 > k_B T / \zeta_\text{bare}$ (due to long time tail)
\begin{align*}
\vec{V} &= \frac{1}{\zeta_\text{bare}} \vec{F} + \vec{u}_{av} \\
\vec{u} &\approx \frac{1}{8 \pi \eta r} \left(
{\mathsf{I}} + \hat{r} \otimes \hat{r} \right) \vec{F} \\
\vec{u}_{av} &= \frac{1}{g \eta a} \vec{F}
\end{align*}
\begin{equation*}
\frac{1}{\zeta_\text{eff}} = \frac{1}{\zeta_\text{bare}}
+ \frac{1}{g \eta a}
\end{equation*}
* Stokes contribution from interpolation with range $a$
* this _regularizes_ the theory (no point particles in nature!)
* $\zeta_\text{bare}$ has no physical meaning!
### Finite size effects
Study diffusion / sedimentation of a single object

* $L = \infty$: $u(r) \sim 1/r$
* $F \sim \eta R v = \eta R^2 (v/R)$
* area $R^2$, shear gradient $v/R$

* $L < \infty$: homogeneous background force (no net acceleration)
* backflow due to momentum conservation
* additional shear gradient $v/L$
* additional force $\eta R^2 (v/L) = \eta R v (R/L)$
* _finite size effect_ $\sim R/L$
### Polymer chain in solution

* bead-spring model
* bond potential (FENE)
$$V_\text{FENE} = - \frac{1}{2} k_\text{FENE} R_0^2 \ln \left[ 1 - \left(
\frac{r}{R_0} \right)^2 \right]$$
* excluded volume (LJ/WCA)
$$V_{\text{LJ}} = 4 \epsilon \left[ \left( \frac{\sigma}{r} \right)^{12} - \left(
\frac{\sigma}{r} \right)^6 + \frac{1}{4} \right], \quad r\leq2^{\frac{1}{6}}\sigma$$
### BD vs. LB for single polymer chain
#### Rouse modes
\begin{equation*}
\vec{X}_p=\frac{1}{N}\sum_{i=1}^{N} \vec{r}_i \cos \left[ \frac{p \pi}{N} \left( i - \frac{1}{2} \right) \right]
\end{equation*}

#### Dynamic structure factor
\begin{equation*}
S(k,t) = \frac{1}{N} \sum_{i,j} \exp \left[ i \vec{k} \cdot \left( \vec{r}_i(t) - \vec{r}_j(0) \right) \right]
\end{equation*}

[T. Pham, UDS, J. R. Prakash, B. Duenweg, J. Chem. Phys. 131, 16114 (2009)]
### Finite size scaling
#### Center-of-mass diffusion

#### First Rouse mode $X_1(t)$

[T. Pham, UDS, J. R. Prakash, B. Duenweg, J. Chem. Phys. 131, 16114 (2009)]
### Scaling of the dynamic structure factor
* best data collapse for $z \approx 2.75$
* close to Zimm scaling



### Lattice representation of rigid objects

* determine the points where the surface of the rigid object intersects the lattice links
* surface markers
> "Accounting for these constraints may be trivial under idealized
conditions [...] but generally speaking, it constitutes a very
delicate (and sometimes nerve-probing!) task."
Sauro Succi
### Boundary conditions

* these rules are simple to implement
* but they are **only correct to first order**
* the boundary location is **always midway in between nodes**
### Interpolation boundary conditions

\begin{equation*}
\begin{aligned}
f_{i^-}(\vec{R}_B,t+h) &= 2q f_i^*(\vec{R}_B,t) + (1-2q) f_i^*(\vec{R}_B-h\vec{c}_i,t) , &\qquad& q < \frac{1}{2} , \\
f_{i^-}(\vec{R}_B,t+h) &= \frac{1}{2q} f_i^*(\vec{R}_B,t) + \frac{2q-1}{2q} f_{i^-}^*(\vec{R}_B,t) , &&q \geq \frac{1}{2} .
\end{aligned}
\end{equation*}
[Bouzidi et al., Phys. Fluids 13, 3452 (2001)]
### Multi-reflection boundary conditions

\begin{equation*}
\begin{split}
f_{i^-}(\vec{R}_B,t+h) &= f_i^*(\vec{R}_B,t)
- \frac{1-2q-2q^2}{(1+q)^2} f_{i^-}^*(\vec{R}_B,t)
+ \frac{1-2q-2q^2}{(1+q)^2} f_i^*(\vec{R}-h\vec{c}_i,t)\\
&\qquad - \frac{q^2}{(1+q)^2} f_{i^-}^*(\vec{R}-h\vec{c}_i,t)
+ \frac{q^2}{(1+q)^2} f_i^*(\vec{R}-2h\vec{c}_i,t) .
\end{split}
\end{equation*}
* match Taylor expansion at the boundary with Chapman-Enskog result
* yields a condition for the relaxation rate of the kinetic modes
$$\lambda_g(\lambda_s)=-8\frac{2+\lambda}{8+\lambda}$$
* sometimes called "magic" although there is no magic involved
[Ginzburg and d'Humieres, Phys. Rev. E 68, 066614 (2003)]
### Equilibrium interpolation
* interpolation for equilibrium
* bounce-back for non-equilibrium
* non-equilibrium enters Chapman-Enskog one order later than equilibrium
* still second order accurate!
\begin{equation*}
\begin{aligned}
f_{i^-}^\text{eq}(\vec{R}_B,t+h) &= 2q f_i^\text{eq}(\vec{R}_B,t) + (1-2q) f_i^\text{eq}(\vec{R}_B-h\vec{c}_i,t) \qquad && q<\frac{1}{2} \\
f_{i^-}^\text{eq}(\vec{R}_B,t+h) &= \frac{1-q}{q} f_i^\text{eq}(\vec{R},t) + \frac{2q-1}{q} f_i^\text{eq}(\vec{R}_B+qh\vec{c}_i) && q \geq \frac{1}{2} \\
f_{i^-}^\text{nq}(\vec{R}_B,t+h) &= f_i^\text{nq}(\vec{R}_B,t)
\end{aligned}
\end{equation*}
[Chun and Ladd, Phys. Rev. E 75, 066705 (2007)]
### LBM Summary
* Lattice Boltzmann: lattice kinetic approach to hydrodynamics
* Solid theoretical underpinning
* Coupling Molecular Dynamics and Lattice Boltzmann
* Unification of force coupling schemes
* Applications: polymers, cells, porous media
* consistent thermal fluctuations
* beyond Navier-Stokes: possible but can get complicated
* challenges: non-ideal fluids, multi-phase fluids, thermal flows
### Closing remarks
> "But, as with education in general, simulation must be kept honest,
because seeing is believing, and animated displays can be very
convincing irrespective of their veracity."
D. C. Rapaport, The Art of Molecular Dynamics Simulation
* A bug in the program is always more likely than discovery of new physics.
* Get the right answers for the right reasons!
### Hands-On Activity
1. Implement a coupling between your MD code and the LB code available at https://gist.github.com/uschille/8f65dd40572b2d943409.
2. Simulate a tagged particle in an LB fluid and observe the behavior of the displacement and velocity.
3. Move the particle with a constant velocity through the fluid and measure the drag force on the particle. Can you validate the Stokes-Einstein relation?
4. Choose a few positions on the lattice and repeat the measurement of the drag force when the particle is kept fixed. Does the drag force depend on the chosen position?
### References
1. B. Dünweg, and A. J. C. Ladd. Lattice Boltzmann Simulations of Soft Matter Systems. _Adv. Poly. Sci._ **221**, 89 (2008)
2. C. Aidun and J. Clausen. Lattice-Boltzmann Method for Complex Flows. _Annu. Rev. Fluid Mech._ **42**, 439-472 (2010)
3. UDS, Timm Krüger, and Oliver Henrich. Mesoscopic Modelling and Simulation of Soft Matter. _Soft Matter_ (2017)
4. UDS and O. Kuksenok. Lattice-Boltzmann Modeling of Multicomponent Systems: An Introduction. _Rev. Comput. Chem._ (2017)
5. S. Succi. _The Lattice Boltzmann Equation for Complex States of Flowing Matter_. Oxford University Press (2018)
| github_jupyter |
```
# Reload local modules automatically when they change on disk.
%load_ext autoreload
%autoreload 2

# Standard scientific stack.
import pickle
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy import signal
from scipy.optimize import minimize_scalar, minimize
from time import time
import seaborn as sns
import cvxpy as cxv  # NOTE(review): duplicate of the `cvx` import below; the `cxv` alias looks unused

# Plot styling.
sns.set_style('darkgrid')
sns.set_context('notebook')

# Make the parent directory importable so the local `osd` package resolves.
import sys
sys.path.append('..')
from osd import Problem
from osd.components import GaussNoise, SmoothFirstDifference, SparseFirstDiffConvex, Boolean, MarkovChain, SmoothSecondDifference
from osd.utilities import progress
import cvxpy as cvx
from admm_helpers import markov_process_simulator, run_admm
def proj_l2_d0(data, theta=1, c=1):
    """Project `data` onto the ball {y : theta * ||y||_2^2 <= c}.

    Solves min ||data - y||_2^2 subject to the scaled sum-of-squares
    constraint, using cvxpy with the MOSEK solver.
    """
    estimate = cvx.Variable(len(data))
    objective = cvx.Minimize(cvx.sum_squares(data - estimate))
    energy_bound = [theta * cvx.sum_squares(estimate) <= c]
    cvx.Problem(objective, energy_bound).solve(solver='MOSEK')
    return estimate.value
def proj_l1_d0(data, theta=1, c=1):
    """Project `data` onto the l1-ball {y : theta * ||y||_1 <= c}.

    NOTE(review): the original docstring said "Sum of squares", but the
    constraint below is on the sum of absolute values (l1 norm); the
    *objective* is the squared-error distance to `data`.  Solved with
    cvxpy + MOSEK.
    """
    x = data
    y = cvx.Variable(len(x))
    cost = cvx.sum_squares(x - y)
    objective = cvx.Minimize(cost)
    constraints = [theta * cvx.sum(cvx.abs(y)) <= c]
    problem = cvx.Problem(objective, constraints)
    problem.solve(solver='MOSEK')
    return y.value
def proj_l1_d1(data, theta=1, c=1):
    """Project `data` onto {y : theta * ||D1 y||_1 <= c}, where D1 is the
    first-difference operator (total-variation-style constraint).

    Minimizes the squared distance to `data`; solved with cvxpy + MOSEK.
    """
    x = data
    y = cvx.Variable(len(x))
    cost = cvx.sum_squares(x - y)
    objective = cvx.Minimize(cost)
    constraints = [theta * cvx.sum(cvx.abs(cvx.diff(y, k=1))) <= c]
    problem = cvx.Problem(objective, constraints)
    problem.solve(solver='MOSEK')
    return y.value
def proj_l1_d2(data, theta=1, c=1):
    """Project `data` onto {y : theta * ||D2 y||_1 <= c}, where D2 is the
    second-difference operator (l1 trend-filtering-style constraint).

    Minimizes the squared distance to `data`; solved with cvxpy + MOSEK.
    """
    x = data
    y = cvx.Variable(len(x))
    cost = cvx.sum_squares(x - y)
    objective = cvx.Minimize(cost)
    constraints = [theta * cvx.sum(cvx.abs(cvx.diff(y, k=2))) <= c]
    problem = cvx.Problem(objective, constraints)
    problem.solve(solver='MOSEK')
    return y.value
def proj_l2_d2(data, theta=1, c=1):
    """Project `data` onto {y : theta * ||D2 y||_2^2 <= c}, where D2 is the
    second-difference operator (Hodrick-Prescott-style smoothness bound).

    Minimizes the squared distance to `data`; solved with cvxpy + MOSEK.
    """
    x = data
    y = cvx.Variable(len(x))
    cost = cvx.sum_squares(x - y)
    objective = cvx.Minimize(cost)
    constraints = [theta * cvx.sum_squares(cvx.diff(y, k=2)) <= c]
    problem = cvx.Problem(objective, constraints)
    problem.solve(solver='MOSEK')
    return y.value
def proj_l2_d1(data, theta=1, c=1):
    """Project `data` onto {y : theta * ||D1 y||_2^2 <= c}, where D1 is
    the first-difference operator (bounded roughness energy).

    Minimizes the squared distance to `data`; solved with cvxpy + MOSEK.
    """
    estimate = cvx.Variable(len(data))
    objective = cvx.Minimize(cvx.sum_squares(data - estimate))
    roughness = cvx.sum_squares(cvx.diff(estimate, k=1))
    cvx.Problem(objective, [theta * roughness <= c]).solve(solver='MOSEK')
    return estimate.value
def make_data(length, points=None, shifts=None):
    """Build a length-`length` piecewise-constant (step) signal.

    `points` lists the breakpoint indices, including 0 and `length`;
    `shifts` gives the constant level on each of the resulting segments
    (so len(points) == len(shifts) + 1).  The defaults reproduce a
    four-segment test signal.
    """
    if points is None:
        points = [0, int(length * 0.2), int(length * 0.55), int(length * 0.85), length]
    if shifts is None:
        shifts = [0, .5, -0.75, .2]
    signal = np.zeros(length)
    # Fill each [start, stop) segment with its level.
    for level, start, stop in zip(shifts, points, points[1:]):
        signal[start:stop] = level
    return signal
# --- Synthetic test signal: iid noise + smooth component -------------------
np.random.seed(4)
T = 200
X_real = np.zeros((2, T))
# Component 1: iid Gaussian noise with standard deviation 0.15.
X_real[0] = 0.15 * np.random.randn(T)
# Component 2: smooth curve obtained by projecting white noise onto a
# bounded first-difference-energy set, scaled up and centered to zero mean.
X_real[1] = 5 * proj_l2_d1(np.random.randn(T), theta=3e2)
X_real[1] -= np.average(X_real[1])
# X_real[2] = markov_process_simulator([[0.9, 0.1], [0.1, 0.9]], T=T, plot=False)
# The observed signal is the sum of the components.
y = np.sum(X_real, axis=0)
# Hold out a random 10% of samples as the validation (missing-data) set.
np.random.seed(25)
remove_ix = np.random.choice(np.arange(len(y)), int(len(y) * 0.1), replace=False)
remove_ix.sort()
use_ix = np.ones_like(y, dtype=bool)
use_ix[remove_ix] = False
fig, ax = plt.subplots(nrows=3, sharex=True, figsize=(14, 7))
ax[0].set_title('Smooth component')
ax[0].plot(X_real[1])
# ax[1].set_title('boolean component')
# ax[1].plot(X_real[2])
ax[2].set_title('Observed signal')
ax[2].plot(np.arange(T)[use_ix], y[use_ix], linewidth=1, marker='.')
ax[2].plot(np.arange(T)[~use_ix], y[~use_ix], linewidth=1, marker='x', color='red', ls='none', label='missing data')
ax[2].legend()
# ax[2].plot(signal1 + signal2, label='true signal minus noise', ls='--')
plt.tight_layout()
plt.show()
np.average(np.abs(np.random.randn(1000) * .15))
K = len(components)
fig, ax = plt.subplots(nrows=K+1, sharex=True, figsize=(5.5,2.9))
for k in range(K+1):
if k <= K-1:
ax[k].plot(X_real[k], linewidth=0.75)
ax[k].set_title('component {}'.format(k+1))
else:
ax[k].plot(np.arange(T)[use_ix], y[use_ix], linewidth=1, marker='.', ms=2)
ax[k].plot(np.arange(T)[~use_ix], y[~use_ix], linewidth=1, marker='x', ms=3,
color='red', ls='none', label='missing data', alpha=0.6)
ax[k].set_title('signal, $y$')
ax[k].legend(loc=(1.01, 0.2))
ax[1].set_ylim(-1.25, 1.25)
ax[3].set_ylim(-2, 2)
plt.tight_layout(pad=0.05)
# fig.savefig('/Users/bennetmeyers/Documents/Boyd-work/OSD-presentations/April2021/figs/example-components.pgf')
plt.show()
lmbdas = np.logspace(-1, 1, 10)
lambda2 = 10
c1 = GaussNoise()
c2 = SmoothFirstDifference(theta=lambda2)
components = [c1, c2]
problem = Problem(y, components)
problem.decompose(solver='MOSEK')
problem.decompose(admm=True, rho=1, num_iter=100, stop_early=False)#, use_set=use_ix)
sos = np.sum(np.power((y - np.sum(problem.estimates, axis=0))[~use_ix], 2))
sae = np.sum(np.abs((y - np.sum(problem.estimates, axis=0))[~use_ix]))
print('sos: {:.2f}, sae: {:.2f}'.format(sos, sae))
import matplotlib
sns.set_context('paper')
matplotlib.use("pgf")
matplotlib.rcParams.update({
"pgf.texsystem": "pdflatex",
'font.family': 'serif',
'text.usetex': True,
'pgf.rcfonts': False,
})
plt.figure(figsize=(3.75,2.75))
plt.plot((problem.admm_result['obj_vals']), linewidth=0.75)
plt.axhline((problem.admm_result['best_obj']), color='red', ls='--', label='min obj val', linewidth=0.75)
# plt.yscale('log')
plt.xlabel('iteration')
plt.ylabel('obj val')
# plt.title('ADMM convergence,\nconvex problem')
plt.legend()
fig = plt.gcf()
fig.tight_layout()
fig.savefig('/Users/bennetmeyers/Documents/Boyd-work/OSD-presentations/April2021/figs/converge-convex.pgf')
# ylim = (-0.65, 1.2)
plt.plot((y - np.sum(problem.estimates, axis=0)))
plt.plot(np.arange(len(y))[~use_ix], (y - np.sum(problem.estimates, axis=0))[~use_ix], color='red', marker='.', ls='none')
plt.title('holdout errors')
# plt.ylim(*ylim)
# import matplotlib
# sns.set_context('paper')
# matplotlib.use("pgf")
# matplotlib.rcParams.update({
# "pgf.texsystem": "pdflatex",
# 'font.family': 'serif',
# 'text.usetex': True,
# 'pgf.rcfonts': False,
# })
K = len(components)
fs = np.array([5.5,2.9])
fig, ax = plt.subplots(nrows=K, sharex=True, figsize=2*fs)
for k in range(K):
if k > 0:
true = X_real[k]
est = problem.estimates[k]
ax[k].plot(true, label='true', linewidth=0.75, alpha=0.5)
ax[k].plot(est, label='estimated', linewidth=0.75, alpha=0.5)
ax[k].set_title('Component {}'.format(k+1))
else:
ax[k].plot(
np.arange(T)[use_ix],
np.sum(X_real, axis=0)[use_ix],
label='observed', linewidth=0.5, marker='.', color='green', ms=1, alpha=0.5
)
ax[k].plot(
np.arange(T)[~use_ix],
np.sum(X_real, axis=0)[~use_ix],
label='missing', marker='x', color='red', ls='none', alpha=0.5
)
ax[k].plot(np.sum(X_real[1:], axis=0), label='true', linewidth=0.75, alpha=0.5)
ax[k].plot(np.sum(problem.estimates[1:], axis=0), label='estimated', linewidth=0.75, alpha=0.5)
ax[k].set_title('Composed Signal')
ax[k].legend(loc=[1.01, 0.1])
plt.tight_layout(pad=0.05)
# fig.savefig('/Users/bennetmeyers/Documents/Boyd-work/OSD-presentations/April2021/figs/simple-example.pgf')
~np.all(np.isclose(np.diff(problem.estimates[-1]), 0))
K = len(components)
fig, ax = plt.subplots(nrows=K+1, sharex=True, figsize=(5.5,2.9))
for k in range(K+1):
if k <= K-1:
ax[k].plot(X_real[k], linewidth=0.75, label='true', alpha=0.75)
ax[k].plot(problem.estimates[k], label='estimate', linewidth=0.75, alpha=0.75)
ax[k].set_title('component {}'.format(k+1))
ax[k].legend(loc=[1.01, 0.2])
else:
ax[k].plot(
np.arange(T)[use_ix],
np.sum(X_real, axis=0)[use_ix],
label='observed', linewidth=0.5, marker='.', color='green', ms=1.5, alpha=0.5
)
ax[k].plot(
np.arange(T)[~use_ix],
np.sum(X_real, axis=0)[~use_ix],
label='missing', marker='x', color='red', ls='none', alpha=0.5, ms=3
)
ax[k].plot(np.sum(X_real[1:], axis=0), label='true', linewidth=0.75, alpha=0.75)
ax[k].plot(problem.estimates[1] + problem.estimates[2], label='estimate',
linewidth=0.75, alpha=0.75)
ax[k].set_title('composed signal')
ax[k].legend(loc=[1.01, 0.0])
plt.tight_layout(pad=0.05)
# fig.savefig('/Users/bennetmeyers/Documents/Boyd-work/OSD-presentations/April2021/figs/simple-example-bad.pgf')
```
Using a single test set (20% reserved), with a wide search range on a fine grid. Set up to run overnight.
```
# l2s = np.logspace(-0.5,3.5,60)
# l3s = np.logspace(-1.5,1.5,60)
# errors = np.zeros((len(l3s), len(l2s)))
# count_switches = np.zeros((len(l3s), len(l2s)))
# smoothness = np.zeros((len(l3s), len(l2s)))
# counter = 0
# for j, l2 in enumerate(l2s):
# for i, l3 in enumerate(l3s):
# progress(counter, errors.size)
# c1 = GaussNoise()
# c2 = SmoothFirstDifference(theta=l2)
# p = 0.25
# c3 = MarkovChain([[1-p, p], [p, 1-p]], theta=l3)
# components = [c1, c2, c3]
# problem = Problem(y, components)
# problem.decompose(admm=True, rho=1, num_iter=100, use_set=use_ix, verbose=False)
# error = np.sum(np.power((y - np.sum(problem.estimates, axis=0))[~use_ix], 2))
# errors[i, j] = error
# smoothness[i, j] = np.sum(np.power(np.diff(problem.estimates[1]), 2))
# count_switches[i, j] = np.sum(~np.isclose(np.diff(problem.estimates[-1]), 0))
# counter += 1
# progress(counter, errors.size)
# run1 = {
# 'l2': np.copy(l2s),
# 'l3': np.copy(l3s),
# 'error': np.copy(errors),
# 'smoothness': np.copy(smoothness),
# 'switches': np.copy(count_switches)
# }
# with open('validation_run_1.pkl', 'wb') as f:
# pickle.dump(run1, f)
with open('validation_run_1.pkl', 'rb') as f:
run1 = pickle.load(f)
f2 = interp1d(run1['l2'], np.arange(len(run1['l2'])))
f3 = interp1d(run1['l3'], np.arange(len(run1['l3'])))
xticks = [f2(i).item() for i in np.logspace(0, 3, 4)]
yticks = [f3(i).item() for i in np.logspace(-1, 1, 3)]
xticklabels = ['$10^{'+'{}'.format(i)+'}$' for i in range(len(xticks))]
yticklabels = ['$10^{'+'{}'.format(i - 1)+'}$' for i in range(len(yticks))]
with sns.axes_style('white'):
fig,ax=plt.subplots(1,1)
cp = ax.imshow(run1['error'], cmap='plasma')
plt.colorbar(cp)
ax.set_xticks(xticks)
ax.set_yticks(yticks)
ax.set_xticklabels(xticklabels)
ax.set_yticklabels(yticklabels)
ax.invert_yaxis()
plt.xlabel('$\lambda_2$')
plt.ylabel('$\lambda_3$')
plt.title('sos error');
plt.figure()
with sns.axes_style('white'):
fig,ax=plt.subplots(1,1)
cp = ax.imshow(np.log10(run1['smoothness']), cmap='plasma')
plt.colorbar(cp)
ax.set_xticks(xticks)
ax.set_yticks(yticks)
ax.set_xticklabels(xticklabels)
ax.set_yticklabels(yticklabels)
ax.invert_yaxis()
plt.xlabel('$\lambda_2$')
plt.ylabel('$\lambda_3$')
plt.title('log of smoothness penalty in 2nd component');
plt.figure()
with sns.axes_style('white'):
fig,ax=plt.subplots(1,1)
cp = ax.imshow(run1['switches'], cmap='plasma')
plt.colorbar(cp)
ax.set_xticks(xticks)
ax.set_yticks(yticks)
ax.set_xticklabels(xticklabels)
ax.set_yticklabels(yticklabels)
ax.invert_yaxis()
plt.xlabel('$\lambda_2$')
plt.ylabel('$\lambda_3$')
plt.title('count of switches in 3rd component');
from sklearn.model_selection import KFold
```
Tighten the upper and lower bounds a bit, and use Stephen's bootstrap resampling strategy. Caution: this takes all afternoon to execute.
```
# l2s = np.logspace(0,3,20)
# l3s = np.logspace(-1,0.5,20)
# num_splits = 20
# hold = 0.2
# splits = []
# for s in range(num_splits):
# remove_ix = np.random.choice(np.arange(len(y)), int(len(y) * hold), replace=False)
# remove_ix.sort()
# use_ix = np.ones_like(y, dtype=bool)
# use_ix[remove_ix] = False
# splits.append(use_ix)
# l2_errors = np.zeros((len(l3s), len(l2s)))
# l1_errors = np.zeros((len(l3s), len(l2s)))
# count_switches = np.zeros((len(l3s), len(l2s)))
# smoothness = np.zeros((len(l3s), len(l2s)))
# counter = 0
# for j, l2 in enumerate(l2s):
# for i, l3 in enumerate(l3s):
# progress(counter, errors.size)
# c1 = GaussNoise()
# c2 = SmoothFirstDifference(theta=l2)
# p = 0.25
# c3 = MarkovChain([[1-p, p], [p, 1-p]], theta=l3)
# components = [c1, c2, c3]
# problem = Problem(y, components)
# sos = 0
# sae = 0
# smth = 0
# count_sw = 0
# for uix in splits:
# problem.decompose(admm=True, rho=1, num_iter=100, use_set=uix, verbose=False)
# # print(np.sum(np.power((y - np.sum(problem.estimates, axis=0))[test_ix], 2)))
# sos += np.sum(np.power((y - np.sum(problem.estimates, axis=0))[~uix], 2))
# sae += np.sum(np.abs((y - np.sum(problem.estimates, axis=0))[~uix]))
# smth =+ np.sum(np.power(np.diff(problem.estimates[1]), 2))
# count_sw +=np.sum(~np.isclose(np.diff(problem.estimates[-1]), 0))
# l2_errors[i, j] = sos / (num_splits * np.sum(~uix))
# l1_errors[i, j] = sae / (num_splits * np.sum(~uix))
# smoothness[i, j] = smth / (num_splits)
# count_switches[i, j] = count_sw / (num_splits)
# counter += 1
# progress(counter, errors.size)
# run2 = {
# 'l2': np.copy(l2s),
# 'l3': np.copy(l3s),
# 'sos': np.copy(l2_errors),
# 'sae': np.copy(l1_errors),
# 'smoothness': np.copy(smoothness),
# 'switches': np.copy(count_switches)
# }
# with open('validation_run_2.pkl', 'wb') as f:
# pickle.dump(run2, f)
with open('validation_run_2.pkl', 'rb') as f:
run2 = pickle.load(f)
from scipy.interpolate import interp1d
f2 = interp1d(run2['l2'], np.arange(len(run2['l2'])))
f3 = interp1d(run2['l3'], np.arange(len(run2['l3'])))
xticks = [f2(i).item() for i in np.logspace(0, 3, 4)]
yticks = [f3(i).item() for i in np.logspace(-1, 0, 2)]
xticklabels = ['$10^{'+'{}'.format(i)+'}$' for i in range(len(xticks))]
yticklabels = ['$10^{'+'{}'.format(i - 1)+'}$' for i in range(len(yticks))]
i = 10
j = 7
with sns.axes_style('white'):
fig,ax=plt.subplots(1,1, figsize=(8,6))
cp = ax.imshow(run2['sos'], cmap='plasma')
plt.colorbar(cp)
ax.scatter(f2(l2s[j]), f3(l3s[i]), color='red')
ax.set_xticks(xticks)
ax.set_yticks(yticks)
ax.set_xticklabels(xticklabels)
ax.set_yticklabels(yticklabels)
ax.invert_yaxis()
plt.xlabel('$\lambda_2$')
plt.ylabel('$\lambda_3$')
plt.title('sos error');
plt.figure()
with sns.axes_style('white'):
fig,ax=plt.subplots(1,1, figsize=(8,6))
cp = ax.imshow(run2['sae'], cmap='plasma')
plt.colorbar(cp)
ax.scatter(f2(l2s[j]), f3(l3s[i]), color='red')
ax.set_xticks(xticks)
ax.set_yticks(yticks)
ax.set_xticklabels(xticklabels)
ax.set_yticklabels(yticklabels)
ax.invert_yaxis()
plt.xlabel('$\lambda_2$')
plt.ylabel('$\lambda_3$')
plt.title('sae error');
plt.figure()
with sns.axes_style('white'):
fig,ax=plt.subplots(1,1, figsize=(8,6))
cp = ax.imshow(np.log10(run2['smoothness']), cmap='plasma')
plt.colorbar(cp)
ax.scatter(f2(l2s[j]), f3(l3s[i]), color='red')
ax.set_xticks(xticks)
ax.set_yticks(yticks)
ax.set_xticklabels(xticklabels)
ax.set_yticklabels(yticklabels)
ax.invert_yaxis()
plt.xlabel('$\lambda_2$')
plt.ylabel('$\lambda_3$')
plt.title('log of smoothness penalty in 2nd component');
plt.figure()
with sns.axes_style('white'):
fig,ax=plt.subplots(1,1, figsize=(8,6))
cp = ax.imshow(run2['switches'], cmap='plasma')
plt.colorbar(cp)
ax.scatter(f2(l2s[j]), f3(l3s[i]), color='red')
ax.set_xticks(xticks)
ax.set_yticks(yticks)
ax.set_xticklabels(xticklabels)
ax.set_yticklabels(yticklabels)
ax.invert_yaxis()
plt.xlabel('$\lambda_2$')
plt.ylabel('$\lambda_3$')
plt.title('count of switches in 3rd component');
i = 10
j = 7
X, Y = np.meshgrid(run2['l2'], run2['l3'])
plt.plot(X[i], run2['sae'][i])
plt.xscale('log')
plt.xlabel('$\lambda_2$')
plt.title('error for $\lambda_3={:.2e}$'.format(l3s[i]))
plt.axvline(l2s[j], color='red', ls='--')
plt.figure()
plt.plot(Y[:, j], run2['sae'][:, j])
plt.xscale('log')
plt.xlabel('$\lambda_3$')
plt.axvline(l3s[i], color='red', ls='--')
plt.title('error for $\lambda_2={:.2e}$'.format(l2s[j]))
lambda2 = l2s[j] * .75
lambda3 = l3s[i]
c1 = GaussNoise()
c2 = SmoothFirstDifference(theta=lambda2)
p = 0.25
c3 = MarkovChain([[1-p, p], [p, 1-p]], theta=lambda3)
components = [c1, c2, c3]
problem = Problem(y, components)
problem.decompose(admm=True, rho=1, num_iter=100)#, use_set=use_ix)
sos = np.sum(np.power((y - np.sum(problem.estimates, axis=0))[~use_ix], 2))
sae = np.sum(np.abs((y - np.sum(problem.estimates, axis=0))[~use_ix]))
print('sos: {:.2f}, sae: {:.2f}'.format(sos, sae))
K = len(components)
fs = np.array([5.5,2.9])
fig, ax = plt.subplots(nrows=K, sharex=True, figsize=2*fs)
for k in range(K):
if k > 0:
true = X_real[k]
est = problem.estimates[k]
ax[k].plot(true, label='true', linewidth=0.75, alpha=0.5)
ax[k].plot(est, label='estimated', linewidth=0.75, alpha=0.5)
ax[k].set_title('Component {}'.format(k+1))
else:
ax[k].plot(
np.arange(T)[use_ix],
np.sum(X_real, axis=0)[use_ix],
label='observed', linewidth=0.5, marker='.', color='green', ms=1, alpha=0.5
)
ax[k].plot(
np.arange(T)[~use_ix],
np.sum(X_real, axis=0)[~use_ix],
label='missing', marker='x', color='red', ls='none', alpha=0.5
)
ax[k].plot(np.sum(X_real[1:], axis=0), label='true', linewidth=0.75, alpha=0.5)
ax[k].plot(np.sum(problem.estimates[1:], axis=0), label='estimated', linewidth=0.75, alpha=0.5)
ax[k].set_title('Composed Signal')
ax[k].legend(loc=[1.01, 0.1])
plt.tight_layout(pad=0.05)
# fig.savefig('/Users/bennetmeyers/Documents/Boyd-work/OSD-presentations/April2021/figs/simple-example.pgf')
```
Okay, narrow the search space even more, and don't go overkill on the number of bootstraps :/
this is "run 4". "run 3" is in the other notebook and has a higher switching rate
```
np.logspace(np.log10(0.2), np.log10(2), 10)
# l2s = np.logspace(0, 2, 10)
# l3s = np.logspace(-1, 1, 10)
# num_splits = 12
# hold = 0.1
# splits = []
# for s in range(num_splits):
# remove_ix = np.random.choice(np.arange(len(y)), int(len(y) * hold), replace=False)
# remove_ix.sort()
# use_ix = np.ones_like(y, dtype=bool)
# use_ix[remove_ix] = False
# splits.append(use_ix)
# l2_errors = np.zeros((len(l3s), len(l2s)))
# l1_errors = np.zeros((len(l3s), len(l2s)))
# count_switches = np.zeros((len(l3s), len(l2s)))
# smoothness = np.zeros((len(l3s), len(l2s)))
# counter = 0
# for j, l2 in enumerate(l2s):
# for i, l3 in enumerate(l3s):
# progress(counter, l2_errors.size)
# c1 = GaussNoise()
# c2 = SmoothFirstDifference(theta=l2)
# p = 0.25
# c3 = MarkovChain([[1-p, p], [p, 1-p]], theta=l3)
# components = [c1, c2, c3]
# problem = Problem(y, components)
# sos = 0
# sae = 0
# smth = 0
# count_sw = 0
# for uix in splits:
# problem.decompose(admm=True, rho=1, num_iter=100, use_set=uix, verbose=False)
# # print(np.sum(np.power((y - np.sum(problem.estimates, axis=0))[test_ix], 2)))
# sos += np.sum(np.power((y - np.sum(problem.estimates, axis=0))[~uix], 2))
# sae += np.sum(np.abs((y - np.sum(problem.estimates, axis=0))[~uix]))
# smth =+ np.sum(np.power(np.diff(problem.estimates[1]), 2))
# count_sw +=np.sum(~np.isclose(np.diff(problem.estimates[-1]), 0))
# l2_errors[i, j] = sos / (num_splits * np.sum(~uix))
# l1_errors[i, j] = sae / (num_splits * np.sum(~uix))
# smoothness[i, j] = smth / (num_splits)
# count_switches[i, j] = count_sw / (num_splits)
# counter += 1
# progress(counter, l2_errors.size)
# run4 = {
# 'l2': np.copy(l2s),
# 'l3': np.copy(l3s),
# 'sos': np.copy(l2_errors),
# 'sae': np.copy(l1_errors),
# 'smoothness': np.copy(smoothness),
# 'switches': np.copy(count_switches)
# }
# with open('validation_run_4.pkl', 'wb') as f:
# pickle.dump(run4, f)
with open('validation_run_4.pkl', 'rb') as f:
run4 = pickle.load(f)
f2 = interp1d(run4['l2'], np.arange(len(run4['l2'])))
f3 = interp1d(run4['l3'], np.arange(len(run4['l3'])))
xticks = [f2(i).item() for i in np.logspace(0, 2, 3)]
yticks = [f3(i).item() for i in np.logspace(-1, 1, 3)]
xticklabels = ['$10^{'+'{}'.format(i)+'}$' for i in range(len(xticks))]
# yticklabels = [.5, 1]
yticklabels = ['$10^{'+'{}'.format(i - 1)+'}$' for i in range(len(yticks))]
run2['sos'].shape
i = 10
j = 7
with sns.axes_style('white'):
fig,ax=plt.subplots(1,1, figsize=(8,6))
cp = ax.imshow(run4['sos'], cmap='plasma')
plt.colorbar(cp)
# ax.scatter(f2(l2s[j]), f3(l3s[i]), color='red')
ax.set_xticks(xticks)
ax.set_yticks(yticks)
ax.set_xticklabels(xticklabels)
ax.set_yticklabels(yticklabels)
ax.invert_yaxis()
plt.xlabel('$\lambda_2$')
plt.ylabel('$\lambda_3$')
plt.title('sos error');
plt.figure()
with sns.axes_style('white'):
fig,ax=plt.subplots(1,1, figsize=(8,6))
cp = ax.imshow(run4['sae'], cmap='plasma')
plt.colorbar(cp)
# ax.scatter(f2(l2s[j]), f3(l3s[i]), color='red')
ax.set_xticks(xticks)
ax.set_yticks(yticks)
ax.set_xticklabels(xticklabels)
ax.set_yticklabels(yticklabels)
ax.invert_yaxis()
plt.xlabel('$\lambda_2$')
plt.ylabel('$\lambda_3$')
plt.title('sae error');
plt.figure()
with sns.axes_style('white'):
fig,ax=plt.subplots(1,1, figsize=(8,6))
cp = ax.imshow(np.log10(run4['smoothness']), cmap='plasma')
plt.colorbar(cp)
# ax.scatter(f2(l2s[j]), f3(l3s[i]), color='red')
ax.set_xticks(xticks)
ax.set_yticks(yticks)
ax.set_xticklabels(xticklabels)
ax.set_yticklabels(yticklabels)
ax.invert_yaxis()
plt.xlabel('$\lambda_2$')
plt.ylabel('$\lambda_3$')
plt.title('log of smoothness penalty in 2nd component');
plt.figure()
with sns.axes_style('white'):
fig,ax=plt.subplots(1,1, figsize=(8,6))
cp = ax.imshow(run4['switches'], cmap='plasma')
plt.colorbar(cp)
# ax.scatter(f2(l2s[j]), f3(l3s[i]), color='red')
ax.set_xticks(xticks)
ax.set_yticks(yticks)
ax.set_xticklabels(xticklabels)
ax.set_yticklabels(yticklabels)
ax.invert_yaxis()
plt.xlabel('$\lambda_2$')
plt.ylabel('$\lambda_3$')
plt.title('count of switches in 3rd component');
X, Y = np.meshgrid(run4['l2'], run4['l3'])
msk = run4['switches'] >= 1
i_best = np.argmin(run4['sae'][msk])
print(
X[msk][i_best],
Y[msk][i_best],
run4['sae'][msk][i_best]
)
import matplotlib
sns.set_context('paper')
matplotlib.use("pgf")
matplotlib.rcParams.update({
"pgf.texsystem": "pdflatex",
'font.family': 'serif',
'text.usetex': True,
'pgf.rcfonts': False,
})
slct = run4['sae'] <= run4['sae'].ravel()[i_best]*1.02
best_lambda2 = np.max(X[slct])
best_lambda3 = np.min(Y[slct])
with sns.axes_style('white'):
fig,ax=plt.subplots(1,1, figsize=(4,3))
im = np.copy(run4['sae'])
# im[~msk] = np.nan
cp = ax.imshow(im, cmap='plasma')
plt.colorbar(cp)
ax.scatter(f2(X.ravel()[i_best]), f3( Y.ravel()[i_best]), color='red', label='min error')
ax.scatter(f2(best_lambda2), f3(best_lambda3), color='orange', label='best value within 2%')
ax.legend(loc=2)
ax.set_xticks(xticks)
ax.set_yticks(yticks)
ax.set_xticklabels(xticklabels)
ax.set_yticklabels(yticklabels)
ax.invert_yaxis()
plt.xlabel('$\lambda_2$')
plt.ylabel('$\lambda_3$')
plt.title('holdout error');
# fig.savefig('/Users/bennetmeyers/Documents/Boyd-work/OSD-presentations/April2021/figs/holdout-heatmap.pgf')
X, Y = np.meshgrid(run4['l2'], run4['l3'])
msk = run4['switches'] >= 1
i_best = np.argmin(run4['sos'][msk])
print(
X[msk][i_best],
Y[msk][i_best],
run4['sos'][msk][i_best]
)
slct = run4['sos'] <= run4['sos'].ravel()[i_best]*1.025
best_lambda2 = np.max(X[slct])
best_lambda3 = np.min(Y[slct])
with sns.axes_style('white'):
fig,ax=plt.subplots(1,1, figsize=(8,6))
im = np.copy(run4['sos'])
# im[~msk] = np.nan
cp = ax.imshow(np.sqrt(im), cmap='plasma')
plt.colorbar(cp)
ax.scatter(f2(X.ravel()[i_best]), f3( Y.ravel()[i_best]), color='red', label='min error')
ax.scatter(f2(best_lambda2), f3(best_lambda3), color='pink', label='best value within 2%')
ax.legend()
ax.set_xticks(xticks)
ax.set_yticks(yticks)
ax.set_xticklabels(xticklabels)
ax.set_yticklabels(yticklabels)
ax.invert_yaxis()
plt.xlabel('$\lambda_2$')
plt.ylabel('$\lambda_3$')
plt.title('rms error');
slct = np.isclose(X, 16.984992522418107)
plt.plot(Y[slct], run4['sae'][slct])
# plt.xscale('log')
slct = run4['sae'] <= run4['sae'].ravel()[i_best]*1.05
with sns.axes_style('white'):
fig,ax=plt.subplots(1,1, figsize=(8,6))
cp = ax.imshow(slct, cmap='plasma')
plt.colorbar(cp)
# ax.scatter(f2(X[i_best]), f3(Y[i_best]), color='red')
# ax.scatter(f2(X[i_best]), f3(Y[i_best]*0.5), color='pink')
ax.set_xticks(xticks)
ax.set_yticks(yticks)
ax.set_xticklabels(xticklabels)
ax.set_yticklabels(yticklabels)
ax.invert_yaxis()
plt.xlabel('$\lambda_2$')
plt.ylabel('$\lambda_3$')
plt.title('sae error');
plt.figure()
np.max(X[slct])
np.min(Y[slct])
np.min(Y[slct])
lambda2 = best_lambda2
lambda3 = best_lambda3
c1 = GaussNoise()
c2 = SmoothFirstDifference(theta=lambda2)
p = 0.25
c3 = MarkovChain([[1-p, p], [p, 1-p]], theta=lambda3)
components = [c1, c2, c3]
problem = Problem(y, components)
problem.decompose(admm=True, rho=1, num_iter=100)#, use_set=use_ix)
sos = np.sum(np.power((y - np.sum(problem.estimates, axis=0))[~use_ix], 2))
sae = np.sum(np.abs((y - np.sum(problem.estimates, axis=0))[~use_ix]))
print('sos: {:.2f}, sae: {:.2f}'.format(sos, sae))
K = len(components)
fs = np.array([5.5,2.9])
fig, ax = plt.subplots(nrows=K, sharex=True, figsize=2*fs)
for k in range(K):
if k > 0:
true = X_real[k]
est = problem.estimates[k]
ax[k].plot(true, label='true', linewidth=0.75, alpha=0.5)
ax[k].plot(est, label='estimated', linewidth=0.75, alpha=0.5)
ax[k].set_title('Component {}'.format(k+1))
else:
ax[k].plot(
np.arange(T)[:],
np.sum(X_real, axis=0)[:],
label='observed', linewidth=0.5, marker='.', color='green', ms=1, alpha=0.5
)
# ax[k].plot(
# np.arange(T)[~use_ix],
# np.sum(X_real, axis=0)[~use_ix],
# label='missing', marker='x', color='red', ls='none', alpha=0.5
# )
ax[k].plot(np.sum(X_real[1:], axis=0), label='true', linewidth=0.75, alpha=0.5)
ax[k].plot(np.sum(problem.estimates[1:], axis=0), label='estimated', linewidth=0.75, alpha=0.5)
ax[k].set_title('Composed Signal')
ax[k].legend(loc=[1.01, 0.1])
plt.tight_layout(pad=0.05)
# fig.savefig('/Users/bennetmeyers/Documents/Boyd-work/OSD-presentations/April2021/figs/simple-example.pgf')
print(lambda2, lambda3)
K = len(components)
fig, ax = plt.subplots(nrows=K+1, sharex=True, figsize=(5.5,2.9))
for k in range(K+1):
if k <= K-1:
ax[k].plot(X_real[k], linewidth=0.75, label='true', alpha=0.75)
ax[k].plot(problem.estimates[k], label='estimate', linewidth=0.75, alpha=0.75)
ax[k].set_title('component {}'.format(k+1))
ax[k].legend(loc=[1.01, 0.2])
else:
ax[k].plot(
np.arange(T),
np.sum(X_real, axis=0),
label='observed', linewidth=0.5, marker='.', color='green', ms=1.5, alpha=0.5
)
ax[k].plot(np.sum(X_real[1:], axis=0), label='true', linewidth=0.75, alpha=0.75)
ax[k].plot(problem.estimates[1] + problem.estimates[2], label='estimate',
linewidth=0.75, alpha=0.75)
ax[k].set_title('composed signal')
ax[k].legend(loc=[1.01, 0.0])
plt.tight_layout(pad=0.05)
# fig.savefig('/Users/bennetmeyers/Documents/Boyd-work/OSD-presentations/April2021/figs/simple-example-final.pgf')
```
| github_jupyter |
## Diameter of a Binary Tree
```
class Node:
    """A binary-tree / BST node: payload `value`, child links `left` and
    `right`, and a parent pointer `p` (filled in by `insert`)."""

    def __init__(self, value):
        self.value = value  # search key / payload
        self.left = None    # left child
        self.right = None   # right child
        self.p = None       # parent pointer

    def __str__(self):
        return f"({self.value})-"
def height(node):
    """Number of nodes on the longest path from `node` down to a leaf.

    An empty tree has height 0; a single node has height 1.
    """
    if node is None:
        return 0
    return 1 + max(height(node.left), height(node.right))
def diameter(node):
    """Length, in nodes, of the longest path between any two nodes of the
    tree rooted at `node` (0 for an empty tree).

    The original implementation re-ran `height` at every node, giving
    O(n^2) worst-case time; this version computes height and diameter
    together in a single O(n) traversal and returns identical values.
    """
    return _height_and_diameter(node)[1]


def _height_and_diameter(node):
    """Return (height, diameter) of the subtree at `node` in one pass."""
    if node is None:
        return 0, 0
    lh, ld = _height_and_diameter(node.left)
    rh, rd = _height_and_diameter(node.right)
    # The longest path passing through this node contains lh + rh + 1 nodes.
    return 1 + max(lh, rh), max(lh + rh + 1, ld, rd)
# Build the example tree:      1
#                            /   \
#                           2     3
#                          / \
#                         4   5
root = Node(1)
root.left = Node(2)
root.right = Node(3)
root.left.left = Node(4)
root.left.right = Node(5)
print(height(root))    # 3: nodes on the longest root-to-leaf path (1-2-4)
print(diameter(root))  # 4: nodes on the longest node-to-node path (4-2-1-3)
```
## Basic Operation
```
def inorder(node):
    """Print the tree in-order with bracket nesting and no trailing
    newline; e.g. a root 3 with left child 2 prints "[[(2)](3)]".

    An empty tree prints nothing.
    """
    if node is None:
        return
    print("[", end='')
    inorder(node.left)
    print("(" + str(node.value) + ")", end='')
    inorder(node.right)
    print("]", end='')
def insert(root, node):
    """BST-insert `node` into the tree rooted at `root`.

    Walks down from the root comparing keys, attaches `node` as a new
    leaf, and sets its parent pointer.  Duplicate keys go to the right
    subtree.  Debug lines are printed along the way.  `root` must not
    be None.
    """
    parent, walker = None, root
    while walker is not None:
        parent = walker
        walker = walker.left if node.value < walker.value else walker.right
    print("node(" + str(parent))
    if node.value < parent.value:
        parent.left = node
        print("insert at left:" + str(node))
    else:
        parent.right = node
        print("insert at right:" + str(node))
    node.p = parent
# Build a small BST by repeated insertion and show its bracketed
# in-order traversal.
root = Node(6)
insert(root, Node(5))
insert(root, Node(7))
insert(root, Node(8))
insert(root, Node(2))
insert(root, Node(5))  # duplicate key: goes into the right subtree of the existing 5
inorder(root)
def minimum(node):
    """Return the leftmost (smallest-key) node of the subtree at `node`.

    `node` must not be None.
    """
    while node.left is not None:
        node = node.left
    return node
def search(node, value):
    """Iterative BST lookup.

    Returns the node in the subtree rooted at `node` whose key equals
    `value`, or None if the key is absent (including when `node` is None).
    """
    while node is not None and node.value != value:
        node = node.left if value < node.value else node.right
    return node
def successor(node):
    """Return the in-order successor of `node` in a BST with parent
    pointers, or None if `node` holds the largest key.

    BUG FIX: the original called minimum(node) when a right child
    exists, returning the leftmost node of `node`'s *own* subtree
    (a key <= node.value) instead of the leftmost node of the right
    subtree.  The descent is inlined here.
    """
    if node.right is not None:
        # Successor is the minimum of the right subtree.
        cur = node.right
        while cur.left is not None:
            cur = cur.left
        return cur
    # No right child: climb until we arrive from a left child; that
    # ancestor is the successor (None if we fall off the root).
    cur = node
    p = node.p
    while (p is not None) and (cur is p.right):
        cur = p
        p = p.p
    return p
# Larger BST example: insert 11 keys, then locate 13 and print its
# in-order successor.  13 has no right child, so the successor is found
# by climbing parent pointers (15).
root = Node(15)
insert(root, Node(6))
insert(root, Node(18))
insert(root, Node(17))
insert(root, Node(20))
insert(root, Node(3))
insert(root, Node(2))
insert(root, Node(4))
insert(root, Node(7))
insert(root, Node(13))
insert(root, Node(9))
inorder(root)
target = search(root, 13)
print()
print(target)
print(successor(target))
def kth_small(node, k, i):
    """Return the k-th smallest node of the BST rooted at `node`, or
    None if the tree holds fewer than k nodes.

    `i` is a one-element list acting as a mutable visit counter shared
    across the recursion; pass [0] at the top-level call.  Every visited
    node is printed, and "!" marks the match.
    """
    if node is None:
        return None
    found = kth_small(node.left, k, i)
    if found is not None:
        return found
    i[0] += 1
    print(i[0], node.value)
    if i[0] == k:
        print("!")
        return node
    return kth_small(node.right, k, i)
# Rebuild the same BST and fetch the 4th-smallest key via an in-order
# walk with a mutable counter.  Sorted keys are 2,3,4,6,7,... so this
# prints the node holding 6.
root = Node(15)
insert(root, Node(6))
insert(root, Node(18))
insert(root, Node(17))
insert(root, Node(20))
insert(root, Node(3))
insert(root, Node(2))
insert(root, Node(4))
insert(root, Node(7))
insert(root, Node(13))
insert(root, Node(9))
print(kth_small(root, 4, [0]))
# Module-level state shared with kth_small2: `ret` receives the k-th
# smallest node once found, `count` the number of nodes visited so far.
# NOTE(review): neither is reset between calls, so kth_small2 only gives
# a correct answer on its first invocation per session.
ret = None
count = 0
def kth_small2(node, k):
    """Variant of kth_small that reports through the globals `ret` and
    `count` instead of return values.

    Performs an in-order walk; when the count-th visited node is the
    k-th one, it is stored in the global `ret`.  Always returns None.
    """
    global count
    global ret
    if node != None:
        retNode = kth_small2(node.left, k)  # return value unused (always None)
        if ret != None:
            return
        count += 1
        print(count, node.value)
        if count == k:
            print("!")
            ret = node
            return
        retNode = kth_small2(node.right, k)  # likewise unused
        if ret != None:
            return
        return
    else:
        return
# kth_small2 communicates via the globals `ret`/`count`, so the call
# itself prints None; the located node is read back from `ret`.
print(kth_small2(root, 4))
print(ret)
```
| github_jupyter |
```
def Transfer_full():
    """Prompt for a user name and define an inner money-transfer flow.

    NOTE(review): the inner helper `transfer_1` is defined but never
    invoked, so as written this function only consumes one line of
    input.  It relies on a module-level account record `key1` (a dict
    with at least 'name' and 'Bal_amount') — confirm against the cell
    that defines it.
    """
    u1 = input()
    def transfer_1():
        # Only proceed when the entered name matches the key1 account.
        if u1 == key1['name']:
            T1 = key1['Bal_amount']  # sender's working balance (local copy)
            print(f'\nWelcome{u1},{T1}')
            print("""\nWlcome to transfer Window !
Please, Check the details about the receiver Correctly...
NOTE : Once the process is done it cannot be Changed!!!
""")
            while True:
                TUD = input('Enter the Receiver ID : ')
                if TUD == key1['name']:
                    TUN1 = key1['name']
                    TUNA1 = key1['Bal_amount']  # receiver balance before transfer
                    print(f'Enter Transfer amount to {TUN1} !')
                    while True:
                        try:
                            Amount = int(input('Amount : '))
                        except ValueError:
                            print('Please enter only respective values!')
                        else:
                            if T1 < Amount :
                                print('Insufficient Balance !\n')
                                print('Try again!!!')
                            else:
                                # BUG FIX: the original wrote key1['Bal_amountl']
                                # (trailing 'l' typo), creating a stray key and
                                # leaving the real balance untouched.  Credit the
                                # receiver's actual 'Bal_amount' entry.
                                key1['Bal_amount'] = int(TUNA1) + Amount
                                print("Amount Transfered successfully!!")
                                TUNA1 = key1['Bal_amount']
                                T1 -= Amount
                                print(f'Current balance of {u1} is {T1}')
                                break
def Transfer_full():
    """Second draft of the transfer flow: collect the receiver ID and the
    sender's balance via the inner helper, then delegate to Transfer_nn.

    NOTE(review): this redefines Transfer_full from the previous cell.
    `u1` is read inside transfer_1 but is only bound in the enclosing
    scope by the tuple-unpacking line below, so transfer_1 reads an
    unbound closure variable on its first call — confirm the intended
    scoping.  `key1` is a module-level account record.
    """
    def transfer_1():
        if u1 == key1['name']:
            T1 = key1['Bal_amount']
            print(f'\nWelcome{u1},{T1}')
            print("""\nWlcome to transfer Window !
Please, Check the details about the receiver Correctly...
NOTE : Once the process is done it cannot be Changed!!!
""")
            while True:
                TUD = input('Enter the Receiver ID : ')
                break  # single prompt: the loop exits immediately
            return TUD,u1,T1
    TUD,u1,T1 = transfer_1()
    x = Transfer_nn(TUD,u1,T1)
def Transfer_nn(TUD,u1,T1):
    """Dispatch a transfer from sender `u1` (working balance `T1`) to the
    receiver whose ID is `TUD`, trying the key1 and key2 account records.

    NOTE(review): relies on module-level dicts key1/key2.  The tuples
    returned by the inner helpers are discarded, so the sender's reduced
    balance `T1` never propagates back to the caller — only the
    receiver's stored 'Bal_amount' is mutated.
    """
    print(T1)
    def tansfer_amount1(T1):
        # Credit key1's account if it is the chosen receiver.
        if TUD == key1['name']:
            TUN1 = key1['name']
            TUNA1 = key1['Bal_amount']
            print(f'Enter Transfer amount to {TUN1} !')
            while True:
                try:
                    Amount = int(input('Amount : '))
                except ValueError:
                    print('Please enter only respective values!')
                else:
                    if T1 < Amount :
                        print('Insufficient Balance !\n')
                        print('Try again!!!')
                    else:
                        key1['Bal_amount'] = int(TUNA1) + Amount
                        print("Amount Transfered successfully!!")
                        TUNA1 = key1['Bal_amount']
                        T1 -= Amount
                        print(f'Current balance of {u1} is {T1}')
                        break
            return TUN1,TUNA1,u1,T1
    def tansfer_amount2(T1):
        # Same flow for the key2 account.
        if TUD == key2['name']:
            TUN2 = key2['name']
            TUNA2 = key2['Bal_amount']
            print(f'Enter Transfer amount to {TUN2} !')
            while True:
                try:
                    Amount = int(input('Amount : '))
                except ValueError:
                    print('Please enter only respective values!')
                else:
                    if T1 < Amount :
                        print('Insufficient Balance !\n')
                        print('Try again!!!')
                    else:
                        key2['Bal_amount'] = int(TUNA2) + Amount
                        print("Amount Transfered successfully!!")
                        TUNA2 = key2['Bal_amount']
                        T1 -= Amount
                        print(f'Current balance of {u1} is {T1}')
                        break
            return TUN2,TUNA2,u1,T1
    tansfer_amount1(T1)
    tansfer_amount2(T1)
def Transfer_nn(TUD, u1, T1):
    """Transfer money from sender `u1` to the account whose 'name' equals TUD.

    This redefinition supports five accounts (key1..key5).

    Parameters:
        TUD: receiver ID typed by the sender (matched against each account
             dict's 'name' field).
        u1:  sender's user name (display only).
        T1:  sender's current balance.

    Side effects: prompts on stdin for the amount, mutates the matching
    account dict's 'Bal_amount', and prints progress messages.
    NOTE(review): key1..key5 are account dicts defined elsewhere in the
    notebook — confirm their 'Bal_amount' values are numeric strings/ints.
    """
    print(T1)

    def _transfer_to(receiver, balance):
        # Prompt until a valid, affordable amount is entered; credit the
        # receiver and return the sender's reduced balance.
        print(f"Enter Transfer amount to {receiver['name']} !")
        while True:
            try:
                Amount = int(input('Amount : '))
            except ValueError:
                print('Please enter only respective values!')
            else:
                if balance < Amount:
                    print('Insufficient Balance !\n')
                    print('Try again!!!')
                else:
                    receiver['Bal_amount'] = int(receiver['Bal_amount']) + Amount
                    print("Amount Transfered successfully!!")
                    balance -= Amount
                    print(f'Current balance of {u1} is {balance}')
                    break
        return balance

    # Bug fix: the original defined five near-identical nested functions,
    # called all of them unconditionally, and crashed with
    # UnboundLocalError in every function whose account did not match TUD.
    # One loop handles all accounts and safely skips non-matches.
    for receiver in (key1, key2, key3, key4, key5):
        if TUD == receiver['name']:
            T1 = _transfer_to(receiver, T1)
            break
```
| github_jupyter |
```
from collections import defaultdict
from itertools import chain
from pathlib import Path
from pprint import pprint
from uuid import uuid4
import json
import os
import re
import requests
import shutil
import numpy as np
import pandas as pd
from pandas import DataFrame, Series
from schematics.types import IntType, StringType, ListType
%cd /home/ubuntu/hidebound/python
import hidebound.core.tools as tools
from hidebound.core.parser import AssetNameParser
from hidebound.core.database import Database
import hidebound.core.database_tools as db_tools
from hidebound.core.database_test import DatabaseTests
from hidebound.core.specification_base import *
from hidebound.core.specifications import *
from hidebound.core.validators import *
from hidebound.core.traits import *
import hidebound.server.components as components
class Spec001(SequenceSpecificationBase):
    """Sequence asset spec: PNG frame sequences with a per-file coordinate."""
    # Ordered fields parsed out of each asset filename.
    filename_fields = ['project', 'specification', 'descriptor', 'version', 'coordinate', 'frame', 'extension']
    descriptor = ListType(StringType(), required=True, validators=[is_descriptor, is_homogenous])
    frame = ListType(IntType(), required=True, validators=[is_frame])
    # Every file in the sequence must be a .png.
    extension = ListType(StringType(), required=True, validators=[is_extension, lambda x: is_eq(x, 'png')])
    # NOTE(review): validators sit on the inner ListType here — confirm
    # is_coordinate is meant to run per coordinate rather than on the
    # outer list.
    coordinate = ListType(ListType(IntType(), required=True, validators=[is_coordinate]))
    # file_traits = dict(
    #     width=get_image_width
    # )
class Spec002(SequenceSpecificationBase):
    """Sequence asset spec: 1024x1024 EXR frame sequences."""
    # Ordered fields parsed out of each asset filename (no coordinate).
    filename_fields = ['project', 'specification', 'descriptor', 'version', 'frame', 'extension']
    # Image dimensions are pinned to exactly 1024x1024.
    width = ListType(IntType(), required=True, validators=[lambda x: is_eq(x, 1024)])
    height = ListType(IntType(), required=True, validators=[lambda x: is_eq(x, 1024)])
    # Frames must be valid and non-negative.
    frame = ListType(IntType(), required=True, validators=[is_frame, lambda x: is_gt(x, -1)])
    extension = ListType(StringType(), required=True, validators=[is_extension, lambda x: is_eq(x, 'exr')])
    # width is extracted from the file itself rather than the filename.
    file_traits = dict(
        width=get_image_width
    )
class Vdb001(FileSpecificationBase):
    """Single-file asset spec for .vdb volume files."""
    filename_fields = ['project', 'specification', 'descriptor', 'version', 'extension']
    # NOTE(review): extension is a ListType here, but the later
    # redefinition of Vdb001 in this notebook declares it as a plain
    # StringType — confirm which form FileSpecificationBase expects.
    extension = ListType(StringType(), required=True, validators=[is_extension, lambda x: is_eq(x, 'vdb')])
# db = Database(root, [Spec001, Spec002, Vdb001])
# db = Database(root, [Spec001, Spec002], exclude_regex='misc\.txt|vdb001')

# Build a hidebound database over the project root using only the VDB
# spec, ingest the files, then inspect and query the result.
root = '/tmp/projects'
hb_root = '/tmp/hidebound'
os.makedirs(hb_root, exist_ok=True)
# db = Database(root, hb_root, [Spec001, Spec002, Vdb001])
db = Database(root, hb_root, [Vdb001])
db.update()
db.data.T
db.update().create()
# db.delete()
!tree /tmp/hidebound/; tree /tmp/projects
db.search('SELECT filename, asset_valid FROM data WHERE specification == "vdb001"')
# Recreate a clean test project tree populated with DatabaseTests fixtures.
root = '/tmp/projects'
if Path(root).exists():
    shutil.rmtree(root)
os.makedirs(root)
fullpaths = DatabaseTests().create_files(root)

# Recreate an empty hidebound working directory alongside it.
hb_root = '/tmp/hidebound'
if Path(hb_root).exists():
    shutil.rmtree(hb_root)
os.makedirs(hb_root)
!tree /tmp/projects/; tree /tmp/hidebound/
# Exercise the hidebound REST API end to end: initialize, update, read,
# search.
url = 'http://0.0.0.0:80/api/initialize'
config = dict(
    root_directory='/tmp/projects',
    hidebound_directory='/tmp/hidebound',
    specification_files=['/home/ubuntu/hidebound/python/hidebound/core/test_specifications.py']
)
# config = dict(
#     root_directory='/mnt/storage/projects',
#     hidebound_directory='/mnt/storage/hidebound',
#     specification_files=['/home/ubuntu/hidebound/python/hidebound/core/test_specifications.py']
# )
# NOTE(review): json.dumps combined with requests' json= kwarg
# double-encodes the payload (the server receives a JSON *string*, not an
# object) — confirm the API expects it this way.
config = json.dumps(config)
response = requests.post(url, json=config).json()['message']
print(response)
url = 'http://0.0.0.0:80/api/update'
response = requests.post(url).json()['message']
print(response)
url = 'http://0.0.0.0:80/api/read'
response = requests.post(url).json()['response']
data = response
print(response[0])
# Search accepts a SQL query against the 'data' table.
url = 'http://0.0.0.0:80/api/search'
params = json.dumps({'query': 'SELECT * FROM data WHERE asset_valid'})
response = requests.post(url, json=params).json()['response']
data = response
# DataFrame(response).T
# url = 'http://0.0.0.0:80/api/search'
# query = json.dumps({'query': 'SELECT * FROM data WHERE version == 3'})
# response = requests.post(url, json=query).json()['response']
# url = 'http://0.0.0.0:80/api/create'
# response = requests.post(url).json()['message']
# print(response)
!tree /tmp/hidebound/; tree /tmp/projects
class Spec001(SequenceSpecificationBase):
    """Sequence asset spec (named variant): PNG sequences with coordinates."""
    # Explicit spec name used in filenames and database queries.
    name = 'spec001'
    filename_fields = ['project', 'specification', 'descriptor', 'version', 'coordinate', 'frame', 'extension']
    descriptor = ListType(StringType(), required=True, validators=[is_descriptor, is_homogenous])
    frame = ListType(IntType(), required=True, validators=[is_frame])
    # Every file in the sequence must be a .png.
    extension = ListType(StringType(), required=True, validators=[is_extension, lambda x: is_eq(x, 'png')])
    # NOTE(review): validators sit on the inner ListType — confirm
    # is_coordinate is meant to run per coordinate.
    coordinate = ListType(ListType(IntType(), required=True, validators=[is_coordinate]))
    # file_traits = dict(
    #     width=get_image_width
    # )
class Spec002(SequenceSpecificationBase):
    """Sequence asset spec (named variant): 1024x1024 EXR sequences."""
    # Explicit spec name used in filenames and database queries.
    name = 'spec002'
    filename_fields = ['project', 'specification', 'descriptor', 'version', 'frame', 'extension']
    # Image dimensions are pinned to exactly 1024x1024.
    width = ListType(IntType(), required=True, validators=[lambda x: is_eq(x, 1024)])
    height = ListType(IntType(), required=True, validators=[lambda x: is_eq(x, 1024)])
    frame = ListType(IntType(), required=True, validators=[is_frame, lambda x: is_gt(x, -1)])
    extension = ListType(StringType(), required=True, validators=[is_extension, lambda x: is_eq(x, 'exr')])
    # width is extracted from the file itself rather than the filename.
    file_traits = dict(
        width=get_image_width
    )
class Vdb001(FileSpecificationBase):
    """Single-file asset spec (named variant) for .vdb volume files."""
    # Explicit spec name used in filenames and database queries.
    name = 'vdb001'
    filename_fields = ['project', 'specification', 'descriptor', 'version', 'extension']
    # NOTE(review): extension is a scalar StringType here, but the earlier
    # Vdb001 definition used ListType — confirm which form
    # FileSpecificationBase expects.
    extension = StringType(required=True, validators=[is_extension, lambda x: is_eq(x, 'vdb')])
# db = Database(root, [Spec001, Spec002, Vdb001])
# db = Database(root, [Spec001, Spec002], exclude_regex='misc\.txt|vdb001')

# Build a database over the two image specs and query it directly.
root = '/tmp/projects'
hb_root = '/tmp/hidebound'
os.makedirs(hb_root, exist_ok=True)
db = Database(root, hb_root, [Spec001, Spec002])
db.update()
db.data.T
# db.update().create()
# !tree /tmp/hidebound/; tree /tmp/projects
db.search('SELECT * FROM data WHERE version == 3')

# Report file-level errors grouped by asset: pair each filepath with its
# error, aggregate per asset_path, then pretty-print one section per asset.
data = db.data
asset_errors = data[data.file_error.notnull()].copy()
asset_errors['file_errors'] = asset_errors.apply(lambda x: [x.filepath, x.file_error], axis=1)
asset_errors = asset_errors.groupby('asset_path', as_index=False).agg(lambda x: x.tolist())
length = 500
print('-' * length)
for i, row in asset_errors.iterrows():
    print(row.asset_path)
    for item in row.file_errors:
        # left-pad the filepath so errors line up in a column
        print(f'\t{item[0]:<150}{item[1]}')
    print('-' * length)
```
| github_jupyter |
## Language servers
By default `jupyter-lsp` does not come with any language servers preinstalled.
However, we will try to use them if they _are_ installed and we know about them
(i.e. someone contributed a full specification).
> You can disable auto-detection by configuring
> [autodetect](./Configuring.html#autodetect)
You can add another language server for languages that are not listed on this
page:
- using a minimal JSON or Python
[configuration file](./Configuring.html#language-servers) (good for
experimenting or configuring a niche server), or
- contributing a [full specification](./Contributing.html#specs) (to enable
better integration and help other users of the same language)
The existing language servers are listed on the [official
list][lsp-implementations] and on the [community-curated list][langserver].
For the language servers in the tables below, use one of the suggested package
managers to install them: these implementations are tested to work with
`jupyter-lsp`.
[language-server]:
https://microsoft.github.io/language-server-protocol/specification
[langserver]: https://langserver.org
[lsp-implementations]:
https://microsoft.github.io/language-server-protocol/implementors/servers
```
import pathlib
import IPython
from jinja2 import Template
from jupyter_lsp import LanguageServerManager
# Discover installed language servers; also search the repo's parent
# directory for node_modules-based servers.
mgr = LanguageServerManager(extra_node_roots=[str(pathlib.Path.cwd().parent)])
mgr.init_language_servers()
def lang_server_table(specs):
    """Render a language-server spec mapping as an HTML table.

    :param specs: dict of server key -> spec, where each spec carries
        `languages`, `urls.home`, and an `install` mapping of
        package-manager -> install command.
    :return: IPython.display.HTML table with one row per server.
    """
    return IPython.display.HTML(
        Template(
            """
            <table class="langservers">
              <thead>
                <tr>
                  <th>Languages</th>
                  <th>Implementation</th>
                  <th>Installation</th>
                </tr>
              </thead>
              <tbody>
                {% for key, spec in specs.items() %}
                <tr>
                  <th>
                    {% for lang in spec.languages %}
                    <a name="language-{{lang}}"/>{{ lang }}<br/>
                    {% endfor %}
                  </th>
                  <td>
                    <a href="{{spec.urls.home}}">{{key}}</a>
                  </td>
                  <td>
                    <ul>
                      {% for pkgmgr, inst in spec.install.items() %}
                      <li>{{pkgmgr}}: <code>{{ inst }}</code></li>
                      {% endfor %}
                    </ul>
                  </td>
                </tr>
                {% endfor %}
              </tbody>
            </table>
            """
        ).render(specs=specs)
    )
```
### Notebook-optimized Language Servers
These servers have support for notebooks and file editors. The `pylsp` and
`r-languageserver` servers are well-tested, while the `jedi` and `Julia` servers are
experimental. If you choose to install multiple language servers for the same
language, the one with the highest `priority` (which can be set in the _Advanced
Settings Editor_) will be used.
```
# Notebook-capable servers to showcase in the table below.
nb_langs = [
    "pylsp",
    "r-languageserver",
    "julia-language-server",
    "jedi-language-server",
]
lang_server_table(
    {key: spec for key, spec in mgr.all_language_servers.items() if key in nb_langs}
)
```
The Scala language server (`metals`) is not currently auto-detected, but can be
configured as demonstrated in the
[configuration example](./Configuring.html#example-scala-language-server-metals-integration).
If you plan to add a custom language server for the use with notebooks, please
note that a complete set of information should be provided by the kernel, as
described in
[making custom servers work with notebooks](./Configuring.html#making-custom-servers-work-with-notebooks).
### NodeJS-based Language Servers
These servers have mostly been tested with file editors.
```
# Servers installable via npm (NodeJS-based implementations).
lang_server_table(
    {
        key: spec
        for key, spec in mgr.all_language_servers.items()
        if "npm" in spec["install"]
    }
)
```
NodeJS is a prerequisite for installation of any of the above language servers;
you can get it with:
```bash
conda install -c conda-forge nodejs
# or one of the following, as an administrator
choco install nodejs # Windows with Chocolatey
sudo apt-get install nodejs # Debian/Ubuntu
brew install nodejs # MacOS with Homebrew (do not use sudo with brew)
sudo dnf install nodejs # Fedora
sudo yum install nodejs # RHEL/CentOS
```
#### Example: Getting All the NodeJS-based Language Servers
A number of language servers are built on the
[reference implementation](https://github.com/microsoft/vscode-languageserver-node),
powered by NodeJS. The most reliable place to install these is in a
`node_modules` in the directory where you launch `jupyter lab`.
For example, to install all the servers which are tested as part of
`jupyterlab-lsp`:
```bash
jlpm add --dev \
bash-language-server \
vscode-css-languageserver-bin \
dockerfile-language-server-nodejs \
vscode-html-languageserver-bin \
typescript-language-server \
vscode-json-languageserver-bin \
yaml-language-server
```
This will create (or add to):
- `package.json` (check this in!)
- `yarn.lock` (check this in!)
- `node_modules/` (add to your VCS ignore file)
If you wish to install these someplace else, you may need to specify where you
install them with [extra_node_roots](./Configuring.html#extra_node_roots).
### Other scientific languages
These servers have mostly been tested with the file editor.
```
# Scientific-language servers to showcase (currently just texlab).
sci_langs = ["texlab"]
lang_server_table(
    {key: spec for key, spec in mgr.all_language_servers.items() if key in sci_langs}
)
```
#### Example: Getting a $\LaTeX$ stack
```bash
conda install -y conda-forge tectonic texlab chktex
```
This will install:
- `tectonic`, a cross-platform $\LaTeX$ processing tool
- note, it will download a large number of packages when first executed
- `texlab`, a Language Server for `.tex` files that supports completion and
reference navigation
- `chktex`, a `.tex` style checker
### Troubleshooting
```
troubleshooting_data = {
key: spec
for key, spec in mgr.all_language_servers.items()
if (
"troubleshoot" in spec
# ignore trivial Node.js advice if only this is present
and spec["troubleshoot"] != "Node.js is required to install this server."
# ignore trivial shell advice if only this is present
and spec["troubleshoot"] != f"{spec['argv'][0]} not found."
)
}
IPython.display.HTML(
Template(
"""
{% for key, spec in specs.items() %}
<h4>{{ key }}</h4>
<p style="white-space: pre-wrap">{{ spec.troubleshoot }}</p>
{% endfor %}
"""
).render(specs=troubleshooting_data)
)
```
| github_jupyter |
# High-level Keras (CNTK) Example
```
# Parameters
EPOCHS = 10
N_CLASSES=10
BATCHSIZE = 64
LR = 0.01
MOMENTUM = 0.9
GPU = True  # NOTE(review): defined but not referenced in this chunk
LOGGER_URL='msdlvm.southcentralus.cloudapp.azure.com'
# NOTE(review): 'USRENAME' is a typo for 'USERNAME', but the name is used
# consistently throughout this notebook, so it is left unchanged here.
LOGGER_USRENAME='admin'
LOGGER_PASSWORD='password'
LOGGER_DB='gpudata'
LOGGER_SERIES='gpu'

import os
import sys
import numpy as np
# Select the CNTK backend *before* keras is imported.
os.environ['KERAS_BACKEND'] = "cntk"
import keras as K
import cntk
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from os import path
from utils import cifar_for_library, yield_mb, create_logger, Timer
from gpumon.influxdb import log_context
from influxdb import InfluxDBClient

# GPU-metrics logger writing to InfluxDB; node/task/job IDs come from
# Azure Batch environment variables when running in the cloud.
client = InfluxDBClient(LOGGER_URL, 8086, LOGGER_USRENAME, LOGGER_PASSWORD, LOGGER_DB)
node_id = os.getenv('AZ_BATCH_NODE_ID', default='node')
task_id = os.getenv('AZ_BATCH_TASK_ID', default='keras_cntk')
job_id = os.getenv('AZ_BATCH_JOB_ID', default='keras_cntk')
logger = create_logger(client, node_id=node_id, task_id=task_id, job_id=job_id)

# channels_first is faster
K.backend.set_image_data_format('channels_first')
print("OS: ", sys.platform)
print("Python: ", sys.version)
print("Keras: ", K.__version__)
print("Numpy: ", np.__version__)
print("CNTK: ", cntk.__version__)
print(K.backend.backend())
# Check that channels_first is selected (otherwise slow)
print(K.backend.image_data_format())
data_path = path.join(os.getenv('AZ_BATCHAI_INPUT_DATASET'), 'cifar-10-batches-py')
def create_symbol():
    """Build the CIFAR-10 CNN: two conv blocks followed by a dense head.

    Input is channels-first (3, 32, 32); output is an N_CLASSES softmax.
    """
    layers = [
        Conv2D(50, kernel_size=(3, 3), padding='same', activation='relu', input_shape=(3, 32, 32)),
        Conv2D(50, kernel_size=(3, 3), padding='same', activation='relu'),
        MaxPooling2D(pool_size=(2, 2), strides=(2, 2)),
        Dropout(0.25),
        Conv2D(100, kernel_size=(3, 3), padding='same', activation='relu'),
        Conv2D(100, kernel_size=(3, 3), padding='same', activation='relu'),
        MaxPooling2D(pool_size=(2, 2), strides=(2, 2)),
        Dropout(0.25),
        Flatten(),
        Dense(512, activation='relu'),
        Dropout(0.5),
        Dense(N_CLASSES, activation='softmax'),
    ]
    # Sequential accepts the full layer list directly — equivalent to
    # repeated model.add(...) calls.
    return Sequential(layers)
def init_model(m):
    """Compile model `m` in place (SGD + momentum, cross-entropy) and return it."""
    m.compile(
        loss = "categorical_crossentropy",
        optimizer = K.optimizers.SGD(LR, MOMENTUM),
        metrics = ['accuracy'])
    return m
%%time
# Data into format for library
x_train, x_test, y_train, y_test = cifar_for_library(data_path, channel_first=True, one_hot=True)
print(x_train.shape, x_test.shape, y_train.shape, y_test.shape)
print(x_train.dtype, x_test.dtype, y_train.dtype, y_test.dtype)
%%time
# Load symbol
sym = create_symbol()
%%time
# Initialise model
model = init_model(sym)
model.summary()

# Train while streaming GPU metrics to InfluxDB via log_context.
with Timer() as t:
    with log_context(LOGGER_URL, LOGGER_USRENAME, LOGGER_PASSWORD, LOGGER_DB, LOGGER_SERIES,
                     node_id=node_id, task_id=task_id, job_id=job_id):
        # Train model
        model.fit(x_train,
                  y_train,
                  batch_size=BATCHSIZE,
                  epochs=EPOCHS,
                  verbose=1)
print('Training took %.03f sec.' % t.interval)
logger('training duration', value=t.interval)
%%time
# Evaluate: argmax over the class scores, then plain accuracy.
y_guess = model.predict(x_test, batch_size=BATCHSIZE)
y_guess = np.argmax(y_guess, axis=-1)
y_truth = np.argmax(y_test, axis=-1)
acc=sum(y_guess == y_truth)/len(y_guess)
print("Accuracy: ", acc)
logger('accuracy', value=acc)
| github_jupyter |
# Poem Generator
In this notebook we build upon the web scraping to create an RNN that can generate Arabic poems in the style of Nizar Qabbani.
```
import glob
import re
import numpy as np
from collections import Counter
import torch
from torch.utils.data import TensorDataset, DataLoader
import torch.nn as nn
import torch.nn.functional as F
import helper
# get list of text files in data
poem_txt_list = glob.glob('data/*.txt')

# Concatenate every scraped poem into one training corpus. Encoding is
# pinned to UTF-8 so the Arabic text survives platform-default codecs
# (e.g. cp1252 on Windows would otherwise raise UnicodeDecodeError).
with open('raw_corpus.txt', 'w', encoding='utf-8') as outfile:
    for fname in poem_txt_list:
        with open(fname, encoding='utf-8') as infile:
            outfile.write(infile.read())

data_dir = 'raw_corpus.txt'
text = helper.load_data(data_dir)
view_line_range = (0, 10)

# Quick corpus statistics: vocabulary size, line count, words per line,
# and a sample of the first few lines.
print('Dataset Stats')
print('Roughly the number of unique words: {}'.format(len({word: None for word in text.split()})))
lines = text.split('\n')
print('Number of lines: {}'.format(len(lines)))
word_count_line = [len(line.split()) for line in lines]
print('Average number of words in each line: {}'.format(np.average(word_count_line)))
print()
print('The lines {} to {}:'.format(*view_line_range))
print('\n'.join(text.split('\n')[view_line_range[0]:view_line_range[1]]))
```
---
## Implement Pre-processing Functions
The first thing to do to any dataset is pre-processing. We implement the following pre-processing functions below:
- Lookup Table
- Tokenize Punctuation
### Lookup Table
To create a word embedding, we first need to transform the words to ids. In this function, we create two dictionaries:
- Dictionary to go from the words to an id, we'll call `vocab_to_int`
- Dictionary to go from the id to word, we'll call `int_to_vocab`
We return these dictionaries in the following **tuple** `(vocab_to_int, int_to_vocab)`
```
def create_lookup_tables(text):
    """
    Create lookup tables for vocabulary
    :param text: The text of tv scripts split into words
    :return: A tuple of dicts (vocab_to_int, int_to_vocab)
    """
    # most_common() yields words ordered most- to least-frequent, ties in
    # first-seen order — ids 0, 1, 2, ... follow that ranking.
    counts = Counter(text)
    int_to_vocab = dict(enumerate(word for word, _ in counts.most_common()))
    vocab_to_int = {word: idx for idx, word in int_to_vocab.items()}
    return (vocab_to_int, int_to_vocab)
```
### Tokenize Punctuation
We'll be splitting the script into a word array using spaces as delimiters. However, punctuations like periods and exclamation marks can create multiple ids for the same word. For example, "bye" and "bye!" would generate two different word ids.
We implement the function `token_lookup` to return a dict that will be used to tokenize symbols like "!" into "||Exclamation_Mark||". We create a dictionary for the following symbols where the symbol is the key and value is the token:
- Period ( **.** )
- Comma ( **,** )
- Quotation Mark ( **"** )
- Semicolon ( **;** )
- Exclamation mark ( **!** )
- Question mark ( **?** )
- Left Parentheses ( **(** )
- Right Parentheses ( **)** )
- Dash ( **-** )
- Return ( **\n** )
This dictionary will be used to tokenize the symbols and add the delimiter (space) around it. This separates each symbols as its own word, making it easier for the neural network to predict the next word. Ensure we don't use a value that could be confused as a word; for example, instead of using the value "dash", try something like "||dash||".
**Note that we might need to copy some punctuation directly from the text — for example the question mark, which has a flipped orientation in Arabic script.**
```
def token_lookup():
    """
    Generate a dict to turn punctuation into a token.
    :return: Tokenized dictionary where the key is the punctuation and the value is the token
    """
    # Symbols and tokens kept as parallel lists; note the Arabic question
    # mark key intentionally carries a trailing space, as copied from the
    # corpus.
    symbols = ['.', '*', '\r', '…', '!', '؟ ', '(', ')', '–', '\n']
    tokens = ['||PERIOD||', '||COMMA||', '||RECUR||', '||DOTDOTDOT||',
              '||EXCLAMATIONMARK||', '||QUESTIONMARK||', '||LEFTPARANTH||',
              '||RIGHTPARANTH||', '||DASH||', '||RETURN||']
    return dict(zip(symbols, tokens))
```
## Pre-process all the data and save it
Running the code cell below will pre-process all the data and save it to file. Please check out at the code for `preprocess_and_save_data` in the `helpers.py` file to see what it's doing in detail.
```
# Pre-process the corpus once and reload the cached token ids and lookup
# dicts for the rest of the notebook.
helper.preprocess_and_save_data(data_dir, token_lookup, create_lookup_tables)
int_text, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
```
## Build the Neural Network
In this section, we build the components necessary to build an RNN by implementing the RNN Module and forward and backpropagation functions.
### Check Access to GPU
```
# Check for a GPU
# train_on_gpu is read as a module-level global by RNN.init_hidden and
# forward_back_prop below.
train_on_gpu = torch.cuda.is_available()
if not train_on_gpu:
    print('No GPU found. Please use a GPU to train your neural network.')
```
## Input
Let's start with the preprocessed input data. We'll use [TensorDataset](http://pytorch.org/docs/master/data.html#torch.utils.data.TensorDataset) to provide a known format to our dataset; in combination with [DataLoader](http://pytorch.org/docs/master/data.html#torch.utils.data.DataLoader), it will handle batching, shuffling, and other dataset iteration functions.
We can create data with TensorDataset by passing in feature and target tensors. Then create a DataLoader as usual.
```
data = TensorDataset(feature_tensors, target_tensors)
data_loader = torch.utils.data.DataLoader(data,
batch_size=batch_size)
```
### Batching
We implement the `batch_data` function to batch `words` data into chunks of size `batch_size` using the `TensorDataset` and `DataLoader` classes.
>We can batch words using the DataLoader, but it will be up to us to create `feature_tensors` and `target_tensors` of the correct size and content for a given `sequence_length`.
For example, say we have these as input:
```
words = [1, 2, 3, 4, 5, 6, 7]
sequence_length = 4
```
Our first `feature_tensor` should contain the values:
```
[1, 2, 3, 4]
```
And the corresponding `target_tensor` should just be the next "word"/tokenized word value:
```
5
```
This should continue with the second `feature_tensor`, `target_tensor` being:
```
[2, 3, 4, 5] # features
6 # target
```
```
def batch_data(words, sequence_length, batch_size):
    """
    Batch the neural network data using DataLoader
    :param words: The word ids of the TV scripts
    :param sequence_length: The sequence length of each batch
    :param batch_size: The size of each batch; the number of sequences in a batch
    :return: DataLoader with batched data
    """
    # Keep only whole batches' worth of words.
    usable = (len(words) // batch_size) * batch_size
    words = words[:usable]

    # Sliding window: each feature is `sequence_length` consecutive ids,
    # and its target is the id immediately following the window.
    n_samples = len(words) - sequence_length
    features = [words[start:start + sequence_length] for start in range(n_samples)]
    targets = [words[start + sequence_length] for start in range(n_samples)]

    dataset = TensorDataset(torch.from_numpy(np.asarray(features)),
                            torch.from_numpy(np.asarray(targets)))
    return DataLoader(dataset, batch_size=batch_size)
```
### Test The Dataloader
We have to modify this code to test a batching function, but it should look fairly similar.
Below, we're generating some test text data and defining a dataloader using the function you defined, above. Then, we are getting some sample batch of inputs `sample_x` and targets `sample_y` from our dataloader.
Our code should return something like the following (likely in a different order, if we shuffled your data):
```
torch.Size([10, 5])
tensor([[ 28, 29, 30, 31, 32],
[ 21, 22, 23, 24, 25],
[ 17, 18, 19, 20, 21],
[ 34, 35, 36, 37, 38],
[ 11, 12, 13, 14, 15],
[ 23, 24, 25, 26, 27],
[ 6, 7, 8, 9, 10],
[ 38, 39, 40, 41, 42],
[ 25, 26, 27, 28, 29],
[ 7, 8, 9, 10, 11]])
torch.Size([10])
tensor([ 33, 26, 22, 39, 16, 28, 11, 43, 30, 12])
```
### Sizes
Our sample_x should be of size `(batch_size, sequence_length)` or (10, 5) in this case and sample_y should just have one dimension: batch_size (10).
### Values
We should also notice that the targets, sample_y, are the *next* value in the ordered test_text data. So, for an input sequence `[ 28, 29, 30, 31, 32]` that ends with the value `32`, the corresponding output should be `33`.
```
# test dataloader
test_text = range(50)
t_loader = batch_data(test_text, sequence_length=5, batch_size=10)

data_iter = iter(t_loader)
# Bug fix: Python 3 iterators are consumed with the built-in next();
# the Python-2-style .next() method raises AttributeError on modern
# DataLoader iterators.
sample_x, sample_y = next(data_iter)
print(sample_x.shape)
print(sample_x)
print()
print(sample_y.shape)
print(sample_y)
```
---
## Build the Neural Network
We implement an RNN using PyTorch's [Module class](http://pytorch.org/docs/master/nn.html#torch.nn.Module). We may choose to use a GRU or an LSTM. To complete the RNN, we have to implement the following functions for the class:
- `__init__` - The initialize function.
- `init_hidden` - The initialization function for an LSTM/GRU hidden state
- `forward` - Forward propagation function.
The initialize function should create the layers of the neural network and save them to the class. The forward propagation function will use these layers to run forward propagation and generate an output and a hidden state.
**The output of this model should be the *last* batch of word scores** after a complete sequence has been processed. That is, for each input sequence of words, we only want to output the word scores for a single, most likely, next word.
### Notes
1. Ensure to stack the outputs of the lstm to pass to our fully-connected layer, we can do this with `lstm_output = lstm_output.contiguous().view(-1, self.hidden_dim)`
2. We can get the last batch of word scores by shaping the output of the final, fully-connected layer like so:
```
# reshape into (batch_size, seq_length, output_size)
output = output.view(batch_size, -1, self.output_size)
# get last batch
out = output[:, -1]
```
```
class RNN(nn.Module):
    """Word-level LSTM language model: embedding -> LSTM -> linear head."""

    def __init__(self, vocab_size, output_size, embedding_dim, hidden_dim, n_layers, dropout=0.5, lr=0.001):
        """
        Initialize the PyTorch RNN Module
        :param vocab_size: The number of input dimensions of the neural network (the size of the vocabulary)
        :param output_size: The number of output dimensions of the neural network
        :param embedding_dim: The size of embeddings, should you choose to use them
        :param hidden_dim: The size of the hidden layer outputs
        :param n_layers: Number of stacked LSTM layers
        :param dropout: dropout to add in between LSTM/GRU layers
        :param lr: NOTE(review): accepted but never used inside the module;
                   the learning rate is set on the optimizer instead
        """
        super(RNN, self).__init__()
        #Implement function

        #set class variables
        self.n_layers = n_layers
        self.hidden_dim = hidden_dim
        self.output_size = output_size

        # embedding layer: maps word ids to dense vectors
        self.embedding = nn.Embedding(num_embeddings = vocab_size, embedding_dim = embedding_dim)
        # define lstm (batch_first=True: input/output are (batch, seq, feature))
        self.lstm = nn.LSTM(input_size = embedding_dim,
                            hidden_size = hidden_dim,
                            num_layers = n_layers,
                            bias = True,
                            batch_first = True,
                            dropout = dropout)
        #define fc layer projecting hidden state to vocabulary scores
        self.fc = nn.Linear(hidden_dim, output_size)

    def forward(self, nn_input, hidden):
        """
        Forward propagation of the neural network
        :param nn_input: The input to the neural network
        :param hidden: The hidden state
        :return: Two Tensors, the output of the neural network and the latest hidden state
        """
        #get the batch size
        batch_size = nn_input.size(0)
        #get embedding
        embed = self.embedding(nn_input)
        #get lstm output
        out, hidden = self.lstm(embed, hidden)
        #stack the outputs of the lstm so the linear layer sees (batch*seq, hidden_dim)
        out = out.contiguous().view(-1, self.hidden_dim)
        out = self.fc(out)
        # reshape into (batch_size, seq_length, output_size)
        out = out.view(batch_size, -1, self.output_size)
        # get last batch: keep only the scores for the final time step
        out = out[:, -1]
        # return one batch of output word scores and the hidden state
        return out, hidden

    def init_hidden(self, batch_size):
        '''
        Initialize the hidden state of an LSTM/GRU
        :param batch_size: The batch_size of the hidden state
        :return: hidden state of dims (n_layers, batch_size, hidden_dim)
        '''
        # Implement function
        # initialize hidden state with zero weights, and move to GPU if available
        # NOTE(review): relies on the notebook-level global `train_on_gpu`
        # defined earlier in this notebook.
        weight = next(self.parameters()).data
        if (train_on_gpu):
            hidden = (weight.new(self.n_layers, batch_size, self.hidden_dim).zero_().cuda(),
                      weight.new(self.n_layers, batch_size, self.hidden_dim).zero_().cuda())
        else:
            hidden = (weight.new(self.n_layers, batch_size, self.hidden_dim).zero_(),
                      weight.new(self.n_layers, batch_size, self.hidden_dim).zero_())
        return hidden
```
### Define forward and backpropagation
Use the RNN class we implemented to apply forward and back propagation. This function will be called, iteratively, in the training loop as follows:
```
loss = forward_back_prop(decoder, decoder_optimizer, criterion, inp, target)
```
And it should return the average loss over a batch and the hidden state returned by a call to `RNN(inp, hidden)`. Recall that we can get this loss by computing it, as usual, and calling `loss.item()`.
**If a GPU is available, move your data to that GPU device, here.**
```
def forward_back_prop(rnn, optimizer, criterion, inp, target, hidden, clip=5):
    """
    Forward and backward propagation on the neural network
    :param rnn: The PyTorch Module that holds the neural network
    :param optimizer: The PyTorch optimizer for the neural network
    :param criterion: The PyTorch loss function
    :param inp: A batch of input to the neural network
    :param target: The target output for the batch of input
    :param hidden: The hidden state carried over from the previous batch
    :param clip: Max norm of the gradients
    :return: The loss and the latest hidden state Tensor
    """
    # move data to GPU, if available
    # NOTE(review): reads the notebook-level global `train_on_gpu`;
    # rnn.cuda() is re-invoked on every call (harmless but redundant).
    if train_on_gpu:
        rnn.cuda()

    # perform backpropagation and optimization
    # Creating new variables for the hidden state, otherwise
    # we'd backprop through the entire training history
    h = tuple([each.data for each in hidden])

    # zero accumulated gradients
    rnn.zero_grad()

    # get the output from the model
    if train_on_gpu:
        inp = inp.cuda()
        target = target.cuda()
    output, h = rnn(inp, h)

    # calculate the loss and perform backprop
    loss = criterion(output, target)
    loss.backward()
    # `clip_grad_norm` helps prevent the exploding gradient problem in RNNs / LSTMs.
    nn.utils.clip_grad_norm_(rnn.parameters(), clip)
    optimizer.step()

    # return the loss over a batch and the hidden state produced by our model
    return loss.item(), h
```
## Neural Network Training
With the structure of the network complete and data ready to be fed in the neural network, it's time to train it.
### Train Loop
The training loop is implemented in the `train_decoder` function. This function will train the network over all the batches for the number of epochs given. The model progress will be shown every number of batches. This number is set with the `show_every_n_batches` parameter. We set this parameter along with other parameters in the next section.
```
def train_rnn(rnn, batch_size, optimizer, criterion, n_epochs, show_every_n_batches=100):
    """
    Train the RNN over the global `train_loader` for a number of epochs.
    :param rnn: The PyTorch Module to train (modified in place)
    :param batch_size: Number of sequences per batch
    :param optimizer: The PyTorch optimizer
    :param criterion: The PyTorch loss function
    :param n_epochs: Number of full passes over the training data
    :param show_every_n_batches: Print the running mean loss every this many batches
    :return: The trained rnn
    """
    batch_losses = []
    rnn.train()
    # number of completely full batches; constant, so compute it once
    # instead of on every iteration of the inner loop as before
    n_batches = len(train_loader.dataset) // batch_size
    print("Training for %d epoch(s)..." % n_epochs)
    for epoch_i in range(1, n_epochs + 1):
        # initialize hidden state at the start of each epoch
        hidden = rnn.init_hidden(batch_size)
        for batch_i, (inputs, labels) in enumerate(train_loader, 1):
            # make sure you iterate over completely full batches, only
            if batch_i > n_batches:
                break
            # forward, back prop
            loss, hidden = forward_back_prop(rnn, optimizer, criterion, inputs, labels, hidden)
            # record loss
            batch_losses.append(loss)
            # printing loss stats
            if batch_i % show_every_n_batches == 0:
                print('Epoch: {:>4}/{:<4} Loss: {}\n'.format(
                    epoch_i, n_epochs, np.average(batch_losses)))
                batch_losses = []
    # returns a trained rnn
    return rnn
```
### Hyperparameters
We set and train the neural network with the following parameters:
- Set `sequence_length` to the length of a sequence.
- Set `batch_size` to the batch size.
- Set `num_epochs` to the number of epochs to train for.
- Set `learning_rate` to the learning rate for an Adam optimizer.
- Set `vocab_size` to the number of unique tokens in our vocabulary.
- Set `output_size` to the desired size of the output.
- Set `embedding_dim` to the embedding dimension; smaller than the vocab_size.
- Set `hidden_dim` to the hidden dimension of our RNN.
- Set `n_layers` to the number of layers/cells in our RNN.
- Set `show_every_n_batches` to the number of batches at which the neural network should print progress.
If the network isn't getting the desired results, tweak these parameters and/or the layers in the `RNN` class.
```
# --- Data parameters ---
# Sequence length: number of words the RNN sees before predicting the next word
sequence_length = 10
# Number of sequences per training batch
batch_size = 128
# data loader - do not change
train_loader = batch_data(int_text, sequence_length, batch_size)
# --- Training parameters ---
# Number of full passes over the training data
num_epochs = 15
# Learning rate for the Adam optimizer
learning_rate = 0.001
# --- Model parameters ---
# Vocab size: number of unique tokens (input dimension of the embedding)
vocab_size = len(vocab_to_int)
# Output size: one score per vocabulary word
output_size = vocab_size
# Embedding dimension (kept well below vocab_size)
embedding_dim = 250
# Hidden dimension of the RNN
hidden_dim = 512
# Number of stacked RNN layers
n_layers = 2
# Show stats for every n number of batches
show_every_n_batches = 500
```
### Train
In the next cell, we train the neural network on the pre-processed data. If we have a hard time getting a good loss, we may consider changing the hyperparameters. In general, we may get better results with larger hidden and n_layer dimensions, but larger models take a longer time to train.
> **Note: aim for a loss less than 3.5.**
We can also experiment with different sequence lengths, which determine the size of the long range dependencies that a model can learn.
```
# create model and move to gpu if available
rnn = RNN(vocab_size, output_size, embedding_dim, hidden_dim, n_layers, dropout=0.5)
if train_on_gpu:
    rnn.cuda()
# defining loss and optimization functions for training
optimizer = torch.optim.Adam(rnn.parameters(), lr=learning_rate)
criterion = nn.CrossEntropyLoss()
# training the model over all batches for num_epochs epochs
trained_rnn = train_rnn(rnn, batch_size, optimizer, criterion, num_epochs, show_every_n_batches)
# saving the trained model so training can be resumed / reused later
helper.save_model('./save/trained_rnn', trained_rnn)
print('Model Trained and Saved')
```
---
# Checkpoint
After running the above training cell, our model will be saved by name, `trained_rnn`, and if we save our notebook progress, **we can pause here and come back to this code at another time**. We can resume our progress by running the next cell, which will load in our word:id dictionaries _and_ load in our saved model by name.
```
# Reload the preprocessed vocab mappings and the saved model so work can resume here.
_, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
trained_rnn = helper.load_model('trained_rnn')
```
## Generate Poems
With the network trained and saved, we can use it to generate a new Kabbani poem in this section.
### Generate Text
To generate the text, the network needs to start with a single word and repeat its predictions until it reaches a set length. We use the `generate` function to do this. It takes a word id to start with, `prime_id`, and generates a set length of text, `predict_len`. Also note that it uses topk sampling to introduce some randomness in choosing the most likely next word, given an output set of word scores!
### How to Write Arabic Characters
Some users might have a built in arabic keyboard on their computers. However that might not be the case, so feel free to use smart keyboards such as [yamli.com](https://www.yamli.com/).
```
def generate(rnn, prime_id, int_to_vocab, token_dict, pad_value, predict_len=100):
    """
    Generate text using the trained neural network.
    :param rnn: The PyTorch Module that holds the trained neural network
    :param prime_id: The word id to start the first prediction
    :param int_to_vocab: Dict of word id keys to word values
    :param token_dict: Dict of punctuation token keys to punctuation values
    :param pad_value: The value used to pad a sequence
    :param predict_len: The length of text to generate
    :return: The generated text
    """
    rnn.eval()
    # create a sequence (batch_size=1) with the prime_id
    current_seq = np.full((1, sequence_length), pad_value)
    current_seq[-1][-1] = prime_id
    predicted = [int_to_vocab[prime_id]]
    for _ in range(predict_len):
        # BUG FIX: keep `current_seq` as a NumPy array throughout. The original
        # reassigned `current_seq` to a (possibly CUDA) tensor, so on the next
        # iteration np.roll received a CUDA tensor and raised.
        seq_tensor = torch.LongTensor(current_seq)
        if train_on_gpu:
            seq_tensor = seq_tensor.cuda()
        # initialize the hidden state for this one-batch forward pass
        hidden = rnn.init_hidden(seq_tensor.size(0))
        # get the output of the rnn
        output, _ = rnn(seq_tensor, hidden)
        # get the next word probabilities
        p = F.softmax(output, dim=1).data
        if train_on_gpu:
            p = p.cpu()  # move to cpu
        # use top_k sampling to get the index of the next word
        top_k = 5
        p, top_i = p.topk(top_k)
        top_i = top_i.numpy().squeeze()
        # select the likely next word index with some element of randomness
        p = p.numpy().squeeze()
        word_i = np.random.choice(top_i, p=p/p.sum())
        # retrieve that word from the dictionary
        word = int_to_vocab[word_i]
        predicted.append(word)
        # shift the window left and append the generated word id
        current_seq = np.roll(current_seq, -1, 1)
        current_seq[-1][-1] = word_i
    gen_sentences = ' '.join(predicted)
    # Replace punctuation tokens with their original characters
    # (the unused `ending` local from the original was removed)
    for key, token in token_dict.items():
        gen_sentences = gen_sentences.replace(' ' + token.lower(), key)
    gen_sentences = gen_sentences.replace('\n ', '\n')
    gen_sentences = gen_sentences.replace('( ', '(')
    # return all the sentences
    return gen_sentences
```
### Generate a Poem
It's time to generate the text. We set `gen_length` to the length of poem we want to generate and set `prime_word` to one of the following to start the prediction:
- "أنا" (I/me)
- "يا" (O)
- "نحن" (We)
- "امرأة" (Woman)
We can set the prime word to _any word_ in our dictionary, but it's best to start with a word that typically begins a sentence in Arabic, or even better: <br>
**Read the poems and try to begin like the original author**
```
# run the cell multiple times to get different results!
gen_length = 50  # modify the length to your preference
# prime word used to seed the generation
prime_word = 'أنا'
pad_word = helper.SPECIAL_WORDS['PADDING']
generated_script = generate(trained_rnn,
                            vocab_to_int[prime_word],
                            int_to_vocab,
                            token_dict,
                            vocab_to_int[pad_word],
                            gen_length)
print(generated_script)
# generate one poem for each candidate starting word
prime_list = ["أنا", "يا", "نحن", "امرأة"]
for word in prime_list:
    print("Generating poem for {}".format(word))
    generated_script = generate(trained_rnn,
                                vocab_to_int[word],
                                int_to_vocab,
                                token_dict,
                                vocab_to_int[pad_word],
                                gen_length)
    print(generated_script)
    print(20*'-')
```
| github_jupyter |
# Multiclass classification with Amazon SageMaker XGBoost algorithm
_**Single machine and distributed training for multiclass classification with Amazon SageMaker XGBoost algorithm**_
---
---
## Contents
1. [Introduction](#Introduction)
2. [Prerequisites and Preprocessing](#Prequisites-and-Preprocessing)
1. [Permissions and environment variables](#Permissions-and-environment-variables)
2. [Data ingestion](#Data-ingestion)
3. [Data conversion](#Data-conversion)
3. [Training the XGBoost model](#Training-the-XGBoost-model)
1. [Training on a single instance](#Training-on-a-single-instance)
2. [Training on multiple instances](#Training-on-multiple-instances)
4. [Set up hosting for the model](#Set-up-hosting-for-the-model)
1. [Import model into hosting](#Import-model-into-hosting)
2. [Create endpoint configuration](#Create-endpoint-configuration)
3. [Create endpoint](#Create-endpoint)
5. [Validate the model for use](#Validate-the-model-for-use)
---
## Introduction
This notebook demonstrates the use of Amazon SageMaker’s implementation of the XGBoost algorithm to train and host a multiclass classification model. The MNIST dataset is used for training. It has a training set of 60,000 examples and a test set of 10,000 examples. To illustrate the use of libsvm training data format, we download the dataset and convert it to the libsvm format before training.
To get started, we need to set up the environment with a few prerequisites for permissions and configurations.
---
## Prequisites and Preprocessing
### Permissions and environment variables
Here we set up the linkage and authentication to AWS services.
1. The roles used to give learning and hosting access to your data. See the documentation for how to specify these.
2. The S3 bucket that you want to use for training and model data.
```
%%time
import os
import boto3
import re
import copy
import time
from time import gmtime, strftime
from sagemaker import get_execution_role
# IAM role granting SageMaker access to training data and model artifacts
role = get_execution_role()
region = boto3.Session().region_name
bucket='<bucket-name>' # put your s3 bucket name here, and create s3 bucket
prefix = 'sagemaker/xgboost-multiclass-classification'
# customize to your bucket where you have stored the data
bucket_path = 'https://s3-{}.amazonaws.com/{}'.format(region,bucket)
```
### Data ingestion
Next, we read the dataset from the existing repository into memory, for preprocessing prior to training. This processing could be done *in situ* by Amazon Athena, Apache Spark in Amazon EMR, Amazon Redshift, etc., assuming the dataset is present in the appropriate location. Then, the next step would be to transfer the data to S3 for use in training. For small datasets, such as this one, reading into memory isn't onerous, though it would be for larger datasets.
```
%%time
import pickle, gzip, numpy, urllib.request, json
# Load the dataset
urllib.request.urlretrieve("http://deeplearning.net/data/mnist/mnist.pkl.gz", "mnist.pkl.gz")
f = gzip.open('mnist.pkl.gz', 'rb')
train_set, valid_set, test_set = pickle.load(f, encoding='latin1')
f.close()
```
### Data conversion
Since algorithms have particular input and output requirements, converting the dataset is also part of the process that a data scientist goes through prior to initiating training. In this particular case, the data is converted from pickle-ized numpy array to the libsvm format before being uploaded to S3. The hosted implementation of xgboost consumes the libsvm converted data from S3 for training. The following provides functions for data conversions and file upload to S3 and download from S3.
```
%%time
import struct
import io
import boto3
def to_libsvm(f, labels, values):
    """Write (label, feature-vector) pairs to *f* in libsvm text format and return *f*.

    Feature indices are 1-based, as libsvm requires.
    """
    records = []
    for label, vec in zip(labels, values):
        features = ' '.join('{}:{}'.format(idx + 1, el) for idx, el in enumerate(vec))
        records.append('{} {}'.format(label, features))
    f.write(bytes('\n'.join(records), 'utf-8'))
    return f
def write_to_s3(fobj, bucket, key):
    """Upload the file-like object *fobj* to s3://<bucket>/<key>."""
    destination = boto3.Session().resource('s3').Bucket(bucket).Object(key)
    return destination.upload_fileobj(fobj)
def get_dataset():
    """Load the (train, valid, test) tuple from the local mnist.pkl.gz archive."""
    import gzip
    import pickle
    with gzip.open('mnist.pkl.gz', 'rb') as archive:
        unpickler = pickle._Unpickler(archive)
        # the archive was pickled under Python 2; latin1 decodes its byte strings
        unpickler.encoding = 'latin1'
        return unpickler.load()
def upload_to_s3(partition_name, partition):
    """Split a (features, labels) partition into 5 libsvm part files and upload each to S3."""
    labels = [t.tolist() for t in partition[1]]
    vectors = [t.tolist() for t in partition[0]]
    num_partition = 5 # partition file into 5 parts
    chunk = int(len(labels) / num_partition)
    for part in range(num_partition):
        start, stop = part * chunk, (part + 1) * chunk
        buf = io.BytesIO()
        to_libsvm(buf, labels[start:stop], vectors[start:stop])
        buf.seek(0)
        key = "{}/{}/examples{}".format(prefix, partition_name, str(part))
        url = 's3n://{}/{}'.format(bucket, key)
        print('Writing to {}'.format(url))
        write_to_s3(buf, bucket, key)
        print('Done writing to {}'.format(url))
def download_from_s3(partition_name, number, filename):
    """Download part file *number* of *partition_name* from S3 into *filename*.

    Fixes two defects in the original: (1) it downloaded the object twice,
    the second time into a hard-coded 'mnist.local.test' path that ignored
    the *filename* argument; (2) its except clause referenced the never-
    imported `botocore` module, which would have raised NameError. We catch
    ClientError through the client's own exceptions attribute instead.
    """
    key = "{}/{}/examples{}".format(prefix, partition_name, number)
    url = 's3n://{}/{}'.format(bucket, key)
    print('Reading from {}'.format(url))
    s3 = boto3.resource('s3')
    try:
        s3.Bucket(bucket).download_file(key, filename)
    except s3.meta.client.exceptions.ClientError as e:
        if e.response['Error']['Code'] == "404":
            print('The object does not exist at {}.'.format(url))
        else:
            raise
def convert_data():
    """Convert each MNIST split to libsvm part files and push them to S3."""
    train_set, valid_set, test_set = get_dataset()
    for split_name, split in [('train', train_set), ('validation', valid_set), ('test', test_set)]:
        print('{}: {} {}'.format(split_name, split[0].shape, split[1].shape))
        upload_to_s3(split_name, split)
%%time
convert_data()
```
## Training the XGBoost model
Now that we have our data in S3, we can begin training. We'll use Amazon SageMaker XGboost algorithm, and will actually fit two models in order to demonstrate the single machine and distributed training on SageMaker. In the first job, we'll use a single machine to train. In the second job, we'll use two machines and use the ShardedByS3Key mode for the train channel. Since we have 5 part file, one machine will train on three and the other on two part files. Note that the number of instances should not exceed the number of part files.
First let's setup a list of training parameters which are common across the two jobs.
```
# Region-specific ECR images for the built-in SageMaker XGBoost algorithm
containers = {'us-west-2': '433757028032.dkr.ecr.us-west-2.amazonaws.com/xgboost:latest',
              'us-east-1': '811284229777.dkr.ecr.us-east-1.amazonaws.com/xgboost:latest',
              'us-east-2': '825641698319.dkr.ecr.us-east-2.amazonaws.com/xgboost:latest',
              'eu-west-1': '685385470294.dkr.ecr.eu-west-1.amazonaws.com/xgboost:latest'}
container = containers[boto3.Session().region_name]
#Ensure that the train and validation data folders generated above are reflected in the "InputDataConfig" parameter below.
# Shared CreateTrainingJob request body; job-specific fields (name, output
# path, instance count, data distribution) are overridden per job below.
common_training_params = \
{
    "AlgorithmSpecification": {
        "TrainingImage": container,
        "TrainingInputMode": "File"
    },
    "RoleArn": role,
    "OutputDataConfig": {
        "S3OutputPath": bucket_path + "/"+ prefix + "/xgboost"
    },
    "ResourceConfig": {
        "InstanceCount": 1,
        "InstanceType": "ml.m4.10xlarge",
        "VolumeSizeInGB": 5
    },
    # multi:softmax with num_class=10 -> one digit class predicted per record
    "HyperParameters": {
        "max_depth":"5",
        "eta":"0.2",
        "gamma":"4",
        "min_child_weight":"6",
        "silent":"0",
        "objective": "multi:softmax",
        "num_class": "10",
        "num_round": "10"
    },
    "StoppingCondition": {
        "MaxRuntimeInSeconds": 86400
    },
    "InputDataConfig": [
        {
            "ChannelName": "train",
            "DataSource": {
                "S3DataSource": {
                    "S3DataType": "S3Prefix",
                    "S3Uri": bucket_path + "/"+ prefix+ '/train/',
                    "S3DataDistributionType": "FullyReplicated"
                }
            },
            "ContentType": "libsvm",
            "CompressionType": "None"
        },
        {
            "ChannelName": "validation",
            "DataSource": {
                "S3DataSource": {
                    "S3DataType": "S3Prefix",
                    "S3Uri": bucket_path + "/"+ prefix+ '/validation/',
                    "S3DataDistributionType": "FullyReplicated"
                }
            },
            "ContentType": "libsvm",
            "CompressionType": "None"
        }
    ]
}
```
Now we'll create two separate jobs, updating the parameters that are unique to each.
### Training on a single instance
```
#single machine job params
single_machine_job_name = 'xgboost-single-machine-classification' + strftime("%Y-%m-%d-%H-%M-%S", gmtime())
print("Job name is:", single_machine_job_name)
# start from the shared params and override the job-specific fields
single_machine_job_params = copy.deepcopy(common_training_params)
single_machine_job_params['TrainingJobName'] = single_machine_job_name
single_machine_job_params['OutputDataConfig']['S3OutputPath'] = bucket_path + "/"+ prefix + "/xgboost-single"
single_machine_job_params['ResourceConfig']['InstanceCount'] = 1
```
### Training on multiple instances
You can also run the training job distributed over multiple instances. For larger datasets with multiple partitions, this can significantly boost the training speed. Here we'll still use the small/toy MNIST dataset to demo this feature.
```
#distributed job params
distributed_job_name = 'xgboost-distributed-classification' + strftime("%Y-%m-%d-%H-%M-%S", gmtime())
print("Job name is:", distributed_job_name)
# start from the shared params and override the job-specific fields
distributed_job_params = copy.deepcopy(common_training_params)
distributed_job_params['TrainingJobName'] = distributed_job_name
distributed_job_params['OutputDataConfig']['S3OutputPath'] = bucket_path + "/"+ prefix + "/xgboost-distributed"
#number of instances used for training
distributed_job_params['ResourceConfig']['InstanceCount'] = 2 # no more than 5 if there are total 5 partition files generated above
# data distribution type for train channel: ShardedByS3Key gives each instance a disjoint subset of part files
distributed_job_params['InputDataConfig'][0]['DataSource']['S3DataSource']['S3DataDistributionType'] = 'ShardedByS3Key'
# data distribution type for validation channel
distributed_job_params['InputDataConfig'][1]['DataSource']['S3DataSource']['S3DataDistributionType'] = 'ShardedByS3Key'
```
Let's submit these jobs, taking note that the first will be submitted to run in the background so that we can immediately run the second in parallel.
```
%%time
region = boto3.Session().region_name
sm = boto3.Session().client('sagemaker')
# create_training_job returns immediately, so both jobs run in parallel
sm.create_training_job(**single_machine_job_params)
sm.create_training_job(**distributed_job_params)
status = sm.describe_training_job(TrainingJobName=distributed_job_name)['TrainingJobStatus']
print(status)
# block until the distributed job finishes (or is stopped)
sm.get_waiter('training_job_completed_or_stopped').wait(TrainingJobName=distributed_job_name)
status = sm.describe_training_job(TrainingJobName=distributed_job_name)['TrainingJobStatus']
print("Training job ended with status: " + status)
if status == 'Failed':
    message = sm.describe_training_job(TrainingJobName=distributed_job_name)['FailureReason']
    print('Training failed with the following error: {}'.format(message))
    raise Exception('Training job failed')
```
Let's confirm both jobs have finished.
```
# report the final status of both training jobs
print('Single Machine:', sm.describe_training_job(TrainingJobName=single_machine_job_name)['TrainingJobStatus'])
print('Distributed:', sm.describe_training_job(TrainingJobName=distributed_job_name)['TrainingJobStatus'])
```
# Set up hosting for the model
In order to set up hosting, we have to import the model from training to hosting. The step below demonstrates hosting the model generated from the distributed training job. The same steps can be followed to host the model obtained from the single machine job.
### Import model into hosting
Next, you register the model with hosting. This allows you the flexibility of importing models trained elsewhere.
```
%%time
import boto3
from time import gmtime, strftime
model_name=distributed_job_name + '-model'
print(model_name)
# locate the model artifacts produced by the distributed training job
info = sm.describe_training_job(TrainingJobName=distributed_job_name)
model_data = info['ModelArtifacts']['S3ModelArtifacts']
print(model_data)
primary_container = {
    'Image': container,
    'ModelDataUrl': model_data
}
# register the trained artifacts as a hosted model
create_model_response = sm.create_model(
    ModelName = model_name,
    ExecutionRoleArn = role,
    PrimaryContainer = primary_container)
print(create_model_response['ModelArn'])
```
### Create endpoint configuration
SageMaker supports configuring REST endpoints in hosting with multiple models, e.g. for A/B testing purposes. In order to support this, customers create an endpoint configuration, that describes the distribution of traffic across the models, whether split, shadowed, or sampled in some way. In addition, the endpoint configuration describes the instance type required for model deployment.
```
from time import gmtime, strftime
endpoint_config_name = 'XGBoostEndpointConfig-' + strftime("%Y-%m-%d-%H-%M-%S", gmtime())
print(endpoint_config_name)
# single-variant configuration: one ml.m4.xlarge instance taking all traffic
create_endpoint_config_response = sm.create_endpoint_config(
    EndpointConfigName = endpoint_config_name,
    ProductionVariants=[{
        'InstanceType':'ml.m4.xlarge',
        'InitialVariantWeight':1,
        'InitialInstanceCount':1,
        'ModelName':model_name,
        'VariantName':'AllTraffic'}])
print("Endpoint Config Arn: " + create_endpoint_config_response['EndpointConfigArn'])
```
### Create endpoint
Lastly, the customer creates the endpoint that serves up the model, through specifying the name and configuration defined above. The end result is an endpoint that can be validated and incorporated into production applications. This takes 9-11 minutes to complete.
```
%%time
import time
endpoint_name = 'XGBoostEndpoint-' + strftime("%Y-%m-%d-%H-%M-%S", gmtime())
print(endpoint_name)
create_endpoint_response = sm.create_endpoint(
    EndpointName=endpoint_name,
    EndpointConfigName=endpoint_config_name)
print(create_endpoint_response['EndpointArn'])
resp = sm.describe_endpoint(EndpointName=endpoint_name)
status = resp['EndpointStatus']
print("Status: " + status)
# poll once a minute until the endpoint leaves the 'Creating' state
while status=='Creating':
    time.sleep(60)
    resp = sm.describe_endpoint(EndpointName=endpoint_name)
    status = resp['EndpointStatus']
    print("Status: " + status)
print("Arn: " + resp['EndpointArn'])
print("Status: " + status)
```
## Validate the model for use
Finally, the customer can now validate the model for use. They can obtain the endpoint from the client library using the result from previous operations, and generate classifications from the trained model using that endpoint.
```
# client used to invoke the hosted endpoint at inference time
runtime_client = boto3.client('runtime.sagemaker')
```
In order to evaluate the model, we'll use the test dataset previously generated. Let us first download the data from S3 to the local host.
```
download_from_s3('test', 0, 'mnist.local.test') # reading the first part file within test
```
Start with a single prediction. Lets use the first record from the test file.
```
!head -1 mnist.local.test > mnist.single.test
%%time
import json
file_name = 'mnist.single.test' #customize to your test file 'mnist.single.test' if use the data above
with open(file_name, 'r') as f:
    payload = f.read()
# invoke the hosted model on a single libsvm-formatted record
response = runtime_client.invoke_endpoint(EndpointName=endpoint_name,
                                          ContentType='text/x-libsvm',
                                          Body=payload)
result = response['Body'].read().decode('ascii')
print('Predicted label is {}.'.format(result))
```
OK, a single prediction works.
Let's do a whole batch and see how good the prediction accuracy is.
```
import sys
def do_predict(data, endpoint_name, content_type):
    """Invoke the endpoint on a list of libsvm lines and return predictions as floats."""
    body = '\n'.join(data)
    response = runtime_client.invoke_endpoint(EndpointName=endpoint_name,
                                              ContentType=content_type,
                                              Body=body)
    raw = response['Body'].read().decode('ascii')
    # the endpoint returns a comma-separated list of numeric predictions
    return [float(value) for value in raw.split(',')]
def batch_predict(data, batch_size, endpoint_name, content_type):
    """Predict over *data* in chunks of *batch_size*; return the flat list of predictions."""
    predictions = []
    total = len(data)
    for start in range(0, total, batch_size):
        stop = min(start + batch_size, total)
        predictions.extend(do_predict(data[start:stop], endpoint_name, content_type))
        sys.stdout.write('.')  # lightweight progress indicator
    return predictions
```
The following function helps us calculate the error rate on the batch dataset.
```
%%time
import json
file_name = 'mnist.local.test'
with open(file_name, 'r') as f:
    payload = f.read().strip()
# ground-truth label is the first token of each libsvm line
labels = [float(line.split(' ')[0]) for line in payload.split('\n')]
test_data = payload.split('\n')
preds = batch_predict(test_data, 100, endpoint_name, 'text/x-libsvm')
# fraction of records whose predicted label differs from the true label
print ('\nerror rate=%f' % ( sum(1 for i in range(len(preds)) if preds[i]!=labels[i]) /float(len(preds))))
```
Here are a few predictions
```
preds[0:10]
```
and the corresponding labels
```
labels[0:10]
```
The following function helps us create the confusion matrix on the labeled batch test dataset.
```
import numpy
def error_rate(predictions, labels):
    """Return the error rate (percent) and the 10x10 confusion matrix."""
    total = predictions.shape[0]
    correct = numpy.sum(predictions == labels)
    error = 100.0 - (100 * float(correct) / float(total))
    # confusions[p, a] counts records predicted p whose true label is a
    confusions = numpy.zeros([10, 10], numpy.int32)
    for predicted, actual in zip(predictions, labels):
        confusions[int(predicted), int(actual)] += 1
    return error, confusions
```
The following helps us visualize the errors that the XGBoost classifier is making.
```
import matplotlib.pyplot as plt
%matplotlib inline
NUM_LABELS = 10 # change it according to num_class in your dataset
test_error, confusions = error_rate(numpy.asarray(preds), numpy.asarray(labels))
print('Test error: %.1f%%' % test_error)
plt.xlabel('Actual')
plt.ylabel('Predicted')
plt.grid(False)
plt.xticks(numpy.arange(NUM_LABELS))
plt.yticks(numpy.arange(NUM_LABELS))
plt.imshow(confusions, cmap=plt.cm.jet, interpolation='nearest');
# annotate each non-zero cell with its count, offset so the text is centered
for i, cas in enumerate(confusions):
    for j, count in enumerate(cas):
        if count > 0:
            xoff = .07 * len(str(count))
            plt.text(j-xoff, i+.2, int(count), fontsize=9, color='white')
```
### Delete Endpoint
Once you are done using the endpoint, you can use the following to delete it.
```
# tear down the endpoint to stop incurring hosting charges
sm.delete_endpoint(EndpointName=endpoint_name)
```
| github_jupyter |
```
%matplotlib notebook
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
import matplotlib as mpl
rng = np.random.default_rng()
def k_cap(input, cap_size):
    """Return a binary array marking the `cap_size` largest entries of *input*.

    Accepts a 1-D vector or a 2-D batch (applied row-wise). Ties are broken
    by np.argsort order.
    """
    output = np.zeros_like(input)
    if input.ndim == 1:
        winners = np.argsort(input)[-cap_size:]
        output[winners] = 1
    else:
        winners = np.argsort(input, axis=-1)[:, -cap_size:]
        np.put_along_axis(output, winners, 1, axis=-1)
    return output
n_in = 1000       # input dimensionality
n_neurons = 1000  # neurons in the recurrent area
cap_size = 100    # k: number of winners kept by each k-cap
sparsity = 0.1    # connection probability
n_rounds = 5      # rounds of plasticity / recurrent settling
beta = 1e0        # plasticity rate
# sparse random recurrent connectivity with self-loops removed
mask = (rng.random((n_neurons, n_neurons)) < sparsity) & np.logical_not(np.eye(n_neurons, dtype=bool))
W = np.ones((n_neurons, n_neurons)) * mask
W /= W.sum(axis=0)  # column-normalize incoming recurrent weights
mask_a = rng.random((n_in, n_neurons)) < sparsity
A = np.ones((n_in, n_neurons)) * mask_a
A /= A.sum(axis=0)  # column-normalize incoming feed-forward weights
n_samples = n_neurons
# reference direction concentrated on the first cap_size input coordinates
halfspace = np.zeros(n_in)
halfspace[:cap_size] = 1 / np.sqrt(cap_size)
mean = halfspace.sum() * cap_size / n_in
margin = 3
pos = np.zeros((n_samples, n_in))
neg = np.zeros((n_samples, n_in))
# random background activity outside the first n_in/8 coordinates
pos[:, n_in // 8 :] = rng.random((n_samples, n_in - n_in // 8)) < cap_size / n_in
neg[:, n_in // 8 :] = rng.random((n_samples, n_in - n_in // 8)) < cap_size / n_in
# number of active signal-block coordinates per class, separated by the margin
n_on_p = int(np.ceil((mean + margin) * np.sqrt(n_in / 8)))
n_on_n = int(np.floor((mean - margin) * np.sqrt(n_in / 8)))
for i in range(n_samples):
    pos[i, rng.choice(n_in // 8, size=n_on_p, replace=False)] = 1.
    if n_on_n > 0:
        neg[i, rng.choice(n_in // 8, size=n_on_n, replace=False)] = 1.
# reset the weights before training
W = np.ones_like(W) * mask
A = np.ones_like(A) * mask_a
W /= W.sum(axis=0)
A /= A.sum(axis=0)
activations = np.zeros((n_rounds, n_neurons))
act_h = np.zeros(n_neurons)
# plasticity on positive examples only: co-active (pre, post) pairs are scaled up by (1 + beta)
for j in range(n_rounds):
    input = pos[j]
    act_h_new = k_cap(act_h @ W + input @ A, cap_size)
    activations[j] = act_h_new.copy()
    A[(input > 0)[:, np.newaxis] & (act_h_new > 0)[np.newaxis, :]] *= 1 + beta
    W[(act_h > 0)[:, np.newaxis] & (act_h_new > 0)[np.newaxis, :]] *= 1 + beta
    act_h = act_h_new
A /= A.sum(axis=0)
W /= W.sum(axis=0)
# evaluate: run both classes through the trained network for n_rounds steps
outputs = np.zeros((2, n_rounds+1, n_samples, n_neurons))
for i in range(n_rounds):
    outputs[0, i+1] = k_cap(outputs[0, i] @ W + pos @ A, cap_size)
    outputs[1, i+1] = k_cap(outputs[1, i] @ W + neg @ A, cap_size)
# neuron ordering by how often each fires for the positive class (used for display)
idx = outputs[0, -1].sum(axis=0).argsort()
fig, axes = plt.subplots(2, n_rounds, figsize=(10, 2 * 2), sharex=True, sharey=True)
for ax, output in zip(axes, outputs):
    for i in range(n_rounds):
        ax[i].imshow((output[i+1] > 0)[:, idx])
fig.text(0.5, 0.04, 'Neurons', ha='center', va='center')
fig.text(0.04, 0.5, 'Samples', ha='center', va='center', rotation='vertical')
# mean overlap between consecutive samples, within and across classes, at the input...
inp_overlap_mat = np.zeros((2, 2))
inp_overlap_mat[0, 0] = np.mean(np.sum(pos[:-1] * pos[1:], axis=-1))
inp_overlap_mat[0, 1] = np.mean(np.sum(pos[:-1] * neg[1:], axis=-1))
inp_overlap_mat[1, 0] = np.mean(np.sum(pos[:-1] * neg[1:], axis=-1))
inp_overlap_mat[1, 1] = np.mean(np.sum(neg[:-1] * neg[1:], axis=-1))
# ...and at the final network activations
assm_overlap_mat = np.zeros((2, 2))
assm_overlap_mat[0, 0] = np.mean(np.sum(outputs[0, -1][:-1] * outputs[0, -1][1:], axis=-1))
assm_overlap_mat[0, 1] = np.mean(np.sum(outputs[0, -1][:-1] * outputs[1, -1][1:], axis=-1))
assm_overlap_mat[1, 0] = np.mean(np.sum(outputs[1, -1][:-1] * outputs[0, -1][1:], axis=-1))
assm_overlap_mat[1, 1] = np.mean(np.sum(outputs[1, -1][:-1] * outputs[1, -1][1:], axis=-1))
from matplotlib.gridspec import GridSpec
fig = plt.figure(figsize=(8, 3))
gs = GridSpec(2, 3, figure=fig)
ax0 = plt.subplot(gs[0, :2])
ax0.set_xticks([])
ax0.set_yticks([])
axes = [ax0] + [plt.subplot(gs[i+1, :2], sharex=ax0, sharey=ax0) for i in range(1)]
colors = ['#72c8c8', '#ffa994']
# per-neuron mean final activation for each class, in the display order idx
for i in range(2):
    axes[i].bar(np.arange(n_neurons), outputs[i,-1].mean(axis=0)[idx], color=colors[i])
    for side in ['top', 'right', 'left']:
        axes[i].spines[side].set_visible(False)
ax0 = plt.subplot(gs[0, -1])
ax0.set_xticks([])
ax0.set_yticks([])
# input-overlap heatmap with per-cell annotations
ax0.imshow(inp_overlap_mat, vmin=0, vmax=100, cmap='Reds')
for i in range(2):
    for j in range(2):
        ax0.text(j, i, np.round(inp_overlap_mat[i, j], 2), ha='center', va='center', c='k' if i == j else 'k')
ax = plt.subplot(gs[1, -1], sharex=ax0, sharey=ax0)
# activation-overlap heatmap
ax.imshow(assm_overlap_mat, vmin=0, vmax=100, cmap='Blues')
for i in range(2):
    for j in range(2):
        ax.text(j, i, np.round(assm_overlap_mat[i, j], 2), ha='center', va='center', c='w' if i == 0 and j == 0 else 'k')
fig.tight_layout()
# mean activation after a single round, plotted on a reversed x axis
fig, axes = plt.subplots(2, figsize=(9, 4), sharex=True, sharey=True)
axes[0].bar(np.arange(999, -1, -1), np.mean(outputs[0, 1], axis=0)[idx], color='tab:blue')
axes[1].bar(np.arange(999, -1, -1), np.mean(outputs[1, 1], axis=0)[idx], color='tab:orange')
for ax in axes:
    ax.set_xticks([])
    for spine in ['right', 'top']:
        ax.spines[spine].set_visible(False)
fig.tight_layout()
n_in = 1000
n_neurons = 1000
cap_size = 100
sparsity = 0.1
n_rounds = 5
beta = 1e0
# fresh sparse connectivity for the margin-sweep experiment
mask = (rng.random((n_neurons, n_neurons)) < sparsity) & np.logical_not(np.eye(n_neurons, dtype=bool))
W = (np.ones((n_neurons, n_neurons))) * mask
W /= W.sum(axis=0)
mask_a = (rng.random((n_in, n_neurons)) < sparsity)
A = np.ones((n_in, n_neurons)) * mask_a
A /= A.sum(axis=0)
mean = (cap_size / n_in) * np.sqrt(cap_size)
# sweep the class-separation margin from 0 up to its maximum feasible value
margins = np.linspace(0, np.sqrt(cap_size) - mean, 20)
n_trials = 20
accs = np.zeros((20, n_trials))
for k, margin in enumerate(margins):
    for l in range(n_trials):
        # regenerate samples for this (margin, trial) pair: random background
        # outside the first cap_size coordinates, margin-separated signal inside
        pos = np.zeros((n_samples, n_in))
        neg = np.zeros((n_samples, n_in))
        pos[:, cap_size :] = rng.random((n_samples, n_in - cap_size)) < cap_size / n_in
        neg[:, cap_size :] = rng.random((n_samples, n_in - cap_size)) < cap_size / n_in
        n_on_p = int(np.ceil((mean + margin) * np.sqrt(cap_size)))
        n_on_n = int(np.floor((mean - margin) * np.sqrt(cap_size)))
        for i in range(n_samples):
            pos[i, rng.choice(cap_size, size=n_on_p, replace=False)] = 1.
            if n_on_n > 0:
                neg[i, rng.choice(cap_size, size=n_on_n, replace=False)] = 1.
        beta = 1.0
        # reset the weights for each trial
        W = np.ones_like(W) * mask
        A = np.ones_like(A) * mask_a
        W /= W.sum(axis=0)
        A /= A.sum(axis=0)
        activations = np.zeros((n_rounds, n_neurons))
        act_h = np.zeros(n_neurons)
        # plasticity on positive examples: scale co-active synapses by (1 + beta)
        for j in range(n_rounds):
            input = pos[j]
            act_h_new = k_cap(act_h @ W + input @ A, cap_size)
            activations[j] = act_h_new.copy()
            A[(input > 0)[:, np.newaxis] & (act_h_new > 0)[np.newaxis, :]] *= 1 + beta
            W[(act_h > 0)[:, np.newaxis] & (act_h_new > 0)[np.newaxis, :]] *= 1 + beta
            act_h = act_h_new
        A /= A.sum(axis=0)
        W /= W.sum(axis=0)
        # single feed-forward pass (no recurrent settling) for classification
        outputs = np.zeros((2, n_samples, n_neurons))
        outputs[0] = k_cap(pos @ A, cap_size)
        outputs[1] = k_cap(neg @ A, cap_size)
        # readout: the cap_size neurons most active on positives; classify by
        # whether a sample activates more than half of them
        c = np.full(n_neurons, 0)
        c[outputs[0].sum(axis=0).argsort()[-cap_size:]] = 1
        counts = np.count_nonzero((outputs @ c) > cap_size / 2, axis=-1)
        accs[k, l] = (counts[0] + 1000 - counts[1]) / 2000
# plot accuracy (with min/max band over trials) against the margin
fig, ax = plt.subplots(figsize=(5,4))
mean = (cap_size / n_in) * np.sqrt(cap_size)
margins = np.linspace(0, np.sqrt(cap_size) - mean, 20)
ax.fill_between(margins, 100 * accs.min(axis=-1), 100 * accs.max(axis=-1), alpha=0.5)
ax.plot(margins, 100 * accs.mean(axis=-1))
ax.set_xlabel(r'Margin ($\Delta$)')
ax.set_ylabel('Accuracy (%)')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
fig.tight_layout()
```
| github_jupyter |
# Comparison between different solutions
In this notebook we'll compare all the solutions that were tested before deciding on the final one.
## Installations and imports
```
!pip install haversine ortools pulp numba
import pandas as pd
from Simulation import Simulation
from GeneticAlgorithmGraph import GeneticAlgorithmGraph as GAGraph
from GoogleRouting import GoogleRouting
from AdjustedKMeans import AdjustedKMeans
```
## Data loading and cleaning
```
# Path to the raw bin-sensor readings exported as CSV.
FILE_PATH = 'dataset.csv'
```
Removal of useless columns and records.
```
# Load the raw sensor readings and normalise the frame: parse timestamps,
# drop metadata columns, decode the occlusion flag, keep only non-anomalous
# records, and index chronologically by detection time.
df = pd.read_csv(FILE_PATH)
df['detected_at'] = pd.to_datetime(df['detected_at'])
USELESS_COLUMNS = ['id','raw_data_id','raw_data_setting_id','Seriale','created_at','DataUltimaRilevazione','DataUltimaTrasmissione','DataPrimaInstallazione','Indirizzo','Cap','UnitaTerritoriale','Viario','Tronco', 'Esterno', 'AreaGestionale']
df.drop(USELESS_COLUMNS, axis=1, inplace=True)
# BUG FIX: the original `df.occluded.replace({...}, inplace=True)` mutates a
# Series obtained by attribute access (chained assignment) — the write-back to
# the frame is not guaranteed (SettingWithCopyWarning; a no-op under pandas
# copy-on-write). Assign the column explicitly instead.
df['occluded'] = df['occluded'].replace({1: False, 2: True})
df.fillna(value=False, inplace=True)
# Keep only records with no anomaly flag (NaNs were filled with False above).
df = df[df.TipoAnomalia == False]
df.drop('TipoAnomalia', axis=1, inplace=True)
df.rename(columns={'Latitudine': 'latitude', 'Longitudine':'longitude'}, inplace=True)
df.set_index('detected_at', inplace=True, drop=True)
df.sort_index(inplace=True)
```
Remaining columns:
```
# Show the columns that survived the cleaning step (skipping the first one).
print(df.columns.values[1:])
```
## Functions
The *slender distance* combines the angular and the radial (haversine) separation of two points relative to a reference center (the depot), weighted by `alpha_1` and `alpha_2`:
```
from haversine import haversine
import math
def slender_distance(p1, p2, center, alpha_1=1, alpha_2=0):
    """Weighted combination of angular and radial distance between two points.

    The angular term is the angle (in radians) subtended by ``p1`` and ``p2``
    at ``center``; the radial term is the haversine distance between them.
    ``alpha_1`` and ``alpha_2`` weight the two components.
    """
    angular_term = math.radians(get_angle(p1, p2, center))
    radial_term = haversine(p1, p2)
    return alpha_1 * angular_term + alpha_2 * radial_term
def get_angle(a, b, origin):
    """Return the absolute angle in degrees, in [0, 180], between the rays
    origin->a and origin->b."""
    theta_b = math.atan2(b[1] - origin[1], b[0] - origin[0])
    theta_a = math.atan2(a[1] - origin[1], a[0] - origin[0])
    spread = abs(math.degrees(theta_b - theta_a))
    # Fold reflex angles back into [0, 180].
    return spread if spread < 180 else 360 - spread
```
## Test
### Test configuration
```
def filter_function(data, level=3):
    """Keep the most recent reading per bin, restricted to bins that need
    service: fill level strictly above ``level``, or marked occluded."""
    latest = data.drop_duplicates(subset='bin_serial', keep='last')
    needs_service = (latest.bin_level > level) | latest.occluded
    return latest[needs_service]
import warnings
warnings.filterwarnings('ignore')  # silence pandas/solver warnings during the long runs
# One month of readings (September 2019) is used for every comparison below.
start_date = '2019-09-01 00:00:00'
end_date = '2019-10-01 00:00:00'
data = df[start_date : end_date]
# Depot coordinates as (lat, lon) — presumably the fleet depot in Milan; verify.
depot = (45.5069182, 9.2684501)
# NOTE(review): these two globals appear to be shadowed by the per-test
# kwargs dicts below and never read — confirm before relying on them.
vehicle_capacities = 200
num_vehicles = 20
def run_simulation(config, name):
    """Run one full simulation over the test month with the given solver
    configuration, write the routes to ``Output\\<name>.csv`` and print
    summary statistics (shifts, total distance, bins emptied, vehicles used).

    Relies on the module-level ``depot``, ``data``, ``start_date``,
    ``end_date`` and ``filter_function``.
    """
    sim = Simulation(depot, config, window_size=6, max_size=200,
                     filter_function=filter_function, filter_kwargs={})
    routes = sim.compute_simulation(data,
                                    pd.to_datetime(start_date),
                                    pd.to_datetime(end_date),
                                    speed=30, emp_time=60, debug=False)
    sim.to_csv(f'Output\\{name}.csv')
    score = sim.get_score()
    print(f'# {name} #')
    print(f'Numero di turni eseguiti: {str(len(routes))}.')
    print(f'Distanza totale: {str(score)} km.')
    # routes is a list of shifts, each a list of per-vehicle bin sequences.
    total_bins = sum(len(vehicle_route) for shift in routes for vehicle_route in shift)
    print(f'Numero di cestini svuotati: {str(total_bins)}.')
    total_vehs = sum(len(shift) for shift in routes)
    print(f'Numero di veicoli usati: {str(total_vehs)}.')
```
### Baseline: Standard k-Means + Christofides
```
# Baseline: unbalanced k-means clustering using radial distance only
# (alpha_1=0, alpha_2=1), then one OR-Tools vehicle routed per cluster.
kmeans_kwargs = {
    'max_size' : 200,   # maximum bins assigned to a cluster
    'balanced': False,  # plain k-means: no cluster-size balancing
    # Radial-only slender distance relative to the depot.
    'distance': lambda p1, p2 : slender_distance(p1, p2, depot, 0, 1),
}
routing_kwargs = {
    'distance_function': haversine,
    'vehicle_capacities': 200,
    'num_vehicles': 1,  # a single vehicle per cluster
}
baseline_config = {
    'cluster_class': AdjustedKMeans,
    'cluster_kwargs': kmeans_kwargs,
    'graph_class': GoogleRouting,
    'graph_kwargs': routing_kwargs,
}
run_simulation(baseline_config, 'Baseline')
```
### Google Routing
```
# Pure Google OR-Tools routing over the whole problem — no clustering step;
# 20 vehicles of capacity 200 share one global routing plan.
GR_kwargs = {
    'distance_function': haversine,
    'vehicle_capacities': 200,
    'num_vehicles': 20,
}
GR_config = {
    'cluster_class': None,  # skip clustering entirely
    'cluster_kwargs': {},
    'graph_class': GoogleRouting,
    'graph_kwargs': GR_kwargs,
}
run_simulation(GR_config, 'GoogleRouting')
```
### SC2G - Shape-Controlled Clustering + Genetic algorithm
```
# SC2G: balanced shape-controlled clustering (angular-only distance) with a
# genetic algorithm solving the per-cluster routing.
clustering_kwargs = {
    'max_size' : 200,
    'balanced': True,  # enforce similar cluster sizes
    # Angular-only slender distance (alpha_1=1, alpha_2=0) w.r.t. the depot.
    'distance': lambda p1, p2 : slender_distance(p1, p2, depot, 1, 0),
}
# GA settings
pop_size = 500                    # individuals per generation
elite_size = int(0.05*pop_size)   # top 5% carried over unchanged
mutation_rate = 0.1
generations = 200
GA_kwargs = {
    'pop_size': pop_size,
    'elite_size': elite_size,
    'mutation_rate': mutation_rate,
    'generations': generations,
    'metric': 'km',
}
SC2G_config = {
    'cluster_class': AdjustedKMeans,
    'cluster_kwargs': clustering_kwargs,
    'graph_class': GAGraph,
    'graph_kwargs': GA_kwargs,
}
run_simulation(SC2G_config, 'SC2G')
```
### SC3 balanced
```
# SC3 balanced: balanced shape-controlled clustering (angular-only distance)
# with one OR-Tools vehicle routed per cluster.
clustering_balanced = {
    'max_size' : 200,
    'balanced': True,  # enforce similar cluster sizes
    # Angular-only slender distance (alpha_1=1, alpha_2=0) w.r.t. the depot.
    'distance': lambda p1, p2 : slender_distance(p1, p2, depot, 1, 0),
}
routing_kwargs = {
    'distance_function': haversine,
    'vehicle_capacities': 200,
    'num_vehicles': 1,  # a single vehicle per cluster
}
SC3_balanced_config = {
    'cluster_class': AdjustedKMeans,
    # BUG FIX: this previously referenced `clustering_kwargs` (the SC2G dict
    # from an earlier cell), leaving `clustering_balanced` defined but unused.
    'cluster_kwargs': clustering_balanced,
    'graph_class': GoogleRouting,
    'graph_kwargs': routing_kwargs,
}
run_simulation(SC3_balanced_config, 'SC3Balanced')
```
| github_jupyter |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.